Skip to content

Commit

Permalink
sfc: move event queue management code
Browse files Browse the repository at this point in the history
Signed-off-by: Alexandru-Mihai Maftei <amaftei@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
  • Loading branch information
Alex Maftei (amaftei) authored and David S. Miller committed Jan 8, 2020
1 parent 37c45a4 commit 5f99925
Show file tree
Hide file tree
Showing 2 changed files with 91 additions and 92 deletions.
92 changes: 0 additions & 92 deletions drivers/net/ethernet/sfc/efx.c
Original file line number Diff line number Diff line change
Expand Up @@ -128,98 +128,6 @@ static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
ASSERT_RTNL(); \
} while (0)

/**************************************************************************
*
* Event queue processing
*
*************************************************************************/

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 *
 * Returns 0 on success or a negative error code from the NIC-type
 * probe hook.
 */
int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions. */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	/* eventq_mask relies on the queue size being a power of two,
	 * clamped to at least EFX_MIN_EVQ_SIZE entries.
	 */
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}

/* Prepare channel's event queue
 *
 * Programs the previously-probed event queue into the hardware and
 * resets the software read pointer.  Must not be called twice without
 * an intervening efx_fini_eventq() (checked by the paranoid warning).
 *
 * Returns 0 on success or a negative error code from the NIC-type
 * init hook; on failure no channel state is modified.
 */
int efx_init_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	int rc;

	EFX_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = efx_nic_init_eventq(channel);
	if (rc == 0) {
		/* Apply the current interrupt moderation settings before
		 * marking the queue usable.
		 */
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}
	return rc;
}

/* Enable event queue processing and NAPI */
void efx_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set
	 * before it can be scheduled: the write barrier orders the
	 * flag update ahead of napi_enable() below.
	 */
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	/* Ack the queue so the NIC (re)arms the event interrupt */
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI
 *
 * Safe to call on a channel that was never started (no-op in that
 * case); napi_disable() waits for any in-flight poll to finish.
 */
void efx_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

/* Tear down the hardware side of the event queue.
 *
 * Inverse of efx_init_eventq(); a no-op if the queue was never
 * initialised (or already finalised), making it safe on error paths.
 */
void efx_fini_eventq(struct efx_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

/* Release the event queue resources allocated by efx_probe_eventq().
 * The queue must already have been finalised with efx_fini_eventq().
 */
void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

/**************************************************************************
*
* Channel handling
Expand Down
91 changes: 91 additions & 0 deletions drivers/net/ethernet/sfc/efx_channels.c
Original file line number Diff line number Diff line change
Expand Up @@ -388,6 +388,97 @@ void efx_remove_interrupts(struct efx_nic *efx)
efx->legacy_irq = 0;
}

/***************
* EVENT QUEUES
***************/

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	/* Room for one event per tx and rx buffer, plus some extra for
	 * link state events and MCDI completions, rounded up to a power
	 * of two so the ring mask below is valid.
	 */
	unsigned long nentries =
		roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	EFX_WARN_ON_PARANOID(nentries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(nentries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}

/* Prepare channel's event queue
 *
 * Program the probed queue into the hardware, apply the current
 * interrupt moderation, and reset the software read pointer.
 * Returns 0 on success or a negative error code, in which case
 * no channel state is modified.
 */
int efx_init_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	int rc;

	EFX_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = efx_nic_init_eventq(channel);
	if (rc)
		return rc;

	efx->type->push_irq_moderation(channel);
	channel->eventq_read_ptr = 0;
	channel->eventq_init = true;

	return 0;
}

/* Enable event queue processing and NAPI */
void efx_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set:
	 * the write barrier orders the flag store before
	 * napi_enable() can allow the poll routine to run.
	 */
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	/* Ack the queue so the NIC (re)arms the event interrupt */
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI
 *
 * No-op if the channel was never started; napi_disable() waits for
 * any in-flight poll to complete before we clear the flag.
 */
void efx_stop_eventq(struct efx_channel *channel)
{
	if (channel->enabled) {
		napi_disable(&channel->napi_str);
		channel->enabled = false;
	}
}

/* Tear down the hardware side of the event queue.
 *
 * Inverse of efx_init_eventq(); does nothing if the queue was never
 * initialised, which makes it safe on error/teardown paths.
 */
void efx_fini_eventq(struct efx_channel *channel)
{
	if (channel->eventq_init) {
		netif_dbg(channel->efx, drv, channel->efx->net_dev,
			  "chan %d fini event queue\n", channel->channel);

		efx_nic_fini_eventq(channel);
		channel->eventq_init = false;
	}
}

/* Release the event queue resources allocated by efx_probe_eventq().
 * The queue should already have been finalised with efx_fini_eventq().
 */
void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

/**************************************************************************
*
* Channel handling
Expand Down

0 comments on commit 5f99925

Please sign in to comment.