net: Fix build failure with 'make mandocs'.
The function header comments have to go with the functions
they are documenting, or things go horribly wrong when we
try to process them with the docbook tools.

Warning(include/linux/netdevice.h:1006): No description found for parameter 'dev_queue'
Warning(include/linux/netdevice.h:1033): No description found for parameter 'dev_queue'
Warning(include/linux/netdevice.h:1067): No description found for parameter 'dev_queue'
Warning(include/linux/netdevice.h:1093): No description found for parameter 'dev_queue'
Warning(include/linux/netdevice.h:1474): No description found for parameter 'txq'
Error(net/core/dev.c:1674): cannot understand prototype: 'u32 simple_tx_hashrnd; '

Signed-off-by: Dave Jones <davej@redhat.com>
Acked-by: Randy Dunlap <randy.dunlap@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Dave Jones authored and David S. Miller committed Jul 22, 2008
1 parent b32d131 commit d29f749
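
For context on why the hunks below only move text: kernel-doc associates a comment block with whichever prototype immediately follows it, so a block that sits above the wrong function is parsed against the wrong parameter list. A minimal illustrative sketch of the required layout, assuming the rule described in the commit message (the function name netif_example_start is hypothetical and not part of this patch):

/**
 * netif_example_start - allow transmit (illustrative only, not from this patch)
 * @dev: network device
 *
 * The kernel-doc block must sit directly above the prototype it documents,
 * and its @-lines must name that prototype's parameters; otherwise the
 * docbook tools emit warnings like the ones quoted above.
 */
static inline void netif_example_start(struct net_device *dev)
{
        netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

The dev.c error arises the same way: the dev_queue_xmit block sat directly above the variable declaration simple_tx_hashrnd, which the tools then tried to parse as the documented prototype.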
Showing 2 changed files with 54 additions and 55 deletions.
58 changes: 29 additions & 29 deletions include/linux/netdevice.h
@@ -996,17 +996,17 @@ static inline void netif_tx_schedule_all(struct net_device *dev)
                 netif_schedule_queue(netdev_get_tx_queue(dev, i));
 }
 
+static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
+{
+        clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 /**
  * netif_start_queue - allow transmit
  * @dev: network device
  *
  * Allow upper layers to call the device hard_start_xmit routine.
  */
-static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
-{
-        clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
-}
-
 static inline void netif_start_queue(struct net_device *dev)
 {
         netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
@@ -1022,13 +1022,6 @@ static inline void netif_tx_start_all_queues(struct net_device *dev)
         }
 }
 
-/**
- * netif_wake_queue - restart transmit
- * @dev: network device
- *
- * Allow upper layers to call the device hard_start_xmit routine.
- * Used for flow control when transmit resources are available.
- */
 static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 {
 #ifdef CONFIG_NETPOLL_TRAP
@@ -1041,6 +1034,13 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
         __netif_schedule(dev_queue->qdisc);
 }
 
+/**
+ * netif_wake_queue - restart transmit
+ * @dev: network device
+ *
+ * Allow upper layers to call the device hard_start_xmit routine.
+ * Used for flow control when transmit resources are available.
+ */
 static inline void netif_wake_queue(struct net_device *dev)
 {
         netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
@@ -1056,18 +1056,18 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
         }
 }
 
+static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
+{
+        set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 /**
  * netif_stop_queue - stop transmitted packets
  * @dev: network device
  *
  * Stop upper layers calling the device hard_start_xmit routine.
  * Used for flow control when transmit resources are unavailable.
  */
-static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
-{
-        set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
-}
-
 static inline void netif_stop_queue(struct net_device *dev)
 {
         netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
@@ -1083,17 +1083,17 @@ static inline void netif_tx_stop_all_queues(struct net_device *dev)
         }
 }
 
+static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+{
+        return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 /**
  * netif_queue_stopped - test if transmit queue is flowblocked
  * @dev: network device
  *
  * Test if transmit queue on device is currently unable to send.
  */
-static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
-{
-        return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
-}
-
 static inline int netif_queue_stopped(const struct net_device *dev)
 {
         return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
@@ -1463,13 +1463,6 @@ static inline void netif_rx_complete(struct net_device *dev,
         local_irq_restore(flags);
 }
 
-/**
- * netif_tx_lock - grab network device transmit lock
- * @dev: network device
- * @cpu: cpu number of lock owner
- *
- * Get network device transmit lock
- */
 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
         spin_lock(&txq->_xmit_lock);
@@ -1482,6 +1475,13 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
         txq->xmit_lock_owner = smp_processor_id();
 }
 
+/**
+ * netif_tx_lock - grab network device transmit lock
+ * @dev: network device
+ * @cpu: cpu number of lock owner
+ *
+ * Get network device transmit lock
+ */
 static inline void netif_tx_lock(struct net_device *dev)
 {
         int cpu = smp_processor_id();
51 changes: 25 additions & 26 deletions net/core/dev.c
@@ -1645,32 +1645,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
         return 0;
 }
 
-/**
- * dev_queue_xmit - transmit a buffer
- * @skb: buffer to transmit
- *
- * Queue a buffer for transmission to a network device. The caller must
- * have set the device and priority and built the buffer before calling
- * this function. The function can be called from an interrupt.
- *
- * A negative errno code is returned on a failure. A success does not
- * guarantee the frame will be transmitted as it may be dropped due
- * to congestion or traffic shaping.
- *
- * -----------------------------------------------------------------------------------
- * I notice this method can also return errors from the queue disciplines,
- * including NET_XMIT_DROP, which is a positive value. So, errors can also
- * be positive.
- *
- * Regardless of the return value, the skb is consumed, so it is currently
- * difficult to retry a send to this method. (You can bump the ref count
- * before sending to hold a reference for retry if you are careful.)
- *
- * When calling this method, interrupts MUST be enabled. This is because
- * the BH enable code must have IRQs enabled so that it will not deadlock.
- * --BLG
- */
-
 static u32 simple_tx_hashrnd;
 static int simple_tx_hashrnd_initialized = 0;
 
@@ -1738,6 +1712,31 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
         return netdev_get_tx_queue(dev, queue_index);
 }
 
+/**
+ * dev_queue_xmit - transmit a buffer
+ * @skb: buffer to transmit
+ *
+ * Queue a buffer for transmission to a network device. The caller must
+ * have set the device and priority and built the buffer before calling
+ * this function. The function can be called from an interrupt.
+ *
+ * A negative errno code is returned on a failure. A success does not
+ * guarantee the frame will be transmitted as it may be dropped due
+ * to congestion or traffic shaping.
+ *
+ * -----------------------------------------------------------------------------------
+ * I notice this method can also return errors from the queue disciplines,
+ * including NET_XMIT_DROP, which is a positive value. So, errors can also
+ * be positive.
+ *
+ * Regardless of the return value, the skb is consumed, so it is currently
+ * difficult to retry a send to this method. (You can bump the ref count
+ * before sending to hold a reference for retry if you are careful.)
+ *
+ * When calling this method, interrupts MUST be enabled. This is because
+ * the BH enable code must have IRQs enabled so that it will not deadlock.
+ * --BLG
+ */
 int dev_queue_xmit(struct sk_buff *skb)
 {
         struct net_device *dev = skb->dev;
