octeon_ep: Implement helper for iterating packets in Rx queue
The common packet and index manipulation code is extracted and moved into
a newly implemented helper to make the code more readable and avoid
duplication. This is a preparation for skb allocation failure handling.

Found by Linux Verification Center (linuxtesting.org) with SVACE.

Suggested-by: Simon Horman <horms@kernel.org>
Suggested-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Aleksandr Mishin <amishin@t-argos.ru>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Andrew Lunn <andrew@lunn.ch>
Aleksandr Mishin authored and Andrew Lunn committed Oct 19, 2024
1 parent 4ab3e49 commit bd28df2
Showing 1 changed file with 32 additions and 23 deletions.
drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -336,6 +336,30 @@ static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
         return new_pkts;
 }
 
+/**
+ * octep_oq_next_pkt() - Move to the next packet in Rx queue.
+ *
+ * @oq: Octeon Rx queue data structure.
+ * @buff_info: Current packet buffer info.
+ * @read_idx: Current packet index in the ring.
+ * @desc_used: Current packet descriptor number.
+ *
+ * Free the resources associated with a packet.
+ * Increment packet index in the ring and packet descriptor number.
+ */
+static void octep_oq_next_pkt(struct octep_oq *oq,
+                              struct octep_rx_buffer *buff_info,
+                              u32 *read_idx, u32 *desc_used)
+{
+        dma_unmap_page(oq->dev, oq->desc_ring[*read_idx].buffer_ptr,
+                       PAGE_SIZE, DMA_FROM_DEVICE);
+        buff_info->page = NULL;
+        (*read_idx)++;
+        (*desc_used)++;
+        if (*read_idx == oq->max_count)
+                *read_idx = 0;
+}
+
 /**
  * __octep_oq_process_rx() - Process hardware Rx queue and push to stack.
  *
@@ -367,10 +391,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
         desc_used = 0;
         for (pkt = 0; pkt < pkts_to_process; pkt++) {
                 buff_info = (struct octep_rx_buffer *)&oq->buff_info[read_idx];
-                dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
-                               PAGE_SIZE, DMA_FROM_DEVICE);
                 resp_hw = page_address(buff_info->page);
-                buff_info->page = NULL;
 
                 /* Swap the length field that is in Big-Endian to CPU */
                 buff_info->len = be64_to_cpu(resp_hw->length);
@@ -394,36 +415,27 @@ static int __octep_oq_process_rx(struct octep_device *oct,
                         data_offset = OCTEP_OQ_RESP_HW_SIZE;
                         rx_ol_flags = 0;
                 }
+
+                octep_oq_next_pkt(oq, buff_info, &read_idx, &desc_used);
+
+                skb = build_skb((void *)resp_hw, PAGE_SIZE);
+                skb_reserve(skb, data_offset);
+
                 rx_bytes += buff_info->len;
 
                 if (buff_info->len <= oq->max_single_buffer_size) {
-                        skb = build_skb((void *)resp_hw, PAGE_SIZE);
-                        skb_reserve(skb, data_offset);
                         skb_put(skb, buff_info->len);
-                        read_idx++;
-                        desc_used++;
-                        if (read_idx == oq->max_count)
-                                read_idx = 0;
                 } else {
                         struct skb_shared_info *shinfo;
                         u16 data_len;
 
-                        skb = build_skb((void *)resp_hw, PAGE_SIZE);
-                        skb_reserve(skb, data_offset);
                         /* Head fragment includes response header(s);
                          * subsequent fragments contains only data.
                          */
                         skb_put(skb, oq->max_single_buffer_size);
-                        read_idx++;
-                        desc_used++;
-                        if (read_idx == oq->max_count)
-                                read_idx = 0;
 
                         shinfo = skb_shinfo(skb);
                         data_len = buff_info->len - oq->max_single_buffer_size;
                         while (data_len) {
-                                dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
-                                               PAGE_SIZE, DMA_FROM_DEVICE);
                                 buff_info = (struct octep_rx_buffer *)
                                             &oq->buff_info[read_idx];
                                 if (data_len < oq->buffer_size) {
@@ -438,11 +450,8 @@ static int __octep_oq_process_rx(struct octep_device *oct,
                                                 buff_info->page, 0,
                                                 buff_info->len,
                                                 buff_info->len);
-                                buff_info->page = NULL;
-                                read_idx++;
-                                desc_used++;
-                                if (read_idx == oq->max_count)
-                                        read_idx = 0;
+
+                                octep_oq_next_pkt(oq, buff_info, &read_idx, &desc_used);
                         }
                 }
 
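For reference, the pattern the new helper centralizes can be sketched outside the driver. The snippet below is a hypothetical, self-contained illustration, not part of the patch: demo_rx_queue, demo_rx_buffer and demo_oq_next_pkt are simplified stand-ins for the driver's structures, and the real octep_oq_next_pkt() additionally unmaps the descriptor's DMA page with dma_unmap_page(), as shown in the first hunk above.

#include <stdio.h>

struct demo_rx_buffer {
        void *page;                     /* stand-in for the driver's struct page * */
};

struct demo_rx_queue {
        unsigned int max_count;         /* number of descriptors in the ring */
        struct demo_rx_buffer buff_info[8];
};

/* Simplified stand-in for octep_oq_next_pkt(): release the current buffer,
 * then advance the ring index (with wrap-around) and the used-descriptor
 * count through the caller's variables.
 */
static void demo_oq_next_pkt(struct demo_rx_queue *oq,
                             struct demo_rx_buffer *buff_info,
                             unsigned int *read_idx, unsigned int *desc_used)
{
        buff_info->page = NULL;         /* buffer now belongs to the skb (or is freed) */
        (*read_idx)++;                  /* move to the next ring entry ... */
        (*desc_used)++;                 /* ... and count the consumed descriptor */
        if (*read_idx == oq->max_count)
                *read_idx = 0;          /* wrap around at the end of the ring */
}

int main(void)
{
        struct demo_rx_queue oq = { .max_count = 8 };
        unsigned int read_idx = 6, desc_used = 0;
        int pkt;

        /* Both the single-buffer and the multi-fragment paths call the same
         * helper, so the wrap-around logic lives in exactly one place.
         */
        for (pkt = 0; pkt < 4; pkt++)
                demo_oq_next_pkt(&oq, &oq.buff_info[read_idx],
                                 &read_idx, &desc_used);

        printf("read_idx=%u desc_used=%u\n", read_idx, desc_used);     /* prints 2 and 4 */
        return 0;
}

Passing read_idx and desc_used by pointer keeps one set of counters shared by both receive paths, which is what the commit message refers to as preparation for skb allocation failure handling.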
