xen-netback: pass hash value to the frontend
My recent patch to include/xen/interface/io/netif.h defines a new extra
info type that can be used to pass hash values between backend and guest
frontend.

This patch adds code to xen-netback to pass hash values calculated for
guest receive-side packets (i.e. netback transmit side) to the frontend.
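
For reference, the hash member of the extra-info union introduced by
that netif.h patch carries the algorithm, the hash type and the raw
32-bit value. Its layout is roughly the following (paraphrased from the
interface header; it is not part of this diff):

	struct xen_netif_extra_info {
		uint8_t type;	/* XEN_NETIF_EXTRA_TYPE_* */
		uint8_t flags;	/* XEN_NETIF_EXTRA_FLAG_* */
		union {
			/* ... gso, mcast ... */
			struct {
				uint8_t algorithm; /* XEN_NETIF_CTRL_HASH_ALGORITHM_* */
				uint8_t type;	   /* _XEN_NETIF_CTRL_HASH_TYPE_* */
				uint8_t value[4];  /* the 32-bit hash value */
			} hash;
		} u;
	};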

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Paul Durrant authored and David S. Miller committed May 16, 2016
1 parent 40d8abd commit f07f989
Showing 2 changed files with 77 additions and 14 deletions.
drivers/net/xen-netback/interface.c (11 additions & 2 deletions)
@@ -158,8 +158,17 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
 	struct xenvif *vif = netdev_priv(dev);
 	unsigned int size = vif->hash.size;
 
-	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
-		return fallback(dev, skb) % dev->real_num_tx_queues;
+	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) {
+		u16 index = fallback(dev, skb) % dev->real_num_tx_queues;
+
+		/* Make sure there is no hash information in the socket
+		 * buffer otherwise it would be incorrectly forwarded
+		 * to the frontend.
+		 */
+		skb_clear_hash(skb);
+
+		return index;
+	}
 
 	xenvif_set_skb_hash(vif, skb);
 
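The net effect on this path is an invariant: any skb that reaches the
guest receive-side code below either carries a hash freshly computed
against the frontend's configuration (with skb->sw_hash set) or no hash
at all. A rough sketch of the two branches, assuming the behaviour of
the xenvif_set_skb_hash() helper added earlier in this series:

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) {
		/* No hashing configured: a hash left behind in the skb
		 * (e.g. one computed by the physical NIC) means nothing
		 * to the frontend, so drop it. skb_clear_hash() zeroes
		 * skb->hash and clears skb->sw_hash and skb->l4_hash.
		 */
		skb_clear_hash(skb);
	} else {
		/* Hashing configured: recompute or clear the hash
		 * according to the frontend's settings; a hash that
		 * survives is marked as a s/w hash, which is the bit
		 * the rx path below keys off.
		 */
		xenvif_set_skb_hash(vif, skb);
	}
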
drivers/net/xen-netback/netback.c (66 additions & 12 deletions)
Expand Up @@ -168,6 +168,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
 	needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
 	if (skb_is_gso(skb))
 		needed++;
+	if (skb->sw_hash)
+		needed++;
 
 	do {
 		prod = queue->rx.sring->req_prod;
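
As a worked example of this accounting: with 4 KiB pages, a 64 KiB GSO
skb that also carries a s/w hash now needs DIV_ROUND_UP(65536, 4096) +
1 + 1 = 18 free slots, 16 for the data plus one each for the GSO and
hash extra-info segments, since every extra-info segment occupies a
ring slot of its own. The matching request-consumer gaps are left in
xenvif_setup_copy_gop() below.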
@@ -285,6 +287,8 @@ struct gop_frag_copy {
 	struct xenvif_rx_meta *meta;
 	int head;
 	int gso_type;
+	int protocol;
+	int hash_present;
 
 	struct page *page;
 };
@@ -331,8 +335,15 @@ static void xenvif_setup_copy_gop(unsigned long gfn,
 	npo->copy_off += *len;
 	info->meta->size += *len;
 
+	if (!info->head)
+		return;
+
 	/* Leave a gap for the GSO descriptor. */
-	if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask))
+	if ((1 << info->gso_type) & queue->vif->gso_mask)
 		queue->rx.req_cons++;
 
+	/* Leave a gap for the hash extra segment. */
+	if (info->hash_present)
+		queue->rx.req_cons++;
+
 	info->head = 0; /* There must be something in this buffer now */
@@ -367,6 +378,11 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
 		.npo = npo,
 		.head = *head,
 		.gso_type = XEN_NETIF_GSO_TYPE_NONE,
+		/* xenvif_set_skb_hash() will have either set a s/w
+		 * hash or cleared the hash depending on whether the
+		 * frontend wants a hash for this skb.
+		 */
+		.hash_present = skb->sw_hash,
 	};
 	unsigned long bytes;
 
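As a reminder of the sk_buff fields this patch keys off (paraphrased
from include/linux/skbuff.h):

	__u32	hash;		/* the flow hash value itself */
	__u8	sw_hash:1;	/* hash was computed by the stack, not the NIC */
	__u8	l4_hash:1;	/* hash is a canonical 4-tuple hash over transport ports */
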
@@ -555,6 +571,7 @@ void xenvif_kick_thread(struct xenvif_queue *queue)

 static void xenvif_rx_action(struct xenvif_queue *queue)
 {
+	struct xenvif *vif = queue->vif;
 	s8 status;
 	u16 flags;
 	struct xen_netif_rx_response *resp;
@@ -590,9 +607,10 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 	gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
 
 	while ((skb = __skb_dequeue(&rxq)) != NULL) {
+		struct xen_netif_extra_info *extra = NULL;
+
 		if ((1 << queue->meta[npo.meta_cons].gso_type) &
-		    queue->vif->gso_prefix_mask) {
+		    vif->gso_prefix_mask) {
 			resp = RING_GET_RESPONSE(&queue->rx,
 						 queue->rx.rsp_prod_pvt++);
@@ -610,7 +628,7 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 		queue->stats.tx_bytes += skb->len;
 		queue->stats.tx_packets++;
 
-		status = xenvif_check_gop(queue->vif,
+		status = xenvif_check_gop(vif,
 					  XENVIF_RX_CB(skb)->meta_slots_used,
 					  &npo);
 
@@ -632,21 +650,57 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 					flags);
 
 		if ((1 << queue->meta[npo.meta_cons].gso_type) &
-		    queue->vif->gso_mask) {
-			struct xen_netif_extra_info *gso =
-				(struct xen_netif_extra_info *)
+		    vif->gso_mask) {
+			extra = (struct xen_netif_extra_info *)
 				RING_GET_RESPONSE(&queue->rx,
 						  queue->rx.rsp_prod_pvt++);
 
 			resp->flags |= XEN_NETRXF_extra_info;
 
-			gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
-			gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
-			gso->u.gso.pad = 0;
-			gso->u.gso.features = 0;
+			extra->u.gso.type = queue->meta[npo.meta_cons].gso_type;
+			extra->u.gso.size = queue->meta[npo.meta_cons].gso_size;
+			extra->u.gso.pad = 0;
+			extra->u.gso.features = 0;
 
-			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
-			gso->flags = 0;
+			extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
+			extra->flags = 0;
 		}
 
+		if (skb->sw_hash) {
+			/* Since the skb got here via xenvif_select_queue()
+			 * we know that the hash has been re-calculated
+			 * according to a configuration set by the frontend
+			 * and therefore we know that it is legitimate to
+			 * pass it to the frontend.
+			 */
+			if (resp->flags & XEN_NETRXF_extra_info)
+				extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
+			else
+				resp->flags |= XEN_NETRXF_extra_info;
+
+			extra = (struct xen_netif_extra_info *)
+				RING_GET_RESPONSE(&queue->rx,
+						  queue->rx.rsp_prod_pvt++);
+
+			extra->u.hash.algorithm =
+				XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;
+
+			if (skb->l4_hash)
+				extra->u.hash.type =
+					skb->protocol == htons(ETH_P_IP) ?
+					_XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
+					_XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
+			else
+				extra->u.hash.type =
+					skb->protocol == htons(ETH_P_IP) ?
+					_XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
+					_XEN_NETIF_CTRL_HASH_TYPE_IPV6;
+
+			*(uint32_t *)extra->u.hash.value =
+				skb_get_hash_raw(skb);
+
+			extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
+			extra->flags = 0;
+		}
+
 		xenvif_add_frag_responses(queue, status,
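
To illustrate the other end of the protocol: a frontend receiving a
XEN_NETIF_EXTRA_TYPE_HASH segment could map it back onto its own skb
roughly as follows. This is hypothetical frontend code, sketched only
to show the intended use of the new segment; it is not part of this
commit, and it assumes the usual <linux/skbuff.h> and netif.h
definitions are in scope:

	static void example_consume_hash(struct sk_buff *skb,
					 const struct xen_netif_extra_info *extra)
	{
		u32 hash;

		if (extra->type != XEN_NETIF_EXTRA_TYPE_HASH)
			return;

		/* The backend wrote the raw 32-bit hash into value[]. */
		memcpy(&hash, extra->u.hash.value, sizeof(hash));

		switch (extra->u.hash.type) {
		case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
		case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
			break;
		case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
		case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
			skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
			break;
		}
	}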
