Skip to content

Commit

Permalink
net: Add asynchronous callbacks for xfrm on layer 2.
Browse files Browse the repository at this point in the history
This patch implements asynchronous crypto callbacks
and a backlog handler that can be used when IPsec
is done at layer 2 in the TX path. It also extends
the skb validate functions so that the driver
transmit return codes can be updated based on the
asynchronous crypto operation, or to indicate that
the packet was queued in a backlog queue.

Joint work with: Aviv Heller <avivh@mellanox.com>

Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
  • Loading branch information
Steffen Klassert committed Dec 20, 2017
1 parent 3dca3f3 commit f53c723
Show file tree
Hide file tree
Showing 8 changed files with 175 additions and 36 deletions.
6 changes: 4 additions & 2 deletions include/linux/netdevice.h
Original file line number Diff line number Diff line change
Expand Up @@ -2793,7 +2793,9 @@ struct softnet_data {
struct Qdisc *output_queue;
struct Qdisc **output_queue_tailp;
struct sk_buff *completion_queue;

#ifdef CONFIG_XFRM_OFFLOAD
struct sk_buff_head xfrm_backlog;
#endif
#ifdef CONFIG_RPS
/* input_queue_head should be written by cpu owning this struct,
* and only read by other cpus. Worth using a cache line.
Expand Down Expand Up @@ -3325,7 +3327,7 @@ int dev_get_phys_port_id(struct net_device *dev,
int dev_get_phys_port_name(struct net_device *dev,
char *name, size_t len);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);

Expand Down
22 changes: 19 additions & 3 deletions include/net/xfrm.h
Original file line number Diff line number Diff line change
Expand Up @@ -1051,6 +1051,7 @@ struct xfrm_offload {
#define XFRM_GSO_SEGMENT 16
#define XFRM_GRO 32
#define XFRM_ESP_NO_TRAILER 64
#define XFRM_DEV_RESUME 128

__u32 status;
#define CRYPTO_SUCCESS 1
Expand Down Expand Up @@ -1874,21 +1875,28 @@ static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
{
return skb->sp->xvec[skb->sp->len - 1];
}
#endif

/* Return the xfrm_offload state attached to @skb, or NULL if there is none.
 *
 * The offload entry is only valid when a secpath is present and every
 * secpath entry has a matching offload entry (sp->len == sp->olen);
 * otherwise the packet carries no usable offload state.  Without
 * CONFIG_XFRM a secpath never exists, so always report NULL.
 */
static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	struct sec_path *sp = skb->sp;

	if (!sp || !sp->olen || sp->len != sp->olen)
		return NULL;

	return &sp->ovec[sp->olen - 1];
#else
	return NULL;
#endif
}

void __net_init xfrm_dev_init(void);

#ifdef CONFIG_XFRM_OFFLOAD
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
void xfrm_dev_resume(struct sk_buff *skb);
void xfrm_dev_backlog(struct softnet_data *sd);
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
struct xfrm_user_offload *xuo);
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
Expand Down Expand Up @@ -1929,7 +1937,15 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
}
}
#else
/* !CONFIG_XFRM_OFFLOAD stubs: let callers stay free of #ifdefs. */

/* No offload support: nothing to hand back to the driver. */
static inline void xfrm_dev_resume(struct sk_buff *skb)
{
}

/* No offload support: the per-cpu xfrm backlog queue does not exist. */
static inline void xfrm_dev_backlog(struct softnet_data *sd)
{
}

/* No offload support: the skb needs no xfrm-specific validation and
 * *again is never set, so the packet passes through unchanged.
 */
static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	return skb;
}
Expand Down
16 changes: 11 additions & 5 deletions net/core/dev.c
Original file line number Diff line number Diff line change
Expand Up @@ -3059,7 +3059,7 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
}
EXPORT_SYMBOL(skb_csum_hwoffload_help);

static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
{
netdev_features_t features;

Expand Down Expand Up @@ -3099,7 +3099,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
}
}

skb = validate_xmit_xfrm(skb, features);
skb = validate_xmit_xfrm(skb, features, again);

return skb;

Expand All @@ -3110,7 +3110,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
return NULL;
}

struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
{
struct sk_buff *next, *head = NULL, *tail;

Expand All @@ -3121,7 +3121,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
/* in case skb wont be segmented, point to itself */
skb->prev = skb;

skb = validate_xmit_skb(skb, dev);
skb = validate_xmit_skb(skb, dev, again);
if (!skb)
continue;

Expand Down Expand Up @@ -3448,6 +3448,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
struct netdev_queue *txq;
struct Qdisc *q;
int rc = -ENOMEM;
bool again = false;

skb_reset_mac_header(skb);

Expand Down Expand Up @@ -3509,7 +3510,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
XMIT_RECURSION_LIMIT))
goto recursion_alert;

skb = validate_xmit_skb(skb, dev);
skb = validate_xmit_skb(skb, dev, &again);
if (!skb)
goto out;

Expand Down Expand Up @@ -4193,6 +4194,8 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
spin_unlock(root_lock);
}
}

xfrm_dev_backlog(sd);
}

#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
Expand Down Expand Up @@ -8874,6 +8877,9 @@ static int __init net_dev_init(void)

skb_queue_head_init(&sd->input_pkt_queue);
skb_queue_head_init(&sd->process_queue);
#ifdef CONFIG_XFRM_OFFLOAD
skb_queue_head_init(&sd->xfrm_backlog);
#endif
INIT_LIST_HEAD(&sd->poll_list);
sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
Expand Down
24 changes: 21 additions & 3 deletions net/ipv4/esp4.c
Original file line number Diff line number Diff line change
Expand Up @@ -121,14 +121,32 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
/*
 * Completion callback for asynchronous ESP output crypto requests.
 *
 * Called by the crypto layer once an encrypt request that returned
 * -EINPROGRESS has finished.  Packets resumed from the layer-2 offload
 * path (XFRM_DEV_RESUME set) carry no dst, so the state is taken from
 * the secpath instead of skb_dst(); on success the mac header is
 * restored and the packet is handed back to the driver through
 * xfrm_dev_resume(), on error it is dropped and accounted as a state
 * protocol error.  The ordinary layer-3 path resumes transmission via
 * xfrm_output_resume().
 */
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME))
		x = skb->sp->xvec[skb->sp->len - 1];
	else
		x = skb_dst(skb)->xfrm;

	/* Release the scatterlist/IV scratch area allocated for the request. */
	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		/* Re-expose the mac header and drop the secpath before
		 * giving the fully built frame back to the driver.
		 */
		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		xfrm_output_resume(skb, err);
	}
}

/* Move ESP header back into place. */
Expand Down
24 changes: 21 additions & 3 deletions net/ipv6/esp6.c
Original file line number Diff line number Diff line change
Expand Up @@ -141,14 +141,32 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
/*
 * Completion callback for asynchronous ESP (IPv6) output crypto requests.
 *
 * Called by the crypto layer once an encrypt request that returned
 * -EINPROGRESS has finished.  Packets resumed from the layer-2 offload
 * path (XFRM_DEV_RESUME set) carry no dst, so the state is taken from
 * the secpath instead of skb_dst(); on success the mac header is
 * restored and the packet is handed back to the driver through
 * xfrm_dev_resume(), on error it is dropped and accounted as a state
 * protocol error.  The ordinary layer-3 path resumes transmission via
 * xfrm_output_resume().
 */
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME))
		x = skb->sp->xvec[skb->sp->len - 1];
	else
		x = skb_dst(skb)->xfrm;

	/* Release the scatterlist/IV scratch area allocated for the request. */
	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		/* Re-expose the mac header and drop the secpath before
		 * giving the fully built frame back to the driver.
		 */
		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		xfrm_output_resume(skb, err);
	}
}

/* Move ESP header back into place. */
Expand Down
3 changes: 2 additions & 1 deletion net/packet/af_packet.c
Original file line number Diff line number Diff line change
Expand Up @@ -247,12 +247,13 @@ static int packet_direct_xmit(struct sk_buff *skb)
struct sk_buff *orig_skb = skb;
struct netdev_queue *txq;
int ret = NETDEV_TX_BUSY;
bool again = false;

if (unlikely(!netif_running(dev) ||
!netif_carrier_ok(dev)))
goto drop;

skb = validate_xmit_skb_list(skb, dev);
skb = validate_xmit_skb_list(skb, dev, &again);
if (skb != orig_skb)
goto drop;

Expand Down
16 changes: 15 additions & 1 deletion net/sched/sch_generic.c
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <trace/events/qdisc.h>
#include <net/xfrm.h>

/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
Expand Down Expand Up @@ -230,6 +231,8 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,

/* skb in gso_skb were already validated */
*validate = false;
if (xfrm_offload(skb))
*validate = true;
/* check the reason of requeuing without tx lock first */
txq = skb_get_tx_queue(txq->dev, skb);
if (!netif_xmit_frozen_or_stopped(txq)) {
Expand Down Expand Up @@ -285,14 +288,25 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
spinlock_t *root_lock, bool validate)
{
int ret = NETDEV_TX_BUSY;
bool again = false;

/* And release qdisc */
if (root_lock)
spin_unlock(root_lock);

/* Note that we validate skb (GSO, checksum, ...) outside of locks */
if (validate)
skb = validate_xmit_skb_list(skb, dev);
skb = validate_xmit_skb_list(skb, dev, &again);

#ifdef CONFIG_XFRM_OFFLOAD
if (unlikely(again)) {
if (root_lock)
spin_lock(root_lock);

dev_requeue_skb(skb, q);
return false;
}
#endif

if (likely(skb)) {
HARD_TX_LOCK(dev, txq, smp_processor_id());
Expand Down
Loading

0 comments on commit f53c723

Please sign in to comment.