Merge branch 'nfp-update-to-control-structures'
Jakub Kicinski says:

====================
nfp: update to control structures

This series prepares NFP control structures for crypto offloads.
So far we have mostly dealt with configuration requests under the
rtnl lock; that will no longer be the case with crypto.  Additionally,
we will try to reuse the BPF control message format, so we move the
common code out of BPF.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
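For context, a minimal sketch of how a caller drives the factored-out control message (CCM) request/reply path after this series, modeled on the nfp_bpf_ctrl_free_map() hunk below. The wrapper function, the request struct name, and the choice of reply struct are illustrative assumptions, not part of this diff; the real code also converts the firmware return code in the reply into an errno.

/* Hypothetical caller of the shared CCM layer, modeled on the
 * nfp_bpf_ctrl_free_map() hunk in this series. The request struct
 * name is assumed for illustration.
 */
static int example_ccm_map_free(struct nfp_app_bpf *bpf, u32 tid)
{
	struct cmsg_req_map_free_tbl *req;
	struct sk_buff *skb;

	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->tid = cpu_to_be32(tid);

	/* nfp_ccm_communicate() now owns tag allocation, transmit and
	 * reply matching, i.e. the logic this series moves out of
	 * bpf/cmsg.c.
	 */
	skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_FREE,
				  sizeof(struct cmsg_reply_map_simple));
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	dev_consume_skb_any(skb);
	return 0;
}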
David S. Miller committed Apr 13, 2019
2 parents 1deeb64 + bcf0caf commit 9d60f0e
Showing 12 changed files with 451 additions and 293 deletions.
1 change: 1 addition & 0 deletions drivers/net/ethernet/netronome/nfp/Makefile
@@ -15,6 +15,7 @@ nfp-objs := \
nfpcore/nfp_resource.o \
nfpcore/nfp_rtsym.o \
nfpcore/nfp_target.o \
+ccm.o \
nfp_asm.o \
nfp_app.o \
nfp_app_nic.o \
8 changes: 6 additions & 2 deletions drivers/net/ethernet/netronome/nfp/abm/ctrl.c
@@ -261,10 +261,15 @@ int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm)

int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed)
{
+const u32 cmd = NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET;
struct nfp_net *nn = alink->vnic;
unsigned int i;
int err;

+err = nfp_net_mbox_lock(nn, alink->abm->prio_map_len);
+if (err)
+return err;
+
/* Write data_len and wipe reserved */
nn_writeq(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATALEN,
alink->abm->prio_map_len);
@@ -273,8 +278,7 @@ int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed)
nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATA + i,
packed[i / sizeof(u32)]);

-err = nfp_net_reconfig_mbox(nn,
-NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET);
+err = nfp_net_mbox_reconfig_and_unlock(nn, cmd);
if (err)
nfp_err(alink->abm->app->cpp,
"setting DSCP -> VQ map failed with error %d\n", err);
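The hunks above replace the old one-shot nfp_net_reconfig_mbox() call with an explicit lock / write / reconfig-and-unlock sequence, so the mailbox no longer has to be serialized by the rtnl lock alone. A hedged sketch of the resulting call order, using only the helpers visible in this hunk (the wrapper function itself is hypothetical):

/* Hypothetical wrapper showing the split mailbox flow used above:
 * reserve the mbox, write the request, then kick the FW and release
 * the lock in one call, whatever the outcome.
 */
static int example_mbox_update(struct nfp_net *nn, u32 cmd,
			       const u32 *packed, unsigned int len)
{
	unsigned int i;
	int err;

	err = nfp_net_mbox_lock(nn, len);
	if (err)
		return err;

	/* write data_len, then the payload words */
	nn_writeq(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATALEN, len);
	for (i = 0; i < len; i += sizeof(u32))
		nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATA + i,
			  packed[i / sizeof(u32)]);

	return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
}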
236 changes: 19 additions & 217 deletions drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -6,48 +6,13 @@
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
-#include <linux/wait.h>

#include "../ccm.h"
#include "../nfp_app.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"

-#define NFP_BPF_TAG_ALLOC_SPAN (U16_MAX / 4)
-
-static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
-{
-u16 used_tags;
-
-used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;
-
-return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
-}
-
-static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
-{
-/* All FW communication for BPF is request-reply. To make sure we
- * don't reuse the message ID too early after timeout - limit the
- * number of requests in flight.
- */
-if (nfp_bpf_all_tags_busy(bpf)) {
-cmsg_warn(bpf, "all FW request contexts busy!\n");
-return -EAGAIN;
-}
-
-WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
-return bpf->tag_alloc_next++;
-}
-
-static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
-{
-WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));
-
-while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
-bpf->tag_alloc_last != bpf->tag_alloc_next)
-bpf->tag_alloc_last++;
-}
-
static struct sk_buff *
nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
{
@@ -87,149 +52,6 @@ nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n)
return size;
}

-static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb)
-{
-struct cmsg_hdr *hdr;
-
-hdr = (struct cmsg_hdr *)skb->data;
-
-return hdr->type;
-}
-
-static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
-{
-struct cmsg_hdr *hdr;
-
-hdr = (struct cmsg_hdr *)skb->data;
-
-return be16_to_cpu(hdr->tag);
-}
-
-static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
-{
-unsigned int msg_tag;
-struct sk_buff *skb;
-
-skb_queue_walk(&bpf->cmsg_replies, skb) {
-msg_tag = nfp_bpf_cmsg_get_tag(skb);
-if (msg_tag == tag) {
-nfp_bpf_free_tag(bpf, tag);
-__skb_unlink(skb, &bpf->cmsg_replies);
-return skb;
-}
-}
-
-return NULL;
-}
-
-static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
-{
-struct sk_buff *skb;
-
-nfp_ctrl_lock(bpf->app->ctrl);
-skb = __nfp_bpf_reply(bpf, tag);
-nfp_ctrl_unlock(bpf->app->ctrl);
-
-return skb;
-}
-
-static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
-{
-struct sk_buff *skb;
-
-nfp_ctrl_lock(bpf->app->ctrl);
-skb = __nfp_bpf_reply(bpf, tag);
-if (!skb)
-nfp_bpf_free_tag(bpf, tag);
-nfp_ctrl_unlock(bpf->app->ctrl);
-
-return skb;
-}
-
-static struct sk_buff *
-nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
-int tag)
-{
-struct sk_buff *skb;
-int i, err;
-
-for (i = 0; i < 50; i++) {
-udelay(4);
-skb = nfp_bpf_reply(bpf, tag);
-if (skb)
-return skb;
-}
-
-err = wait_event_interruptible_timeout(bpf->cmsg_wq,
-skb = nfp_bpf_reply(bpf, tag),
-msecs_to_jiffies(5000));
-/* We didn't get a response - try last time and atomically drop
- * the tag even if no response is matched.
- */
-if (!skb)
-skb = nfp_bpf_reply_drop_tag(bpf, tag);
-if (err < 0) {
-cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
-err == ERESTARTSYS ? "interrupted" : "error",
-type, err);
-return ERR_PTR(err);
-}
-if (!skb) {
-cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
-type);
-return ERR_PTR(-ETIMEDOUT);
-}
-
-return skb;
-}
-
-static struct sk_buff *
-nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
-enum nfp_bpf_cmsg_type type, unsigned int reply_size)
-{
-struct cmsg_hdr *hdr;
-int tag;
-
-nfp_ctrl_lock(bpf->app->ctrl);
-tag = nfp_bpf_alloc_tag(bpf);
-if (tag < 0) {
-nfp_ctrl_unlock(bpf->app->ctrl);
-dev_kfree_skb_any(skb);
-return ERR_PTR(tag);
-}
-
-hdr = (void *)skb->data;
-hdr->ver = CMSG_MAP_ABI_VERSION;
-hdr->type = type;
-hdr->tag = cpu_to_be16(tag);
-
-__nfp_app_ctrl_tx(bpf->app, skb);
-
-nfp_ctrl_unlock(bpf->app->ctrl);
-
-skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
-if (IS_ERR(skb))
-return skb;
-
-hdr = (struct cmsg_hdr *)skb->data;
-if (hdr->type != __CMSG_REPLY(type)) {
-cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
-hdr->type, __CMSG_REPLY(type));
-goto err_free;
-}
-/* 0 reply_size means caller will do the validation */
-if (reply_size && skb->len != reply_size) {
-cmsg_warn(bpf, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
-type, skb->len, reply_size);
-goto err_free;
-}
-
-return skb;
-err_free:
-dev_kfree_skb_any(skb);
-return ERR_PTR(-EIO);
-}
-
static int
nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
struct cmsg_reply_map_simple *reply)
@@ -275,8 +97,8 @@ nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
req->map_type = cpu_to_be32(map->map_type);
req->map_flags = 0;

-skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
-sizeof(*reply));
+skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_ALLOC,
+sizeof(*reply));
if (IS_ERR(skb))
return PTR_ERR(skb);

@@ -310,8 +132,8 @@ void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
req = (void *)skb->data;
req->tid = cpu_to_be32(nfp_map->tid);

-skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
-sizeof(*reply));
+skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_FREE,
+sizeof(*reply));
if (IS_ERR(skb)) {
cmsg_warn(bpf, "leaking map - I/O error\n");
return;
@@ -354,8 +176,7 @@ nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
}

static int
-nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
-enum nfp_bpf_cmsg_type op,
+nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_ccm_type op,
u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
{
struct nfp_bpf_map *nfp_map = offmap->dev_priv;
@@ -386,8 +207,8 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value,
map->value_size);

-skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
-nfp_bpf_cmsg_map_reply_size(bpf, 1));
+skb = nfp_ccm_communicate(&bpf->ccm, skb, op,
+nfp_bpf_cmsg_map_reply_size(bpf, 1));
if (IS_ERR(skb))
return PTR_ERR(skb);

@@ -415,34 +236,34 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
void *key, void *value, u64 flags)
{
-return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
+return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_UPDATE,
key, value, flags, NULL, NULL);
}

int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
{
-return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
+return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_DELETE,
key, NULL, 0, NULL, NULL);
}

int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
void *key, void *value)
{
-return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
+return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_LOOKUP,
key, NULL, 0, NULL, value);
}

int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
void *next_key)
{
-return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
+return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETFIRST,
NULL, NULL, 0, next_key, NULL);
}

int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
void *key, void *next_key)
{
-return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
+return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETNEXT,
key, NULL, 0, next_key, NULL);
}

@@ -456,54 +277,35 @@ unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf)
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_app_bpf *bpf = app->priv;
-unsigned int tag;

if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
-goto err_free;
+dev_kfree_skb_any(skb);
+return;
}

-if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) {
+if (nfp_ccm_get_type(skb) == NFP_CCM_TYPE_BPF_BPF_EVENT) {
if (!nfp_bpf_event_output(bpf, skb->data, skb->len))
dev_consume_skb_any(skb);
else
dev_kfree_skb_any(skb);
return;
}

-nfp_ctrl_lock(bpf->app->ctrl);
-
-tag = nfp_bpf_cmsg_get_tag(skb);
-if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
-cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
-tag);
-goto err_unlock;
-}
-
-__skb_queue_tail(&bpf->cmsg_replies, skb);
-wake_up_interruptible_all(&bpf->cmsg_wq);
-
-nfp_ctrl_unlock(bpf->app->ctrl);
-
-return;
-err_unlock:
-nfp_ctrl_unlock(bpf->app->ctrl);
-err_free:
-dev_kfree_skb_any(skb);
+nfp_ccm_rx(&bpf->ccm, skb);
}

void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len)
{
+const struct nfp_ccm_hdr *hdr = data;
struct nfp_app_bpf *bpf = app->priv;
-const struct cmsg_hdr *hdr = data;

if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) {
cmsg_warn(bpf, "cmsg drop - too short %d!\n", len);
return;
}

-if (hdr->type == CMSG_TYPE_BPF_EVENT)
+if (hdr->type == NFP_CCM_TYPE_BPF_BPF_EVENT)
nfp_bpf_event_output(bpf, data, len);
else
cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n",
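The tag allocator removed above moves into the new shared ccm.c (added by this series but not expanded on this page). Its busy check leans on u16 wrap-around: tags are handed out sequentially, so tag_alloc_next - tag_alloc_last counts the requests in flight even after the counter wraps past U16_MAX. A standalone userspace sketch of that arithmetic, with made-up values:

/* Demo of the u16 tag-window arithmetic; values are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define TAG_ALLOC_SPAN (UINT16_MAX / 4)	/* mirrors NFP_BPF_TAG_ALLOC_SPAN */

int main(void)
{
	uint16_t tag_alloc_last = 65530;	/* oldest tag still in flight */
	uint16_t tag_alloc_next = 10;		/* next tag to hand out */
	uint16_t used_tags = tag_alloc_next - tag_alloc_last;

	/* 10 - 65530 wraps to 16 in u16 arithmetic */
	printf("in flight: %u, all busy: %s\n", (unsigned int)used_tags,
	       used_tags > TAG_ALLOC_SPAN ? "yes" : "no");
	return 0;
}

With these values 16 requests are outstanding, well under the span limit of 16383, so new tags can still be allocated.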
