Skip to content

Commit

Permalink
net/mlx5: Maintain separate page trees for ECPF and PF functions
Browse files Browse the repository at this point in the history
Pages for the host PF and ECPF were stored in the same tree, so the ECPF
pages were being freed along with the host PF's when the host driver
unloaded.

Combine the function ID and the ECPF flag into a single key, used as the
index into the XArray that holds the page trees, so that the host PF and
the ECPF each get their own tree.

Fixes: c616816 ("net/mlx5: Add support for release all pages event")
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
  • Loading branch information
Daniel Jurgens authored and Saeed Mahameed committed Jan 26, 2021
1 parent 45c9a30 commit 0aa1284
Showing 1 changed file with 34 additions and 24 deletions.
58 changes: 34 additions & 24 deletions drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ struct fw_page {
struct rb_node rb_node;
u64 addr;
struct page *page;
u16 func_id;
u32 function;
unsigned long bitmask;
struct list_head list;
unsigned free_count;
Expand All @@ -74,20 +74,25 @@ enum {
MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func_id)
/*
 * Build the page-tree lookup key for a function: the 16-bit function ID in
 * the low bits, with the ECPF flag in bit 16 so the host PF and the ECPF
 * get distinct trees in the page_root_xa XArray.
 *
 * BUG FIX: the original used '&' (bitwise AND). Since func_id lives in
 * bits 0-15 and (ec_function << 16) lives in bit 16, the two masks never
 * overlap and the AND always evaluated to 0 — collapsing every function
 * onto index 0 and defeating the separation this key exists to provide.
 * Bitwise OR combines the two fields as intended.
 */
static u32 get_function(u16 func_id, bool ec_function)
{
	return (u32)func_id | ((u32)ec_function << 16);
}

static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
struct rb_root *root;
int err;

root = xa_load(&dev->priv.page_root_xa, func_id);
root = xa_load(&dev->priv.page_root_xa, function);
if (root)
return root;

root = kzalloc(sizeof(*root), GFP_KERNEL);
if (!root)
return ERR_PTR(-ENOMEM);

err = xa_insert(&dev->priv.page_root_xa, func_id, root, GFP_KERNEL);
err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
if (err) {
kfree(root);
return ERR_PTR(err);
Expand All @@ -98,7 +103,7 @@ static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func
return root;
}

static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
{
struct rb_node *parent = NULL;
struct rb_root *root;
Expand All @@ -107,7 +112,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
struct fw_page *tfp;
int i;

root = page_root_per_func_id(dev, func_id);
root = page_root_per_function(dev, function);
if (IS_ERR(root))
return PTR_ERR(root);

Expand All @@ -130,7 +135,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u

nfp->addr = addr;
nfp->page = page;
nfp->func_id = func_id;
nfp->function = function;
nfp->free_count = MLX5_NUM_4K_IN_PAGE;
for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
set_bit(i, &nfp->bitmask);
Expand All @@ -143,14 +148,14 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
}

static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
u32 func_id)
u32 function)
{
struct fw_page *result = NULL;
struct rb_root *root;
struct rb_node *tmp;
struct fw_page *tfp;

root = xa_load(&dev->priv.page_root_xa, func_id);
root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
return NULL;

Expand Down Expand Up @@ -194,14 +199,14 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
return err;
}

static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
{
struct fw_page *fp = NULL;
struct fw_page *iter;
unsigned n;

list_for_each_entry(iter, &dev->priv.free_list, list) {
if (iter->func_id != func_id)
if (iter->function != function)
continue;
fp = iter;
}
Expand Down Expand Up @@ -231,7 +236,7 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
{
struct rb_root *root;

root = xa_load(&dev->priv.page_root_xa, fwp->func_id);
root = xa_load(&dev->priv.page_root_xa, fwp->function);
if (WARN_ON_ONCE(!root))
return;

Expand All @@ -244,12 +249,12 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
kfree(fwp);
}

static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
{
struct fw_page *fwp;
int n;

fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, func_id);
fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
if (!fwp) {
mlx5_core_warn_rl(dev, "page not found\n");
return;
Expand All @@ -263,7 +268,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
list_add(&fwp->list, &dev->priv.free_list);
}

static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
struct device *device = mlx5_core_dma_dev(dev);
int nid = dev_to_node(device);
Expand Down Expand Up @@ -291,7 +296,7 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
goto map;
}

err = insert_page(dev, addr, page, func_id);
err = insert_page(dev, addr, page, function);
if (err) {
mlx5_core_err(dev, "failed to track allocated page\n");
dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
Expand Down Expand Up @@ -328,6 +333,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int notify_fail, bool ec_function)
{
u32 function = get_function(func_id, ec_function);
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
u64 addr;
Expand All @@ -345,10 +351,10 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,

for (i = 0; i < npages; i++) {
retry:
err = alloc_4k(dev, &addr, func_id);
err = alloc_4k(dev, &addr, function);
if (err) {
if (err == -ENOMEM)
err = alloc_system_page(dev, func_id);
err = alloc_system_page(dev, function);
if (err)
goto out_4k;

Expand Down Expand Up @@ -384,22 +390,23 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,

out_4k:
for (i--; i >= 0; i--)
free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), func_id);
free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
out_free:
kvfree(in);
if (notify_fail)
page_notify_fail(dev, func_id, ec_function);
return err;
}

static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
bool ec_function)
{
u32 function = get_function(func_id, ec_function);
struct rb_root *root;
struct rb_node *p;
int npages = 0;

root = xa_load(&dev->priv.page_root_xa, func_id);
root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
return;

Expand Down Expand Up @@ -446,6 +453,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
struct rb_root *root;
struct fw_page *fwp;
struct rb_node *p;
bool ec_function;
u32 func_id;
u32 npages;
u32 i = 0;
Expand All @@ -456,8 +464,9 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
/* No hard feelings, we want our pages back! */
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
func_id = MLX5_GET(manage_pages_in, in, function_id);
ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);

root = xa_load(&dev->priv.page_root_xa, func_id);
root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
if (WARN_ON_ONCE(!root))
return -EEXIST;

Expand All @@ -473,9 +482,10 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
return 0;
}

static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int *nclaimed, bool ec_function)
{
u32 function = get_function(func_id, ec_function);
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
int num_claimed;
Expand Down Expand Up @@ -514,7 +524,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
}

for (i = 0; i < num_claimed; i++)
free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), func_id);
free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);

if (nclaimed)
*nclaimed = num_claimed;
Expand Down

0 comments on commit 0aa1284

Please sign in to comment.