RDMA/device: Call ib_cache_release_one() only from ib_device_release()
Instead of complicated logic about when this memory is freed, always free
it during device release(). All the cache pointers start out as NULL, so
it is safe to call this before the cache is initialized.

This makes for a simpler error unwind flow, and a simpler understanding of
the lifetime of the memory allocations inside the struct ib_device.

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Jason Gunthorpe committed Feb 8, 2019
Parent: b34b269, commit: d45f89d
Showing 2 changed files with 15 additions and 29 deletions.
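
The idea the commit message relies on is a NULL-guarded release that is always safe to call from the final release path, whether or not setup ever ran. Below is a minimal userspace sketch of that pattern, not the kernel code itself: the dev_cache/dev structures and cache_release()/dev_release() names are hypothetical stand-ins, with plain calloc()/free() in place of the kernel allocators.

#include <stdlib.h>

/* Hypothetical stand-ins for the cache and device structures. */
struct dev_cache { int *ports; };
struct dev       { struct dev_cache cache; };

/* Always callable: if setup never ran, cache.ports is still NULL. */
static void cache_release(struct dev *d)
{
	if (!d->cache.ports)
		return;
	free(d->cache.ports);
	d->cache.ports = NULL;
}

/* The final release frees everything unconditionally, so error-unwind
 * paths never need to track how far initialization got. */
static void dev_release(struct dev *d)
{
	cache_release(d);
	free(d);
}

int main(void)
{
	/* calloc() zeroes the struct, so cache.ports starts out NULL. */
	struct dev *d = calloc(1, sizeof(*d));
	if (!d)
		return 1;
	/* Even without any cache setup, release is safe. */
	dev_release(d);
	return 0;
}

The diff below applies the same idea: ib_cache_release_one() bails out early when device->cache.ports is NULL, so ib_device_release() can call it unconditionally.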
drivers/infiniband/core/cache.c: 3 additions, 0 deletions
@@ -1460,6 +1460,9 @@ void ib_cache_release_one(struct ib_device *device)
 {
 	int p;
 
+	if (!device->cache.ports)
+		return;
+
 	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
drivers/infiniband/core/device.c: 12 additions, 29 deletions
@@ -244,17 +244,10 @@ static void ib_device_release(struct device *device)
 	struct ib_device *dev = container_of(device, struct ib_device, dev);
 
 	WARN_ON(dev->reg_state == IB_DEV_REGISTERED);
-	if (dev->reg_state == IB_DEV_UNREGISTERED) {
-		/*
-		 * In IB_DEV_UNINITIALIZED state, cache or port table
-		 * is not even created. Free cache and port table only when
-		 * device reaches UNREGISTERED state.
-		 */
-		ib_cache_release_one(dev);
-		kfree(dev->port_immutable);
-	}
+	ib_cache_release_one(dev);
 	ib_security_release_port_pkey_list(dev);
 	kfree(dev->port_pkey_list);
+	kfree(dev->port_immutable);
 	kfree(dev);
 }

@@ -520,13 +513,6 @@ static void setup_dma_device(struct ib_device *device)
 	}
 }
 
-static void cleanup_device(struct ib_device *device)
-{
-	ib_cache_cleanup_one(device);
-	ib_cache_release_one(device);
-	kfree(device->port_immutable);
-}
-
 static int setup_device(struct ib_device *device)
 {
 	struct ib_udata uhw = {.outlen = 0, .inlen = 0};
@@ -548,26 +534,16 @@ static int setup_device(struct ib_device *device)
 	if (ret) {
 		dev_warn(&device->dev,
			 "Couldn't query the device attributes\n");
-		goto port_cleanup;
+		return ret;
 	}
 
 	ret = setup_port_pkey_list(device);
 	if (ret) {
 		dev_warn(&device->dev, "Couldn't create per port_pkey_list\n");
-		goto port_cleanup;
-	}
-
-	ret = ib_cache_setup_one(device);
-	if (ret) {
-		dev_warn(&device->dev,
-			 "Couldn't set up InfiniBand P_Key/GID cache\n");
 		return ret;
 	}
-	return 0;
 
-port_cleanup:
-	kfree(device->port_immutable);
-	return ret;
+	return 0;
 }
 
 /**
@@ -607,6 +583,13 @@ int ib_register_device(struct ib_device *device, const char *name)
 	if (ret)
 		goto out;
 
+	ret = ib_cache_setup_one(device);
+	if (ret) {
+		dev_warn(&device->dev,
+			 "Couldn't set up InfiniBand P_Key/GID cache\n");
+		goto out;
+	}
+
 	device->index = __dev_new_index();
 
 	ib_device_register_rdmacg(device);
@@ -633,7 +616,7 @@ int ib_register_device(struct ib_device *device, const char *name)
 
 cg_cleanup:
 	ib_device_unregister_rdmacg(device);
-	cleanup_device(device);
+	ib_cache_cleanup_one(device);
 out:
 	mutex_unlock(&device_mutex);
 	return ret;
