
Commit

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Here is the first rc pull request for RDMA. This includes an important
  core fix for a regression in iWarp if SELinux is enabled, a fix for a
  compilation regression introduced in this merge window, and one
  obscure kconfig combination that oops's the kernel.

  For drivers, we have hns fixes needed to make their devices work on
  certain ARM IOMMU configurations, a stack data leak for hfi1, and
  various testing discovered -rc bug fixes for i40iw.

  This cycle we pushed back on the driver maintainers to have better
  commit messages for -rc material"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  IB/core: Only enforce security for InfiniBand
  RDMA/hns: Get rid of page operation after dma_alloc_coherent
  RDMA/hns: Get rid of virt_to_page and vmap calls after dma_alloc_coherent
  RDMA/hns: Fix the issue of IOVA not page continuous in hip08
  IB/core: Init subsys if compiled to vmlinuz-core
  RDMA/cma: Make sure that PSN is not over max allowed
  i40iw: Notify user of established connection after QP in RTS
  i40iw: Move MPA request event for loopback after connect
  i40iw: Correct ARP index mask
  i40iw: Do not free sqbuf when event is I40IW_TIMER_TYPE_CLOSE
  i40iw: Allocate a sdbuf per CQP WQE
  IB: INFINIBAND should depend on HAS_DMA
  IB/hfi1: Initialize bth1 in 16B rc ack builder
Linus Torvalds committed Dec 5, 2017
2 parents 6a5e05a + 315d160 commit e6cdd80
Showing 13 changed files with 134 additions and 78 deletions.
2 changes: 1 addition & 1 deletion drivers/infiniband/Kconfig
@@ -1,6 +1,6 @@
menuconfig INFINIBAND
tristate "InfiniBand support"
-depends on HAS_IOMEM
+depends on HAS_IOMEM && HAS_DMA
depends on NET
depends on INET
depends on m || IPV6 != m
1 change: 1 addition & 0 deletions drivers/infiniband/core/cma.c
@@ -801,6 +801,7 @@ struct rdma_cm_id *rdma_create_id(struct net *net,
INIT_LIST_HEAD(&id_priv->mc_list);
get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
id_priv->id.route.addr.dev_addr.net = get_net(net);
+id_priv->seq_num &= 0x00ffffff;

return &id_priv->id;
}
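The one-line addition above implements "RDMA/cma: Make sure that PSN is not over max allowed": IB packet sequence numbers are 24 bits wide, so the starting PSN drawn from get_random_bytes() must be masked to the low 24 bits. A minimal userspace sketch of the same masking (the PSN_MASK name is illustrative, not a kernel symbol):

#include <stdint.h>
#include <stdio.h>

/* IB PSNs are 24-bit; a random 32-bit seed must be masked down. */
#define PSN_MASK 0x00ffffffu

int main(void)
{
    uint32_t seq_num = 0xdeadbeef;  /* stand-in for get_random_bytes() */

    seq_num &= PSN_MASK;            /* same mask the fix applies */
    printf("starting PSN: 0x%06x\n", (unsigned)seq_num);  /* 0xadbeef */
    return 0;
}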
2 changes: 1 addition & 1 deletion drivers/infiniband/core/device.c
@@ -1253,5 +1253,5 @@ static void __exit ib_core_cleanup(void)

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);

-module_init(ib_core_init);
+subsys_initcall(ib_core_init);
module_exit(ib_core_cleanup);
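Background for "IB/core: Init subsys if compiled to vmlinuz-core": when ib_core is built into the kernel image, module_init() expands to device_initcall(), which may run after built-in users that already expect the core to be registered; subsys_initcall() runs at an earlier initcall level. A kernel-style sketch of the distinction, with illustrative names only (for a modular build both macros behave identically):

#include <linux/init.h>
#include <linux/module.h>

static int __init example_core_init(void)
{
    pr_info("example_core: up before any device_initcall() user\n");
    return 0;
}

static void __exit example_core_exit(void)
{
}

subsys_initcall(example_core_init);  /* was: module_init(example_core_init); */
module_exit(example_core_exit);
MODULE_LICENSE("GPL");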
50 changes: 46 additions & 4 deletions drivers/infiniband/core/security.c
@@ -417,8 +417,17 @@ void ib_close_shared_qp_security(struct ib_qp_security *sec)

int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
+u8 i = rdma_start_port(dev);
+bool is_ib = false;
int ret;

+while (i <= rdma_end_port(dev) && !is_ib)
+is_ib = rdma_protocol_ib(dev, i++);

+/* If this isn't an IB device don't create the security context */
+if (!is_ib)
+return 0;

qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
if (!qp->qp_sec)
return -ENOMEM;
@@ -441,6 +450,10 @@ EXPORT_SYMBOL(ib_create_qp_security);

void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
+/* Return if not IB */
+if (!sec)
+return;

mutex_lock(&sec->mutex);

/* Remove the QP from the lists so it won't get added to
@@ -470,6 +483,10 @@ void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
int ret;
int i;

+/* Return if not IB */
+if (!sec)
+return;

/* If a concurrent cache update is in progress this
* QP security could be marked for an error state
* transition. Wait for this to complete.
@@ -505,6 +522,10 @@ void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
int i;

+/* Return if not IB */
+if (!sec)
+return;

/* If a concurrent cache update is occurring we must
* wait until this QP security structure is processed
* in the QP to error flow before destroying it because
@@ -557,26 +578,35 @@ int ib_security_modify_qp(struct ib_qp *qp,
{
int ret = 0;
struct ib_ports_pkeys *tmp_pps;
-struct ib_ports_pkeys *new_pps;
+struct ib_ports_pkeys *new_pps = NULL;
struct ib_qp *real_qp = qp->real_qp;
bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
real_qp->qp_type == IB_QPT_GSI ||
real_qp->qp_type >= IB_QPT_RESERVED1);
bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
(qp_attr_mask & IB_QP_ALT_PATH));

+WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
+rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
+!real_qp->qp_sec),
+"%s: QP security is not initialized for IB QP: %d\n",
+__func__, real_qp->qp_num);

/* The port/pkey settings are maintained only for the real QP. Open
* handles on the real QP will be in the shared_qp_list. When
* enforcing security on the real QP all the shared QPs will be
* checked as well.
*/

-if (pps_change && !special_qp) {
+if (pps_change && !special_qp && real_qp->qp_sec) {
mutex_lock(&real_qp->qp_sec->mutex);
new_pps = get_new_pps(real_qp,
qp_attr,
qp_attr_mask);

+if (!new_pps) {
+mutex_unlock(&real_qp->qp_sec->mutex);
+return -ENOMEM;
+}
/* Add this QP to the lists for the new port
* and pkey settings before checking for permission
* in case there is a concurrent cache update
@@ -600,7 +630,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
qp_attr_mask,
udata);

-if (pps_change && !special_qp) {
+if (new_pps) {
/* Clean up the lists and free the appropriate
* ports_pkeys structure.
*/
@@ -631,6 +661,9 @@ int ib_security_pkey_access(struct ib_device *dev,
u16 pkey;
int ret;

+if (!rdma_protocol_ib(dev, port_num))
+return 0;

ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
if (ret)
return ret;
@@ -665,6 +698,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
{
int ret;

+if (!rdma_protocol_ib(agent->device, agent->port_num))
+return 0;

ret = security_ib_alloc_security(&agent->security);
if (ret)
return ret;
@@ -690,13 +726,19 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,

void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
+if (!rdma_protocol_ib(agent->device, agent->port_num))
+return;

security_ib_free_security(agent->security);
if (agent->lsm_nb_reg)
unregister_lsm_notifier(&agent->lsm_nb);
}

int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
{
+if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
+return 0;

if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed)
return -EACCES;

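The heart of "IB/core: Only enforce security for InfiniBand" is the port scan added to ib_create_qp_security(): a security context is created only if at least one port of the device speaks IB, and every other entry point returns early when the context or the protocol is absent — which is what un-breaks iWARP under SELinux. A userspace sketch of the scan with a stubbed rdma_protocol_ib() (all names here are stand-ins):

#include <stdbool.h>
#include <stdio.h>

/* Stub for rdma_protocol_ib(): pretend port 1 is iWARP, port 2 is IB. */
static bool protocol_is_ib(int port)
{
    return port == 2;
}

int main(void)
{
    const int start_port = 1, end_port = 2;  /* rdma_start/end_port() */
    bool is_ib = false;
    int i = start_port;

    while (i <= end_port && !is_ib)
        is_ib = protocol_is_ib(i++);

    printf("create QP security context: %s\n", is_ib ? "yes" : "no");
    return 0;
}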
2 changes: 1 addition & 1 deletion drivers/infiniband/hw/hfi1/rc.c
@@ -814,7 +814,7 @@ static inline void hfi1_make_rc_ack_16B(struct rvt_qp *qp,
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct hfi1_16b_header *hdr = &opa_hdr->opah;
struct ib_other_headers *ohdr;
-u32 bth0, bth1;
+u32 bth0, bth1 = 0;
u16 len, pkey;
u8 becn = !!is_fecn;
u8 l4 = OPA_16B_L4_IB_LOCAL;
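The hfi1 change closes the stack data leak mentioned in the pull message: bth1 was assigned only on some code paths, so the unwritten bits of the local could reach the wire inside the 16B RC ack. Zero-initializing the variable gives every bit a defined value. A userspace sketch of the bug class (names illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Build a fake header word. Without the '= 0', any bits no code path
 * writes would carry whatever was left on the stack. */
static void build_word(uint8_t out[4])
{
    uint32_t bth1 = 0;   /* the fix: no path leaves bits undefined */

    bth1 |= 0x7f;        /* code paths set only some of the bits */
    memcpy(out, &bth1, sizeof(bth1));
}

int main(void)
{
    uint8_t wire[4];

    build_word(wire);
    printf("on the wire: %02x %02x %02x %02x\n",
           wire[0], wire[1], wire[2], wire[3]);
    return 0;
}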
23 changes: 0 additions & 23 deletions drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -162,14 +162,10 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
{
int i;
struct device *dev = hr_dev->dev;
-u32 bits_per_long = BITS_PER_LONG;

if (buf->nbufs == 1) {
dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
} else {
-if (bits_per_long == 64 && buf->page_shift == PAGE_SHIFT)
-vunmap(buf->direct.buf);

for (i = 0; i < buf->nbufs; ++i)
if (buf->page_list[i].buf)
dma_free_coherent(dev, 1 << buf->page_shift,
@@ -185,9 +181,7 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
{
int i = 0;
dma_addr_t t;
-struct page **pages;
struct device *dev = hr_dev->dev;
-u32 bits_per_long = BITS_PER_LONG;
u32 page_size = 1 << page_shift;
u32 order;

@@ -236,23 +230,6 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
buf->page_list[i].map = t;
memset(buf->page_list[i].buf, 0, page_size);
}
-if (bits_per_long == 64 && page_shift == PAGE_SHIFT) {
-pages = kmalloc_array(buf->nbufs, sizeof(*pages),
-GFP_KERNEL);
-if (!pages)
-goto err_free;

-for (i = 0; i < buf->nbufs; ++i)
-pages[i] = virt_to_page(buf->page_list[i].buf);

-buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP,
-PAGE_KERNEL);
-kfree(pages);
-if (!buf->direct.buf)
-goto err_free;
-} else {
-buf->direct.buf = NULL;
-}
}

return 0;
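Why the vmap() shortcut had to go: on the ARM IOMMU configurations mentioned in the pull message, dma_alloc_coherent() can return a remapped address that has no struct page in the kernel linear map, so calling virt_to_page() or vmap() on it is undefined. The only CPU address that is always safe is the one the allocator itself returned. A kernel-style sketch of the rule (illustrative, not code from the patch):

#include <linux/dma-mapping.h>

/* Use the CPU address dma_alloc_coherent() hands back; never derive
 * struct pages from it. */
static void *coherent_example(struct device *dev, dma_addr_t *handle)
{
    void *cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, handle, GFP_KERNEL);

    if (!cpu_addr)
        return NULL;

    /* OK:     memset(cpu_addr, 0, PAGE_SIZE);
     * NOT OK: virt_to_page(cpu_addr) -- the address may live in a
     * remap area with no linear-map struct page behind it. */
    return cpu_addr;
}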
4 changes: 1 addition & 3 deletions drivers/infiniband/hw/hns/hns_roce_device.h
@@ -726,11 +726,9 @@ static inline struct hns_roce_qp

static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
{
-u32 bits_per_long_val = BITS_PER_LONG;
u32 page_size = 1 << buf->page_shift;

-if ((bits_per_long_val == 64 && buf->page_shift == PAGE_SHIFT) ||
-buf->nbufs == 1)
+if (buf->nbufs == 1)
return (char *)(buf->direct.buf) + offset;
else
return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
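With the vmap()-built direct mapping gone, a multi-page buffer no longer has a single virtually contiguous view, so hns_roce_buf_offset() must always resolve an offset through the page list, as the new code does. A userspace sketch of that indexing (PAGE_SHIFT fixed at 12 for illustration):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)

struct page_entry { char *buf; };  /* analogue of buf->page_list[i] */

static void *buf_offset(struct page_entry *pages, unsigned int offset)
{
    return pages[offset >> PAGE_SHIFT].buf + (offset & (PAGE_SIZE - 1));
}

int main(void)
{
    struct page_entry pages[2] = {
        { calloc(1, PAGE_SIZE) },  /* stand-ins for coherent pages */
        { calloc(1, PAGE_SIZE) },
    };
    char *p = buf_offset(pages, PAGE_SIZE + 8);  /* page 1, byte 8 */

    *p = 42;
    printf("pages[1].buf[8] = %d\n", pages[1].buf[8]);  /* 42 */
    free(pages[0].buf);
    free(pages[1].buf);
    return 0;
}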
25 changes: 13 additions & 12 deletions drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -224,6 +224,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
chunk->npages = 0;
chunk->nsg = 0;
+memset(chunk->buf, 0, sizeof(chunk->buf));
list_add_tail(&chunk->list, &hem->chunk_list);
}

@@ -240,8 +241,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
if (!buf)
goto fail;

-sg_set_buf(mem, buf, PAGE_SIZE << order);
-WARN_ON(mem->offset);
+chunk->buf[chunk->npages] = buf;
sg_dma_len(mem) = PAGE_SIZE << order;

++chunk->npages;
@@ -267,8 +267,8 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
for (i = 0; i < chunk->npages; ++i)
dma_free_coherent(hr_dev->dev,
-chunk->mem[i].length,
-lowmem_page_address(sg_page(&chunk->mem[i])),
+sg_dma_len(&chunk->mem[i]),
+chunk->buf[i],
sg_dma_address(&chunk->mem[i]));
kfree(chunk);
}
@@ -722,11 +722,12 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_chunk *chunk;
struct hns_roce_hem_mhop mhop;
struct hns_roce_hem *hem;
-struct page *page = NULL;
+void *addr = NULL;
unsigned long mhop_obj = obj;
unsigned long obj_per_chunk;
unsigned long idx_offset;
int offset, dma_offset;
+int length;
int i, j;
u32 hem_idx = 0;

@@ -763,25 +764,25 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,

list_for_each_entry(chunk, &hem->chunk_list, list) {
for (i = 0; i < chunk->npages; ++i) {
+length = sg_dma_len(&chunk->mem[i]);
if (dma_handle && dma_offset >= 0) {
-if (sg_dma_len(&chunk->mem[i]) >
-(u32)dma_offset)
+if (length > (u32)dma_offset)
*dma_handle = sg_dma_address(
&chunk->mem[i]) + dma_offset;
-dma_offset -= sg_dma_len(&chunk->mem[i]);
+dma_offset -= length;
}

-if (chunk->mem[i].length > (u32)offset) {
-page = sg_page(&chunk->mem[i]);
+if (length > (u32)offset) {
+addr = chunk->buf[i] + offset;
goto out;
}
-offset -= chunk->mem[i].length;
+offset -= length;
}
}

out:
mutex_unlock(&table->mutex);
-return page ? lowmem_page_address(page) + offset : NULL;
+return addr;
}
EXPORT_SYMBOL_GPL(hns_roce_table_find);

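The hem.c and hem.h changes apply the same rule from the other direction: the CPU address returned by dma_alloc_coherent() is recorded in the new chunk->buf[] array next to the scatterlist entry, so the free and lookup paths never have to reverse-map a DMA descriptor through sg_page() + lowmem_page_address(). A userspace sketch of the pairing (structure and field names are stand-ins):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CHUNK_LEN 4

/* Record the CPU pointer beside the DMA address at allocation time. */
struct chunk {
    int npages;
    uint64_t dma_addr[CHUNK_LEN];  /* analogue of chunk->mem[] */
    void *buf[CHUNK_LEN];          /* analogue of the new chunk->buf[] */
};

int main(void)
{
    struct chunk c = { 0 };

    c.buf[c.npages] = malloc(4096);    /* allocator's CPU address */
    c.dma_addr[c.npages] = 0x100000;   /* device-visible address */
    c.npages++;

    free(c.buf[0]);  /* free path uses the recorded pointer directly */
    printf("freed page via recorded CPU address\n");
    return 0;
}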
1 change: 1 addition & 0 deletions drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -78,6 +78,7 @@ struct hns_roce_hem_chunk {
int npages;
int nsg;
struct scatterlist mem[HNS_ROCE_HEM_CHUNK_LEN];
+void *buf[HNS_ROCE_HEM_CHUNK_LEN];
};

struct hns_roce_hem {
22 changes: 15 additions & 7 deletions drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1126,9 +1126,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
{
struct hns_roce_v2_mpt_entry *mpt_entry;
struct scatterlist *sg;
+u64 page_addr;
u64 *pages;
+int i, j;
+int len;
int entry;
-int i;

mpt_entry = mb_buf;
memset(mpt_entry, 0, sizeof(*mpt_entry));
@@ -1186,14 +1188,20 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,

i = 0;
for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
-pages[i] = ((u64)sg_dma_address(sg)) >> 6;

-/* Record the first 2 entry directly to MTPT table */
-if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
-break;
-i++;
+len = sg_dma_len(sg) >> PAGE_SHIFT;
+for (j = 0; j < len; ++j) {
+page_addr = sg_dma_address(sg) +
+(j << mr->umem->page_shift);
+pages[i] = page_addr >> 6;

+/* Record the first 2 entry directly to MTPT table */
+if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
+goto found;
+i++;
+}
}

+found:
mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
V2_MPT_BYTE_56_PA0_H_S,
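"RDMA/hns: Fix the issue of IOVA not page continuous in hip08" deals with scatterlist entries an IOMMU has merged: one entry can now cover several pages of contiguous IOVA, so the MTPT writer walks every entry page by page instead of recording a single address per entry. A userspace sketch of that expansion (stub types; PAGE_SHIFT fixed at 12):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Stub for one scatterlist entry after IOMMU merging. */
struct sg_stub {
    uint64_t dma_address;
    unsigned int dma_len;
};

int main(void)
{
    /* One merged entry spanning three pages of contiguous IOVA. */
    struct sg_stub sg = { 0x100000, 3u << PAGE_SHIFT };
    unsigned int len = sg.dma_len >> PAGE_SHIFT;
    unsigned int j;

    for (j = 0; j < len; ++j) {
        uint64_t page_addr = sg.dma_address + ((uint64_t)j << PAGE_SHIFT);

        printf("MTPT page %u -> 0x%llx\n", j, (unsigned long long)page_addr);
    }
    return 0;
}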
