From 75c2c4e25b6453953c93f7c87674f4ec9a4e25dc Mon Sep 17 00:00:00 2001 From: Dotan Barak Date: Wed, 11 Jul 2012 15:39:27 +0000 Subject: [PATCH] --- yaml --- r: 316375 b: refs/heads/master c: 9bbeb6663ea2f069bd3bb6387fe7a824def21064 h: refs/heads/master i: 316373: 5cdf431d92f64f1d441cb95a381d56c2f3cfa61a 316371: 75812debcd1cb72678b61c2925826f27876c6028 316367: d3bb68407ed00e915b3227b66861af42d6e70b86 v: v3 --- [refs] | 2 +- trunk/drivers/infiniband/core/cm_msgs.h | 12 ++ trunk/drivers/infiniband/core/sa_query.c | 133 ----------------- trunk/drivers/infiniband/hw/mlx4/mad.c | 141 ++++-------------- trunk/drivers/infiniband/hw/mlx4/main.c | 31 +--- trunk/drivers/infiniband/hw/mlx4/mlx4_ib.h | 19 --- trunk/drivers/infiniband/hw/mlx4/qp.c | 27 +--- trunk/drivers/infiniband/hw/mthca/mthca_qp.c | 3 +- .../net/ethernet/mellanox/mlx4/en_main.c | 5 +- trunk/drivers/net/ethernet/mellanox/mlx4/eq.c | 22 +-- trunk/drivers/net/ethernet/mellanox/mlx4/fw.c | 132 ++++------------ .../drivers/net/ethernet/mellanox/mlx4/intf.c | 5 +- .../drivers/net/ethernet/mellanox/mlx4/main.c | 86 +++-------- .../drivers/net/ethernet/mellanox/mlx4/mlx4.h | 67 ++++++++- .../drivers/net/ethernet/mellanox/mlx4/port.c | 11 +- trunk/include/linux/mlx4/device.h | 119 +-------------- trunk/include/linux/mlx4/driver.h | 3 +- trunk/include/rdma/ib_cm.h | 12 -- trunk/include/rdma/ib_sa.h | 33 ---- 19 files changed, 178 insertions(+), 685 deletions(-) diff --git a/[refs] b/[refs] index cfc1cb8645c8..b0a9b7f5879c 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 6634961c14d38ef64ec284c07aecb03d3dd03b4a +refs/heads/master: 9bbeb6663ea2f069bd3bb6387fe7a824def21064 diff --git a/trunk/drivers/infiniband/core/cm_msgs.h b/trunk/drivers/infiniband/core/cm_msgs.h index be068f47e47e..7da9b2102341 100644 --- a/trunk/drivers/infiniband/core/cm_msgs.h +++ b/trunk/drivers/infiniband/core/cm_msgs.h @@ -44,6 +44,18 @@ #define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */ +#define CM_REQ_ATTR_ID cpu_to_be16(0x0010) +#define CM_MRA_ATTR_ID cpu_to_be16(0x0011) +#define CM_REJ_ATTR_ID cpu_to_be16(0x0012) +#define CM_REP_ATTR_ID cpu_to_be16(0x0013) +#define CM_RTU_ATTR_ID cpu_to_be16(0x0014) +#define CM_DREQ_ATTR_ID cpu_to_be16(0x0015) +#define CM_DREP_ATTR_ID cpu_to_be16(0x0016) +#define CM_SIDR_REQ_ATTR_ID cpu_to_be16(0x0017) +#define CM_SIDR_REP_ATTR_ID cpu_to_be16(0x0018) +#define CM_LAP_ATTR_ID cpu_to_be16(0x0019) +#define CM_APR_ATTR_ID cpu_to_be16(0x001A) + enum cm_msg_sequence { CM_MSG_SEQUENCE_REQ, CM_MSG_SEQUENCE_LAP, diff --git a/trunk/drivers/infiniband/core/sa_query.c b/trunk/drivers/infiniband/core/sa_query.c index a8905abc56e4..fbbfa24cf572 100644 --- a/trunk/drivers/infiniband/core/sa_query.c +++ b/trunk/drivers/infiniband/core/sa_query.c @@ -94,12 +94,6 @@ struct ib_sa_path_query { struct ib_sa_query sa_query; }; -struct ib_sa_guidinfo_query { - void (*callback)(int, struct ib_sa_guidinfo_rec *, void *); - void *context; - struct ib_sa_query sa_query; -}; - struct ib_sa_mcmember_query { void (*callback)(int, struct ib_sa_mcmember_rec *, void *); void *context; @@ -353,34 +347,6 @@ static const struct ib_field service_rec_table[] = { .size_bits = 2*64 }, }; -#define GUIDINFO_REC_FIELD(field) \ - .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \ - .struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \ - .field_name = "sa_guidinfo_rec:" #field - -static const struct ib_field guidinfo_rec_table[] = { - { GUIDINFO_REC_FIELD(lid), - .offset_words = 0, - .offset_bits = 0, - 
.size_bits = 16 }, - { GUIDINFO_REC_FIELD(block_num), - .offset_words = 0, - .offset_bits = 16, - .size_bits = 8 }, - { GUIDINFO_REC_FIELD(res1), - .offset_words = 0, - .offset_bits = 24, - .size_bits = 8 }, - { GUIDINFO_REC_FIELD(res2), - .offset_words = 1, - .offset_bits = 0, - .size_bits = 32 }, - { GUIDINFO_REC_FIELD(guid_info_list), - .offset_words = 2, - .offset_bits = 0, - .size_bits = 512 }, -}; - static void free_sm_ah(struct kref *kref) { struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref); @@ -979,105 +945,6 @@ int ib_sa_mcmember_rec_query(struct ib_sa_client *client, return ret; } -/* Support GuidInfoRecord */ -static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query, - int status, - struct ib_sa_mad *mad) -{ - struct ib_sa_guidinfo_query *query = - container_of(sa_query, struct ib_sa_guidinfo_query, sa_query); - - if (mad) { - struct ib_sa_guidinfo_rec rec; - - ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), - mad->data, &rec); - query->callback(status, &rec, query->context); - } else - query->callback(status, NULL, query->context); -} - -static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query) -{ - kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query)); -} - -int ib_sa_guid_info_rec_query(struct ib_sa_client *client, - struct ib_device *device, u8 port_num, - struct ib_sa_guidinfo_rec *rec, - ib_sa_comp_mask comp_mask, u8 method, - int timeout_ms, gfp_t gfp_mask, - void (*callback)(int status, - struct ib_sa_guidinfo_rec *resp, - void *context), - void *context, - struct ib_sa_query **sa_query) -{ - struct ib_sa_guidinfo_query *query; - struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); - struct ib_sa_port *port; - struct ib_mad_agent *agent; - struct ib_sa_mad *mad; - int ret; - - if (!sa_dev) - return -ENODEV; - - if (method != IB_MGMT_METHOD_GET && - method != IB_MGMT_METHOD_SET && - method != IB_SA_METHOD_DELETE) { - return -EINVAL; - } - - port = &sa_dev->port[port_num - sa_dev->start_port]; - agent = port->agent; - - query = kmalloc(sizeof *query, gfp_mask); - if (!query) - return -ENOMEM; - - query->sa_query.port = port; - ret = alloc_mad(&query->sa_query, gfp_mask); - if (ret) - goto err1; - - ib_sa_client_get(client); - query->sa_query.client = client; - query->callback = callback; - query->context = context; - - mad = query->sa_query.mad_buf->mad; - init_mad(mad, agent); - - query->sa_query.callback = callback ? 
ib_sa_guidinfo_rec_callback : NULL; - query->sa_query.release = ib_sa_guidinfo_rec_release; - - mad->mad_hdr.method = method; - mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC); - mad->sa_hdr.comp_mask = comp_mask; - - ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec, - mad->data); - - *sa_query = &query->sa_query; - - ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); - if (ret < 0) - goto err2; - - return ret; - -err2: - *sa_query = NULL; - ib_sa_client_put(query->sa_query.client); - free_mad(&query->sa_query); - -err1: - kfree(query); - return ret; -} -EXPORT_SYMBOL(ib_sa_guid_info_rec_query); - static void send_handler(struct ib_mad_agent *agent, struct ib_mad_send_wc *mad_send_wc) { diff --git a/trunk/drivers/infiniband/hw/mlx4/mad.c b/trunk/drivers/infiniband/hw/mlx4/mad.c index c27141fef1ab..259b0670b51c 100644 --- a/trunk/drivers/infiniband/hw/mlx4/mad.c +++ b/trunk/drivers/infiniband/hw/mlx4/mad.c @@ -147,51 +147,47 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl) } /* - * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can - * synthesize LID change, Client-Rereg, GID change, and P_Key change events. + * Snoop SM MADs for port info and P_Key table sets, so we can + * synthesize LID change and P_Key change events. */ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad, - u16 prev_lid) + u16 prev_lid) { - struct ib_port_info *pinfo; - u16 lid; + struct ib_event event; - struct mlx4_ib_dev *dev = to_mdev(ibdev); if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && - mad->mad_hdr.method == IB_MGMT_METHOD_SET) - switch (mad->mad_hdr.attr_id) { - case IB_SMP_ATTR_PORT_INFO: - pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data; - lid = be16_to_cpu(pinfo->lid); + mad->mad_hdr.method == IB_MGMT_METHOD_SET) { + if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) { + struct ib_port_info *pinfo = + (struct ib_port_info *) ((struct ib_smp *) mad)->data; + u16 lid = be16_to_cpu(pinfo->lid); - update_sm_ah(dev, port_num, + update_sm_ah(to_mdev(ibdev), port_num, be16_to_cpu(pinfo->sm_lid), pinfo->neighbormtu_mastersmsl & 0xf); - if (pinfo->clientrereg_resv_subnetto & 0x80) - mlx4_ib_dispatch_event(dev, port_num, - IB_EVENT_CLIENT_REREGISTER); + event.device = ibdev; + event.element.port_num = port_num; - if (prev_lid != lid) - mlx4_ib_dispatch_event(dev, port_num, - IB_EVENT_LID_CHANGE); - break; + if (pinfo->clientrereg_resv_subnetto & 0x80) { + event.event = IB_EVENT_CLIENT_REREGISTER; + ib_dispatch_event(&event); + } - case IB_SMP_ATTR_PKEY_TABLE: - mlx4_ib_dispatch_event(dev, port_num, - IB_EVENT_PKEY_CHANGE); - break; + if (prev_lid != lid) { + event.event = IB_EVENT_LID_CHANGE; + ib_dispatch_event(&event); + } + } - case IB_SMP_ATTR_GUID_INFO: - /* paravirtualized master's guid is guid 0 -- does not change */ - if (!mlx4_is_master(dev->dev)) - mlx4_ib_dispatch_event(dev, port_num, - IB_EVENT_GID_CHANGE); - break; - default: - break; + if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) { + event.device = ibdev; + event.event = IB_EVENT_PKEY_CHANGE; + event.element.port_num = port_num; + ib_dispatch_event(&event); } + } } static void node_desc_override(struct ib_device *dev, @@ -246,25 +242,6 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, int err; struct ib_port_attr pattr; - if (in_wc && in_wc->qp->qp_num) { - pr_debug("received MAD: slid:%d sqpn:%d " - 
"dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n", - in_wc->slid, in_wc->src_qp, - in_wc->dlid_path_bits, - in_wc->qp->qp_num, - in_wc->wc_flags, - in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method, - be16_to_cpu(in_mad->mad_hdr.attr_id)); - if (in_wc->wc_flags & IB_WC_GRH) { - pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n", - be64_to_cpu(in_grh->sgid.global.subnet_prefix), - be64_to_cpu(in_grh->sgid.global.interface_id)); - pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n", - be64_to_cpu(in_grh->dgid.global.subnet_prefix), - be64_to_cpu(in_grh->dgid.global.interface_id)); - } - } - slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) { @@ -309,8 +286,7 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, return IB_MAD_RESULT_FAILURE; if (!out_mad->mad_hdr.status) { - if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)) - smp_snoop(ibdev, port_num, in_mad, prev_lid); + smp_snoop(ibdev, port_num, in_mad, prev_lid); node_desc_override(ibdev, out_mad); } @@ -451,64 +427,3 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev) ib_destroy_ah(dev->sm_ah[p]); } } - -void handle_port_mgmt_change_event(struct work_struct *work) -{ - struct ib_event_work *ew = container_of(work, struct ib_event_work, work); - struct mlx4_ib_dev *dev = ew->ib_dev; - struct mlx4_eqe *eqe = &(ew->ib_eqe); - u8 port = eqe->event.port_mgmt_change.port; - u32 changed_attr; - - switch (eqe->subtype) { - case MLX4_DEV_PMC_SUBTYPE_PORT_INFO: - changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr); - - /* Update the SM ah - This should be done before handling - the other changed attributes so that MADs can be sent to the SM */ - if (changed_attr & MSTR_SM_CHANGE_MASK) { - u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid); - u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf; - update_sm_ah(dev, port, lid, sl); - } - - /* Check if it is a lid change event */ - if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK) - mlx4_ib_dispatch_event(dev, port, IB_EVENT_LID_CHANGE); - - /* Generate GUID changed event */ - if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) - mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE); - - if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK) - mlx4_ib_dispatch_event(dev, port, - IB_EVENT_CLIENT_REREGISTER); - break; - - case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE: - mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE); - break; - case MLX4_DEV_PMC_SUBTYPE_GUID_INFO: - /* paravirtualized master's guid is guid 0 -- does not change */ - if (!mlx4_is_master(dev->dev)) - mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE); - break; - default: - pr_warn("Unsupported subtype 0x%x for " - "Port Management Change event\n", eqe->subtype); - } - - kfree(ew); -} - -void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num, - enum ib_event_type type) -{ - struct ib_event event; - - event.device = &dev->ib_dev; - event.element.port_num = port_num; - event.event = type; - - ib_dispatch_event(&event); -} diff --git a/trunk/drivers/infiniband/hw/mlx4/main.c b/trunk/drivers/infiniband/hw/mlx4/main.c index 4f230c26622d..3530c41fcd1f 100644 --- a/trunk/drivers/infiniband/hw/mlx4/main.c +++ b/trunk/drivers/infiniband/hw/mlx4/main.c @@ -50,7 +50,7 @@ #include "mlx4_ib.h" #include "user.h" -#define DRV_NAME MLX4_IB_DRV_NAME +#define DRV_NAME "mlx4_ib" #define DRV_VERSION "1.0" #define 
DRV_RELDATE "April 4, 2008" @@ -898,6 +898,7 @@ static void update_gids_task(struct work_struct *work) union ib_gid *gids; int err; struct mlx4_dev *dev = gw->dev->dev; + struct ib_event event; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) { @@ -915,7 +916,10 @@ static void update_gids_task(struct work_struct *work) pr_warn("set port command failed\n"); else { memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids); - mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE); + event.device = &gw->dev->ib_dev; + event.element.port_num = gw->port; + event.event = IB_EVENT_GID_CHANGE; + ib_dispatch_event(&event); } mlx4_free_cmd_mailbox(dev, mailbox); @@ -1379,18 +1383,10 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) } static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, - enum mlx4_dev_event event, unsigned long param) + enum mlx4_dev_event event, int port) { struct ib_event ibev; struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr); - struct mlx4_eqe *eqe = NULL; - struct ib_event_work *ew; - int port = 0; - - if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE) - eqe = (struct mlx4_eqe *)param; - else - port = (u8)param; if (port > ibdev->num_ports) return; @@ -1409,19 +1405,6 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, ibev.event = IB_EVENT_DEVICE_FATAL; break; - case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: - ew = kmalloc(sizeof *ew, GFP_ATOMIC); - if (!ew) { - pr_err("failed to allocate memory for events work\n"); - break; - } - - INIT_WORK(&ew->work, handle_port_mgmt_change_event); - memcpy(&ew->ib_eqe, eqe, sizeof *eqe); - ew->ib_dev = ibdev; - handle_port_mgmt_change_event(&ew->work); - return; - default: return; } diff --git a/trunk/drivers/infiniband/hw/mlx4/mlx4_ib.h b/trunk/drivers/infiniband/hw/mlx4/mlx4_ib.h index 23bfbf9ee0e0..ff36655d23d3 100644 --- a/trunk/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/trunk/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -44,16 +44,6 @@ #include #include -#define MLX4_IB_DRV_NAME "mlx4_ib" - -#ifdef pr_fmt -#undef pr_fmt -#endif -#define pr_fmt(fmt) "<" MLX4_IB_DRV_NAME "> %s: " fmt, __func__ - -#define mlx4_ib_warn(ibdev, format, arg...) \ - dev_warn((ibdev)->dma_device, MLX4_IB_DRV_NAME ": " format, ## arg) - enum { MLX4_IB_SQ_MIN_WQE_SHIFT = 6, MLX4_IB_MAX_HEADROOM = 2048 @@ -224,12 +214,6 @@ struct mlx4_ib_dev { int eq_added; }; -struct ib_event_work { - struct work_struct work; - struct mlx4_ib_dev *ib_dev; - struct mlx4_eqe ib_eqe; -}; - static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev) { return container_of(ibdev, struct mlx4_ib_dev, ib_dev); @@ -387,7 +371,4 @@ static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah) int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, union ib_gid *gid); -void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num, - enum ib_event_type type); - #endif /* MLX4_IB_H */ diff --git a/trunk/drivers/infiniband/hw/mlx4/qp.c b/trunk/drivers/infiniband/hw/mlx4/qp.c index 84b26963c8d4..8d4ed24aef93 100644 --- a/trunk/drivers/infiniband/hw/mlx4/qp.c +++ b/trunk/drivers/infiniband/hw/mlx4/qp.c @@ -1335,21 +1335,11 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; - if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) { - pr_debug("qpn 0x%x: invalid attribute mask specified " - "for transition %d to %d. 
qp_type %d," - " attr_mask 0x%x\n", - ibqp->qp_num, cur_state, new_state, - ibqp->qp_type, attr_mask); + if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) goto out; - } if ((attr_mask & IB_QP_PORT) && (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) { - pr_debug("qpn 0x%x: invalid port number (%d) specified " - "for transition %d to %d. qp_type %d\n", - ibqp->qp_num, attr->port_num, cur_state, - new_state, ibqp->qp_type); goto out; } @@ -1360,30 +1350,17 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (attr_mask & IB_QP_PKEY_INDEX) { int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; - if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) { - pr_debug("qpn 0x%x: invalid pkey index (%d) specified " - "for transition %d to %d. qp_type %d\n", - ibqp->qp_num, attr->pkey_index, cur_state, - new_state, ibqp->qp_type); + if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) goto out; - } } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) { - pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. " - "Transition %d to %d. qp_type %d\n", - ibqp->qp_num, attr->max_rd_atomic, cur_state, - new_state, ibqp->qp_type); goto out; } if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) { - pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. " - "Transition %d to %d. qp_type %d\n", - ibqp->qp_num, attr->max_dest_rd_atomic, cur_state, - new_state, ibqp->qp_type); goto out; } diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_qp.c b/trunk/drivers/infiniband/hw/mthca/mthca_qp.c index 9601049e14d0..c3074a1d34ed 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_qp.c @@ -247,7 +247,8 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn, spin_unlock(&dev->qp_table.lock); if (!qp) { - mthca_warn(dev, "Async event for bogus QP %08x\n", qpn); + mthca_warn(dev, "Async event %d for bogus QP %08x\n", + event_type, qpn); return; } diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_main.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_main.c index a52922ed85c1..69ba57270481 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -131,7 +131,7 @@ static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port) } static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, - enum mlx4_dev_event event, unsigned long port) + enum mlx4_dev_event event, int port) { struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr; struct mlx4_en_priv *priv; @@ -156,8 +156,7 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, if (port < 1 || port > dev->caps.num_ports || !mdev->pndev[port]) return; - mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, - (int) port); + mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, port); } } diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/eq.c b/trunk/drivers/net/ethernet/mellanox/mlx4/eq.c index 9b15d0219950..bce98d9c0039 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -82,15 +82,6 @@ enum { (1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \ (1ull << MLX4_EVENT_TYPE_FATAL_WARNING)) -static u64 get_async_ev_mask(struct mlx4_dev *dev) -{ - u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK; - if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV) - async_ev_mask |= (1ull << 
MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT); - - return async_ev_mask; -} - static void eq_set_ci(struct mlx4_eq *eq, int req_not) { __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) | @@ -482,11 +473,6 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) break; - case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT: - mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE, - (unsigned long) eqe); - break; - case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: case MLX4_EVENT_TYPE_ECC_DETECT: default: @@ -970,7 +956,7 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) priv->eq_table.have_irq = 1; } - err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, + err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0, priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); if (err) mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", @@ -1010,7 +996,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); int i; - mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1, + mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1, priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); mlx4_free_irqs(dev); @@ -1054,7 +1040,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev) mlx4_cmd_use_polling(dev); /* Map the new eq to handle all asyncronous events */ - err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, + err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0, priv->eq_table.eq[i].eqn); if (err) { mlx4_warn(dev, "Failed mapping eq for interrupt test\n"); @@ -1068,7 +1054,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev) } /* Return to default */ - mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, + mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0, priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); return err; } diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c b/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c index 473d63b63b4e..9c83bb8151ea 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -109,7 +109,6 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags) [41] = "Unicast VEP steering support", [42] = "Multicast VEP steering support", [48] = "Counters support", - [59] = "Port management change event support", }; int i; @@ -174,7 +173,6 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, #define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0 #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 #define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4 -#define QUERY_FUNC_CAP_FMR_OFFSET 0x8 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14 #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18 @@ -184,44 +182,25 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, #define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c #define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0X30 -#define QUERY_FUNC_CAP_FMR_FLAG 0x80 -#define QUERY_FUNC_CAP_FLAG_RDMA 0x40 -#define QUERY_FUNC_CAP_FLAG_ETH 0x80 - -/* when opcode modifier = 1 */ #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3 -#define QUERY_FUNC_CAP_RDMA_PROPS_OFFSET 0x8 #define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc -#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC 0x40 -#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN 0x80 - -#define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80 - if (vhcr->op_modifier == 1) { field = vhcr->in_modifier; MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); - field = 0; - /* ensure force vlan and force mac bits are not set */ + field = 0; /* ensure fvl bit is not set */ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET); - /* ensure that phy_wqe_gid bit is not set */ - 
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET); - } else if (vhcr->op_modifier == 0) { - /* enable rdma and ethernet interfaces */ - field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA); + field = 1 << 7; /* enable only ethernet interface */ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); field = dev->caps.num_ports; MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); - size = 0; /* no PF behaviour is set for now */ + size = 0; /* no PF behavious is set for now */ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET); - field = 0; /* protected FMR support not available as yet */ - MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET); - size = dev->caps.num_qps; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); @@ -274,12 +253,11 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap) outbox = mailbox->buf; MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET); - if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) { - mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n"); + if (!(field & (1 << 7))) { + mlx4_err(dev, "The host doesn't support eth interface\n"); err = -EPROTONOSUPPORT; goto out; } - func_cap->flags = field; MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); func_cap->num_ports = field; @@ -318,27 +296,17 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap) if (err) goto out; - if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) { - MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET); - if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) { - mlx4_err(dev, "VLAN is enforced on this port\n"); - err = -EPROTONOSUPPORT; - goto out; - } + MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET); + if (field & (1 << 7)) { + mlx4_err(dev, "VLAN is enforced on this port\n"); + err = -EPROTONOSUPPORT; + goto out; + } - if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) { - mlx4_err(dev, "Force mac is enabled on this port\n"); - err = -EPROTONOSUPPORT; - goto out; - } - } else if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB) { - MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET); - if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) { - mlx4_err(dev, "phy_wqe_gid is " - "enforced on this ib port\n"); - err = -EPROTONOSUPPORT; - goto out; - } + if (field & (1 << 6)) { + mlx4_err(dev, "Force mac is enabled on this port\n"); + err = -EPROTONOSUPPORT; + goto out; } MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); @@ -730,12 +698,14 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, { u64 def_mac; u8 port_type; - u16 short_field; int err; -#define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0 -#define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c -#define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e +#define MLX4_PORT_SUPPORT_IB (1 << 0) +#define MLX4_PORT_SUGGEST_TYPE (1 << 3) +#define MLX4_PORT_DEFAULT_SENSE (1 << 4) +#define MLX4_VF_PORT_ETH_ONLY_MASK (0xff & ~MLX4_PORT_SUPPORT_IB & \ + ~MLX4_PORT_SUGGEST_TYPE & \ + ~MLX4_PORT_DEFAULT_SENSE) err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, @@ -751,58 +721,20 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, MLX4_GET(port_type, outbox->buf, QUERY_PORT_SUPPORTED_TYPE_OFFSET); - /* No link sensing allowed */ - port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK; - /* set port type to currently operating port type */ - port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3); + /* Allow only Eth port, no link sensing 
allowed */ + port_type &= MLX4_VF_PORT_ETH_ONLY_MASK; + + /* check eth is enabled for this port */ + if (!(port_type & 2)) + mlx4_dbg(dev, "QUERY PORT: eth not supported by host"); MLX4_PUT(outbox->buf, port_type, QUERY_PORT_SUPPORTED_TYPE_OFFSET); - - short_field = 1; /* slave max gids */ - MLX4_PUT(outbox->buf, short_field, - QUERY_PORT_CUR_MAX_GID_OFFSET); - - short_field = dev->caps.pkey_table_len[vhcr->in_modifier]; - MLX4_PUT(outbox->buf, short_field, - QUERY_PORT_CUR_MAX_PKEY_OFFSET); } return err; } -int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port, - int *gid_tbl_len, int *pkey_tbl_len) -{ - struct mlx4_cmd_mailbox *mailbox; - u32 *outbox; - u16 field; - int err; - - mailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(mailbox)) - return PTR_ERR(mailbox); - - err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, - MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, - MLX4_CMD_WRAPPED); - if (err) - goto out; - - outbox = mailbox->buf; - - MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET); - *gid_tbl_len = field; - - MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET); - *pkey_tbl_len = field; - -out: - mlx4_free_cmd_mailbox(dev, mailbox); - return err; -} -EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len); - int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) { struct mlx4_cmd_mailbox *mailbox; @@ -949,12 +881,11 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev) ((fw_ver & 0xffff0000ull) >> 16) | ((fw_ver & 0x0000ffffull) << 16); - MLX4_GET(lg, outbox, QUERY_FW_PPF_ID); - dev->caps.function = lg; - if (mlx4_is_slave(dev)) goto out; + MLX4_GET(lg, outbox, QUERY_FW_PPF_ID); + dev->caps.function = lg; MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || @@ -1035,12 +966,9 @@ int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave, if (err) return err; - /* for slaves, set pci PPF ID to invalid and zero out everything - * else except FW version */ + /* for slaves, zero out everything except FW version */ outbuf[0] = outbuf[1] = 0; memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8); - outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID; - return 0; } diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/intf.c b/trunk/drivers/net/ethernet/mellanox/mlx4/intf.c index 116895ac8b35..b4e9f6f5cc04 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -115,8 +115,7 @@ void mlx4_unregister_interface(struct mlx4_interface *intf) } EXPORT_SYMBOL_GPL(mlx4_unregister_interface); -void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, - unsigned long param) +void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_device_context *dev_ctx; @@ -126,7 +125,7 @@ void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, list_for_each_entry(dev_ctx, &priv->ctx_list, list) if (dev_ctx->intf->event) - dev_ctx->intf->event(dev, dev_ctx->context, type, param); + dev_ctx->intf->event(dev, dev_ctx->context, type, port); spin_unlock_irqrestore(&priv->ctx_lock, flags); } diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/main.c b/trunk/drivers/net/ethernet/mellanox/mlx4/main.c index 5df3ac40a490..a0313de122de 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/main.c @@ -215,10 +215,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) for (i = 1; i <= dev->caps.num_ports; 
++i) { dev->caps.vl_cap[i] = dev_cap->max_vl[i]; dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i]; - dev->phys_caps.gid_phys_table_len[i] = dev_cap->max_gids[i]; - dev->phys_caps.pkey_phys_table_len[i] = dev_cap->max_pkeys[i]; - /* set gid and pkey table operating lengths by default - * to non-sriov values */ dev->caps.gid_table_len[i] = dev_cap->max_gids[i]; dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i]; dev->caps.port_width_cap[i] = dev_cap->max_port_width[i]; @@ -292,19 +288,29 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) /* if only ETH is supported - assign ETH */ if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH) dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; - /* if only IB is supported, assign IB */ + /* if only IB is supported, + * assign IB only if SRIOV is off*/ else if (dev->caps.supported_type[i] == - MLX4_PORT_TYPE_IB) - dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; + MLX4_PORT_TYPE_IB) { + if (dev->flags & MLX4_FLAG_SRIOV) + dev->caps.port_type[i] = + MLX4_PORT_TYPE_NONE; + else + dev->caps.port_type[i] = + MLX4_PORT_TYPE_IB; + /* if IB and ETH are supported, + * first of all check if SRIOV is on */ + } else if (dev->flags & MLX4_FLAG_SRIOV) + dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; else { - /* if IB and ETH are supported, we set the port - * type according to user selection of port type; - * if user selected none, take the FW hint */ - if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE) + /* In non-SRIOV mode, we set the port type + * according to user selection of port type, + * if usere selected none, take the FW hint */ + if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE) dev->caps.port_type[i] = dev->caps.suggested_type[i] ? MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB; else - dev->caps.port_type[i] = port_type_array[i - 1]; + dev->caps.port_type[i] = port_type_array[i-1]; } } /* @@ -385,23 +391,6 @@ static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) return ret; } -int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey) -{ - u32 qk = MLX4_RESERVED_QKEY_BASE; - if (qpn >= dev->caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX || - qpn < dev->caps.sqp_start) - return -EINVAL; - - if (qpn >= dev->caps.base_tunnel_sqpn) - /* tunnel qp */ - qk += qpn - dev->caps.base_tunnel_sqpn; - else - qk += qpn - dev->caps.sqp_start; - *qkey = qk; - return 0; -} -EXPORT_SYMBOL(mlx4_get_parav_qkey); - int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -502,13 +491,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) return -ENODEV; } - for (i = 1; i <= dev->caps.num_ports; ++i) { + for (i = 1; i <= dev->caps.num_ports; ++i) dev->caps.port_mask[i] = dev->caps.port_type[i]; - if (mlx4_get_slave_pkey_gid_tbl_len(dev, i, - &dev->caps.gid_table_len[i], - &dev->caps.pkey_table_len[i])) - return -ENODEV; - } if (dev->caps.uar_page_size * (dev->caps.num_uars - dev->caps.reserved_uars) > @@ -545,7 +529,7 @@ int mlx4_change_port_types(struct mlx4_dev *dev, for (port = 1; port <= dev->caps.num_ports; port++) { mlx4_CLOSE_PORT(dev, port); dev->caps.port_type[port] = port_types[port - 1]; - err = mlx4_SET_PORT(dev, port, -1); + err = mlx4_SET_PORT(dev, port); if (err) { mlx4_err(dev, "Failed to set port %d, " "aborting\n", port); @@ -731,7 +715,7 @@ static ssize_t set_port_ib_mtu(struct device *dev, mlx4_unregister_device(mdev); for (port = 1; port <= mdev->caps.num_ports; port++) { mlx4_CLOSE_PORT(mdev, port); - err = mlx4_SET_PORT(mdev, port, -1); + err = mlx4_SET_PORT(mdev, port); if (err) { mlx4_err(mdev, 
"Failed to set port %d, " "aborting\n", port); @@ -1182,17 +1166,6 @@ static int mlx4_init_slave(struct mlx4_dev *dev) return -EIO; } -static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev) -{ - int i; - - for (i = 1; i <= dev->caps.num_ports; i++) { - dev->caps.gid_table_len[i] = 1; - dev->caps.pkey_table_len[i] = - dev->phys_caps.pkey_phys_table_len[i] - 1; - } -} - static int mlx4_init_hca(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -1232,9 +1205,6 @@ static int mlx4_init_hca(struct mlx4_dev *dev) goto err_stop_fw; } - if (mlx4_is_master(dev)) - mlx4_parav_master_pf_caps(dev); - profile = default_profile; icm_size = mlx4_make_profile(dev, &profile, &dev_cap, @@ -1507,24 +1477,12 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) "with caps = 0\n", port, err); dev->caps.ib_port_def_cap[port] = ib_port_default_caps; - /* initialize per-slave default ib port capabilities */ - if (mlx4_is_master(dev)) { - int i; - for (i = 0; i < dev->num_slaves; i++) { - if (i == mlx4_master_func_num(dev)) - continue; - priv->mfunc.master.slave_state[i].ib_cap_mask[port] = - ib_port_default_caps; - } - } - if (mlx4_is_mfunc(dev)) dev->caps.port_ib_mtu[port] = IB_MTU_2048; else dev->caps.port_ib_mtu[port] = IB_MTU_4096; - err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ? - dev->caps.pkey_table_len[port] : -1); + err = mlx4_SET_PORT(dev, port); if (err) { mlx4_err(dev, "Failed to set port %d, aborting\n", port); diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4.h index cde6e511899f..e5d20220762c 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -338,6 +338,66 @@ struct mlx4_srq_context { __be64 db_rec_addr; }; +struct mlx4_eqe { + u8 reserved1; + u8 type; + u8 reserved2; + u8 subtype; + union { + u32 raw[6]; + struct { + __be32 cqn; + } __packed comp; + struct { + u16 reserved1; + __be16 token; + u32 reserved2; + u8 reserved3[3]; + u8 status; + __be64 out_param; + } __packed cmd; + struct { + __be32 qpn; + } __packed qp; + struct { + __be32 srqn; + } __packed srq; + struct { + __be32 cqn; + u32 reserved1; + u8 reserved2[3]; + u8 syndrome; + } __packed cq_err; + struct { + u32 reserved1[2]; + __be32 port; + } __packed port_change; + struct { + #define COMM_CHANNEL_BIT_ARRAY_SIZE 4 + u32 reserved; + u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE]; + } __packed comm_channel_arm; + struct { + u8 port; + u8 reserved[3]; + __be64 mac; + } __packed mac_update; + struct { + u8 port; + } __packed sw_event; + struct { + __be32 slave_id; + } __packed flr_event; + struct { + __be16 current_temperature; + __be16 warning_threshold; + } __packed warming; + } event; + u8 slave_id; + u8 reserved3[2]; + u8 owner; +} __packed; + struct mlx4_eq { struct mlx4_dev *dev; void __iomem *doorbell; @@ -827,8 +887,7 @@ void mlx4_catas_init(void); int mlx4_restart_one(struct pci_dev *pdev); int mlx4_register_device(struct mlx4_dev *dev); void mlx4_unregister_device(struct mlx4_dev *dev); -void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, - unsigned long param); +void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port); struct mlx4_dev_cap; struct mlx4_init_hca_param; @@ -969,7 +1028,7 @@ int mlx4_change_port_types(struct mlx4_dev *dev, void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); -int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, 
int pkey_tbl_sz); +int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); /* resource tracker functions*/ int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, enum mlx4_resource resource_type, @@ -1012,8 +1071,6 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_info *cmd); int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); -int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port, - int *gid_tbl_len, int *pkey_tbl_len); int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/port.c b/trunk/drivers/net/ethernet/mellanox/mlx4/port.c index 90dc47542b8b..a8fb52992c64 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/port.c @@ -726,15 +726,14 @@ int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, enum { MLX4_SET_PORT_VL_CAP = 4, /* bits 7:4 */ MLX4_SET_PORT_MTU_CAP = 12, /* bits 15:12 */ - MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20, MLX4_CHANGE_PORT_VL_CAP = 21, MLX4_CHANGE_PORT_MTU_CAP = 22, }; -int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz) +int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) { struct mlx4_cmd_mailbox *mailbox; - int err, vl_cap, pkey_tbl_flag = 0; + int err, vl_cap; if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) return 0; @@ -747,17 +746,11 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz) ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; - if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) { - pkey_tbl_flag = 1; - ((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz); - } - /* IB VL CAP enum isn't used by the firmware, just numerical values */ for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) { ((__be32 *) mailbox->buf)[0] = cpu_to_be32( (1 << MLX4_CHANGE_PORT_MTU_CAP) | (1 << MLX4_CHANGE_PORT_VL_CAP) | - (pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) | (dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) | (vl_cap << MLX4_SET_PORT_VL_CAP)); err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, diff --git a/trunk/include/linux/mlx4/device.h b/trunk/include/linux/mlx4/device.h index 441caf1a497d..6a8f002b8ed3 100644 --- a/trunk/include/linux/mlx4/device.h +++ b/trunk/include/linux/mlx4/device.h @@ -56,13 +56,6 @@ enum { MLX4_MAX_PORTS = 2 }; -/* base qkey for use in sriov tunnel-qp/proxy-qp communication. - * These qkeys must not be allowed for general use. This is a 64k range, - * and to test for violation, we use the mask (protect against future chg). 
- */
-#define MLX4_RESERVED_QKEY_BASE (0xFFFF0000)
-#define MLX4_RESERVED_QKEY_MASK (0xFFFF0000)
-
 enum {
 MLX4_BOARD_ID_LEN = 64
 };
@@ -103,8 +96,7 @@ enum {
 MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41,
 MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42,
 MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48,
- MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55,
- MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59,
+ MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55
 };

 enum {
@@ -146,7 +138,6 @@ enum mlx4_event {
 MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18,
 MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b,
 MLX4_EVENT_TYPE_FLR_EVENT = 0x1c,
- MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d,
 MLX4_EVENT_TYPE_NONE = 0xff,
 };

@@ -244,32 +235,12 @@ enum {
 MLX4_MAX_FAST_REG_PAGES = 511,
 };

-enum {
- MLX4_DEV_PMC_SUBTYPE_GUID_INFO = 0x14,
- MLX4_DEV_PMC_SUBTYPE_PORT_INFO = 0x15,
- MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE = 0x16,
-};
-
-/* Port mgmt change event handling */
-enum {
- MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK = 1 << 0,
- MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK = 1 << 1,
- MLX4_EQ_PORT_INFO_LID_CHANGE_MASK = 1 << 2,
- MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK = 1 << 3,
- MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK = 1 << 4,
-};
-
-#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
- MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
-
 static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
 {
 return (major << 32) | (minor << 16) | subminor;
 }

 struct mlx4_phys_caps {
- u32 gid_phys_table_len[MLX4_MAX_PORTS + 1];
- u32 pkey_phys_table_len[MLX4_MAX_PORTS + 1];
 u32 num_phys_eqs;
 };

@@ -302,8 +273,6 @@ struct mlx4_caps {
 int max_qp_init_rdma;
 int max_qp_dest_rdma;
 int sqp_start;
- u32 base_sqpn;
- u32 base_tunnel_sqpn;
 int num_srqs;
 int max_srq_wqes;
 int max_srq_sge;
@@ -542,81 +511,6 @@ struct mlx4_dev {
 int num_vfs;
 };

-struct mlx4_eqe {
- u8 reserved1;
- u8 type;
- u8 reserved2;
- u8 subtype;
- union {
- u32 raw[6];
- struct {
- __be32 cqn;
- } __packed comp;
- struct {
- u16 reserved1;
- __be16 token;
- u32 reserved2;
- u8 reserved3[3];
- u8 status;
- __be64 out_param;
- } __packed cmd;
- struct {
- __be32 qpn;
- } __packed qp;
- struct {
- __be32 srqn;
- } __packed srq;
- struct {
- __be32 cqn;
- u32 reserved1;
- u8 reserved2[3];
- u8 syndrome;
- } __packed cq_err;
- struct {
- u32 reserved1[2];
- __be32 port;
- } __packed port_change;
- struct {
- #define COMM_CHANNEL_BIT_ARRAY_SIZE 4
- u32 reserved;
- u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
- } __packed comm_channel_arm;
- struct {
- u8 port;
- u8 reserved[3];
- __be64 mac;
- } __packed mac_update;
- struct {
- __be32 slave_id;
- } __packed flr_event;
- struct {
- __be16 current_temperature;
- __be16 warning_threshold;
- } __packed warming;
- struct {
- u8 reserved[3];
- u8 port;
- union {
- struct {
- __be16 mstr_sm_lid;
- __be16 port_lid;
- __be32 changed_attr;
- u8 reserved[3];
- u8 mstr_sm_sl;
- __be64 gid_prefix;
- } __packed port_info;
- struct {
- __be32 block_ptr;
- __be32 tbl_entries_mask;
- } __packed tbl_change_info;
- } params;
- } __packed port_mgmt_change;
- } event;
- u8 slave_id;
- u8 reserved3[2];
- u8 owner;
-} __packed;
-
 struct mlx4_init_port_param {
 int set_guid0;
 int set_node_guid;
@@ -640,15 +534,6 @@ struct mlx4_init_port_param {
 if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
 ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
-#define MLX4_INVALID_SLAVE_ID 0xFF
-
-void handle_port_mgmt_change_event(struct work_struct *work);
-
-static inline int mlx4_master_func_num(struct mlx4_dev *dev)
-{
- return dev->caps.function;
-}
-
 static inline int mlx4_is_master(struct mlx4_dev *dev)
 {
 return dev->flags & MLX4_FLAG_MASTER;
 }
@@ -783,6 +668,4 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);

-int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey);
-
 #endif /* MLX4_DEVICE_H */
diff --git a/trunk/include/linux/mlx4/driver.h b/trunk/include/linux/mlx4/driver.h
index 0f509229fb3d..5f1298b1b5ef 100644
--- a/trunk/include/linux/mlx4/driver.h
+++ b/trunk/include/linux/mlx4/driver.h
@@ -42,14 +42,13 @@ enum mlx4_dev_event {
 MLX4_DEV_EVENT_PORT_UP,
 MLX4_DEV_EVENT_PORT_DOWN,
 MLX4_DEV_EVENT_PORT_REINIT,
- MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
 };

 struct mlx4_interface {
 void * (*add) (struct mlx4_dev *dev);
 void (*remove)(struct mlx4_dev *dev, void *context);
 void (*event) (struct mlx4_dev *dev, void *context,
- enum mlx4_dev_event event, unsigned long param);
+ enum mlx4_dev_event event, int port);
 void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
 struct list_head list;
 enum mlx4_protocol protocol;
diff --git a/trunk/include/rdma/ib_cm.h b/trunk/include/rdma/ib_cm.h
index 0e3ff30647d5..83f77ac33957 100644
--- a/trunk/include/rdma/ib_cm.h
+++ b/trunk/include/rdma/ib_cm.h
@@ -262,18 +262,6 @@ struct ib_cm_event {
 void *private_data;
 };

-#define CM_REQ_ATTR_ID cpu_to_be16(0x0010)
-#define CM_MRA_ATTR_ID cpu_to_be16(0x0011)
-#define CM_REJ_ATTR_ID cpu_to_be16(0x0012)
-#define CM_REP_ATTR_ID cpu_to_be16(0x0013)
-#define CM_RTU_ATTR_ID cpu_to_be16(0x0014)
-#define CM_DREQ_ATTR_ID cpu_to_be16(0x0015)
-#define CM_DREP_ATTR_ID cpu_to_be16(0x0016)
-#define CM_SIDR_REQ_ATTR_ID cpu_to_be16(0x0017)
-#define CM_SIDR_REP_ATTR_ID cpu_to_be16(0x0018)
-#define CM_LAP_ATTR_ID cpu_to_be16(0x0019)
-#define CM_APR_ATTR_ID cpu_to_be16(0x001A)
-
 /**
 * ib_cm_handler - User-defined callback to process communication events.
 * @cm_id: Communication identifier associated with the reported event.
diff --git a/trunk/include/rdma/ib_sa.h b/trunk/include/rdma/ib_sa.h
index 8275e539bace..d44a56388a3e 100644
--- a/trunk/include/rdma/ib_sa.h
+++ b/trunk/include/rdma/ib_sa.h
@@ -251,28 +251,6 @@ struct ib_sa_service_rec {
 u64 data64[2];
 };

-#define IB_SA_GUIDINFO_REC_LID IB_SA_COMP_MASK(0)
-#define IB_SA_GUIDINFO_REC_BLOCK_NUM IB_SA_COMP_MASK(1)
-#define IB_SA_GUIDINFO_REC_RES1 IB_SA_COMP_MASK(2)
-#define IB_SA_GUIDINFO_REC_RES2 IB_SA_COMP_MASK(3)
-#define IB_SA_GUIDINFO_REC_GID0 IB_SA_COMP_MASK(4)
-#define IB_SA_GUIDINFO_REC_GID1 IB_SA_COMP_MASK(5)
-#define IB_SA_GUIDINFO_REC_GID2 IB_SA_COMP_MASK(6)
-#define IB_SA_GUIDINFO_REC_GID3 IB_SA_COMP_MASK(7)
-#define IB_SA_GUIDINFO_REC_GID4 IB_SA_COMP_MASK(8)
-#define IB_SA_GUIDINFO_REC_GID5 IB_SA_COMP_MASK(9)
-#define IB_SA_GUIDINFO_REC_GID6 IB_SA_COMP_MASK(10)
-#define IB_SA_GUIDINFO_REC_GID7 IB_SA_COMP_MASK(11)
-
-struct ib_sa_guidinfo_rec {
- __be16 lid;
- u8 block_num;
- /* reserved */
- u8 res1;
- __be32 res2;
- u8 guid_info_list[64];
-};
-
 struct ib_sa_client {
 atomic_t users;
 struct completion comp;
@@ -407,15 +385,4 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
 */
 void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec);

-/* Support GuidInfoRecord */
-int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
- struct ib_sa_guidinfo_rec *rec,
- ib_sa_comp_mask comp_mask, u8 method,
- int timeout_ms, gfp_t gfp_mask,
- void (*callback)(int status,
- struct ib_sa_guidinfo_rec *resp,
- void *context),
- void *context,
- struct ib_sa_query **sa_query);
 #endif /* IB_SA_H */