From 336ba09f2ef71b82f07c1200be0ddf4eb923d69f Mon Sep 17 00:00:00 2001 From: Karsten Graul Date: Sun, 3 May 2020 14:38:40 +0200 Subject: [PATCH 01/11] net/smc: first part of add link processing as SMC client First set of functions to process an ADD_LINK LLC request as an SMC client. Find an alternate IB device, determine the new link group type and get the index for the new link. Then ready the link, map the buffers and send an ADD_LINK LLC response. If any error occurs, send a reject LLC message and terminate the processing. Add smc_llc_alloc_alt_link() to find a free link index for a new link, depending on the new link group type. Signed-off-by: Karsten Graul Reviewed-by: Ursula Braun Signed-off-by: David S. Miller --- net/smc/smc_core.c | 4 +- net/smc/smc_core.h | 2 + net/smc/smc_llc.c | 107 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 111 insertions(+), 2 deletions(-) diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 60c708f6de512..2f8faa9c9e8e4 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -273,8 +273,8 @@ static u8 smcr_next_link_id(struct smc_link_group *lgr) return link_id; } -static int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk, - u8 link_idx, struct smc_init_info *ini) +int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk, + u8 link_idx, struct smc_init_info *ini) { u8 rndvec[3]; int rc; diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index 555ada9d24232..4e00819e2db7a 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -374,6 +374,8 @@ void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr); int smc_core_init(void); void smc_core_exit(void); +int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk, + u8 link_idx, struct smc_init_info *ini); void smcr_link_clear(struct smc_link *lnk); int smcr_buf_map_lgr(struct smc_link *lnk); int smcr_buf_reg_lgr(struct smc_link *lnk); diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index 4e3db4d4b7832..8716d8739329b 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -17,6 +17,7 @@ #include "smc_core.h" #include "smc_clc.h" #include "smc_llc.h" +#include "smc_pnet.h" #define SMC_LLC_DATA_LEN 40 @@ -541,6 +542,112 @@ static int smc_llc_send_message(struct smc_link *link, void *llcbuf) /********************************* receive ***********************************/ +static int smc_llc_alloc_alt_link(struct smc_link_group *lgr, + enum smc_lgr_type lgr_new_t) +{ + int i; + + if (lgr->type == SMC_LGR_SYMMETRIC || + (lgr->type != SMC_LGR_SINGLE && + (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL || + lgr_new_t == SMC_LGR_ASYMMETRIC_PEER))) + return -EMLINK; + + if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL || + lgr_new_t == SMC_LGR_ASYMMETRIC_PEER) { + for (i = SMC_LINKS_PER_LGR_MAX - 1; i >= 0; i--) + if (lgr->lnk[i].state == SMC_LNK_UNUSED) + return i; + } else { + for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) + if (lgr->lnk[i].state == SMC_LNK_UNUSED) + return i; + } + return -EMLINK; +} + +/* prepare and send an add link reject response */ +static int smc_llc_cli_add_link_reject(struct smc_llc_qentry *qentry) +{ + qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP; + qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_ADD_LNK_REJ; + qentry->msg.raw.hdr.add_link_rej_rsn = SMC_LLC_REJ_RSN_NO_ALT_PATH; + return smc_llc_send_message(qentry->link, &qentry->msg); +} + +static void smc_llc_save_add_link_info(struct smc_link *link, + struct smc_llc_msg_add_link *add_llc) +{ + link->peer_qpn = ntoh24(add_llc->sender_qp_num); + memcpy(link->peer_gid, 
add_llc->sender_gid, SMC_GID_SIZE); + memcpy(link->peer_mac, add_llc->sender_mac, ETH_ALEN); + link->peer_psn = ntoh24(add_llc->initial_psn); + link->peer_mtu = add_llc->qp_mtu; +} + +/* as an SMC client, process an add link request */ +int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry) +{ + struct smc_llc_msg_add_link *llc = &qentry->msg.add_link; + enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC; + struct smc_link_group *lgr = smc_get_lgr(link); + struct smc_link *lnk_new = NULL; + struct smc_init_info ini; + int lnk_idx, rc = 0; + + ini.vlan_id = lgr->vlan_id; + smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev); + if (!memcmp(llc->sender_gid, link->peer_gid, SMC_GID_SIZE) && + !memcmp(llc->sender_mac, link->peer_mac, ETH_ALEN)) { + if (!ini.ib_dev) + goto out_reject; + lgr_new_t = SMC_LGR_ASYMMETRIC_PEER; + } + if (!ini.ib_dev) { + lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL; + ini.ib_dev = link->smcibdev; + ini.ib_port = link->ibport; + } + lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t); + if (lnk_idx < 0) + goto out_reject; + lnk_new = &lgr->lnk[lnk_idx]; + rc = smcr_link_init(lgr, lnk_new, lnk_idx, &ini); + if (rc) + goto out_reject; + smc_llc_save_add_link_info(lnk_new, llc); + lnk_new->link_id = llc->link_num; + + rc = smc_ib_ready_link(lnk_new); + if (rc) + goto out_clear_lnk; + + rc = smcr_buf_map_lgr(lnk_new); + if (rc) + goto out_clear_lnk; + + rc = smc_llc_send_add_link(link, + lnk_new->smcibdev->mac[ini.ib_port - 1], + lnk_new->gid, lnk_new, SMC_LLC_RESP); + if (rc) + goto out_clear_lnk; + /* tbd: rc = smc_llc_cli_rkey_exchange(link, lnk_new); */ + if (rc) { + rc = 0; + goto out_clear_lnk; + } + /* tbd: rc = smc_llc_cli_conf_link(link, &ini, lnk_new, lgr_new_t); */ + if (!rc) + goto out; +out_clear_lnk: + smcr_link_clear(lnk_new); +out_reject: + smc_llc_cli_add_link_reject(qentry); +out: + kfree(qentry); + return rc; +} + /* worker to process an add link message */ static void smc_llc_add_link_work(struct work_struct *work) { From 87f88cda2128a72d79d4cc700729488af1081a06 Mon Sep 17 00:00:00 2001 From: Karsten Graul Date: Sun, 3 May 2020 14:38:41 +0200 Subject: [PATCH 02/11] net/smc: rkey processing for a new link as SMC client Part of the SMC client new link establishment process is the exchange of rkeys for all used buffers. Add new LLC message type ADD_LINK_CONTINUE which is used to exchange rkeys of all current RMB buffers. Add functions to iterate over all used RMB buffers of the link group, and implement the ADD_LINK_CONTINUE processing. Signed-off-by: Karsten Graul Reviewed-by: Ursula Braun Signed-off-by: David S. 
Miller --- net/smc/smc_llc.c | 157 +++++++++++++++++++++++++++++++++++++++++++++- net/smc/smc_llc.h | 1 + 2 files changed, 157 insertions(+), 1 deletion(-) diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index 8716d8739329b..a06b618f172e3 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -70,6 +70,23 @@ struct smc_llc_msg_add_link { /* type 0x02 */ u8 reserved[8]; }; +struct smc_llc_msg_add_link_cont_rt { + __be32 rmb_key; + __be32 rmb_key_new; + __be64 rmb_vaddr_new; +}; + +#define SMC_LLC_RKEYS_PER_CONT_MSG 2 + +struct smc_llc_msg_add_link_cont { /* type 0x03 */ + struct smc_llc_hdr hd; + u8 link_num; + u8 num_rkeys; + u8 reserved2[2]; + struct smc_llc_msg_add_link_cont_rt rt[SMC_LLC_RKEYS_PER_CONT_MSG]; + u8 reserved[4]; +} __packed; /* format defined in RFC7609 */ + #define SMC_LLC_FLAG_DEL_LINK_ALL 0x40 #define SMC_LLC_FLAG_DEL_LINK_ORDERLY 0x20 @@ -121,6 +138,7 @@ struct smc_llc_msg_delete_rkey { /* type 0x09 */ union smc_llc_msg { struct smc_llc_msg_confirm_link confirm_link; struct smc_llc_msg_add_link add_link; + struct smc_llc_msg_add_link_cont add_link_cont; struct smc_llc_msg_del_link delete_link; struct smc_llc_msg_confirm_rkey confirm_rkey; @@ -566,6 +584,137 @@ static int smc_llc_alloc_alt_link(struct smc_link_group *lgr, return -EMLINK; } +/* return first buffer from any of the next buf lists */ +static struct smc_buf_desc *_smc_llc_get_next_rmb(struct smc_link_group *lgr, + int *buf_lst) +{ + struct smc_buf_desc *buf_pos; + + while (*buf_lst < SMC_RMBE_SIZES) { + buf_pos = list_first_entry_or_null(&lgr->rmbs[*buf_lst], + struct smc_buf_desc, list); + if (buf_pos) + return buf_pos; + (*buf_lst)++; + } + return NULL; +} + +/* return next rmb from buffer lists */ +static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr, + int *buf_lst, + struct smc_buf_desc *buf_pos) +{ + struct smc_buf_desc *buf_next; + + if (!buf_pos || list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) { + (*buf_lst)++; + return _smc_llc_get_next_rmb(lgr, buf_lst); + } + buf_next = list_next_entry(buf_pos, list); + return buf_next; +} + +static struct smc_buf_desc *smc_llc_get_first_rmb(struct smc_link_group *lgr, + int *buf_lst) +{ + *buf_lst = 0; + return smc_llc_get_next_rmb(lgr, buf_lst, NULL); +} + +/* send one add_link_continue msg */ +static int smc_llc_add_link_cont(struct smc_link *link, + struct smc_link *link_new, u8 *num_rkeys_todo, + int *buf_lst, struct smc_buf_desc **buf_pos) +{ + struct smc_llc_msg_add_link_cont *addc_llc; + struct smc_link_group *lgr = link->lgr; + int prim_lnk_idx, lnk_idx, i, rc; + struct smc_wr_tx_pend_priv *pend; + struct smc_wr_buf *wr_buf; + struct smc_buf_desc *rmb; + u8 n; + + rc = smc_llc_add_pending_send(link, &wr_buf, &pend); + if (rc) + return rc; + addc_llc = (struct smc_llc_msg_add_link_cont *)wr_buf; + memset(addc_llc, 0, sizeof(*addc_llc)); + + prim_lnk_idx = link->link_idx; + lnk_idx = link_new->link_idx; + addc_llc->link_num = link_new->link_id; + addc_llc->num_rkeys = *num_rkeys_todo; + n = *num_rkeys_todo; + for (i = 0; i < min_t(u8, n, SMC_LLC_RKEYS_PER_CONT_MSG); i++) { + if (!*buf_pos) { + addc_llc->num_rkeys = addc_llc->num_rkeys - + *num_rkeys_todo; + *num_rkeys_todo = 0; + break; + } + rmb = *buf_pos; + + addc_llc->rt[i].rmb_key = htonl(rmb->mr_rx[prim_lnk_idx]->rkey); + addc_llc->rt[i].rmb_key_new = htonl(rmb->mr_rx[lnk_idx]->rkey); + addc_llc->rt[i].rmb_vaddr_new = + cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl)); + + (*num_rkeys_todo)--; + *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos); + while 
(*buf_pos && !(*buf_pos)->used) + *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos); + } + addc_llc->hd.common.type = SMC_LLC_ADD_LINK_CONT; + addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont); + if (lgr->role == SMC_CLNT) + addc_llc->hd.flags |= SMC_LLC_FLAG_RESP; + return smc_wr_tx_send(link, pend); +} + +static int smc_llc_cli_rkey_exchange(struct smc_link *link, + struct smc_link *link_new) +{ + struct smc_llc_msg_add_link_cont *addc_llc; + struct smc_link_group *lgr = link->lgr; + u8 max, num_rkeys_send, num_rkeys_recv; + struct smc_llc_qentry *qentry; + struct smc_buf_desc *buf_pos; + int buf_lst; + int rc = 0; + int i; + + mutex_lock(&lgr->rmbs_lock); + num_rkeys_send = lgr->conns_num; + buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst); + do { + qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_TIME, + SMC_LLC_ADD_LINK_CONT); + if (!qentry) { + rc = -ETIMEDOUT; + break; + } + addc_llc = &qentry->msg.add_link_cont; + num_rkeys_recv = addc_llc->num_rkeys; + max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG); + for (i = 0; i < max; i++) { + smc_rtoken_set(lgr, link->link_idx, link_new->link_idx, + addc_llc->rt[i].rmb_key, + addc_llc->rt[i].rmb_vaddr_new, + addc_llc->rt[i].rmb_key_new); + num_rkeys_recv--; + } + smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); + rc = smc_llc_add_link_cont(link, link_new, &num_rkeys_send, + &buf_lst, &buf_pos); + if (rc) + break; + } while (num_rkeys_send || num_rkeys_recv); + + mutex_unlock(&lgr->rmbs_lock); + return rc; +} + /* prepare and send an add link reject response */ static int smc_llc_cli_add_link_reject(struct smc_llc_qentry *qentry) { @@ -631,7 +780,7 @@ int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry) lnk_new->gid, lnk_new, SMC_LLC_RESP); if (rc) goto out_clear_lnk; - /* tbd: rc = smc_llc_cli_rkey_exchange(link, lnk_new); */ + rc = smc_llc_cli_rkey_exchange(link, lnk_new); if (rc) { rc = 0; goto out_clear_lnk; @@ -794,6 +943,7 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry) } return; case SMC_LLC_CONFIRM_LINK: + case SMC_LLC_ADD_LINK_CONT: if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) { /* a flow is waiting for this message */ smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry); @@ -873,6 +1023,7 @@ static void smc_llc_rx_response(struct smc_link *link, break; case SMC_LLC_ADD_LINK: case SMC_LLC_CONFIRM_LINK: + case SMC_LLC_ADD_LINK_CONT: case SMC_LLC_CONFIRM_RKEY: case SMC_LLC_DELETE_RKEY: /* assign responses to the local flow, we requested them */ @@ -1092,6 +1243,10 @@ static struct smc_wr_rx_handler smc_llc_rx_handlers[] = { .handler = smc_llc_rx_handler, .type = SMC_LLC_ADD_LINK }, + { + .handler = smc_llc_rx_handler, + .type = SMC_LLC_ADD_LINK_CONT + }, { .handler = smc_llc_rx_handler, .type = SMC_LLC_DELETE_LINK diff --git a/net/smc/smc_llc.h b/net/smc/smc_llc.h index 4ed4486e50823..97a4f02f5a93e 100644 --- a/net/smc/smc_llc.h +++ b/net/smc/smc_llc.h @@ -28,6 +28,7 @@ enum smc_llc_reqresp { enum smc_llc_msg_type { SMC_LLC_CONFIRM_LINK = 0x01, SMC_LLC_ADD_LINK = 0x02, + SMC_LLC_ADD_LINK_CONT = 0x03, SMC_LLC_DELETE_LINK = 0x04, SMC_LLC_CONFIRM_RKEY = 0x06, SMC_LLC_TEST_LINK = 0x07, From b1570a87f57e94e9f74b8942840f9bd16bd1aba5 Mon Sep 17 00:00:00 2001 From: Karsten Graul Date: Sun, 3 May 2020 14:38:42 +0200 Subject: [PATCH 03/11] net/smc: final part of add link processing as SMC client This patch finalizes the ADD_LINK processing of new links. 
Receive the CONFIRM_LINK request from peer, complete the link initialization, register all used buffers with the IB device and finally send the CONFIRM_LINK response, which completes the ADD_LINK processing. And activate smc_llc_cli_add_link() in af_smc.c. Signed-off-by: Karsten Graul Reviewed-by: Ursula Braun Signed-off-by: David S. Miller --- net/smc/af_smc.c | 2 +- net/smc/smc_llc.c | 73 +++++++++++++++++++++++++++++++++++++++++++++-- net/smc/smc_llc.h | 1 + 3 files changed, 72 insertions(+), 4 deletions(-) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 6663a63be9e4c..1afb6e4275f22 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -427,7 +427,7 @@ static int smcr_clnt_conf_first_link(struct smc_sock *smc) return rc; } smc_llc_flow_qentry_clr(&link->lgr->llc_flow_lcl); - /* tbd: call smc_llc_cli_add_link(link, qentry); */ + smc_llc_cli_add_link(link, qentry); return 0; } diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index a06b618f172e3..d56ca60597d42 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -381,7 +381,7 @@ int smc_llc_send_confirm_link(struct smc_link *link, hton24(confllc->sender_qp_num, link->roce_qp->qp_num); confllc->link_num = link->link_id; memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE); - confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS; /* enforce peer resp. */ + confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS; /* send llc message */ rc = smc_wr_tx_send(link, pend); return rc; @@ -724,6 +724,61 @@ static int smc_llc_cli_add_link_reject(struct smc_llc_qentry *qentry) return smc_llc_send_message(qentry->link, &qentry->msg); } +static int smc_llc_cli_conf_link(struct smc_link *link, + struct smc_init_info *ini, + struct smc_link *link_new, + enum smc_lgr_type lgr_new_t) +{ + struct smc_link_group *lgr = link->lgr; + struct smc_llc_msg_del_link *del_llc; + struct smc_llc_qentry *qentry = NULL; + int rc = 0; + + /* receive CONFIRM LINK request over RoCE fabric */ + qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_FIRST_TIME, 0); + if (!qentry) { + rc = smc_llc_send_delete_link(link, link_new->link_id, + SMC_LLC_REQ, false, + SMC_LLC_DEL_LOST_PATH); + return -ENOLINK; + } + if (qentry->msg.raw.hdr.common.type != SMC_LLC_CONFIRM_LINK) { + /* received DELETE_LINK instead */ + del_llc = &qentry->msg.delete_link; + qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP; + smc_llc_send_message(link, &qentry->msg); + smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); + return -ENOLINK; + } + smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); + + rc = smc_ib_modify_qp_rts(link_new); + if (rc) { + smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ, + false, SMC_LLC_DEL_LOST_PATH); + return -ENOLINK; + } + smc_wr_remember_qp_attr(link_new); + + rc = smcr_buf_reg_lgr(link_new); + if (rc) { + smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ, + false, SMC_LLC_DEL_LOST_PATH); + return -ENOLINK; + } + + /* send CONFIRM LINK response over RoCE fabric */ + rc = smc_llc_send_confirm_link(link_new, SMC_LLC_RESP); + if (rc) { + smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ, + false, SMC_LLC_DEL_LOST_PATH); + return -ENOLINK; + } + smc_llc_link_active(link_new); + lgr->type = lgr_new_t; + return 0; +} + static void smc_llc_save_add_link_info(struct smc_link *link, struct smc_llc_msg_add_link *add_llc) { @@ -785,7 +840,7 @@ int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry) rc = 0; goto out_clear_lnk; } - /* tbd: rc = smc_llc_cli_conf_link(link, &ini, lnk_new, lgr_new_t); */ + rc = smc_llc_cli_conf_link(link, &ini, lnk_new, 
lgr_new_t); if (!rc) goto out; out_clear_lnk: @@ -797,6 +852,17 @@ int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry) return rc; } +static void smc_llc_process_cli_add_link(struct smc_link_group *lgr) +{ + struct smc_llc_qentry *qentry; + + qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl); + + mutex_lock(&lgr->llc_conf_mutex); + smc_llc_cli_add_link(qentry->link, qentry); + mutex_unlock(&lgr->llc_conf_mutex); +} + /* worker to process an add link message */ static void smc_llc_add_link_work(struct work_struct *work) { @@ -809,7 +875,8 @@ static void smc_llc_add_link_work(struct work_struct *work) goto out; } - /* tbd: call smc_llc_process_cli_add_link(lgr); */ + if (lgr->role == SMC_CLNT) + smc_llc_process_cli_add_link(lgr); /* tbd: call smc_llc_process_srv_add_link(lgr); */ out: smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl); diff --git a/net/smc/smc_llc.h b/net/smc/smc_llc.h index 97a4f02f5a93e..7c314bbef8c80 100644 --- a/net/smc/smc_llc.h +++ b/net/smc/smc_llc.h @@ -88,6 +88,7 @@ struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr, int time_out, u8 exp_msg); struct smc_llc_qentry *smc_llc_flow_qentry_clr(struct smc_llc_flow *flow); void smc_llc_flow_qentry_del(struct smc_llc_flow *flow); +int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry); int smc_llc_init(void) __init; #endif /* SMC_LLC_H */ From 2d2209f2018943d4152a21eff5b76f1952e0b435 Mon Sep 17 00:00:00 2001 From: Karsten Graul Date: Sun, 3 May 2020 14:38:43 +0200 Subject: [PATCH 04/11] net/smc: first part of add link processing as SMC server First set of functions to process an ADD_LINK LLC request as an SMC server. Find an alternate IB device, determine the new link group type and get the index for the new link. Then initialize the link and send the ADD_LINK LLC message to the peer. Save the contents of the response, ready the link, map all used buffers and register the buffers with the IB device. If any error occurs, stop the processing and clear the link. And call smc_llc_srv_add_link() in af_smc.c to start second link establishment after the initial link of a link group was created. Signed-off-by: Karsten Graul Reviewed-by: Ursula Braun Signed-off-by: David S. 
Miller --- net/smc/af_smc.c | 2 +- net/smc/smc_llc.c | 91 ++++++++++++++++++++++++++++++++++++++++++++++- net/smc/smc_llc.h | 1 + 3 files changed, 92 insertions(+), 2 deletions(-) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 1afb6e4275f22..c67272007f411 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -1067,7 +1067,7 @@ static int smcr_serv_conf_first_link(struct smc_sock *smc) smc_llc_link_active(link); /* initial contact - try to establish second link */ - /* tbd: call smc_llc_srv_add_link(link); */ + smc_llc_srv_add_link(link); return 0; } diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index d56ca60597d42..e2f254e217594 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -863,6 +863,94 @@ static void smc_llc_process_cli_add_link(struct smc_link_group *lgr) mutex_unlock(&lgr->llc_conf_mutex); } +int smc_llc_srv_add_link(struct smc_link *link) +{ + enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC; + struct smc_link_group *lgr = link->lgr; + struct smc_llc_msg_add_link *add_llc; + struct smc_llc_qentry *qentry = NULL; + struct smc_link *link_new; + struct smc_init_info ini; + int lnk_idx, rc = 0; + + /* ignore client add link recommendation, start new flow */ + ini.vlan_id = lgr->vlan_id; + smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev); + if (!ini.ib_dev) { + lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL; + ini.ib_dev = link->smcibdev; + ini.ib_port = link->ibport; + } + lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t); + if (lnk_idx < 0) + return 0; + + rc = smcr_link_init(lgr, &lgr->lnk[lnk_idx], lnk_idx, &ini); + if (rc) + return rc; + link_new = &lgr->lnk[lnk_idx]; + rc = smc_llc_send_add_link(link, + link_new->smcibdev->mac[ini.ib_port - 1], + link_new->gid, link_new, SMC_LLC_REQ); + if (rc) + goto out_err; + /* receive ADD LINK response over the RoCE fabric */ + qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME, SMC_LLC_ADD_LINK); + if (!qentry) { + rc = -ETIMEDOUT; + goto out_err; + } + add_llc = &qentry->msg.add_link; + if (add_llc->hd.flags & SMC_LLC_FLAG_ADD_LNK_REJ) { + smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); + rc = -ENOLINK; + goto out_err; + } + if (lgr->type == SMC_LGR_SINGLE && + (!memcmp(add_llc->sender_gid, link->peer_gid, SMC_GID_SIZE) && + !memcmp(add_llc->sender_mac, link->peer_mac, ETH_ALEN))) { + lgr_new_t = SMC_LGR_ASYMMETRIC_PEER; + } + smc_llc_save_add_link_info(link_new, add_llc); + smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); + + rc = smc_ib_ready_link(link_new); + if (rc) + goto out_err; + rc = smcr_buf_map_lgr(link_new); + if (rc) + goto out_err; + rc = smcr_buf_reg_lgr(link_new); + if (rc) + goto out_err; + /* tbd: rc = smc_llc_srv_rkey_exchange(link, link_new); */ + if (rc) + goto out_err; + /* tbd: rc = smc_llc_srv_conf_link(link, link_new, lgr_new_t); */ + if (rc) + goto out_err; + return 0; +out_err: + smcr_link_clear(link_new); + return rc; +} + +static void smc_llc_process_srv_add_link(struct smc_link_group *lgr) +{ + struct smc_link *link = lgr->llc_flow_lcl.qentry->link; + int rc; + + smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); + + mutex_lock(&lgr->llc_conf_mutex); + rc = smc_llc_srv_add_link(link); + if (!rc && lgr->type == SMC_LGR_SYMMETRIC) { + /* delete any asymmetric link */ + /* tbd: smc_llc_delete_asym_link(lgr); */ + } + mutex_unlock(&lgr->llc_conf_mutex); +} + /* worker to process an add link message */ static void smc_llc_add_link_work(struct work_struct *work) { @@ -877,7 +965,8 @@ static void smc_llc_add_link_work(struct work_struct *work) if (lgr->role == SMC_CLNT) smc_llc_process_cli_add_link(lgr); - /* tbd: 
call smc_llc_process_srv_add_link(lgr); */ + else + smc_llc_process_srv_add_link(lgr); out: smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl); } diff --git a/net/smc/smc_llc.h b/net/smc/smc_llc.h index 7c314bbef8c80..1a7748d0541f5 100644 --- a/net/smc/smc_llc.h +++ b/net/smc/smc_llc.h @@ -89,6 +89,7 @@ struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr, struct smc_llc_qentry *smc_llc_flow_qentry_clr(struct smc_llc_flow *flow); void smc_llc_flow_qentry_del(struct smc_llc_flow *flow); int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry); +int smc_llc_srv_add_link(struct smc_link *link); int smc_llc_init(void) __init; #endif /* SMC_LLC_H */ From 57b499242cb888a32815f8663b60338bcb0b5747 Mon Sep 17 00:00:00 2001 From: Karsten Graul Date: Sun, 3 May 2020 14:38:44 +0200 Subject: [PATCH 05/11] net/smc: rkey processing for a new link as SMC server Part of SMC server new link establishment is the exchange of rkeys for used buffers. Loop over all used RMB buffers and send ADD_LINK_CONTINUE LLC messages to the peer. Signed-off-by: Karsten Graul Reviewed-by: Ursula Braun Signed-off-by: David S. Miller --- net/smc/smc_llc.c | 43 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index e2f254e217594..de73432bd72f6 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -863,6 +863,47 @@ static void smc_llc_process_cli_add_link(struct smc_link_group *lgr) mutex_unlock(&lgr->llc_conf_mutex); } +static int smc_llc_srv_rkey_exchange(struct smc_link *link, + struct smc_link *link_new) +{ + struct smc_llc_msg_add_link_cont *addc_llc; + struct smc_link_group *lgr = link->lgr; + u8 max, num_rkeys_send, num_rkeys_recv; + struct smc_llc_qentry *qentry = NULL; + struct smc_buf_desc *buf_pos; + int buf_lst; + int rc = 0; + int i; + + mutex_lock(&lgr->rmbs_lock); + num_rkeys_send = lgr->conns_num; + buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst); + do { + smc_llc_add_link_cont(link, link_new, &num_rkeys_send, + &buf_lst, &buf_pos); + qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME, + SMC_LLC_ADD_LINK_CONT); + if (!qentry) { + rc = -ETIMEDOUT; + goto out; + } + addc_llc = &qentry->msg.add_link_cont; + num_rkeys_recv = addc_llc->num_rkeys; + max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG); + for (i = 0; i < max; i++) { + smc_rtoken_set(lgr, link->link_idx, link_new->link_idx, + addc_llc->rt[i].rmb_key, + addc_llc->rt[i].rmb_vaddr_new, + addc_llc->rt[i].rmb_key_new); + num_rkeys_recv--; + } + smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); + } while (num_rkeys_send || num_rkeys_recv); +out: + mutex_unlock(&lgr->rmbs_lock); + return rc; +} + int smc_llc_srv_add_link(struct smc_link *link) { enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC; @@ -923,7 +964,7 @@ int smc_llc_srv_add_link(struct smc_link *link) rc = smcr_buf_reg_lgr(link_new); if (rc) goto out_err; - /* tbd: rc = smc_llc_srv_rkey_exchange(link, link_new); */ + rc = smc_llc_srv_rkey_exchange(link, link_new); if (rc) goto out_err; /* tbd: rc = smc_llc_srv_conf_link(link, link_new, lgr_new_t); */ From 1551c95b61242b1a20565bae8d711f35a601c4f3 Mon Sep 17 00:00:00 2001 From: Karsten Graul Date: Sun, 3 May 2020 14:38:45 +0200 Subject: [PATCH 06/11] net/smc: final part of add link processing as SMC server This patch finalizes the ADD_LINK processing of new links. Send the CONFIRM_LINK request to the peer, receive the response and set link state to ACTIVE. Signed-off-by: Karsten Graul Reviewed-by: Ursula Braun Signed-off-by: David S. 
Miller --- net/smc/smc_llc.c | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index de73432bd72f6..1fefee55e2935 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -904,6 +904,33 @@ static int smc_llc_srv_rkey_exchange(struct smc_link *link, return rc; } +static int smc_llc_srv_conf_link(struct smc_link *link, + struct smc_link *link_new, + enum smc_lgr_type lgr_new_t) +{ + struct smc_link_group *lgr = link->lgr; + struct smc_llc_qentry *qentry = NULL; + int rc; + + /* send CONFIRM LINK request over the RoCE fabric */ + rc = smc_llc_send_confirm_link(link_new, SMC_LLC_REQ); + if (rc) + return -ENOLINK; + /* receive CONFIRM LINK response over the RoCE fabric */ + qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME, + SMC_LLC_CONFIRM_LINK); + if (!qentry) { + /* send DELETE LINK */ + smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ, + false, SMC_LLC_DEL_LOST_PATH); + return -ENOLINK; + } + smc_llc_link_active(link_new); + lgr->type = lgr_new_t; + smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); + return 0; +} + int smc_llc_srv_add_link(struct smc_link *link) { enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC; @@ -967,7 +994,7 @@ int smc_llc_srv_add_link(struct smc_link *link) rc = smc_llc_srv_rkey_exchange(link, link_new); if (rc) goto out_err; - /* tbd: rc = smc_llc_srv_conf_link(link, link_new, lgr_new_t); */ + rc = smc_llc_srv_conf_link(link, link_new, lgr_new_t); if (rc) goto out_err; return 0; From c9a5d243035161f06175a7c6d487c9860e0f179a Mon Sep 17 00:00:00 2001 From: Karsten Graul Date: Sun, 3 May 2020 14:38:46 +0200 Subject: [PATCH 07/11] net/smc: delete an asymmetric link as SMC server When a link group moved from asymmetric to symmetric state then the dangling asymmetric link can be deleted. Add smc_llc_find_asym_link() to find the respective link and add smc_llc_delete_asym_link() to delete it. Signed-off-by: Karsten Graul Reviewed-by: Ursula Braun Signed-off-by: David S. Miller --- net/smc/smc_llc.c | 81 ++++++++++++++++++++++++++++++++++++++++++++++- net/smc/smc_wr.c | 2 +- net/smc/smc_wr.h | 1 + 3 files changed, 82 insertions(+), 2 deletions(-) diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index 1fefee55e2935..9d102c912be92 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -863,6 +863,85 @@ static void smc_llc_process_cli_add_link(struct smc_link_group *lgr) mutex_unlock(&lgr->llc_conf_mutex); } +/* find the asymmetric link when 3 links are established */ +static struct smc_link *smc_llc_find_asym_link(struct smc_link_group *lgr) +{ + int asym_idx = -ENOENT; + int i, j, k; + bool found; + + /* determine asymmetric link */ + found = false; + for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { + for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) { + if (!smc_link_usable(&lgr->lnk[i]) || + !smc_link_usable(&lgr->lnk[j])) + continue; + if (!memcmp(lgr->lnk[i].gid, lgr->lnk[j].gid, + SMC_GID_SIZE)) { + found = true; /* asym_lnk is i or j */ + break; + } + } + if (found) + break; + } + if (!found) + goto out; /* no asymmetric link */ + for (k = 0; k < SMC_LINKS_PER_LGR_MAX; k++) { + if (!smc_link_usable(&lgr->lnk[k])) + continue; + if (k != i && + !memcmp(lgr->lnk[i].peer_gid, lgr->lnk[k].peer_gid, + SMC_GID_SIZE)) { + asym_idx = i; + break; + } + if (k != j && + !memcmp(lgr->lnk[j].peer_gid, lgr->lnk[k].peer_gid, + SMC_GID_SIZE)) { + asym_idx = j; + break; + } + } +out: + return (asym_idx < 0) ? 
NULL : &lgr->lnk[asym_idx]; +} + +static void smc_llc_delete_asym_link(struct smc_link_group *lgr) +{ + struct smc_link *lnk_new = NULL, *lnk_asym; + struct smc_llc_qentry *qentry; + int rc; + + lnk_asym = smc_llc_find_asym_link(lgr); + if (!lnk_asym) + return; /* no asymmetric link */ + if (!smc_link_downing(&lnk_asym->state)) + return; + /* tbd: lnk_new = smc_switch_conns(lgr, lnk_asym, false); */ + smc_wr_tx_wait_no_pending_sends(lnk_asym); + if (!lnk_new) + goto out_free; + /* change flow type from ADD_LINK into DEL_LINK */ + lgr->llc_flow_lcl.type = SMC_LLC_FLOW_DEL_LINK; + rc = smc_llc_send_delete_link(lnk_new, lnk_asym->link_id, SMC_LLC_REQ, + true, SMC_LLC_DEL_NO_ASYM_NEEDED); + if (rc) { + smcr_link_down_cond(lnk_new); + goto out_free; + } + qentry = smc_llc_wait(lgr, lnk_new, SMC_LLC_WAIT_TIME, + SMC_LLC_DELETE_LINK); + if (!qentry) { + smcr_link_down_cond(lnk_new); + goto out_free; + } + smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); +out_free: + smcr_link_clear(lnk_asym); +} + static int smc_llc_srv_rkey_exchange(struct smc_link *link, struct smc_link *link_new) { @@ -1014,7 +1093,7 @@ static void smc_llc_process_srv_add_link(struct smc_link_group *lgr) rc = smc_llc_srv_add_link(link); if (!rc && lgr->type == SMC_LGR_SYMMETRIC) { /* delete any asymmetric link */ - /* tbd: smc_llc_delete_asym_link(lgr); */ + smc_llc_delete_asym_link(lgr); } mutex_unlock(&lgr->llc_conf_mutex); } diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index 031e6c9561b11..3fd27bea4f7a6 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c @@ -61,7 +61,7 @@ static inline bool smc_wr_is_tx_pend(struct smc_link *link) } /* wait till all pending tx work requests on the given link are completed */ -static inline int smc_wr_tx_wait_no_pending_sends(struct smc_link *link) +int smc_wr_tx_wait_no_pending_sends(struct smc_link *link) { if (wait_event_timeout(link->wr_tx_wait, !smc_wr_is_tx_pend(link), SMC_WR_TX_WAIT_PENDING_TIME)) diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h index 3ac99c8984183..f7eaeb3391f38 100644 --- a/net/smc/smc_wr.h +++ b/net/smc/smc_wr.h @@ -106,6 +106,7 @@ void smc_wr_tx_dismiss_slots(struct smc_link *lnk, u8 wr_rx_hdr_type, smc_wr_tx_filter filter, smc_wr_tx_dismisser dismisser, unsigned long data); +int smc_wr_tx_wait_no_pending_sends(struct smc_link *link); int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler); int smc_wr_rx_post_init(struct smc_link *link); From 9ec6bf19ec8bb19f4211f6a2bf62c079d46b54ea Mon Sep 17 00:00:00 2001 From: Karsten Graul Date: Sun, 3 May 2020 14:38:47 +0200 Subject: [PATCH 08/11] net/smc: llc_del_link_work and use the LLC flow for delete link Introduce a work that is scheduled when a new DELETE_LINK LLC request is received. The work will call either the SMC client or SMC server DELETE_LINK processing. And use the LLC flow framework to process incoming DELETE_LINK LLC messages, scheduling the llc_del_link_work for those events. With these changes smc_lgr_forget() is only called by one function and can be migrated into smc_lgr_cleanup_early(). Signed-off-by: Karsten Graul Reviewed-by: Ursula Braun Signed-off-by: David S. 
Miller --- net/smc/smc_core.c | 22 +++++++------------ net/smc/smc_core.h | 2 +- net/smc/smc_llc.c | 55 ++++++++++++++++++++++++++++++---------------- 3 files changed, 45 insertions(+), 34 deletions(-) diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 2f8faa9c9e8e4..a964304283fa9 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -193,12 +193,19 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn) void smc_lgr_cleanup_early(struct smc_connection *conn) { struct smc_link_group *lgr = conn->lgr; + struct list_head *lgr_list; + spinlock_t *lgr_lock; if (!lgr) return; smc_conn_free(conn); - smc_lgr_forget(lgr); + lgr_list = smc_lgr_list_head(lgr, &lgr_lock); + spin_lock_bh(lgr_lock); + /* do not use this link group for new connections */ + if (!list_empty(lgr_list)) + list_del_init(lgr_list); + spin_unlock_bh(lgr_lock); smc_lgr_schedule_free_work_fast(lgr); } @@ -653,19 +660,6 @@ static void smc_lgr_free(struct smc_link_group *lgr) kfree(lgr); } -void smc_lgr_forget(struct smc_link_group *lgr) -{ - struct list_head *lgr_list; - spinlock_t *lgr_lock; - - lgr_list = smc_lgr_list_head(lgr, &lgr_lock); - spin_lock_bh(lgr_lock); - /* do not use this link group for new connections */ - if (!list_empty(lgr_list)) - list_del_init(lgr_list); - spin_unlock_bh(lgr_lock); -} - static void smcd_unregister_all_dmbs(struct smc_link_group *lgr) { int i; diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index 4e00819e2db7a..7fe53feb9dc43 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -254,6 +254,7 @@ struct smc_link_group { struct mutex llc_conf_mutex; /* protects lgr reconfig. */ struct work_struct llc_add_link_work; + struct work_struct llc_del_link_work; struct work_struct llc_event_work; /* llc event worker */ wait_queue_head_t llc_waiter; @@ -343,7 +344,6 @@ struct smc_sock; struct smc_clc_msg_accept_confirm; struct smc_clc_msg_local; -void smc_lgr_forget(struct smc_link_group *lgr); void smc_lgr_cleanup_early(struct smc_connection *conn); void smc_lgr_terminate_sched(struct smc_link_group *lgr); void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport); diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index 9d102c912be92..e4e3910a9624f 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -1118,22 +1118,18 @@ static void smc_llc_add_link_work(struct work_struct *work) smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl); } -static void smc_llc_rx_delete_link(struct smc_link *link, - struct smc_llc_msg_del_link *llc) +static void smc_llc_delete_link_work(struct work_struct *work) { - struct smc_link_group *lgr = smc_get_lgr(link); + struct smc_link_group *lgr = container_of(work, struct smc_link_group, + llc_del_link_work); - smc_lgr_forget(lgr); - if (lgr->role == SMC_SERV) { - /* client asks to delete this link, send request */ - smc_llc_send_delete_link(link, 0, SMC_LLC_REQ, true, - SMC_LLC_DEL_PROG_INIT_TERM); - } else { - /* server requests to delete this link, send response */ - smc_llc_send_delete_link(link, 0, SMC_LLC_RESP, true, - SMC_LLC_DEL_PROG_INIT_TERM); + if (list_empty(&lgr->list)) { + /* link group is terminating */ + smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); + goto out; } - smcr_link_down_cond(link); +out: + smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl); } /* process a confirm_rkey request from peer, remote flow */ @@ -1255,8 +1251,30 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry) } break; case SMC_LLC_DELETE_LINK: - smc_llc_rx_delete_link(link, &llc->delete_link); - break; + if (lgr->role == SMC_CLNT) { + /* server 
requests to delete this link, send response */ + if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) { + /* DEL LINK REQ during ADD LINK SEQ */ + smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, + qentry); + wake_up_interruptible(&lgr->llc_waiter); + } else if (smc_llc_flow_start(&lgr->llc_flow_lcl, + qentry)) { + schedule_work(&lgr->llc_del_link_work); + } + } else { + if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK && + !lgr->llc_flow_lcl.qentry) { + /* DEL LINK REQ during ADD LINK SEQ */ + smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, + qentry); + wake_up_interruptible(&lgr->llc_waiter); + } else if (smc_llc_flow_start(&lgr->llc_flow_lcl, + qentry)) { + schedule_work(&lgr->llc_del_link_work); + } + } + return; case SMC_LLC_CONFIRM_RKEY: /* new request from remote, assign to remote flow */ if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) { @@ -1325,6 +1343,7 @@ static void smc_llc_rx_response(struct smc_link *link, complete(&link->llc_testlink_resp); break; case SMC_LLC_ADD_LINK: + case SMC_LLC_DELETE_LINK: case SMC_LLC_CONFIRM_LINK: case SMC_LLC_ADD_LINK_CONT: case SMC_LLC_CONFIRM_RKEY: @@ -1333,10 +1352,6 @@ static void smc_llc_rx_response(struct smc_link *link, smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry); wake_up_interruptible(&link->lgr->llc_waiter); return; - case SMC_LLC_DELETE_LINK: - if (link->lgr->role == SMC_SERV) - smc_lgr_schedule_free_work_fast(link->lgr); - break; case SMC_LLC_CONFIRM_RKEY_CONT: /* not used because max links is 3 */ break; @@ -1424,6 +1439,7 @@ void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc) INIT_WORK(&lgr->llc_event_work, smc_llc_event_work); INIT_WORK(&lgr->llc_add_link_work, smc_llc_add_link_work); + INIT_WORK(&lgr->llc_del_link_work, smc_llc_delete_link_work); INIT_LIST_HEAD(&lgr->llc_event_q); spin_lock_init(&lgr->llc_event_q_lock); spin_lock_init(&lgr->llc_flow_lock); @@ -1439,6 +1455,7 @@ void smc_llc_lgr_clear(struct smc_link_group *lgr) wake_up_interruptible_all(&lgr->llc_waiter); cancel_work_sync(&lgr->llc_event_work); cancel_work_sync(&lgr->llc_add_link_work); + cancel_work_sync(&lgr->llc_del_link_work); if (lgr->delayed_event) { kfree(lgr->delayed_event); lgr->delayed_event = NULL; From 9c4168789cc635e1f0d265157b7617259d56bfee Mon Sep 17 00:00:00 2001 From: Karsten Graul Date: Sun, 3 May 2020 14:38:48 +0200 Subject: [PATCH 09/11] net/smc: delete link processing as SMC client Add smc_llc_process_cli_delete_link() to process a DELETE_LINK request as SMC client. When the request is to delete ALL links then terminate the whole link group. If not, find the link to delete by its link_id, send the DELETE_LINK response LLC message and then clear the deleted link. Finally determine and update the link group state. Signed-off-by: Karsten Graul Reviewed-by: Ursula Braun Signed-off-by: David S. 
Miller --- net/smc/smc_llc.c | 72 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index e4e3910a9624f..cd57b4fb18427 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -863,6 +863,18 @@ static void smc_llc_process_cli_add_link(struct smc_link_group *lgr) mutex_unlock(&lgr->llc_conf_mutex); } +static int smc_llc_active_link_count(struct smc_link_group *lgr) +{ + int i, link_count = 0; + + for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { + if (!smc_link_usable(&lgr->lnk[i])) + continue; + link_count++; + } + return link_count; +} + /* find the asymmetric link when 3 links are established */ static struct smc_link *smc_llc_find_asym_link(struct smc_link_group *lgr) { @@ -1118,6 +1130,63 @@ static void smc_llc_add_link_work(struct work_struct *work) smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl); } +static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr) +{ + struct smc_link *lnk_del = NULL, *lnk_asym, *lnk; + struct smc_llc_msg_del_link *del_llc; + struct smc_llc_qentry *qentry; + int active_links; + int lnk_idx; + + qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl); + lnk = qentry->link; + del_llc = &qentry->msg.delete_link; + + if (del_llc->hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) { + smc_lgr_terminate_sched(lgr); + goto out; + } + mutex_lock(&lgr->llc_conf_mutex); + /* delete single link */ + for (lnk_idx = 0; lnk_idx < SMC_LINKS_PER_LGR_MAX; lnk_idx++) { + if (lgr->lnk[lnk_idx].link_id != del_llc->link_num) + continue; + lnk_del = &lgr->lnk[lnk_idx]; + break; + } + del_llc->hd.flags |= SMC_LLC_FLAG_RESP; + if (!lnk_del) { + /* link was not found */ + del_llc->reason = htonl(SMC_LLC_DEL_NOLNK); + smc_llc_send_message(lnk, &qentry->msg); + goto out_unlock; + } + lnk_asym = smc_llc_find_asym_link(lgr); + + del_llc->reason = 0; + smc_llc_send_message(lnk, &qentry->msg); /* response */ + + if (smc_link_downing(&lnk_del->state)) { + /* tbd: call smc_switch_conns(lgr, lnk_del, false); */ + smc_wr_tx_wait_no_pending_sends(lnk_del); + } + smcr_link_clear(lnk_del); + + active_links = smc_llc_active_link_count(lgr); + if (lnk_del == lnk_asym) { + /* expected deletion of asym link, don't change lgr state */ + } else if (active_links == 1) { + lgr->type = SMC_LGR_SINGLE; + } else if (!active_links) { + lgr->type = SMC_LGR_NONE; + smc_lgr_terminate_sched(lgr); + } +out_unlock: + mutex_unlock(&lgr->llc_conf_mutex); +out: + kfree(qentry); +} + static void smc_llc_delete_link_work(struct work_struct *work) { struct smc_link_group *lgr = container_of(work, struct smc_link_group, @@ -1128,6 +1197,9 @@ static void smc_llc_delete_link_work(struct work_struct *work) smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); goto out; } + + if (lgr->role == SMC_CLNT) + smc_llc_process_cli_delete_link(lgr); out: smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl); } From 08ae27ddfb6514a8316b17256cd4262bb6931c1f Mon Sep 17 00:00:00 2001 From: Karsten Graul Date: Sun, 3 May 2020 14:38:49 +0200 Subject: [PATCH 10/11] net/smc: delete link processing as SMC server Add smc_llc_process_srv_delete_link() to process a DELETE_LINK request as SMC server. When the request is to delete ALL links then terminate the whole link group. If not, find the link to delete by its link_id, send the DELETE_LINK request LLC message and wait for the response. No matter if a response was received, clear the deleted link and update the link group state. Signed-off-by: Karsten Graul Reviewed-by: Ursula Braun Signed-off-by: David S. 
Miller --- net/smc/smc_llc.c | 72 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index cd57b4fb18427..ac065f6d60dc2 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -1187,6 +1187,76 @@ static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr) kfree(qentry); } +static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr) +{ + struct smc_llc_msg_del_link *del_llc; + struct smc_link *lnk, *lnk_del; + struct smc_llc_qentry *qentry; + int active_links; + int i; + + mutex_lock(&lgr->llc_conf_mutex); + qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl); + lnk = qentry->link; + del_llc = &qentry->msg.delete_link; + + if (qentry->msg.delete_link.hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) { + /* delete entire lgr */ + smc_lgr_terminate_sched(lgr); + goto out; + } + /* delete single link */ + lnk_del = NULL; + for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { + if (lgr->lnk[i].link_id == del_llc->link_num) { + lnk_del = &lgr->lnk[i]; + break; + } + } + if (!lnk_del) + goto out; /* asymmetric link already deleted */ + + if (smc_link_downing(&lnk_del->state)) { + /* tbd: call smc_switch_conns(lgr, lnk_del, false); */ + smc_wr_tx_wait_no_pending_sends(lnk_del); + } + if (!list_empty(&lgr->list)) { + /* qentry is either a request from peer (send it back to + * initiate the DELETE_LINK processing), or a locally + * enqueued DELETE_LINK request (forward it) + */ + if (!smc_llc_send_message(lnk, &qentry->msg)) { + struct smc_llc_msg_del_link *del_llc_resp; + struct smc_llc_qentry *qentry2; + + qentry2 = smc_llc_wait(lgr, lnk, SMC_LLC_WAIT_TIME, + SMC_LLC_DELETE_LINK); + if (!qentry2) { + } else { + del_llc_resp = &qentry2->msg.delete_link; + smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); + } + } + } + smcr_link_clear(lnk_del); + + active_links = smc_llc_active_link_count(lgr); + if (active_links == 1) { + lgr->type = SMC_LGR_SINGLE; + } else if (!active_links) { + lgr->type = SMC_LGR_NONE; + smc_lgr_terminate_sched(lgr); + } + + if (lgr->type == SMC_LGR_SINGLE && !list_empty(&lgr->list)) { + /* trigger setup of asymm alt link */ + /* tbd: call smc_llc_srv_add_link_local(lnk); */ + } +out: + mutex_unlock(&lgr->llc_conf_mutex); + kfree(qentry); +} + static void smc_llc_delete_link_work(struct work_struct *work) { struct smc_link_group *lgr = container_of(work, struct smc_link_group, @@ -1200,6 +1270,8 @@ static void smc_llc_delete_link_work(struct work_struct *work) if (lgr->role == SMC_CLNT) smc_llc_process_cli_delete_link(lgr); + else + smc_llc_process_srv_delete_link(lgr); out: smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl); } From 4dadd151b26589fd0520feb97c93ee981b393a99 Mon Sep 17 00:00:00 2001 From: Karsten Graul Date: Sun, 3 May 2020 14:38:50 +0200 Subject: [PATCH 11/11] net/smc: enqueue local LLC messages As SMC server, when a second link was deleted, trigger the setup of an asymmetric link. Do this by enqueueing a local ADD_LINK message which is processed by the LLC layer as if it were received from peer. Do the same when a new IB port became active and a new link could be created. smc_llc_srv_add_link_local() enqueues a local ADD_LINK message. And smc_llc_srv_delete_link_local() is used the same way to enqueue a local DELETE_LINK message. This is used when an IB port is no longer active. Signed-off-by: Karsten Graul Reviewed-by: Ursula Braun Signed-off-by: David S. 
Miller --- net/smc/smc_core.c | 3 ++- net/smc/smc_llc.c | 30 +++++++++++++++++++++++++++++- net/smc/smc_llc.h | 2 ++ 3 files changed, 33 insertions(+), 2 deletions(-) diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index a964304283fa9..32a6cadc5c1fd 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -883,7 +883,7 @@ static void smcr_link_up(struct smc_link_group *lgr, link = smc_llc_usable_link(lgr); if (!link) return; - /* tbd: call smc_llc_srv_add_link_local(link); */ + smc_llc_srv_add_link_local(link); } else { /* invite server to start add link processing */ u8 gid[SMC_GID_SIZE]; @@ -954,6 +954,7 @@ static void smcr_link_down(struct smc_link *lnk) if (lgr->role == SMC_SERV) { /* trigger local delete link processing */ + smc_llc_srv_delete_link_local(to_lnk, del_link_id); } else { if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) { /* another llc task is ongoing */ diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index ac065f6d60dc2..7675ccd6f3c39 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -159,6 +159,8 @@ struct smc_llc_qentry { union smc_llc_msg msg; }; +static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc); + struct smc_llc_qentry *smc_llc_flow_qentry_clr(struct smc_llc_flow *flow) { struct smc_llc_qentry *qentry = flow->qentry; @@ -1110,6 +1112,17 @@ static void smc_llc_process_srv_add_link(struct smc_link_group *lgr) mutex_unlock(&lgr->llc_conf_mutex); } +/* enqueue a local add_link req to trigger a new add_link flow, only as SERV */ +void smc_llc_srv_add_link_local(struct smc_link *link) +{ + struct smc_llc_msg_add_link add_llc = {0}; + + add_llc.hd.length = sizeof(add_llc); + add_llc.hd.common.type = SMC_LLC_ADD_LINK; + /* no dev and port needed, we as server ignore client data anyway */ + smc_llc_enqueue(link, (union smc_llc_msg *)&add_llc); +} + /* worker to process an add link message */ static void smc_llc_add_link_work(struct work_struct *work) { @@ -1130,6 +1143,21 @@ static void smc_llc_add_link_work(struct work_struct *work) smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl); } +/* enqueue a local del_link msg to trigger a new del_link flow, + * called only for role SMC_SERV + */ +void smc_llc_srv_delete_link_local(struct smc_link *link, u8 del_link_id) +{ + struct smc_llc_msg_del_link del_llc = {0}; + + del_llc.hd.length = sizeof(del_llc); + del_llc.hd.common.type = SMC_LLC_DELETE_LINK; + del_llc.link_num = del_link_id; + del_llc.reason = htonl(SMC_LLC_DEL_LOST_PATH); + del_llc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY; + smc_llc_enqueue(link, (union smc_llc_msg *)&del_llc); +} + static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr) { struct smc_link *lnk_del = NULL, *lnk_asym, *lnk; @@ -1250,7 +1278,7 @@ static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr) if (lgr->type == SMC_LGR_SINGLE && !list_empty(&lgr->list)) { /* trigger setup of asymm alt link */ - /* tbd: call smc_llc_srv_add_link_local(lnk); */ + smc_llc_srv_add_link_local(lnk); } out: mutex_unlock(&lgr->llc_conf_mutex); diff --git a/net/smc/smc_llc.h b/net/smc/smc_llc.h index 1a7748d0541f5..c335fc5f363c2 100644 --- a/net/smc/smc_llc.h +++ b/net/smc/smc_llc.h @@ -69,6 +69,7 @@ int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[], int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id, enum smc_llc_reqresp reqresp, bool orderly, u32 reason); +void smc_llc_srv_delete_link_local(struct smc_link *link, u8 del_link_id); void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc); void 
smc_llc_lgr_clear(struct smc_link_group *lgr); int smc_llc_link_init(struct smc_link *link); @@ -90,6 +91,7 @@ struct smc_llc_qentry *smc_llc_flow_qentry_clr(struct smc_llc_flow *flow); void smc_llc_flow_qentry_del(struct smc_llc_flow *flow); int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry); int smc_llc_srv_add_link(struct smc_link *link); +void smc_llc_srv_add_link_local(struct smc_link *link); int smc_llc_init(void) __init; #endif /* SMC_LLC_H */
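
For reference, a minimal userspace sketch (not kernel code) of the ADD_LINK_CONTINUE batching that patches 02 and 05 implement: the sender walks the rtoken list two pairs at a time, stamping each message with the number of rkeys still outstanding, until num_rkeys_todo reaches zero. The struct mirrors smc_llc_msg_add_link_cont from patch 02 but with a reduced header, and all rkey/vaddr values below are invented sample data; htobe64() is glibc's.

/* Userspace illustration of the ADD_LINK_CONT wire batching:
 * SMC_LLC_RKEYS_PER_CONT_MSG (= 2) rtoken pairs per message, each
 * message carrying the count of rkeys still to go, including its own.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>   /* htonl() */
#include <endian.h>      /* htobe64(), glibc */

#define SMC_LLC_RKEYS_PER_CONT_MSG 2
#define SMC_LLC_ADD_LINK_CONT 0x03

struct rtoken_pair {            /* smc_llc_msg_add_link_cont_rt */
	uint32_t rmb_key;       /* rkey of the RMB on the old link (BE) */
	uint32_t rmb_key_new;   /* rkey of the RMB on the new link (BE) */
	uint64_t rmb_vaddr_new; /* DMA address on the new link (BE) */
} __attribute__((packed));

struct add_link_cont {          /* reduced header; full msg is 44 bytes */
	uint8_t type;           /* SMC_LLC_ADD_LINK_CONT */
	uint8_t length;         /* wire length of the real LLC message */
	uint8_t link_num;       /* link_id of the link being added */
	uint8_t num_rkeys;      /* rkeys outstanding, incl. this message */
	struct rtoken_pair rt[SMC_LLC_RKEYS_PER_CONT_MSG];
} __attribute__((packed));

int main(void)
{
	/* pretend the link group currently uses five RMBs */
	uint32_t old_rkeys[] = { 0x1000, 0x1001, 0x1002, 0x1003, 0x1004 };
	uint32_t new_rkeys[] = { 0x2000, 0x2001, 0x2002, 0x2003, 0x2004 };
	uint64_t new_vaddrs[] = { 0xa000, 0xb000, 0xc000, 0xd000, 0xe000 };
	uint8_t num_rkeys_todo = 5;
	unsigned int pos = 0, msg_cnt = 0;

	while (num_rkeys_todo) {
		struct add_link_cont msg;
		int i;

		memset(&msg, 0, sizeof(msg));
		msg.type = SMC_LLC_ADD_LINK_CONT;
		msg.length = 44;		/* fixed LLC message size */
		msg.link_num = 2;		/* sample link_id */
		msg.num_rkeys = num_rkeys_todo;	/* count before this msg */
		for (i = 0; i < SMC_LLC_RKEYS_PER_CONT_MSG && num_rkeys_todo;
		     i++, pos++, num_rkeys_todo--) {
			msg.rt[i].rmb_key = htonl(old_rkeys[pos]);
			msg.rt[i].rmb_key_new = htonl(new_rkeys[pos]);
			msg.rt[i].rmb_vaddr_new = htobe64(new_vaddrs[pos]);
		}
		printf("msg %u: num_rkeys=%d, carries %d rtoken pair(s)\n",
		       ++msg_cnt, msg.num_rkeys, i);
	}
	return 0;
}

With five RMBs this emits three messages (num_rkeys 5, 3, 1), matching the do/while loops in smc_llc_cli_rkey_exchange() and smc_llc_srv_rkey_exchange(), which keep going while either side still has rkeys to send or receive.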
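
Similarly, the link-index selection added in patch 01 can be exercised in isolation. A sketch under the same caveats (standalone userspace C, kernel constants re-declared locally): an asymmetric link is placed in the highest unused slot, a symmetric one fills from the bottom, and a link group that is already SYMMETRIC, or that is not SINGLE when an asymmetric link is requested, gets no new link at all.

/* Userspace illustration of smc_llc_alloc_alt_link(): slot choice
 * depends on the new link group type. SMC_LINKS_PER_LGR_MAX is 3,
 * as in the kernel ("when 3 links are established", patch 07).
 */
#include <stdio.h>
#include <errno.h>	/* EMLINK */

#define SMC_LINKS_PER_LGR_MAX 3

enum smc_lgr_type { SMC_LGR_NONE, SMC_LGR_SINGLE, SMC_LGR_SYMMETRIC,
		    SMC_LGR_ASYMMETRIC_PEER, SMC_LGR_ASYMMETRIC_LOCAL };
enum lnk_state { SMC_LNK_UNUSED, SMC_LNK_ACTIVE };

static int alloc_alt_link(enum smc_lgr_type cur, enum smc_lgr_type new_t,
			  const enum lnk_state lnk[SMC_LINKS_PER_LGR_MAX])
{
	int i;

	/* same gate as the kernel code: a symmetric lgr is full, and an
	 * asymmetric link may only be added to a SINGLE lgr
	 */
	if (cur == SMC_LGR_SYMMETRIC ||
	    (cur != SMC_LGR_SINGLE &&
	     (new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
	      new_t == SMC_LGR_ASYMMETRIC_PEER)))
		return -EMLINK;

	if (new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
	    new_t == SMC_LGR_ASYMMETRIC_PEER) {
		for (i = SMC_LINKS_PER_LGR_MAX - 1; i >= 0; i--)
			if (lnk[i] == SMC_LNK_UNUSED)
				return i;
	} else {
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
			if (lnk[i] == SMC_LNK_UNUSED)
				return i;
	}
	return -EMLINK;
}

int main(void)
{
	enum lnk_state lnk[SMC_LINKS_PER_LGR_MAX] =
		{ SMC_LNK_ACTIVE, SMC_LNK_UNUSED, SMC_LNK_UNUSED };

	/* symmetric second link lands in the lowest free slot (1) */
	printf("symmetric:  idx %d\n",
	       alloc_alt_link(SMC_LGR_SINGLE, SMC_LGR_SYMMETRIC, lnk));
	/* asymmetric link lands in the highest free slot (2) */
	printf("asymmetric: idx %d\n",
	       alloc_alt_link(SMC_LGR_SINGLE, SMC_LGR_ASYMMETRIC_PEER, lnk));
	return 0;
}

Placing asymmetric links at the top of the array keeps the low slots free for the symmetric pair, which is why smc_llc_delete_asym_link() in patch 07 can later drop the dangling asymmetric link without disturbing the symmetric ones.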