Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 161117
b: refs/heads/master
c: b2f0091
h: refs/heads/master
i:
  161115: e2a5dba
v: v3
  • Loading branch information
Vasu Dev authored and James Bottomley committed Sep 5, 2009
1 parent 9aa1b0b commit b41295f
Show file tree
Hide file tree
Showing 4 changed files with 116 additions and 103 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: e4bc50bedf0dd6c63f20a7bc0a2b46667664fba1
refs/heads/master: b2f0091fbf8b475fa09b5e1712e0ab84cb3e1ca4
19 changes: 9 additions & 10 deletions trunk/drivers/scsi/fcoe/fcoe.c
Original file line number Diff line number Diff line change
Expand Up @@ -912,8 +912,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
struct fcoe_softc *fc;
struct fc_frame_header *fh;
struct fcoe_percpu_s *fps;
unsigned short oxid;
unsigned int cpu = 0;
unsigned int cpu;

fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
lp = fc->ctlr.lp;
Expand Down Expand Up @@ -947,20 +946,20 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
fh = (struct fc_frame_header *) skb_transport_header(skb);

oxid = ntohs(fh->fh_ox_id);

fr = fcoe_dev_from_skb(skb);
fr->fr_dev = lp;
fr->ptype = ptype;

#ifdef CONFIG_SMP
/*
* The incoming frame exchange id(oxid) is ANDed with num of online
* cpu bits to get cpu and then this cpu is used for selecting
* a per cpu kernel thread from fcoe_percpu.
* In case the incoming frame's exchange is originated from
* the initiator, then received frame's exchange id is ANDed
* with fc_cpu_mask bits to get the same cpu on which exchange
* was originated, otherwise just use the current cpu.
*/
cpu = oxid & (num_online_cpus() - 1);
#endif
if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
else
cpu = smp_processor_id();

fps = &per_cpu(fcoe_percpu, cpu);
spin_lock_bh(&fps->fcoe_rx_list.lock);
Expand Down
189 changes: 100 additions & 89 deletions trunk/drivers/scsi/libfc/fc_exch.c
Original file line number Diff line number Diff line change
Expand Up @@ -73,14 +73,8 @@ struct fc_exch_pool {
struct fc_exch_mgr {
enum fc_class class; /* default class for sequences */
struct kref kref; /* exchange mgr reference count */
spinlock_t em_lock; /* exchange manager lock,
must be taken before ex_lock */
u16 next_xid; /* next possible free exchange ID */
u16 min_xid; /* min exchange ID */
u16 max_xid; /* max exchange ID */
u16 max_read; /* max exchange ID for read */
u16 last_read; /* last xid allocated for read */
u32 total_exches; /* total allocated exchanges */
struct list_head ex_list; /* allocated exchanges list */
mempool_t *ep_pool; /* reserve ep's */
u16 pool_max_index; /* max exch array index in exch pool */
Expand All @@ -99,7 +93,6 @@ struct fc_exch_mgr {
atomic_t seq_not_found;
atomic_t non_bls_resp;
} stats;
struct fc_exch **exches; /* for exch pointers indexed by xid */
};
#define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)

Expand Down Expand Up @@ -192,8 +185,8 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp);
* sequence allocation and deallocation must be locked.
* - exchange refcnt can be done atomicly without locks.
* - sequence allocation must be locked by exch lock.
* - If the em_lock and ex_lock must be taken at the same time, then the
* em_lock must be taken before the ex_lock.
* - If the EM pool lock and ex_lock must be taken at the same time, then the
* EM pool lock must be taken before the ex_lock.
*/

/*
Expand Down Expand Up @@ -335,17 +328,18 @@ static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
((struct fc_exch **)(pool + 1))[index] = ep;
}

static void fc_exch_mgr_delete_ep(struct fc_exch *ep)
static void fc_exch_delete(struct fc_exch *ep)
{
struct fc_exch_mgr *mp;
struct fc_exch_pool *pool;

mp = ep->em;
spin_lock_bh(&mp->em_lock);
WARN_ON(mp->total_exches <= 0);
mp->total_exches--;
mp->exches[ep->xid - mp->min_xid] = NULL;
pool = ep->pool;
spin_lock_bh(&pool->lock);
WARN_ON(pool->total_exches <= 0);
pool->total_exches--;
fc_exch_ptr_set(pool, (ep->xid - ep->em->min_xid) >> fc_cpu_order,
NULL);
list_del(&ep->ex_list);
spin_unlock_bh(&mp->em_lock);
spin_unlock_bh(&pool->lock);
fc_exch_release(ep); /* drop hold for exch in mp */
}

Expand Down Expand Up @@ -465,7 +459,7 @@ static void fc_exch_timeout(struct work_struct *work)
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
if (!rc)
fc_exch_mgr_delete_ep(ep);
fc_exch_delete(ep);
if (resp)
resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
Expand Down Expand Up @@ -509,10 +503,9 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
struct fc_exch_mgr *mp)
{
struct fc_exch *ep;
u16 min, max, xid;

min = mp->min_xid;
max = mp->max_xid;
unsigned int cpu;
u16 index;
struct fc_exch_pool *pool;

/* allocate memory for exchange */
ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
Expand All @@ -522,15 +515,17 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
}
memset(ep, 0, sizeof(*ep));

spin_lock_bh(&mp->em_lock);
xid = mp->next_xid;
/* alloc a new xid */
while (mp->exches[xid - min]) {
xid = (xid == max) ? min : xid + 1;
if (xid == mp->next_xid)
cpu = smp_processor_id();
pool = per_cpu_ptr(mp->pool, cpu);
spin_lock_bh(&pool->lock);
index = pool->next_index;
/* allocate new exch from pool */
while (fc_exch_ptr_get(pool, index)) {
index = index == mp->pool_max_index ? 0 : index + 1;
if (index == pool->next_index)
goto err;
}
mp->next_xid = (xid == max) ? min : xid + 1;
pool->next_index = index == mp->pool_max_index ? 0 : index + 1;

fc_exch_hold(ep); /* hold for exch in mp */
spin_lock_init(&ep->ex_lock);
Expand All @@ -541,17 +536,18 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
*/
spin_lock_bh(&ep->ex_lock);

mp->exches[xid - mp->min_xid] = ep;
list_add_tail(&ep->ex_list, &mp->ex_list);
fc_exch_ptr_set(pool, index, ep);
list_add_tail(&ep->ex_list, &pool->ex_list);
fc_seq_alloc(ep, ep->seq_id++);
mp->total_exches++;
spin_unlock_bh(&mp->em_lock);
pool->total_exches++;
spin_unlock_bh(&pool->lock);

/*
* update exchange
*/
ep->oxid = ep->xid = xid;
ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
ep->em = mp;
ep->pool = pool;
ep->lp = lport;
ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */
ep->rxid = FC_XID_UNKNOWN;
Expand All @@ -560,7 +556,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
out:
return ep;
err:
spin_unlock_bh(&mp->em_lock);
spin_unlock_bh(&pool->lock);
atomic_inc(&mp->stats.no_free_exch_xid);
mempool_free(ep, mp->ep_pool);
return NULL;
Expand Down Expand Up @@ -597,16 +593,18 @@ EXPORT_SYMBOL(fc_exch_alloc);
*/
/**
 * fc_exch_find() - Look up an exchange by exchange ID.
 * @mp:  the exchange manager to search
 * @xid: the exchange ID to look for
 *
 * The low fc_cpu_mask bits of @xid select the per-CPU pool; the
 * remaining offset bits index into that pool. Returns the exchange
 * with a reference held, or NULL if @xid is out of range or unused.
 * Caller must release the reference when done.
 */
static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
{
	struct fc_exch_pool *pool;
	struct fc_exch *ep = NULL;

	if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
		pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
		spin_lock_bh(&pool->lock);
		ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
		if (ep) {
			fc_exch_hold(ep);
			WARN_ON(ep->xid != xid);
		}
		spin_unlock_bh(&pool->lock);
	}
	return ep;
}
Expand All @@ -620,7 +618,7 @@ void fc_exch_done(struct fc_seq *sp)
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
if (!rc)
fc_exch_mgr_delete_ep(ep);
fc_exch_delete(ep);
}
EXPORT_SYMBOL(fc_exch_done);

Expand Down Expand Up @@ -1213,7 +1211,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
WARN_ON(fc_seq_exch(sp) != ep);
spin_unlock_bh(&ep->ex_lock);
if (!rc)
fc_exch_mgr_delete_ep(ep);
fc_exch_delete(ep);
}

/*
Expand Down Expand Up @@ -1323,7 +1321,7 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
if (!rc)
fc_exch_mgr_delete_ep(ep);
fc_exch_delete(ep);

if (resp)
resp(sp, fp, ex_resp_arg);
Expand Down Expand Up @@ -1466,48 +1464,76 @@ static void fc_exch_reset(struct fc_exch *ep)
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
if (!rc)
fc_exch_mgr_delete_ep(ep);
fc_exch_delete(ep);

if (resp)
resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
}

/*
* Reset an exchange manager, releasing all sequences and exchanges.
* If sid is non-zero, reset only exchanges we source from that FID.
* If did is non-zero, reset only exchanges destined to that FID.
/**
 * fc_exch_pool_reset() - Resets a per cpu exches pool.
 * @lport: ptr to the local port
 * @pool: ptr to the per cpu exches pool
 * @sid: source FC ID
 * @did: destination FC ID
 *
 * Resets a per cpu exches pool, releasing all of its sequences
 * and exchanges. If sid is non-zero, then reset only exchanges
 * we sourced from that FID. If did is non-zero, reset only
 * exchanges destined to that FID.
 */
static void fc_exch_pool_reset(struct fc_lport *lport,
			       struct fc_exch_pool *pool,
			       u32 sid, u32 did)
{
	struct fc_exch *ep;
	struct fc_exch *next;

	spin_lock_bh(&pool->lock);
restart:
	list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
		if ((lport == ep->lp) &&
		    (sid == 0 || sid == ep->sid) &&
		    (did == 0 || did == ep->did)) {
			fc_exch_hold(ep);
			/* drop the pool lock: fc_exch_reset() takes ex_lock */
			spin_unlock_bh(&pool->lock);

			fc_exch_reset(ep);

			fc_exch_release(ep);
			spin_lock_bh(&pool->lock);

			/*
			 * must restart loop incase while lock
			 * was down multiple eps were released.
			 */
			goto restart;
		}
	}
	spin_unlock_bh(&pool->lock);
}

/**
 * fc_exch_mgr_reset() - Resets all EMs of a lport
 * @lport: ptr to the local port
 * @sid: source FC ID
 * @did: destination FC ID
 *
 * Reset all EMs of a lport, releasing all of their sequences and
 * exchanges. If sid is non-zero, then reset only exchanges
 * we sourced from that FID. If did is non-zero, reset only
 * exchanges destined to that FID.
 */
void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
{
	struct fc_exch_mgr_anchor *ema;
	unsigned int cpu;

	list_for_each_entry(ema, &lport->ema_list, ema_list) {
		for_each_possible_cpu(cpu)
			fc_exch_pool_reset(lport,
					   per_cpu_ptr(ema->mp->pool, cpu),
					   sid, did);
	}
}
EXPORT_SYMBOL(fc_exch_mgr_reset);
Expand Down Expand Up @@ -1777,11 +1803,6 @@ static void fc_exch_mgr_destroy(struct kref *kref)
{
struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);

/*
* The total exch count must be zero
* before freeing exchange manager.
*/
WARN_ON(mp->total_exches != 0);
mempool_destroy(mp->ep_pool);
free_percpu(mp->pool);
kfree(mp);
Expand All @@ -1802,7 +1823,6 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
bool (*match)(struct fc_frame *))
{
struct fc_exch_mgr *mp;
size_t len;
u16 pool_exch_range;
size_t pool_size;
unsigned int cpu;
Expand All @@ -1816,25 +1836,16 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
}

/*
* Memory need for EM
* allocate memory for EM
*/
len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *));
len += sizeof(struct fc_exch_mgr);

mp = kzalloc(len, GFP_ATOMIC);
mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
if (!mp)
return NULL;

mp->class = class;
mp->total_exches = 0;
mp->exches = (struct fc_exch **)(mp + 1);
/* adjust em exch xid range for offload */
mp->min_xid = min_xid;
mp->max_xid = max_xid;
mp->next_xid = min_xid;

INIT_LIST_HEAD(&mp->ex_list);
spin_lock_init(&mp->em_lock);

mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
if (!mp->ep_pool)
Expand Down Expand Up @@ -1944,7 +1955,7 @@ struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
if (!rc)
fc_exch_mgr_delete_ep(ep);
fc_exch_delete(ep);
return NULL;
}
EXPORT_SYMBOL(fc_exch_seq_send);
Expand Down
Loading

0 comments on commit b41295f

Please sign in to comment.