IB/ipath: Fix sendctrl locking

Code review pointed out that the locking around uses of ipath_sendctrl
and kr_sendctrl was, in several places, incorrect and/or inconsistent.
All updates of the dd->ipath_sendctrl shadow copy and writes of
kr_sendctrl are now done under the new ipath_sendctrl_lock, followed by
a flushing read of kr_scratch.

Signed-off-by: John Gregor <john.gregor@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
John Gregor, 2007-09-05 01:57:14 -07:00, committed by Roland Dreier
parent 9ab4295d1d
commit e342c11917
6 changed files with 72 additions and 29 deletions
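Every hunk below applies the same pattern: take dd->ipath_sendctrl_lock, update the
dd->ipath_sendctrl shadow copy, write it out to kr_sendctrl, flush the write with a read
of kr_scratch, then drop the lock. As a rough illustration only, the following minimal
userspace sketch shows that shadow-register-under-lock idiom; the struct, helper names,
and bit values are hypothetical stand-ins, not the driver's real API.

/*
 * Minimal userspace sketch (NOT driver code) of the idiom used in this patch:
 * the cached sendctrl value and the register it shadows are only ever updated
 * together, under one lock, with a flushing read afterwards.
 * All names and bit values here are hypothetical stand-ins.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define S_PIOENABLE      (1ULL << 0)  /* stand-in for INFINIPATH_S_PIOENABLE */
#define S_PIOBUFAVAILUPD (1ULL << 1)  /* stand-in for INFINIPATH_S_PIOBUFAVAILUPD */
#define S_PIOINTBUFAVAIL (1ULL << 2)  /* stand-in for INFINIPATH_S_PIOINTBUFAVAIL */

struct fake_dev {
        pthread_mutex_t sendctrl_lock;  /* plays the role of ipath_sendctrl_lock */
        uint64_t sendctrl_shadow;       /* plays the role of dd->ipath_sendctrl */
        uint64_t sendctrl_reg;          /* plays the role of kr_sendctrl */
};

/* model of ipath_write_kreg(): push a value out to the "hardware" */
static void write_sendctrl(struct fake_dev *dd, uint64_t val)
{
        dd->sendctrl_reg = val;
}

/* model of the kr_scratch read used to flush the register write */
static uint64_t flush_write(struct fake_dev *dd)
{
        return dd->sendctrl_reg;
}

/* set and clear sendctrl bits atomically with respect to other callers */
static void sendctrl_update(struct fake_dev *dd, uint64_t set, uint64_t clear)
{
        pthread_mutex_lock(&dd->sendctrl_lock);
        dd->sendctrl_shadow |= set;
        dd->sendctrl_shadow &= ~clear;
        write_sendctrl(dd, dd->sendctrl_shadow);
        flush_write(dd);
        pthread_mutex_unlock(&dd->sendctrl_lock);
}

int main(void)
{
        struct fake_dev dd = { .sendctrl_lock = PTHREAD_MUTEX_INITIALIZER };

        /* enable PIO sends and PIO-avail updates, as enable_chip() does */
        sendctrl_update(&dd, S_PIOENABLE | S_PIOBUFAVAILUPD, 0);
        /* clear the "interrupt when a buffer frees up" bit, as ipath_intr() does */
        sendctrl_update(&dd, 0, S_PIOINTBUFAVAIL);

        printf("sendctrl = 0x%llx\n", (unsigned long long)dd.sendctrl_shadow);
        return 0;
}

The trailing flush_write() call models the kr_scratch read the patch adds after each
register write ("/* flush it */" in the hunks below).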


@@ -803,31 +803,37 @@ void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
                          unsigned cnt)
 {
         unsigned i, last = first + cnt;
-        u64 sendctrl, sendorig;
+        unsigned long flags;
         ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
-        sendorig = dd->ipath_sendctrl;
         for (i = first; i < last; i++) {
-                sendctrl = sendorig | INFINIPATH_S_DISARM |
-                        (i << INFINIPATH_S_DISARMPIOBUF_SHIFT);
+                spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+                /*
+                 * The disarm-related bits are write-only, so it
+                 * is ok to OR them in with our copy of sendctrl
+                 * while we hold the lock.
+                 */
                 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                                 sendctrl);
+                        dd->ipath_sendctrl | INFINIPATH_S_DISARM |
+                        (i << INFINIPATH_S_DISARMPIOBUF_SHIFT));
+                /* can't disarm bufs back-to-back per iba7220 spec */
+                ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+                spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
         }
         /*
-         * Write it again with current value, in case ipath_sendctrl changed
-         * while we were looping; no critical bits that would require
-         * locking.
-         *
-         * disable PIOAVAILUPD, then re-enable, reading scratch in
+         * Disable PIOAVAILUPD, then re-enable, reading scratch in
          * between. This seems to avoid a chip timing race that causes
-         * pioavail updates to memory to stop.
+         * pioavail updates to memory to stop. We xor as we don't
+         * know the state of the bit when we're called.
          */
+        spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
         ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                         sendorig & ~INFINIPATH_S_PIOBUFAVAILUPD);
-        sendorig = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+                         dd->ipath_sendctrl ^ INFINIPATH_S_PIOBUFAVAILUPD);
+        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
         ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                          dd->ipath_sendctrl);
+        spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 }
/**
@@ -2056,6 +2062,8 @@ void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
  */
 void ipath_shutdown_device(struct ipath_devdata *dd)
 {
+        unsigned long flags;
         ipath_dbg("Shutting down the device\n");
         dd->ipath_flags |= IPATH_LINKUNK;
@@ -2076,9 +2084,13 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
          * gracefully stop all sends allowing any in progress to trickle out
          * first.
          */
-        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL);
+        spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+        dd->ipath_sendctrl = 0;
+        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
         /* flush it */
         ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+        spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
         /*
          * enough for anything that's going to trickle out to have actually
          * done so.


@@ -2149,11 +2149,15 @@ static int ipath_get_slave_info(struct ipath_portdata *pd,
 static int ipath_force_pio_avail_update(struct ipath_devdata *dd)
 {
-        u64 reg = dd->ipath_sendctrl;
+        unsigned long flags;
-        clear_bit(IPATH_S_PIOBUFAVAILUPD, &reg);
-        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, reg);
+        spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+                dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
         ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
         ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
+        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+        spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
         return 0;
 }


@@ -345,7 +345,7 @@ static int init_chip_first(struct ipath_devdata *dd,
                    dd->ipath_piobcnt2k, dd->ipath_pio2kbase);
         spin_lock_init(&dd->ipath_tid_lock);
+        spin_lock_init(&dd->ipath_sendctrl_lock);
         spin_lock_init(&dd->ipath_gpio_lock);
         spin_lock_init(&dd->ipath_eep_st_lock);
         mutex_init(&dd->ipath_eep_lock);
@@ -372,9 +372,9 @@ static int init_chip_reset(struct ipath_devdata *dd,
         *pdp = dd->ipath_pd[0];
         /* ensure chip does no sends or receives while we re-initialize */
         dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U;
-        ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 0);
-        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0);
-        ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0);
+        ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl);
+        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
+        ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control);
         rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
         if (dd->ipath_portcnt != rtmp)
@@ -487,6 +487,7 @@ static void enable_chip(struct ipath_devdata *dd,
                         struct ipath_portdata *pd, int reinit)
 {
         u32 val;
+        unsigned long flags;
         int i;
         if (!reinit)
@@ -495,11 +496,13 @@ static void enable_chip(struct ipath_devdata *dd,
         ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
                          dd->ipath_rcvctrl);
+        spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
         /* Enable PIO send, and update of PIOavail regs to memory. */
         dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE |
                 INFINIPATH_S_PIOBUFAVAILUPD;
-        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                         dd->ipath_sendctrl);
+        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
+        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+        spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
         /*
          * enable port 0 receive, and receive interrupt. other ports
@@ -696,6 +699,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
         u64 val;
         struct ipath_portdata *pd = NULL; /* keep gcc4 happy */
         gfp_t gfp_flags = GFP_USER | __GFP_COMP;
+        unsigned long flags;
         ret = init_housekeeping(dd, &pd, reinit);
         if (ret)
@@ -827,8 +831,12 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
         ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
                          ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
         ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
-        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                         INFINIPATH_S_PIOENABLE);
+        spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+        dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE;
+        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
+        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+        spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
         /*
          * before error clears, since we expect serdes pll errors during


@@ -795,6 +795,7 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
 {
         int i, im;
         __le64 val;
+        unsigned long flags;
         /* disable error interrupts, to avoid confusion */
         ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
@@ -813,11 +814,14 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
                          dd->ipath_control);
         /* ensure pio avail updates continue */
+        spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
         ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                  dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
         ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
         ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                         dd->ipath_sendctrl);
+                 dd->ipath_sendctrl);
+        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+        spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
         /*
          * We just enabled pioavailupdate, so dma copy is almost certainly
@@ -922,6 +926,7 @@ static noinline void ipath_bad_regread(struct ipath_devdata *dd)
 static void handle_layer_pioavail(struct ipath_devdata *dd)
 {
+        unsigned long flags;
         int ret;
         ret = ipath_ib_piobufavail(dd->verbs_dev);
@@ -930,9 +935,12 @@ static void handle_layer_pioavail(struct ipath_devdata *dd)
                 return;
 set:
-        set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
+        spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+        dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
         ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                          dd->ipath_sendctrl);
+        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+        spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 }
/*
@@ -1168,9 +1176,14 @@ irqreturn_t ipath_intr(int irq, void *data)
                 handle_urcv(dd, istat);
         if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
-                clear_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
+                unsigned long flags;
+                spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+                dd->ipath_sendctrl &= ~INFINIPATH_S_PIOINTBUFAVAIL;
                 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                                  dd->ipath_sendctrl);
+                ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+                spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
                 handle_layer_pioavail(dd);
         }


@@ -376,6 +376,7 @@ struct ipath_devdata {
         dma_addr_t *ipath_physshadow;
         /* lock to workaround chip bug 9437 */
         spinlock_t ipath_tid_lock;
+        spinlock_t ipath_sendctrl_lock;
         /*
          * IPATH_STATUS_*,


@@ -479,9 +479,14 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
 static void want_buffer(struct ipath_devdata *dd)
 {
-        set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
+        unsigned long flags;
+        spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+        dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
         ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                          dd->ipath_sendctrl);
+        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+        spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 }
/**