2006-03-29 18:23:29 -05:00
|
|
|
/*
|
2008-04-17 00:09:32 -04:00
|
|
|
* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
|
2006-03-29 18:23:29 -05:00
|
|
|
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
|
|
|
|
*
|
|
|
|
* This software is available to you under a choice of one of two
|
|
|
|
* licenses. You may choose to be licensed under the terms of the GNU
|
|
|
|
* General Public License (GPL) Version 2, available from the file
|
|
|
|
* COPYING in the main directory of this source tree, or the
|
|
|
|
* OpenIB.org BSD license below:
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or
|
|
|
|
* without modification, are permitted provided that the following
|
|
|
|
* conditions are met:
|
|
|
|
*
|
|
|
|
* - Redistributions of source code must retain the above
|
|
|
|
* copyright notice, this list of conditions and the following
|
|
|
|
* disclaimer.
|
|
|
|
*
|
|
|
|
* - Redistributions in binary form must reproduce the above
|
|
|
|
* copyright notice, this list of conditions and the following
|
|
|
|
* disclaimer in the documentation and/or other materials
|
|
|
|
* provided with the distribution.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
|
|
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
|
|
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
|
|
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
|
|
|
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
|
|
|
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
|
|
|
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
|
|
* SOFTWARE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/pci.h>
|
2008-04-17 00:09:24 -04:00
|
|
|
#include <linux/delay.h>
|
2006-03-29 18:23:29 -05:00
|
|
|
|
|
|
|
#include "ipath_kernel.h"
|
2006-08-25 14:24:31 -04:00
|
|
|
#include "ipath_verbs.h"
|
2006-07-01 07:36:17 -04:00
|
|
|
#include "ipath_common.h"
|
2006-03-29 18:23:29 -05:00
|
|
|
|
2007-03-15 17:45:07 -04:00
|
|
|
|
2006-09-28 12:00:18 -04:00
|
|
|
/*
|
|
|
|
* Called when we might have an error that is specific to a particular
|
|
|
|
* PIO buffer, and may need to cancel that buffer, so it can be re-used.
|
|
|
|
*/
|
2008-05-07 14:00:15 -04:00
|
|
|
void ipath_disarm_senderrbufs(struct ipath_devdata *dd)
|
2006-09-28 12:00:18 -04:00
|
|
|
{
|
|
|
|
u32 piobcnt;
|
|
|
|
unsigned long sbuf[4];
|
|
|
|
/*
|
|
|
|
* it's possible that sendbuffererror could have bits set; might
|
|
|
|
* have already done this as a result of hardware error handling
|
|
|
|
*/
|
|
|
|
piobcnt = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
|
|
|
|
/* read these before writing errorclear */
|
|
|
|
sbuf[0] = ipath_read_kreg64(
|
|
|
|
dd, dd->ipath_kregs->kr_sendbuffererror);
|
|
|
|
sbuf[1] = ipath_read_kreg64(
|
|
|
|
dd, dd->ipath_kregs->kr_sendbuffererror + 1);
|
2008-04-17 00:09:31 -04:00
|
|
|
if (piobcnt > 128)
|
2006-09-28 12:00:18 -04:00
|
|
|
sbuf[2] = ipath_read_kreg64(
|
|
|
|
dd, dd->ipath_kregs->kr_sendbuffererror + 2);
|
2008-04-17 00:09:31 -04:00
|
|
|
if (piobcnt > 192)
|
2006-09-28 12:00:18 -04:00
|
|
|
sbuf[3] = ipath_read_kreg64(
|
|
|
|
dd, dd->ipath_kregs->kr_sendbuffererror + 3);
|
2008-04-17 00:09:31 -04:00
|
|
|
else
|
|
|
|
sbuf[3] = 0;
|
2006-09-28 12:00:18 -04:00
|
|
|
|
|
|
|
if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) {
|
|
|
|
int i;
|
2007-06-18 17:24:41 -04:00
|
|
|
if (ipath_debug & (__IPATH_PKTDBG|__IPATH_DBG) &&
|
|
|
|
dd->ipath_lastcancel > jiffies) {
|
2006-09-28 12:00:18 -04:00
|
|
|
__IPATH_DBG_WHICH(__IPATH_PKTDBG|__IPATH_DBG,
|
|
|
|
"SendbufErrs %lx %lx", sbuf[0],
|
|
|
|
sbuf[1]);
|
|
|
|
if (ipath_debug & __IPATH_PKTDBG && piobcnt > 128)
|
|
|
|
printk(" %lx %lx ", sbuf[2], sbuf[3]);
|
|
|
|
printk("\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < piobcnt; i++)
|
2008-05-07 14:00:15 -04:00
|
|
|
if (test_bit(i, sbuf))
|
2006-09-28 12:00:18 -04:00
|
|
|
ipath_disarm_piobufs(dd, i, 1);
|
2007-06-18 17:24:41 -04:00
|
|
|
/* ignore armlaunch errs for a bit */
|
|
|
|
dd->ipath_lastcancel = jiffies+3;
|
2006-09-28 12:00:18 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-07-01 07:36:03 -04:00
|
|
|
/* These are all rcv-related errors which we want to count for stats */
#define E_SUM_PKTERRS \
	(INFINIPATH_E_RHDRLEN | INFINIPATH_E_RBADTID | \
	 INFINIPATH_E_RBADVERSION | INFINIPATH_E_RHDR | \
	 INFINIPATH_E_RLONGPKTLEN | INFINIPATH_E_RSHORTPKTLEN | \
	 INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RMINPKTLEN | \
	 INFINIPATH_E_RFORMATERR | INFINIPATH_E_RUNSUPVL | \
	 INFINIPATH_E_RUNEXPCHAR | INFINIPATH_E_REBP)

/* These are all send-related errors which we want to count for stats */
#define E_SUM_ERRS \
	(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM | \
	 INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
	 INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNSUPVL | \
	 INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
	 INFINIPATH_E_INVALIDADDR)

/*
 * this is similar to E_SUM_ERRS, but can't ignore armlaunch, don't ignore
 * errors not related to freeze and cancelling buffers.  Can't ignore
 * armlaunch because could get more while still cleaning up, and need
 * to cancel those as they happen.
 */
#define E_SPKT_ERRS_IGNORE \
	(INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
	 INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SMINPKTLEN | \
	 INFINIPATH_E_SPKTLEN)

/*
 * these are errors that can occur when the link changes state while
 * a packet is being sent or received.  This doesn't cover things
 * like EBP or VCRC that can be the result of a sending having the
 * link change state, so we receive a "known bad" packet.
 */
#define E_SUM_LINK_PKTERRS \
	(INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
	 INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
	 INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RMINPKTLEN | \
	 INFINIPATH_E_RUNEXPCHAR)
|
|
|
|
|
2006-03-29 18:23:29 -05:00
|
|
|
/*
 * Disarm any send buffers flagged in error, then decide which of the
 * reported errors should be ignored this time around.  Returns the
 * subset of errs that the caller should not report.
 */
static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
{
	ipath_disarm_senderrbufs(dd);

	/* link-transition packet errors only matter once the link is up */
	if (!(errs & E_SUM_LINK_PKTERRS) ||
	    (dd->ipath_flags & IPATH_LINKACTIVE))
		return 0;

	/*
	 * This can happen when SMA is trying to bring the link
	 * up, but the IB link changes state at the "wrong" time.
	 * The IB logic then complains that the packet isn't
	 * valid.  We don't want to confuse people, so we just
	 * don't print them, except at debug
	 */
	ipath_dbg("Ignoring packet errors %llx, because link not "
		  "ACTIVE\n", (unsigned long long) errs);

	return errs & E_SUM_LINK_PKTERRS;
}
|
|
|
|
|
2006-09-28 12:00:08 -04:00
|
|
|
/* generic hw error messages... */
/* builds one table entry for a TXE (send-side) memory parity error bit */
#define INFINIPATH_HWE_TXEMEMPARITYERR_MSG(a) \
	{ \
		.mask = ( INFINIPATH_HWE_TXEMEMPARITYERR_##a << \
			  INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT ), \
		.msg = "TXE " #a " Memory Parity" \
	}
/* builds one table entry for an RXE (receive-side) memory parity error bit */
#define INFINIPATH_HWE_RXEMEMPARITYERR_MSG(a) \
	{ \
		.mask = ( INFINIPATH_HWE_RXEMEMPARITYERR_##a << \
			  INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT ), \
		.msg = "RXE " #a " Memory Parity" \
	}

/*
 * Chip-independent hardware error descriptions, scanned by
 * ipath_format_hwerrors() before the caller's chip-specific table.
 */
static const struct ipath_hwerror_msgs ipath_generic_hwerror_msgs[] = {
	INFINIPATH_HWE_MSG(IBCBUSFRSPCPARITYERR, "IPATH2IB Parity"),
	INFINIPATH_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2IPATH Parity"),

	INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOBUF),
	INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOPBC),
	INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOLAUNCHFIFO),

	INFINIPATH_HWE_RXEMEMPARITYERR_MSG(RCVBUF),
	INFINIPATH_HWE_RXEMEMPARITYERR_MSG(LOOKUPQ),
	INFINIPATH_HWE_RXEMEMPARITYERR_MSG(EAGERTID),
	INFINIPATH_HWE_RXEMEMPARITYERR_MSG(EXPTID),
	INFINIPATH_HWE_RXEMEMPARITYERR_MSG(FLAGBUF),
	INFINIPATH_HWE_RXEMEMPARITYERR_MSG(DATAINFO),
	INFINIPATH_HWE_RXEMEMPARITYERR_MSG(HDRINFO),
};
|
|
|
|
|
|
|
|
/**
 * ipath_format_hwmsg - format a single hwerror message
 * @msg message buffer
 * @msgl length of message buffer
 * @hwmsg message to add to message buffer
 *
 * Appends "[hwmsg]" to msg, bounded by msgl; silently truncates
 * if the buffer is full (strlcat semantics).
 */
static void ipath_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
{
	const char *pieces[] = { "[", hwmsg, "]" };
	size_t n;

	for (n = 0; n < 3; n++)
		strlcat(msg, pieces[n], msgl);
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ipath_format_hwerrors - format hardware error messages for display
|
|
|
|
* @hwerrs hardware errors bit vector
|
|
|
|
* @hwerrmsgs hardware error descriptions
|
|
|
|
* @nhwerrmsgs number of hwerrmsgs
|
|
|
|
* @msg message buffer
|
|
|
|
* @msgl message buffer length
|
|
|
|
*/
|
|
|
|
void ipath_format_hwerrors(u64 hwerrs,
|
|
|
|
const struct ipath_hwerror_msgs *hwerrmsgs,
|
|
|
|
size_t nhwerrmsgs,
|
|
|
|
char *msg, size_t msgl)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
const int glen =
|
|
|
|
sizeof(ipath_generic_hwerror_msgs) /
|
|
|
|
sizeof(ipath_generic_hwerror_msgs[0]);
|
|
|
|
|
|
|
|
for (i=0; i<glen; i++) {
|
|
|
|
if (hwerrs & ipath_generic_hwerror_msgs[i].mask) {
|
|
|
|
ipath_format_hwmsg(msg, msgl,
|
|
|
|
ipath_generic_hwerror_msgs[i].msg);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i=0; i<nhwerrmsgs; i++) {
|
|
|
|
if (hwerrs & hwerrmsgs[i].mask) {
|
|
|
|
ipath_format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-03-29 18:23:29 -05:00
|
|
|
/* return the strings for the most common link states */
|
2008-04-17 00:09:24 -04:00
|
|
|
static char *ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
|
2006-03-29 18:23:29 -05:00
|
|
|
{
|
|
|
|
char *ret;
|
2008-04-17 00:09:24 -04:00
|
|
|
u32 state;
|
2006-03-29 18:23:29 -05:00
|
|
|
|
2008-04-17 00:09:24 -04:00
|
|
|
state = ipath_ib_state(dd, ibcs);
|
|
|
|
if (state == dd->ib_init)
|
2006-03-29 18:23:29 -05:00
|
|
|
ret = "Init";
|
2008-04-17 00:09:24 -04:00
|
|
|
else if (state == dd->ib_arm)
|
2006-03-29 18:23:29 -05:00
|
|
|
ret = "Arm";
|
2008-04-17 00:09:24 -04:00
|
|
|
else if (state == dd->ib_active)
|
2006-03-29 18:23:29 -05:00
|
|
|
ret = "Active";
|
2008-04-17 00:09:24 -04:00
|
|
|
else
|
2006-03-29 18:23:29 -05:00
|
|
|
ret = "Down";
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2007-09-19 19:47:31 -04:00
|
|
|
void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev)
|
|
|
|
{
|
|
|
|
struct ib_event event;
|
|
|
|
|
|
|
|
event.device = &dd->verbs_dev->ibdev;
|
|
|
|
event.element.port_num = 1;
|
|
|
|
event.event = ev;
|
|
|
|
ib_dispatch_event(&event);
|
|
|
|
}
|
|
|
|
|
2006-03-29 18:23:29 -05:00
|
|
|
/*
 * Handle an IBSTATUSCHANGED error interrupt: read the current IBC status,
 * filter out transitory recovery states, let the chip-specific up/down
 * handler claim the transition if it wants to, and otherwise update the
 * driver's link-state flags, status bits, and notifications to match the
 * hardware state.  Updates dd->ipath_lastibcstat except for recovery
 * states, which are ignored entirely.
 */
static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
				     ipath_err_t errs)
{
	u32 ltstate, lstate, ibstate, lastlstate;
	u32 init = dd->ib_init;
	u32 arm = dd->ib_arm;
	u32 active = dd->ib_active;
	const u64 ibcs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);

	lstate = ipath_ib_linkstate(dd, ibcs); /* linkstate */
	ibstate = ipath_ib_state(dd, ibcs);
	/* linkstate at last interrupt */
	lastlstate = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
	ltstate = ipath_ib_linktrstate(dd, ibcs); /* link training state */

	/*
	 * Since going into a recovery state causes the link state to go
	 * down and since recovery is transitory, it is better if we "miss"
	 * ever seeing the link training state go into recovery (i.e.,
	 * ignore this transition for link state special handling purposes)
	 * without even updating ipath_lastibcstat.
	 */
	if ((ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN) ||
	    (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT) ||
	    (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERIDLE))
		goto done;

	/*
	 * if linkstate transitions into INIT from any of the various down
	 * states, or if it transitions from any of the up (INIT or better)
	 * states into any of the down states (except link recovery), then
	 * call the chip-specific code to take appropriate actions.
	 */
	if (lstate >= INFINIPATH_IBCS_L_STATE_INIT &&
		lastlstate == INFINIPATH_IBCS_L_STATE_DOWN) {
		/* transitioned to UP */
		if (dd->ipath_f_ib_updown(dd, 1, ibcs)) {
			/* link came up, so we must no longer be disabled */
			dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
			ipath_cdbg(LINKVERB, "LinkUp handled, skipped\n");
			goto skip_ibchange; /* chip-code handled */
		}
	} else if ((lastlstate >= INFINIPATH_IBCS_L_STATE_INIT ||
		(dd->ipath_flags & IPATH_IB_FORCE_NOTIFY)) &&
		ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT &&
		ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
		int handled;
		handled = dd->ipath_f_ib_updown(dd, 0, ibcs);
		/* FORCE_NOTIFY is one-shot: consumed on this transition */
		dd->ipath_flags &= ~IPATH_IB_FORCE_NOTIFY;
		if (handled) {
			ipath_cdbg(LINKVERB, "LinkDown handled, skipped\n");
			goto skip_ibchange; /* chip-code handled */
		}
	}

	/*
	 * Significant enough to always print and get into logs, if it was
	 * unexpected.  If it was a requested state change, we'll have
	 * already cleared the flags, so we won't print this warning
	 */
	if ((ibstate != arm && ibstate != active) &&
	    (dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))) {
		dev_info(&dd->pcidev->dev, "Link state changed from %s "
			 "to %s\n", (dd->ipath_flags & IPATH_LINKARMED) ?
			 "ARM" : "ACTIVE", ib_linkstate(dd, ibcs));
	}

	if (ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
	    ltstate == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
		u32 lastlts;
		lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
		/*
		 * Ignore cycling back and forth from Polling.Active to
		 * Polling.Quiet while waiting for the other end of the link
		 * to come up, except to try and decide if we are connected
		 * to a live IB device or not.  We will cycle back and
		 * forth between them if no cable is plugged in, the other
		 * device is powered off or disabled, etc.
		 */
		if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
		    lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
			/* ~40 consecutive poll cycles => assume no cable */
			if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) &&
			     (++dd->ipath_ibpollcnt == 40)) {
				dd->ipath_flags |= IPATH_NOCABLE;
				*dd->ipath_statusp |=
					IPATH_STATUS_IB_NOCABLE;
				ipath_cdbg(LINKVERB, "Set NOCABLE\n");
			}
			ipath_cdbg(LINKVERB, "POLL change to %s (%x)\n",
				ipath_ibcstatus_str[ltstate], ibstate);
			goto skip_ibchange;
		}
	}

	dd->ipath_ibpollcnt = 0; /* not poll*, now */
	ipath_stats.sps_iblink++;

	/*
	 * If the link-error-recovery counter moved while we were not in
	 * INIT, force the link down rather than reporting a stale "up".
	 */
	if (ibstate != init && dd->ipath_lastlinkrecov && ipath_linkrecovery) {
		u64 linkrecov;
		linkrecov = ipath_snap_cntr(dd,
			dd->ipath_cregs->cr_iblinkerrrecovcnt);
		if (linkrecov != dd->ipath_lastlinkrecov) {
			ipath_dbg("IB linkrecov up %Lx (%s %s) recov %Lu\n",
				ibcs, ib_linkstate(dd, ibcs),
				ipath_ibcstatus_str[ltstate],
				linkrecov);
			/* and no more until active again */
			dd->ipath_lastlinkrecov = 0;
			ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
			goto skip_ibchange;
		}
	}

	if (ibstate == init || ibstate == arm || ibstate == active) {
		*dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;
		if (ibstate == init || ibstate == arm) {
			*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
			if (dd->ipath_flags & IPATH_LINKACTIVE)
				signal_ib_event(dd, IB_EVENT_PORT_ERR);
		}
		if (ibstate == arm) {
			dd->ipath_flags |= IPATH_LINKARMED;
			dd->ipath_flags &= ~(IPATH_LINKUNK |
				IPATH_LINKINIT | IPATH_LINKDOWN |
				IPATH_LINKACTIVE | IPATH_NOCABLE);
			ipath_hol_down(dd);
		} else if (ibstate == init) {
			/*
			 * set INIT and DOWN.  Down is checked by
			 * most of the other code, but INIT is
			 * useful to know in a few places.
			 */
			dd->ipath_flags |= IPATH_LINKINIT |
				IPATH_LINKDOWN;
			dd->ipath_flags &= ~(IPATH_LINKUNK |
				IPATH_LINKARMED | IPATH_LINKACTIVE |
				IPATH_NOCABLE);
			ipath_hol_down(dd);
		} else {  /* active */
			/* re-arm link recovery detection for next time */
			dd->ipath_lastlinkrecov = ipath_snap_cntr(dd,
				dd->ipath_cregs->cr_iblinkerrrecovcnt);
			*dd->ipath_statusp |=
				IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
			dd->ipath_flags |= IPATH_LINKACTIVE;
			dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
				| IPATH_LINKDOWN | IPATH_LINKARMED |
				IPATH_NOCABLE);
			if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
				ipath_restart_sdma(dd);
			signal_ib_event(dd, IB_EVENT_PORT_ACTIVE);
			/* LED active not handled in chip _f_updown */
			dd->ipath_f_setextled(dd, lstate, ltstate);
			ipath_hol_up(dd);
		}

		/*
		 * print after we've already done the work, so as not to
		 * delay the state changes and notifications, for debugging
		 */
		if (lstate == lastlstate)
			ipath_cdbg(LINKVERB, "Unchanged from last: %s "
				"(%x)\n", ib_linkstate(dd, ibcs), ibstate);
		else
			ipath_cdbg(VERBOSE, "Unit %u: link up to %s %s (%x)\n",
				dd->ipath_unit, ib_linkstate(dd, ibcs),
				ipath_ibcstatus_str[ltstate], ibstate);
	} else { /* down */
		if (dd->ipath_flags & IPATH_LINKACTIVE)
			signal_ib_event(dd, IB_EVENT_PORT_ERR);
		dd->ipath_flags |= IPATH_LINKDOWN;
		dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
			| IPATH_LINKACTIVE |
			IPATH_LINKARMED);
		*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
		dd->ipath_lli_counter = 0;

		if (lastlstate != INFINIPATH_IBCS_L_STATE_DOWN)
			ipath_cdbg(VERBOSE, "Unit %u link state down "
				"(state 0x%x), from %s\n",
				dd->ipath_unit, lstate,
				ib_linkstate(dd, dd->ipath_lastibcstat));
		else
			ipath_cdbg(LINKVERB, "Unit %u link state changed "
				"to %s (0x%x) from down (%x)\n",
				dd->ipath_unit,
				ipath_ibcstatus_str[ltstate],
				ibstate, lastlstate);
	}

skip_ibchange:
	dd->ipath_lastibcstat = ibcs;
done:
	return;
}
|
|
|
|
|
|
|
|
/*
 * Report how many error messages were suppressed by the rate limiter in
 * handle_frequent_errors().  Severity of the report depends on which
 * error bits were seen: "normal" queue-full errors (and SDMADISABLED,
 * unless debugging) are only logged at debug level.
 */
static void handle_supp_msgs(struct ipath_devdata *dd,
			     unsigned supp_msgs, char *msg, u32 msgsz)
{
	/*
	 * Print the message unless it's ibc status change only, which
	 * happens so often we never want to count it.
	 */
	if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
		int iserr;
		ipath_err_t mask;
		iserr = ipath_decode_err(dd, msg, msgsz,
					 dd->ipath_lasterror &
					 ~INFINIPATH_E_IBSTATUSCHANGED);

		/* error bits considered benign enough for debug-only logs */
		mask = INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
			INFINIPATH_E_PKTERRS | INFINIPATH_E_SDMADISABLED;

		/* if we're in debug, then don't mask SDMADISABLED msgs */
		if (ipath_debug & __IPATH_DBG)
			mask &= ~INFINIPATH_E_SDMADISABLED;

		if (dd->ipath_lasterror & ~mask)
			ipath_dev_err(dd, "Suppressed %u messages for "
				      "fast-repeating errors (%s) (%llx)\n",
				      supp_msgs, msg,
				      (unsigned long long)
				      dd->ipath_lasterror);
		else {
			/*
			 * rcvegrfull and rcvhdrqfull are "normal", for some
			 * types of processes (mostly benchmarks) that send
			 * huge numbers of messages, while not processing
			 * them.  So only complain about these at debug
			 * level.
			 */
			if (iserr)
				ipath_dbg("Suppressed %u messages for %s\n",
					  supp_msgs, msg);
			else
				ipath_cdbg(ERRPKT,
					"Suppressed %u messages for %s\n",
					supp_msgs, msg);
		}
	}
}
|
|
|
|
|
|
|
|
/*
 * Rate-limit error reporting.  Sets *noprint when the caller should stay
 * quiet, and returns the running count of suppressed messages (0 while
 * printing is allowed).  Uses static state, so this is effectively
 * per-driver, not per-device — acceptable because it only gates logging.
 */
static unsigned handle_frequent_errors(struct ipath_devdata *dd,
				       ipath_err_t errs, char *msg,
				       u32 msgsz, int *noprint)
{
	unsigned long nc;
	static unsigned long nextmsg_time;
	static unsigned nmsgs, supp_msgs;

	/*
	 * Throttle back "fast" messages to no more than 10 per 5 seconds.
	 * This isn't perfect, but it's a reasonable heuristic.  If we get
	 * more than 10, give a 6x longer delay.
	 */
	nc = jiffies;
	if (nmsgs > 10) {
		if (time_before(nc, nextmsg_time)) {
			*noprint = 1;
			/* first suppressed message starts the long backoff */
			if (!supp_msgs++)
				nextmsg_time = nc + HZ * 3;
		}
		else if (supp_msgs) {
			/* window expired: report what was suppressed, reset */
			handle_supp_msgs(dd, supp_msgs, msg, msgsz);
			supp_msgs = 0;
			nmsgs = 0;
		}
	}
	else if (!nmsgs++ || time_after(nc, nextmsg_time))
		nextmsg_time = nc + HZ / 2;

	return supp_msgs;
}
|
|
|
|
|
2008-04-17 00:09:32 -04:00
|
|
|
/*
 * Handle send-DMA error bits: log debug detail, mark SDMA disabled under
 * the sdma lock, and cancel outstanding sends unless an abort was
 * already in progress (in which case the abort path handles cleanup).
 */
static void handle_sdma_errors(struct ipath_devdata *dd, ipath_err_t errs)
{
	unsigned long flags;
	int expected;

	if (ipath_debug & __IPATH_DBG) {
		char msg[128];
		ipath_decode_err(dd, msg, sizeof msg, errs &
			INFINIPATH_E_SDMAERRS);
		ipath_dbg("errors %lx (%s)\n", (unsigned long)errs, msg);
	}
	if (ipath_debug & __IPATH_VERBDBG) {
		/* dump the SDMA engine registers for post-mortem analysis */
		unsigned long tl, hd, status, lengen;
		tl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
		hd = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
		status = ipath_read_kreg64(dd
			, dd->ipath_kregs->kr_senddmastatus);
		lengen = ipath_read_kreg64(dd,
			dd->ipath_kregs->kr_senddmalengen);
		ipath_cdbg(VERBOSE, "sdma tl 0x%lx hd 0x%lx status 0x%lx "
			"lengen 0x%lx\n", tl, hd, status, lengen);
	}

	/* status bits are shared with the abort tasklet; update under lock */
	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
	expected = test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
	if (!expected)
		ipath_cancel_sends(dd, 1);
}
|
|
|
|
|
|
|
|
/*
 * Dispatch send-DMA interrupt causes: normal SDMA progress interrupts go
 * to ipath_sdma_intr(); an SDmaDisabled interrupt marks the engine
 * disabled and, if unexpected, cancels sends and schedules the abort
 * tasklet to restart the engine.
 */
static void handle_sdma_intr(struct ipath_devdata *dd, u64 istat)
{
	unsigned long flags;
	int expected;

	/* ordinary SDMA completion work, unless we're shutting down */
	if ((istat & INFINIPATH_I_SDMAINT) &&
	    !test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		ipath_sdma_intr(dd);

	if (istat & INFINIPATH_I_SDMADISABLED) {
		/* "expected" if our own abort path already triggered it */
		expected = test_bit(IPATH_SDMA_ABORTING,
			&dd->ipath_sdma_status);
		ipath_dbg("%s SDmaDisabled intr\n",
			expected ? "expected" : "unexpected");
		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
		__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
		if (!expected)
			ipath_cancel_sends(dd, 1);
		if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
			tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
	}
}
|
|
|
|
|
|
|
|
/*
 * Handle a receive header-queue-full error.  Returns a bitmask of ports
 * whose kernel queues have packets to process (bit i set for port i).
 * For user ports, detects a newly-full queue, bumps the per-port
 * overflow count, and wakes any poll() waiters.
 */
static int handle_hdrq_full(struct ipath_devdata *dd)
{
	int chkerrpkts = 0;
	u32 hd, tl;
	u32 i;

	ipath_stats.sps_hdrqfull++;
	for (i = 0; i < dd->ipath_cfgports; i++) {
		struct ipath_portdata *pd = dd->ipath_pd[i];

		/*
		 * NOTE(review): port 0 is dereferenced without the !pd
		 * check applied to user ports below — presumably the
		 * kernel port is always allocated by this point; confirm
		 * against the init path.
		 */
		if (i == 0) {
			/*
			 * For kernel receive queues, we just want to know
			 * if there are packets in the queue that we can
			 * process.
			 */
			if (pd->port_head != ipath_get_hdrqtail(pd))
				chkerrpkts |= 1 << i;
			continue;
		}

		/* Skip if user context is not open */
		if (!pd || !pd->port_cnt)
			continue;

		/* Don't report the same point multiple times. */
		if (dd->ipath_flags & IPATH_NODMA_RTAIL)
			tl = ipath_read_ureg32(dd, ur_rcvhdrtail, i);
		else
			tl = ipath_get_rcvhdrtail(pd);
		if (tl == pd->port_lastrcvhdrqtail)
			continue;

		/* full when head is one behind tail (including wraparound) */
		hd = ipath_read_ureg32(dd, ur_rcvhdrhead, i);
		if (hd == (tl + 1) || (!hd && tl == dd->ipath_hdrqlast)) {
			pd->port_lastrcvhdrqtail = tl;
			pd->port_hdrqfull++;
			/* flush hdrqfull so that poll() sees it */
			wmb();
			wake_up_interruptible(&pd->port_wait);
		}
	}

	return chkerrpkts;
}
|
|
|
|
|
[PATCH] IB/ipath: fixed bug 9776 for real
The problem was that I was updating the head register multiple times in the
rcvhdrq processing loop, and setting the counter on each update. Since that
meant that the tail register was ahead of head for all but the last update, we
would get extra interrupts. The fix was to not write the counter value except
on the last update.
I also changed to update rcvhdrhead and rcvegrindexhead at most every 16
packets, if there were lots of packets in the queue (and of course, on the
last packet, regardless).
I also made some small cleanups while debugging this.
With these changes, xeon/monty typically sees two openib packets per interrupt
on sdp and ipoib, opteron/monty is about 1.25 pkts/intr.
I'm seeing about 3800 Mbit/s monty/xeon, and 5000-5100 opteron/monty with
netperf sdp. Netpipe doesn't show as good as that, peaking at about 4400 on
opteron/monty sdp. Plain ipoib xeon is about 2100+ netperf, opteron 2900+, at
128KB
Signed-off-by: olson@eng-12.pathscale.com
Signed-off-by: Bryan O'Sullivan <bos@pathscale.com>
Cc: "Michael S. Tsirkin" <mst@mellanox.co.il>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-07-01 07:36:05 -04:00
|
|
|
static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
|
2006-03-29 18:23:29 -05:00
|
|
|
{
|
2007-10-10 01:24:36 -04:00
|
|
|
char msg[128];
|
2006-03-29 18:23:29 -05:00
|
|
|
u64 ignore_this_time = 0;
|
2008-04-17 00:09:32 -04:00
|
|
|
u64 iserr = 0;
|
2006-03-29 18:23:29 -05:00
|
|
|
int chkerrpkts = 0, noprint = 0;
|
|
|
|
unsigned supp_msgs;
|
2007-05-17 10:26:28 -04:00
|
|
|
int log_idx;
|
2006-03-29 18:23:29 -05:00
|
|
|
|
2008-04-17 00:09:32 -04:00
|
|
|
/*
|
|
|
|
* don't report errors that are masked, either at init
|
|
|
|
* (not set in ipath_errormask), or temporarily (set in
|
|
|
|
* ipath_maskederrs)
|
|
|
|
*/
|
|
|
|
errs &= dd->ipath_errormask & ~dd->ipath_maskederrs;
|
2006-03-29 18:23:29 -05:00
|
|
|
|
2008-04-17 00:09:32 -04:00
|
|
|
supp_msgs = handle_frequent_errors(dd, errs, msg, (u32)sizeof msg,
|
|
|
|
&noprint);
|
2006-03-29 18:23:29 -05:00
|
|
|
|
|
|
|
/* do these first, they are most important */
|
|
|
|
if (errs & INFINIPATH_E_HARDWARE) {
|
|
|
|
/* reuse same msg buf */
|
|
|
|
dd->ipath_f_handle_hwerrors(dd, msg, sizeof msg);
|
2007-05-17 10:26:28 -04:00
|
|
|
} else {
|
|
|
|
u64 mask;
|
|
|
|
for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx) {
|
|
|
|
mask = dd->ipath_eep_st_masks[log_idx].errs_to_log;
|
|
|
|
if (errs & mask)
|
|
|
|
ipath_inc_eeprom_err(dd, log_idx, 1);
|
|
|
|
}
|
2006-03-29 18:23:29 -05:00
|
|
|
}
|
|
|
|
|
2008-04-17 00:09:32 -04:00
|
|
|
if (errs & INFINIPATH_E_SDMAERRS)
|
|
|
|
handle_sdma_errors(dd, errs);
|
|
|
|
|
2006-09-28 12:00:11 -04:00
|
|
|
if (!noprint && (errs & ~dd->ipath_e_bitsextant))
|
2006-03-29 18:23:29 -05:00
|
|
|
ipath_dev_err(dd, "error interrupt with unknown errors "
|
|
|
|
"%llx set\n", (unsigned long long)
|
2006-09-28 12:00:11 -04:00
|
|
|
(errs & ~dd->ipath_e_bitsextant));
|
2006-03-29 18:23:29 -05:00
|
|
|
|
|
|
|
if (errs & E_SUM_ERRS)
|
|
|
|
ignore_this_time = handle_e_sum_errs(dd, errs);
|
2006-07-01 07:36:03 -04:00
|
|
|
else if ((errs & E_SUM_LINK_PKTERRS) &&
|
|
|
|
!(dd->ipath_flags & IPATH_LINKACTIVE)) {
|
|
|
|
/*
|
|
|
|
* This can happen when SMA is trying to bring the link
|
|
|
|
* up, but the IB link changes state at the "wrong" time.
|
|
|
|
* The IB logic then complains that the packet isn't
|
|
|
|
* valid. We don't want to confuse people, so we just
|
|
|
|
* don't print them, except at debug
|
|
|
|
*/
|
|
|
|
ipath_dbg("Ignoring packet errors %llx, because link not "
|
|
|
|
"ACTIVE\n", (unsigned long long) errs);
|
|
|
|
ignore_this_time = errs & E_SUM_LINK_PKTERRS;
|
|
|
|
}
|
2006-03-29 18:23:29 -05:00
|
|
|
|
|
|
|
if (supp_msgs == 250000) {
|
2007-03-15 17:44:55 -04:00
|
|
|
int s_iserr;
|
2006-03-29 18:23:29 -05:00
|
|
|
/*
|
|
|
|
* It's not entirely reasonable assuming that the errors set
|
|
|
|
* in the last clear period are all responsible for the
|
|
|
|
* problem, but the alternative is to assume it's the only
|
|
|
|
* ones on this particular interrupt, which also isn't great
|
|
|
|
*/
|
|
|
|
dd->ipath_maskederrs |= dd->ipath_lasterror | errs;
|
2008-04-17 00:09:29 -04:00
|
|
|
|
2007-07-20 17:41:26 -04:00
|
|
|
dd->ipath_errormask &= ~dd->ipath_maskederrs;
|
2006-03-29 18:23:29 -05:00
|
|
|
ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
|
2008-04-17 00:09:29 -04:00
|
|
|
dd->ipath_errormask);
|
2008-04-17 00:09:32 -04:00
|
|
|
s_iserr = ipath_decode_err(dd, msg, sizeof msg,
|
2008-04-17 00:09:29 -04:00
|
|
|
dd->ipath_maskederrs);
|
2006-03-29 18:23:29 -05:00
|
|
|
|
2007-07-20 17:41:26 -04:00
|
|
|
if (dd->ipath_maskederrs &
|
2008-04-17 00:09:29 -04:00
|
|
|
~(INFINIPATH_E_RRCVEGRFULL |
|
|
|
|
INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
|
2007-03-15 17:44:55 -04:00
|
|
|
ipath_dev_err(dd, "Temporarily disabling "
|
|
|
|
"error(s) %llx reporting; too frequent (%s)\n",
|
2008-04-17 00:09:29 -04:00
|
|
|
(unsigned long long) dd->ipath_maskederrs,
|
2007-07-20 17:41:26 -04:00
|
|
|
msg);
|
2006-03-29 18:23:29 -05:00
|
|
|
else {
|
|
|
|
/*
|
|
|
|
* rcvegrfull and rcvhdrqfull are "normal",
|
|
|
|
* for some types of processes (mostly benchmarks)
|
|
|
|
* that send huge numbers of messages, while not
|
|
|
|
* processing them. So only complain about
|
|
|
|
* these at debug level.
|
|
|
|
*/
|
2007-03-15 17:44:55 -04:00
|
|
|
if (s_iserr)
|
|
|
|
ipath_dbg("Temporarily disabling reporting "
|
|
|
|
"too frequent queue full errors (%s)\n",
|
|
|
|
msg);
|
|
|
|
else
|
|
|
|
ipath_cdbg(ERRPKT,
|
|
|
|
"Temporarily disabling reporting too"
|
|
|
|
" frequent packet errors (%s)\n",
|
|
|
|
msg);
|
2006-03-29 18:23:29 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Re-enable the masked errors after around 3 minutes. in
|
|
|
|
* ipath_get_faststats(). If we have a series of fast
|
|
|
|
* repeating but different errors, the interval will keep
|
|
|
|
* stretching out, but that's OK, as that's pretty
|
|
|
|
* catastrophic.
|
|
|
|
*/
|
|
|
|
dd->ipath_unmasktime = jiffies + HZ * 180;
|
|
|
|
}
|
|
|
|
|
|
|
|
ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, errs);
|
|
|
|
if (ignore_this_time)
|
|
|
|
errs &= ~ignore_this_time;
|
|
|
|
if (errs & ~dd->ipath_lasterror) {
|
|
|
|
errs &= ~dd->ipath_lasterror;
|
|
|
|
/* never suppress duplicate hwerrors or ibstatuschange */
|
|
|
|
dd->ipath_lasterror |= errs &
|
|
|
|
~(INFINIPATH_E_HARDWARE |
|
|
|
|
INFINIPATH_E_IBSTATUSCHANGED);
|
|
|
|
}
|
2006-09-28 12:00:18 -04:00
|
|
|
|
2008-04-17 00:09:32 -04:00
|
|
|
if (errs & INFINIPATH_E_SENDSPECIALTRIGGER) {
|
|
|
|
dd->ipath_spectriggerhit++;
|
|
|
|
ipath_dbg("%lu special trigger hits\n",
|
|
|
|
dd->ipath_spectriggerhit);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* likely due to cancel; so suppress message unless verbose */
|
2006-09-28 12:00:18 -04:00
|
|
|
if ((errs & (INFINIPATH_E_SPKTLEN | INFINIPATH_E_SPIOARMLAUNCH)) &&
|
|
|
|
dd->ipath_lastcancel > jiffies) {
|
2008-04-17 00:09:32 -04:00
|
|
|
/* armlaunch takes precedence; it often causes both. */
|
|
|
|
ipath_cdbg(VERBOSE,
|
|
|
|
"Suppressed %s error (%llx) after sendbuf cancel\n",
|
|
|
|
(errs & INFINIPATH_E_SPIOARMLAUNCH) ?
|
|
|
|
"armlaunch" : "sendpktlen", (unsigned long long)errs);
|
2006-09-28 12:00:18 -04:00
|
|
|
errs &= ~(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SPKTLEN);
|
|
|
|
}
|
|
|
|
|
2006-03-29 18:23:29 -05:00
|
|
|
if (!errs)
|
[PATCH] IB/ipath: fixed bug 9776 for real
The problem was that I was updating the head register multiple times in the
rcvhdrq processing loop, and setting the counter on each update. Since that
meant that the tail register was ahead of head for all but the last update, we
would get extra interrupts. The fix was to not write the counter value except
on the last update.
I also changed to update rcvhdrhead and rcvegrindexhead at most every 16
packets, if there were lots of packets in the queue (and of course, on the
last packet, regardless).
I also made some small cleanups while debugging this.
With these changes, xeon/monty typically sees two openib packets per interrupt
on sdp and ipoib, opteron/monty is about 1.25 pkts/intr.
I'm seeing about 3800 Mbit/s monty/xeon, and 5000-5100 opteron/monty with
netperf sdp. Netpipe doesn't show as good as that, peaking at about 4400 on
opteron/monty sdp. Plain ipoib xeon is about 2100+ netperf, opteron 2900+, at
128KB
Signed-off-by: olson@eng-12.pathscale.com
Signed-off-by: Bryan O'Sullivan <bos@pathscale.com>
Cc: "Michael S. Tsirkin" <mst@mellanox.co.il>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-07-01 07:36:05 -04:00
|
|
|
return 0;
|
2006-03-29 18:23:29 -05:00
|
|
|
|
2008-04-17 00:09:32 -04:00
|
|
|
if (!noprint) {
|
|
|
|
ipath_err_t mask;
|
2006-03-29 18:23:29 -05:00
|
|
|
/*
|
2008-04-17 00:09:32 -04:00
|
|
|
* The ones we mask off are handled specially below
|
|
|
|
* or above. Also mask SDMADISABLED by default as it
|
|
|
|
* is too chatty.
|
2006-03-29 18:23:29 -05:00
|
|
|
*/
|
2008-04-17 00:09:32 -04:00
|
|
|
mask = INFINIPATH_E_IBSTATUSCHANGED |
|
|
|
|
INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
|
|
|
|
INFINIPATH_E_HARDWARE | INFINIPATH_E_SDMADISABLED;
|
|
|
|
|
|
|
|
/* if we're in debug, then don't mask SDMADISABLED msgs */
|
|
|
|
if (ipath_debug & __IPATH_DBG)
|
|
|
|
mask &= ~INFINIPATH_E_SDMADISABLED;
|
|
|
|
|
|
|
|
ipath_decode_err(dd, msg, sizeof msg, errs & ~mask);
|
|
|
|
} else
|
2006-03-29 18:23:29 -05:00
|
|
|
/* so we don't need if (!noprint) at strlcat's below */
|
|
|
|
*msg = 0;
|
|
|
|
|
|
|
|
if (errs & E_SUM_PKTERRS) {
|
|
|
|
ipath_stats.sps_pkterrs++;
|
|
|
|
chkerrpkts = 1;
|
|
|
|
}
|
|
|
|
if (errs & E_SUM_ERRS)
|
|
|
|
ipath_stats.sps_errs++;
|
|
|
|
|
|
|
|
if (errs & (INFINIPATH_E_RICRC | INFINIPATH_E_RVCRC)) {
|
|
|
|
ipath_stats.sps_crcerrs++;
|
|
|
|
chkerrpkts = 1;
|
|
|
|
}
|
2007-03-15 17:44:55 -04:00
|
|
|
iserr = errs & ~(E_SUM_PKTERRS | INFINIPATH_E_PKTERRS);
|
|
|
|
|
2006-03-29 18:23:29 -05:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We don't want to print these two as they happen, or we can make
|
|
|
|
* the situation even worse, because it takes so long to print
|
|
|
|
* messages to serial consoles. Kernel ports get printed from
|
|
|
|
* fast_stats, no more than every 5 seconds, user ports get printed
|
|
|
|
* on close
|
|
|
|
*/
|
2008-04-17 00:09:32 -04:00
|
|
|
if (errs & INFINIPATH_E_RRCVHDRFULL)
|
|
|
|
chkerrpkts |= handle_hdrq_full(dd);
|
2006-03-29 18:23:29 -05:00
|
|
|
if (errs & INFINIPATH_E_RRCVEGRFULL) {
|
2007-12-20 05:43:23 -05:00
|
|
|
struct ipath_portdata *pd = dd->ipath_pd[0];
|
|
|
|
|
2006-03-29 18:23:29 -05:00
|
|
|
/*
|
|
|
|
* since this is of less importance and not likely to
|
|
|
|
* happen without also getting hdrfull, only count
|
|
|
|
* occurrences; don't check each port (or even the kernel
|
|
|
|
* vs user)
|
|
|
|
*/
|
|
|
|
ipath_stats.sps_etidfull++;
|
2008-04-17 00:09:29 -04:00
|
|
|
if (pd->port_head != ipath_get_hdrqtail(pd))
|
2008-04-17 00:09:32 -04:00
|
|
|
chkerrpkts |= 1;
|
2006-03-29 18:23:29 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* do this before IBSTATUSCHANGED, in case both bits set in a single
|
|
|
|
* interrupt; we want the STATUSCHANGE to "win", so we do our
|
|
|
|
* internal copy of state machine correctly
|
|
|
|
*/
|
|
|
|
if (errs & INFINIPATH_E_RIBLOSTLINK) {
|
|
|
|
/*
|
|
|
|
* force through block below
|
|
|
|
*/
|
|
|
|
errs |= INFINIPATH_E_IBSTATUSCHANGED;
|
|
|
|
ipath_stats.sps_iblink++;
|
|
|
|
dd->ipath_flags |= IPATH_LINKDOWN;
|
|
|
|
dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
|
|
|
|
| IPATH_LINKARMED | IPATH_LINKACTIVE);
|
|
|
|
*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
|
|
|
|
|
2008-04-17 00:09:24 -04:00
|
|
|
ipath_dbg("Lost link, link now down (%s)\n",
|
|
|
|
ipath_ibcstatus_str[ipath_read_kreg64(dd,
|
|
|
|
dd->ipath_kregs->kr_ibcstatus) & 0xf]);
|
2006-03-29 18:23:29 -05:00
|
|
|
}
|
|
|
|
if (errs & INFINIPATH_E_IBSTATUSCHANGED)
|
2008-04-17 00:09:24 -04:00
|
|
|
handle_e_ibstatuschanged(dd, errs);
|
2006-03-29 18:23:29 -05:00
|
|
|
|
|
|
|
if (errs & INFINIPATH_E_RESET) {
|
|
|
|
if (!noprint)
|
|
|
|
ipath_dev_err(dd, "Got reset, requires re-init "
|
|
|
|
"(unload and reload driver)\n");
|
|
|
|
dd->ipath_flags &= ~IPATH_INITTED; /* needs re-init */
|
|
|
|
/* mark as having had error */
|
|
|
|
*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
|
|
|
|
*dd->ipath_statusp &= ~IPATH_STATUS_IB_CONF;
|
|
|
|
}
|
|
|
|
|
2007-03-15 17:44:55 -04:00
|
|
|
if (!noprint && *msg) {
|
|
|
|
if (iserr)
|
|
|
|
ipath_dev_err(dd, "%s error\n", msg);
|
|
|
|
}
|
2006-08-25 14:24:34 -04:00
|
|
|
if (dd->ipath_state_wanted & dd->ipath_flags) {
|
|
|
|
ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, "
|
|
|
|
"waking\n", dd->ipath_state_wanted,
|
2006-03-29 18:23:29 -05:00
|
|
|
dd->ipath_flags);
|
2006-08-25 14:24:34 -04:00
|
|
|
wake_up_interruptible(&ipath_state_wait);
|
2006-03-29 18:23:29 -05:00
|
|
|
}
|
|
|
|
|
[PATCH] IB/ipath: fixed bug 9776 for real
The problem was that I was updating the head register multiple times in the
rcvhdrq processing loop, and setting the counter on each update. Since that
meant that the tail register was ahead of head for all but the last update, we
would get extra interrupts. The fix was to not write the counter value except
on the last update.
I also changed to update rcvhdrhead and rcvegrindexhead at most every 16
packets, if there were lots of packets in the queue (and of course, on the
last packet, regardless).
I also made some small cleanups while debugging this.
With these changes, xeon/monty typically sees two openib packets per interrupt
on sdp and ipoib, opteron/monty is about 1.25 pkts/intr.
I'm seeing about 3800 Mbit/s monty/xeon, and 5000-5100 opteron/monty with
netperf sdp. Netpipe doesn't show as good as that, peaking at about 4400 on
opteron/monty sdp. Plain ipoib xeon is about 2100+ netperf, opteron 2900+, at
128KB
Signed-off-by: olson@eng-12.pathscale.com
Signed-off-by: Bryan O'Sullivan <bos@pathscale.com>
Cc: "Michael S. Tsirkin" <mst@mellanox.co.il>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-07-01 07:36:05 -04:00
|
|
|
return chkerrpkts;
|
2006-03-29 18:23:29 -05:00
|
|
|
}
|
|
|
|
|
2007-07-06 15:48:33 -04:00
|
|
|
/*
|
|
|
|
* try to cleanup as much as possible for anything that might have gone
|
|
|
|
* wrong while in freeze mode, such as pio buffers being written by user
|
|
|
|
* processes (causing armlaunch), send errors due to going into freeze mode,
|
|
|
|
* etc., and try to avoid causing extra interrupts while doing so.
|
|
|
|
* Forcibly update the in-memory pioavail register copies after cleanup
|
2008-05-07 14:00:15 -04:00
|
|
|
* because the chip won't do it while in freeze mode (the register values
|
|
|
|
* themselves are kept correct).
|
2007-07-06 15:48:33 -04:00
|
|
|
* Make sure that we don't lose any important interrupts by using the chip
|
|
|
|
* feature that says that writing 0 to a bit in *clear that is set in
|
|
|
|
* *status will cause an interrupt to be generated again (if allowed by
|
|
|
|
* the *mask value).
|
|
|
|
*/
|
|
|
|
void ipath_clear_freeze(struct ipath_devdata *dd)
|
|
|
|
{
|
|
|
|
/* disable error interrupts, to avoid confusion */
|
|
|
|
ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
|
|
|
|
|
2007-07-20 17:41:26 -04:00
|
|
|
/* also disable interrupts; errormask is sometimes overwriten */
|
|
|
|
ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
|
|
|
|
|
2008-05-07 13:57:48 -04:00
|
|
|
ipath_cancel_sends(dd, 1);
|
2008-05-07 14:00:15 -04:00
|
|
|
|
|
|
|
/* clear the freeze, and be sure chip saw it */
|
2007-07-06 15:48:33 -04:00
|
|
|
ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
|
|
|
|
dd->ipath_control);
|
2008-05-07 14:00:15 -04:00
|
|
|
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
|
2007-07-06 15:48:33 -04:00
|
|
|
|
2008-05-07 14:00:15 -04:00
|
|
|
/* force in-memory update now we are out of freeze */
|
2008-04-17 00:09:26 -04:00
|
|
|
ipath_force_pio_avail_update(dd);
|
2007-07-06 15:48:33 -04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* force new interrupt if any hwerr, error or interrupt bits are
|
|
|
|
* still set, and clear "safe" send packet errors related to freeze
|
|
|
|
* and cancelling sends. Re-enable error interrupts before possible
|
|
|
|
* force of re-interrupt on pending interrupts.
|
|
|
|
*/
|
|
|
|
ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 0ULL);
|
|
|
|
ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
|
|
|
|
E_SPKT_ERRS_IGNORE);
|
|
|
|
ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
|
2007-07-20 17:41:26 -04:00
|
|
|
dd->ipath_errormask);
|
|
|
|
ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, -1LL);
|
2007-07-06 15:48:33 -04:00
|
|
|
ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-03-29 18:23:29 -05:00
|
|
|
/* this is separate to allow for better optimization of ipath_intr() */
|
|
|
|
|
2007-10-10 08:10:35 -04:00
|
|
|
static noinline void ipath_bad_intr(struct ipath_devdata *dd, u32 *unexpectp)
{
	/*
	 * Interrupts sometimes arrive during driver init and unload, when
	 * we do not want to process them; *unexpectp counts how many such
	 * interrupts we have seen.  This is just a bandaid, not a fix, if
	 * something goes badly wrong.
	 */
	if (++*unexpectp <= 100) {
		if (*unexpectp > 1)
			ipath_dbg("Interrupt when not ready, "
				  "should not happen, ignoring\n");
		return;
	}

	/*
	 * Past the threshold: we must be taking somebody else's
	 * interrupts, due to a messed up mptable and/or PIRQ table, so
	 * unregister the interrupt.  We've seen this during linuxbios
	 * development work, and it may happen in the future again.
	 * (The counter is bumped a second time on this path, exactly as
	 * the original nested-if form did.)
	 */
	if (++*unexpectp > 105 && dd->pcidev && dd->ipath_irq) {
		ipath_dev_err(dd, "Now %u unexpected "
			      "interrupts, unregistering "
			      "interrupt handler\n",
			      *unexpectp);
		ipath_dbg("free_irq of irq %d\n", dd->ipath_irq);
		dd->ipath_f_free_irq(dd);
	}
	if (ipath_read_ireg(dd, dd->ipath_kregs->kr_intmask)) {
		ipath_dev_err(dd, "%u unexpected interrupts, "
			      "disabling interrupts completely\n",
			      *unexpectp);
		/* disable all interrupts, something is very wrong */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
	}
}
|
|
|
|
|
2007-10-10 08:10:35 -04:00
|
|
|
static noinline void ipath_bad_regread(struct ipath_devdata *dd)
|
2006-03-29 18:23:29 -05:00
|
|
|
{
|
|
|
|
static int allbits;
|
|
|
|
|
|
|
|
/* separate routine, for better optimization of ipath_intr() */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We print the message and disable interrupts, in hope of
|
|
|
|
* having a better chance of debugging the problem.
|
|
|
|
*/
|
|
|
|
ipath_dev_err(dd,
|
|
|
|
"Read of interrupt status failed (all bits set)\n");
|
|
|
|
if (allbits++) {
|
|
|
|
/* disable all interrupts, something is very wrong */
|
|
|
|
ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
|
|
|
|
if (allbits == 2) {
|
|
|
|
ipath_dev_err(dd, "Still bad interrupt status, "
|
|
|
|
"unregistering interrupt\n");
|
2006-11-08 20:44:58 -05:00
|
|
|
dd->ipath_f_free_irq(dd);
|
2006-03-29 18:23:29 -05:00
|
|
|
} else if (allbits > 2) {
|
|
|
|
if ((allbits % 10000) == 0)
|
|
|
|
printk(".");
|
|
|
|
} else
|
|
|
|
ipath_dev_err(dd, "Disabling interrupts, "
|
|
|
|
"multiple errors\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void handle_layer_pioavail(struct ipath_devdata *dd)
|
|
|
|
{
|
2007-09-05 04:57:14 -04:00
|
|
|
unsigned long flags;
|
2006-03-29 18:23:29 -05:00
|
|
|
int ret;
|
|
|
|
|
2006-08-25 14:24:31 -04:00
|
|
|
ret = ipath_ib_piobufavail(dd->verbs_dev);
|
2006-03-29 18:23:29 -05:00
|
|
|
if (ret > 0)
|
2006-04-24 17:23:08 -04:00
|
|
|
goto set;
|
2006-03-29 18:23:29 -05:00
|
|
|
|
|
|
|
return;
|
2006-04-24 17:23:08 -04:00
|
|
|
set:
|
2007-09-05 04:57:14 -04:00
|
|
|
spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
|
|
|
|
dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
|
2006-03-29 18:23:29 -05:00
|
|
|
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
|
|
|
|
dd->ipath_sendctrl);
|
2007-09-05 04:57:14 -04:00
|
|
|
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
|
|
|
|
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
|
2006-03-29 18:23:29 -05:00
|
|
|
}
|
|
|
|
|
2006-07-01 07:36:04 -04:00
|
|
|
/*
|
|
|
|
* Handle receive interrupts for user ports; this means a user
|
|
|
|
* process was waiting for a packet to arrive, and didn't want
|
|
|
|
* to poll
|
|
|
|
*/
|
2008-04-17 00:09:29 -04:00
|
|
|
static void handle_urcv(struct ipath_devdata *dd, u64 istat)
|
2006-03-29 18:23:29 -05:00
|
|
|
{
|
|
|
|
u64 portr;
|
|
|
|
int i;
|
|
|
|
int rcvdint = 0;
|
|
|
|
|
2007-10-17 21:18:29 -04:00
|
|
|
/*
|
|
|
|
* test_and_clear_bit(IPATH_PORT_WAITING_RCV) and
|
|
|
|
* test_and_clear_bit(IPATH_PORT_WAITING_URG) below
|
|
|
|
* would both like timely updates of the bits so that
|
|
|
|
* we don't pass them by unnecessarily. the rmb()
|
|
|
|
* here ensures that we see them promptly -- the
|
|
|
|
* corresponding wmb()'s are in ipath_poll_urgent()
|
|
|
|
* and ipath_poll_next()...
|
|
|
|
*/
|
2007-09-14 15:22:49 -04:00
|
|
|
rmb();
|
2008-04-17 00:09:29 -04:00
|
|
|
portr = ((istat >> dd->ipath_i_rcvavail_shift) &
|
|
|
|
dd->ipath_i_rcvavail_mask) |
|
|
|
|
((istat >> dd->ipath_i_rcvurg_shift) &
|
|
|
|
dd->ipath_i_rcvurg_mask);
|
2006-07-01 07:36:04 -04:00
|
|
|
for (i = 1; i < dd->ipath_cfgports; i++) {
|
2006-03-29 18:23:29 -05:00
|
|
|
struct ipath_portdata *pd = dd->ipath_pd[i];
|
2008-04-17 00:09:29 -04:00
|
|
|
|
2007-06-18 17:24:49 -04:00
|
|
|
if (portr & (1 << i) && pd && pd->port_cnt) {
|
2007-09-14 15:22:49 -04:00
|
|
|
if (test_and_clear_bit(IPATH_PORT_WAITING_RCV,
|
|
|
|
&pd->port_flag)) {
|
2007-12-21 04:50:59 -05:00
|
|
|
clear_bit(i + dd->ipath_r_intravail_shift,
|
2007-06-18 17:24:49 -04:00
|
|
|
&dd->ipath_rcvctrl);
|
|
|
|
wake_up_interruptible(&pd->port_wait);
|
|
|
|
rcvdint = 1;
|
2007-09-14 15:22:49 -04:00
|
|
|
} else if (test_and_clear_bit(IPATH_PORT_WAITING_URG,
|
|
|
|
&pd->port_flag)) {
|
|
|
|
pd->port_urgent++;
|
2007-06-18 17:24:49 -04:00
|
|
|
wake_up_interruptible(&pd->port_wait);
|
|
|
|
}
|
2006-03-29 18:23:29 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (rcvdint) {
|
|
|
|
/* only want to take one interrupt, so turn off the rcv
|
2008-04-17 00:09:29 -04:00
|
|
|
* interrupt for all the ports that we set the rcv_waiting
|
2006-03-29 18:23:29 -05:00
|
|
|
* (but never for kernel port)
|
|
|
|
*/
|
|
|
|
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
|
|
|
|
dd->ipath_rcvctrl);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
IRQ: Maintain regs pointer globally rather than passing to IRQ handlers
Maintain a per-CPU global "struct pt_regs *" variable which can be used instead
of passing regs around manually through all ~1800 interrupt handlers in the
Linux kernel.
The regs pointer is used in few places, but it potentially costs both stack
space and code to pass it around. On the FRV arch, removing the regs parameter
from all the genirq function results in a 20% speed up of the IRQ exit path
(ie: from leaving timer_interrupt() to leaving do_IRQ()).
Where appropriate, an arch may override the generic storage facility and do
something different with the variable. On FRV, for instance, the address is
maintained in GR28 at all times inside the kernel as part of general exception
handling.
Having looked over the code, it appears that the parameter may be handed down
through up to twenty or so layers of functions. Consider a USB character
device attached to a USB hub, attached to a USB controller that posts its
interrupts through a cascaded auxiliary interrupt controller. A character
device driver may want to pass regs to the sysrq handler through the input
layer which adds another few layers of parameter passing.
I've build this code with allyesconfig for x86_64 and i386. I've runtested the
main part of the code on FRV and i386, though I can't test most of the drivers.
I've also done partial conversion for powerpc and MIPS - these at least compile
with minimal configurations.
This will affect all archs. Mostly the changes should be relatively easy.
Take do_IRQ(), store the regs pointer at the beginning, saving the old one:
struct pt_regs *old_regs = set_irq_regs(regs);
And put the old one back at the end:
set_irq_regs(old_regs);
Don't pass regs through to generic_handle_irq() or __do_IRQ().
In timer_interrupt(), this sort of change will be necessary:
- update_process_times(user_mode(regs));
- profile_tick(CPU_PROFILING, regs);
+ update_process_times(user_mode(get_irq_regs()));
+ profile_tick(CPU_PROFILING);
I'd like to move update_process_times()'s use of get_irq_regs() into itself,
except that i386, alone of the archs, uses something other than user_mode().
Some notes on the interrupt handling in the drivers:
(*) input_dev() is now gone entirely. The regs pointer is no longer stored in
the input_dev struct.
(*) finish_unlinks() in drivers/usb/host/ohci-q.c needs checking. It does
something different depending on whether it's been supplied with a regs
pointer or not.
(*) Various IRQ handler function pointers have been moved to type
irq_handler_t.
Signed-Off-By: David Howells <dhowells@redhat.com>
(cherry picked from 1b16e7ac850969f38b375e511e3fa2f474a33867 commit)
2006-10-05 09:55:46 -04:00
|
|
|
irqreturn_t ipath_intr(int irq, void *data)
|
2006-03-29 18:23:29 -05:00
|
|
|
{
|
|
|
|
struct ipath_devdata *dd = data;
|
2008-04-17 00:09:32 -04:00
|
|
|
u64 istat, chk0rcv = 0;
|
2006-03-29 18:23:29 -05:00
|
|
|
ipath_err_t estat = 0;
|
|
|
|
irqreturn_t ret;
|
2006-07-01 07:36:04 -04:00
|
|
|
static unsigned unexpected = 0;
|
2008-04-17 00:09:29 -04:00
|
|
|
u64 kportrbits;
|
2006-07-01 07:36:04 -04:00
|
|
|
|
|
|
|
ipath_stats.sps_ints++;
|
2006-03-29 18:23:29 -05:00
|
|
|
|
2007-07-06 15:48:53 -04:00
|
|
|
if (dd->ipath_int_counter != (u32) -1)
|
|
|
|
dd->ipath_int_counter++;
|
|
|
|
|
2006-07-01 07:36:04 -04:00
|
|
|
if (!(dd->ipath_flags & IPATH_PRESENT)) {
|
2006-04-24 17:23:03 -04:00
|
|
|
/*
|
2006-07-01 07:36:04 -04:00
|
|
|
* This return value is not great, but we do not want the
|
2006-04-24 17:23:03 -04:00
|
|
|
* interrupt core code to remove our interrupt handler
|
|
|
|
* because we don't appear to be handling an interrupt
|
|
|
|
* during a chip reset.
|
|
|
|
*/
|
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
2006-07-01 07:36:04 -04:00
|
|
|
/*
|
|
|
|
* this needs to be flags&initted, not statusp, so we keep
|
|
|
|
* taking interrupts even after link goes down, etc.
|
|
|
|
* Also, we *must* clear the interrupt at some point, or we won't
|
|
|
|
* take it again, which can be real bad for errors, etc...
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (!(dd->ipath_flags & IPATH_INITTED)) {
|
|
|
|
ipath_bad_intr(dd, &unexpected);
|
|
|
|
ret = IRQ_NONE;
|
|
|
|
goto bail;
|
|
|
|
}
|
|
|
|
|
2008-01-07 00:12:38 -05:00
|
|
|
istat = ipath_read_ireg(dd, dd->ipath_kregs->kr_intstatus);
|
[PATCH] IB/ipath: fixed bug 9776 for real
The problem was that I was updating the head register multiple times in the
rcvhdrq processing loop, and setting the counter on each update. Since that
meant that the tail register was ahead of head for all but the last update, we
would get extra interrupts. The fix was to not write the counter value except
on the last update.
I also changed to update rcvhdrhead and rcvegrindexhead at most every 16
packets, if there were lots of packets in the queue (and of course, on the
last packet, regardless).
I also made some small cleanups while debugging this.
With these changes, xeon/monty typically sees two openib packets per interrupt
on sdp and ipoib, opteron/monty is about 1.25 pkts/intr.
I'm seeing about 3800 Mbit/s monty/xeon, and 5000-5100 opteron/monty with
netperf sdp. Netpipe doesn't show as good as that, peaking at about 4400 on
opteron/monty sdp. Plain ipoib xeon is about 2100+ netperf, opteron 2900+, at
128KB
Signed-off-by: olson@eng-12.pathscale.com
Signed-off-by: Bryan O'Sullivan <bos@pathscale.com>
Cc: "Michael S. Tsirkin" <mst@mellanox.co.il>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-07-01 07:36:05 -04:00
|
|
|
|
2006-03-29 18:23:29 -05:00
|
|
|
if (unlikely(!istat)) {
|
|
|
|
ipath_stats.sps_nullintr++;
|
|
|
|
ret = IRQ_NONE; /* not our interrupt, or already handled */
|
|
|
|
goto bail;
|
|
|
|
}
|
|
|
|
if (unlikely(istat == -1)) {
|
|
|
|
ipath_bad_regread(dd);
|
|
|
|
/* don't know if it was our interrupt or not */
|
|
|
|
ret = IRQ_NONE;
|
|
|
|
goto bail;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (unexpected)
|
|
|
|
unexpected = 0;
|
|
|
|
|
2006-09-28 12:00:11 -04:00
|
|
|
if (unlikely(istat & ~dd->ipath_i_bitsextant))
|
2006-03-29 18:23:29 -05:00
|
|
|
ipath_dev_err(dd,
|
2008-04-17 00:09:32 -04:00
|
|
|
"interrupt with unknown interrupts %Lx set\n",
|
|
|
|
istat & ~dd->ipath_i_bitsextant);
|
|
|
|
else if (istat & ~INFINIPATH_I_ERROR) /* errors do own printing */
|
|
|
|
ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n", istat);
|
2006-03-29 18:23:29 -05:00
|
|
|
|
2008-04-17 00:09:32 -04:00
|
|
|
if (istat & INFINIPATH_I_ERROR) {
|
2006-03-29 18:23:29 -05:00
|
|
|
ipath_stats.sps_errints++;
|
|
|
|
estat = ipath_read_kreg64(dd,
|
|
|
|
dd->ipath_kregs->kr_errorstatus);
|
|
|
|
if (!estat)
|
2008-04-17 00:09:32 -04:00
|
|
|
dev_info(&dd->pcidev->dev, "error interrupt (%Lx), "
|
2006-03-29 18:23:29 -05:00
|
|
|
"but no error bits set!\n", istat);
|
|
|
|
else if (estat == -1LL)
|
|
|
|
/*
|
|
|
|
* should we try clearing all, or hope next read
|
|
|
|
* works?
|
|
|
|
*/
|
|
|
|
ipath_dev_err(dd, "Read of error status failed "
|
|
|
|
"(all bits set); ignoring\n");
|
|
|
|
else
|
2008-04-17 00:09:29 -04:00
|
|
|
chk0rcv |= handle_errors(dd, estat);
|
2006-03-29 18:23:29 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
if (istat & INFINIPATH_I_GPIO) {
|
2006-07-01 07:36:04 -04:00
|
|
|
/*
|
2006-09-28 12:00:00 -04:00
|
|
|
* GPIO interrupts fall in two broad classes:
|
|
|
|
* GPIO_2 indicates (on some HT4xx boards) that a packet
|
|
|
|
* has arrived for Port 0. Checking for this
|
|
|
|
* is controlled by flag IPATH_GPIO_INTR.
|
2007-08-02 17:46:29 -04:00
|
|
|
* GPIO_3..5 on IBA6120 Rev2 and IBA6110 Rev4 chips indicate
|
|
|
|
* errors that we need to count. Checking for this
|
2006-09-28 12:00:00 -04:00
|
|
|
* is controlled by flag IPATH_GPIO_ERRINTRS.
|
2006-07-01 07:36:04 -04:00
|
|
|
*/
|
2006-09-28 12:00:00 -04:00
|
|
|
u32 gpiostatus;
|
|
|
|
u32 to_clear = 0;
|
|
|
|
|
|
|
|
gpiostatus = ipath_read_kreg32(
|
|
|
|
dd, dd->ipath_kregs->kr_gpio_status);
|
2008-04-17 00:09:29 -04:00
|
|
|
/* First the error-counter case. */
|
2006-09-28 12:00:00 -04:00
|
|
|
if ((gpiostatus & IPATH_GPIO_ERRINTR_MASK) &&
|
|
|
|
(dd->ipath_flags & IPATH_GPIO_ERRINTRS)) {
|
|
|
|
/* want to clear the bits we see asserted. */
|
|
|
|
to_clear |= (gpiostatus & IPATH_GPIO_ERRINTR_MASK);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Count appropriately, clear bits out of our copy,
|
|
|
|
* as they have been "handled".
|
|
|
|
*/
|
|
|
|
if (gpiostatus & (1 << IPATH_GPIO_RXUVL_BIT)) {
|
|
|
|
ipath_dbg("FlowCtl on UnsupVL\n");
|
|
|
|
dd->ipath_rxfc_unsupvl_errs++;
|
|
|
|
}
|
|
|
|
if (gpiostatus & (1 << IPATH_GPIO_OVRUN_BIT)) {
|
|
|
|
ipath_dbg("Overrun Threshold exceeded\n");
|
|
|
|
dd->ipath_overrun_thresh_errs++;
|
|
|
|
}
|
|
|
|
if (gpiostatus & (1 << IPATH_GPIO_LLI_BIT)) {
|
|
|
|
ipath_dbg("Local Link Integrity error\n");
|
|
|
|
dd->ipath_lli_errs++;
|
|
|
|
}
|
|
|
|
gpiostatus &= ~IPATH_GPIO_ERRINTR_MASK;
|
2006-03-29 18:23:29 -05:00
|
|
|
}
|
2006-09-28 12:00:00 -04:00
|
|
|
/* Now the Port0 Receive case */
|
|
|
|
if ((gpiostatus & (1 << IPATH_GPIO_PORT0_BIT)) &&
|
|
|
|
(dd->ipath_flags & IPATH_GPIO_INTR)) {
|
|
|
|
/*
|
|
|
|
* GPIO status bit 2 is set, and we expected it.
|
|
|
|
* clear it and indicate in p0bits.
|
|
|
|
* This probably only happens if a Port0 pkt
|
|
|
|
* arrives at _just_ the wrong time, and we
|
|
|
|
* handle that by seting chk0rcv;
|
|
|
|
*/
|
|
|
|
to_clear |= (1 << IPATH_GPIO_PORT0_BIT);
|
|
|
|
gpiostatus &= ~(1 << IPATH_GPIO_PORT0_BIT);
|
[PATCH] IB/ipath: fixed bug 9776 for real
The problem was that I was updating the head register multiple times in the
rcvhdrq processing loop, and setting the counter on each update. Since that
meant that the tail register was ahead of head for all but the last update, we
would get extra interrupts. The fix was to not write the counter value except
on the last update.
I also changed to update rcvhdrhead and rcvegrindexhead at most every 16
packets, if there were lots of packets in the queue (and of course, on the
last packet, regardless).
I also made some small cleanups while debugging this.
With these changes, xeon/monty typically sees two openib packets per interrupt
on sdp and ipoib, opteron/monty is about 1.25 pkts/intr.
I'm seeing about 3800 Mbit/s monty/xeon, and 5000-5100 opteron/monty with
netperf sdp. Netpipe doesn't show as good as that, peaking at about 4400 on
opteron/monty sdp. Plain ipoib xeon is about 2100+ netperf, opteron 2900+, at
128KB
Signed-off-by: olson@eng-12.pathscale.com
Signed-off-by: Bryan O'Sullivan <bos@pathscale.com>
Cc: "Michael S. Tsirkin" <mst@mellanox.co.il>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-07-01 07:36:05 -04:00
|
|
|
chk0rcv = 1;
|
2006-03-29 18:23:29 -05:00
|
|
|
}
|
2007-05-10 15:10:49 -04:00
|
|
|
if (gpiostatus) {
|
2006-09-28 12:00:00 -04:00
|
|
|
/*
|
|
|
|
* Some unexpected bits remain. If they could have
|
|
|
|
* caused the interrupt, complain and clear.
|
2007-10-03 13:47:38 -04:00
|
|
|
* To avoid repetition of this condition, also clear
|
|
|
|
* the mask. It is almost certainly due to error.
|
2006-09-28 12:00:00 -04:00
|
|
|
*/
|
2007-05-10 15:10:49 -04:00
|
|
|
const u32 mask = (u32) dd->ipath_gpio_mask;
|
|
|
|
|
2006-09-28 12:00:00 -04:00
|
|
|
if (mask & gpiostatus) {
|
|
|
|
ipath_dbg("Unexpected GPIO IRQ bits %x\n",
|
|
|
|
gpiostatus & mask);
|
|
|
|
to_clear |= (gpiostatus & mask);
|
2007-10-03 13:47:38 -04:00
|
|
|
dd->ipath_gpio_mask &= ~(gpiostatus & mask);
|
|
|
|
ipath_write_kreg(dd,
|
|
|
|
dd->ipath_kregs->kr_gpio_mask,
|
|
|
|
dd->ipath_gpio_mask);
|
2006-09-28 12:00:00 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (to_clear) {
|
|
|
|
ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
|
|
|
|
(u64) to_clear);
|
|
|
|
}
|
2006-03-29 18:23:29 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2006-07-01 07:36:05 -04:00
|
|
|
* Clear the interrupt bits we found set, unless they are receive
|
|
|
|
* related, in which case we already cleared them above, and don't
|
|
|
|
* want to clear them again, because we might lose an interrupt.
|
|
|
|
* Clear it early, so we "know" know the chip will have seen this by
|
|
|
|
* the time we process the queue, and will re-interrupt if necessary.
|
|
|
|
* The processor itself won't take the interrupt again until we return.
|
2006-03-29 18:23:29 -05:00
|
|
|
*/
|
|
|
|
ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
|
|
|
|
|
2006-07-01 07:36:04 -04:00
|
|
|
/*
|
2008-04-17 00:09:29 -04:00
|
|
|
* Handle kernel receive queues before checking for pio buffers
|
|
|
|
* available since receives can overflow; piobuf waiters can afford
|
|
|
|
* a few extra cycles, since they were waiting anyway, and user's
|
|
|
|
* waiting for receive are at the bottom.
|
2006-07-01 07:36:04 -04:00
|
|
|
*/
|
2008-04-17 00:09:29 -04:00
|
|
|
kportrbits = (1ULL << dd->ipath_i_rcvavail_shift) |
|
|
|
|
(1ULL << dd->ipath_i_rcvurg_shift);
|
|
|
|
if (chk0rcv || (istat & kportrbits)) {
|
|
|
|
istat &= ~kportrbits;
|
2007-12-20 05:43:23 -05:00
|
|
|
ipath_kreceive(dd->ipath_pd[0]);
|
2006-07-01 07:36:04 -04:00
|
|
|
}
|
[PATCH] IB/ipath: fixed bug 9776 for real
The problem was that I was updating the head register multiple times in the
rcvhdrq processing loop, and setting the counter on each update. Since that
meant that the tail register was ahead of head for all but the last update, we
would get extra interrupts. The fix was to not write the counter value except
on the last update.
I also changed to update rcvhdrhead and rcvegrindexhead at most every 16
packets, if there were lots of packets in the queue (and of course, on the
last packet, regardless).
I also made some small cleanups while debugging this.
With these changes, xeon/monty typically sees two openib packets per interrupt
on sdp and ipoib, opteron/monty is about 1.25 pkts/intr.
I'm seeing about 3800 Mbit/s monty/xeon, and 5000-5100 opteron/monty with
netperf sdp. Netpipe doesn't show as good as that, peaking at about 4400 on
opteron/monty sdp. Plain ipoib xeon is about 2100+ netperf, opteron 2900+, at
128KB
Signed-off-by: olson@eng-12.pathscale.com
Signed-off-by: Bryan O'Sullivan <bos@pathscale.com>
Cc: "Michael S. Tsirkin" <mst@mellanox.co.il>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-07-01 07:36:05 -04:00
|
|
|
|
2008-04-17 00:09:29 -04:00
|
|
|
if (istat & ((dd->ipath_i_rcvavail_mask << dd->ipath_i_rcvavail_shift) |
|
|
|
|
(dd->ipath_i_rcvurg_mask << dd->ipath_i_rcvurg_shift)))
|
2006-07-01 07:36:04 -04:00
|
|
|
handle_urcv(dd, istat);
|
|
|
|
|
2008-04-17 00:09:32 -04:00
|
|
|
if (istat & (INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED))
|
|
|
|
handle_sdma_intr(dd, istat);
|
|
|
|
|
2006-03-29 18:23:29 -05:00
|
|
|
if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
|
2007-09-05 04:57:14 -04:00
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
|
|
|
|
dd->ipath_sendctrl &= ~INFINIPATH_S_PIOINTBUFAVAIL;
|
2006-03-29 18:23:29 -05:00
|
|
|
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
|
|
|
|
dd->ipath_sendctrl);
|
2007-09-05 04:57:14 -04:00
|
|
|
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
|
|
|
|
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
|
2006-03-29 18:23:29 -05:00
|
|
|
|
2008-05-07 14:00:15 -04:00
|
|
|
/* always process; sdma verbs uses PIO for acks and VL15 */
|
|
|
|
handle_layer_pioavail(dd);
|
2006-03-29 18:23:29 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
ret = IRQ_HANDLED;
|
|
|
|
|
|
|
|
bail:
|
|
|
|
return ret;
|
|
|
|
}
|