Diffstat (limited to 'drivers/infiniband/hw/qib')
-rw-r--r--  drivers/infiniband/hw/qib/qib.h             |  31
-rw-r--r--  drivers/infiniband/hw/qib/qib_diag.c        |   1
-rw-r--r--  drivers/infiniband/hw/qib/qib_driver.c      |  21
-rw-r--r--  drivers/infiniband/hw/qib/qib_eeprom.c      | 180
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c    |  19
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba6120.c     |   4
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7220.c     |  31
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c     | 225
-rw-r--r--  drivers/infiniband/hw/qib/qib_init.c        |  10
-rw-r--r--  drivers/infiniband/hw/qib/qib_keys.c        |   4
-rw-r--r--  drivers/infiniband/hw/qib/qib_mad.c         |  80
-rw-r--r--  drivers/infiniband/hw/qib/qib_mad.h         | 143
-rw-r--r--  drivers/infiniband/hw/qib/qib_pcie.c        |   9
-rw-r--r--  drivers/infiniband/hw/qib/qib_qp.c          |  90
-rw-r--r--  drivers/infiniband/hw/qib/qib_qsfp.c        |  35
-rw-r--r--  drivers/infiniband/hw/qib/qib_qsfp.h        |   3
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c          |  46
-rw-r--r--  drivers/infiniband/hw/qib/qib_ruc.c         |   7
-rw-r--r--  drivers/infiniband/hw/qib/qib_sd7220.c      |   1
-rw-r--r--  drivers/infiniband/hw/qib/qib_sdma.c        |   1
-rw-r--r--  drivers/infiniband/hw/qib/qib_srq.c         |   5
-rw-r--r--  drivers/infiniband/hw/qib/qib_sysfs.c       |  37
-rw-r--r--  drivers/infiniband/hw/qib/qib_tx.c          |   1
-rw-r--r--  drivers/infiniband/hw/qib/qib_uc.c          |  25
-rw-r--r--  drivers/infiniband/hw/qib/qib_ud.c          |   9
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_pages.c  |   4
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_sdma.c   |   6
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.c       |  51
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.h       |   7
29 files changed, 475 insertions(+), 611 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 769a1d9..c0b72a6 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -171,7 +171,9 @@ struct qib_ctxtdata {
/* how many alloc_pages() chunks in rcvegrbuf_pages */
u32 rcvegrbuf_chunks;
/* how many egrbufs per chunk */
- u32 rcvegrbufs_perchunk;
+ u16 rcvegrbufs_perchunk;
+ /* ilog2 of above */
+ u16 rcvegrbufs_perchunk_shift;
/* order for rcvegrbuf_pages */
size_t rcvegrbuf_size;
/* rcvhdrq size (for freeing) */
@@ -221,6 +223,9 @@ struct qib_ctxtdata {
/* ctxt rcvhdrq head offset */
u32 head;
u32 pkt_count;
+ /* lookaside fields */
+ struct qib_qp *lookaside_qp;
+ u32 lookaside_qpn;
/* QPs waiting for context processing */
struct list_head qp_wait_list;
};
@@ -807,6 +812,10 @@ struct qib_devdata {
* supports, less gives more pio bufs/ctxt, etc.
*/
u32 cfgctxts;
+ /*
+ * number of ctxts available for PSM open
+ */
+ u32 freectxts;
/*
* hint that we should update pioavailshadow before
@@ -936,7 +945,9 @@ struct qib_devdata {
/* chip address space used by 4k pio buffers */
u32 align4k;
/* size of each rcvegrbuffer */
- u32 rcvegrbufsize;
+ u16 rcvegrbufsize;
+ /* log2 of above */
+ u16 rcvegrbufsize_shift;
/* localbus width (1, 2,4,8,16,32) from config space */
u32 lbus_width;
/* localbus speed in MHz */
@@ -994,12 +1005,6 @@ struct qib_devdata {
/* control high-level access to EEPROM */
struct mutex eep_lock;
uint64_t traffic_wds;
- /* active time is kept in seconds, but logged in hours */
- atomic_t active_time;
- /* Below are nominal shadow of EEPROM, new since last EEPROM update */
- uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
- uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
- uint16_t eep_hrs;
/*
* masks for which bits of errs, hwerrs that cause
* each of the counters to increment.
@@ -1012,6 +1017,8 @@ struct qib_devdata {
u8 psxmitwait_supported;
/* cycle length of PS* counters in HW (in picoseconds) */
u16 psxmitwait_check_rate;
+ /* high-volume overflow errors deferred to tasklet */
+ struct tasklet_struct error_tasklet;
};
/* hol_state values */
@@ -1214,8 +1221,7 @@ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
const void *buffer, int len);
void qib_get_eeprom_info(struct qib_devdata *);
-int qib_update_eeprom_log(struct qib_devdata *dd);
-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
+#define qib_inc_eeprom_err(dd, eidx, incr)
void qib_dump_lookup_output_queue(struct qib_devdata *);
void qib_force_pio_avail_update(struct qib_devdata *);
void qib_clear_symerror_on_linkup(unsigned long opaque);
@@ -1415,6 +1421,10 @@ extern struct mutex qib_mutex;
qib_get_unit_name((dd)->unit), ##__VA_ARGS__); \
} while (0)
+#define qib_dev_warn(dd, fmt, ...) \
+ dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
+ qib_get_unit_name((dd)->unit), ##__VA_ARGS__)
+
#define qib_dev_porterr(dd, port, fmt, ...) \
do { \
dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
@@ -1433,6 +1443,7 @@ extern struct mutex qib_mutex;
struct qib_hwerror_msgs {
u64 mask;
const char *msg;
+ size_t sz;
};
#define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b }
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
index 204c4dd..9892456 100644
--- a/drivers/infiniband/hw/qib/qib_diag.c
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -46,6 +46,7 @@
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
+#include <linux/export.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 23e584f..c90a55f 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -37,6 +37,7 @@
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
+#include <linux/module.h>
#include "qib.h"
@@ -279,10 +280,10 @@ bail:
*/
static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
{
- const u32 chunk = etail / rcd->rcvegrbufs_perchunk;
- const u32 idx = etail % rcd->rcvegrbufs_perchunk;
+ const u32 chunk = etail >> rcd->rcvegrbufs_perchunk_shift;
+ const u32 idx = etail & ((u32)rcd->rcvegrbufs_perchunk - 1);
- return rcd->rcvegrbuf[chunk] + idx * rcd->dd->rcvegrbufsize;
+ return rcd->rcvegrbuf[chunk] + (idx << rcd->dd->rcvegrbufsize_shift);
}
/*
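
Because rcvegrbufs_perchunk and rcvegrbufsize are forced to powers of two at init time (see the ilog2()/BUG_ON() hunks in qib_init.c and the qib_iba*.c files below), the old divide and modulo reduce to a shift and a mask. A minimal sketch of the same strength reduction, using hypothetical stand-in fields:

#include <linux/types.h>

struct egr_layout {                     /* hypothetical stand-in */
	u32 perchunk;                   /* power of two */
	u32 perchunk_shift;             /* ilog2(perchunk) */
	u32 bufsize_shift;              /* ilog2(bufsize) */
};

static inline u32 egr_chunk(const struct egr_layout *l, u32 etail)
{
	return etail >> l->perchunk_shift;      /* etail / perchunk */
}

static inline u32 egr_offset(const struct egr_layout *l, u32 etail)
{
	/* (etail % perchunk) * bufsize, as mask + shift */
	return (etail & (l->perchunk - 1)) << l->bufsize_shift;
}
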
@@ -310,7 +311,6 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
u32 opcode;
u32 psn;
int diff;
- unsigned long flags;
/* Sanity check packet */
if (tlen < 24)
@@ -365,7 +365,6 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
switch (qp->ibqp.qp_type) {
case IB_QPT_RC:
- spin_lock_irqsave(&qp->s_lock, flags);
ruc_res =
qib_ruc_check_hdr(
ibp, hdr,
@@ -373,11 +372,8 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
qp,
be32_to_cpu(ohdr->bth[0]));
if (ruc_res) {
- spin_unlock_irqrestore(&qp->s_lock,
- flags);
goto unlock;
}
- spin_unlock_irqrestore(&qp->s_lock, flags);
/* Only deal with RDMA Writes for now */
if (opcode <
@@ -547,6 +543,15 @@ move_along:
updegr = 0;
}
}
+ /*
+ * Notify qib_destroy_qp() if it is waiting
+ * for lookaside_qp to finish.
+ */
+ if (rcd->lookaside_qp) {
+ if (atomic_dec_and_test(&rcd->lookaside_qp->refcount))
+ wake_up(&rcd->lookaside_qp->wait);
+ rcd->lookaside_qp = NULL;
+ }
rcd->head = l;
rcd->pkt_count += i;
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
index 92d9cfe..6359c2f 100644
--- a/drivers/infiniband/hw/qib/qib_eeprom.c
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
@@ -263,189 +263,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
qib_dev_err(dd, "Board SN %s did not pass functional "
"test: %s\n", dd->serial, ifp->if_comment);
- memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
- /*
- * Power-on (actually "active") hours are kept as little-endian value
- * in EEPROM, but as seconds in a (possibly as small as 24-bit)
- * atomic_t while running.
- */
- atomic_set(&dd->active_time, 0);
- dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
-
done:
vfree(buf);
bail:;
}
-/**
- * qib_update_eeprom_log - copy active-time and error counters to eeprom
- * @dd: the qlogic_ib device
- *
- * Although the time is kept as seconds in the qib_devdata struct, it is
- * rounded to hours for re-write, as we have only 16 bits in EEPROM.
- * First-cut code reads whole (expected) struct qib_flash, modifies,
- * re-writes. Future direction: read/write only what we need, assuming
- * that the EEPROM had to have been "good enough" for driver init, and
- * if not, we aren't making it worse.
- *
- */
-int qib_update_eeprom_log(struct qib_devdata *dd)
-{
- void *buf;
- struct qib_flash *ifp;
- int len, hi_water;
- uint32_t new_time, new_hrs;
- u8 csum;
- int ret, idx;
- unsigned long flags;
-
- /* first, check if we actually need to do anything. */
- ret = 0;
- for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
- if (dd->eep_st_new_errs[idx]) {
- ret = 1;
- break;
- }
- }
- new_time = atomic_read(&dd->active_time);
-
- if (ret == 0 && new_time < 3600)
- goto bail;
-
- /*
- * The quick-check above determined that there is something worthy
- * of logging, so get current contents and do a more detailed idea.
- * read full flash, not just currently used part, since it may have
- * been written with a newer definition
- */
- len = sizeof(struct qib_flash);
- buf = vmalloc(len);
- ret = 1;
- if (!buf) {
- qib_dev_err(dd, "Couldn't allocate memory to read %u "
- "bytes from eeprom for logging\n", len);
- goto bail;
- }
-
- /* Grab semaphore and read current EEPROM. If we get an
- * error, let go, but if not, keep it until we finish write.
- */
- ret = mutex_lock_interruptible(&dd->eep_lock);
- if (ret) {
- qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
- goto free_bail;
- }
- ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
- if (ret) {
- mutex_unlock(&dd->eep_lock);
- qib_dev_err(dd, "Unable read EEPROM for logging\n");
- goto free_bail;
- }
- ifp = (struct qib_flash *)buf;
-
- csum = flash_csum(ifp, 0);
- if (csum != ifp->if_csum) {
- mutex_unlock(&dd->eep_lock);
- qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
- csum, ifp->if_csum);
- ret = 1;
- goto free_bail;
- }
- hi_water = 0;
- spin_lock_irqsave(&dd->eep_st_lock, flags);
- for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
- int new_val = dd->eep_st_new_errs[idx];
- if (new_val) {
- /*
- * If we have seen any errors, add to EEPROM values
- * We need to saturate at 0xFF (255) and we also
- * would need to adjust the checksum if we were
- * trying to minimize EEPROM traffic
- * Note that we add to actual current count in EEPROM,
- * in case it was altered while we were running.
- */
- new_val += ifp->if_errcntp[idx];
- if (new_val > 0xFF)
- new_val = 0xFF;
- if (ifp->if_errcntp[idx] != new_val) {
- ifp->if_errcntp[idx] = new_val;
- hi_water = offsetof(struct qib_flash,
- if_errcntp) + idx;
- }
- /*
- * update our shadow (used to minimize EEPROM
- * traffic), to match what we are about to write.
- */
- dd->eep_st_errs[idx] = new_val;
- dd->eep_st_new_errs[idx] = 0;
- }
- }
- /*
- * Now update active-time. We would like to round to the nearest hour
- * but unless atomic_t are sure to be proper signed ints we cannot,
- * because we need to account for what we "transfer" to EEPROM and
- * if we log an hour at 31 minutes, then we would need to set
- * active_time to -29 to accurately count the _next_ hour.
- */
- if (new_time >= 3600) {
- new_hrs = new_time / 3600;
- atomic_sub((new_hrs * 3600), &dd->active_time);
- new_hrs += dd->eep_hrs;
- if (new_hrs > 0xFFFF)
- new_hrs = 0xFFFF;
- dd->eep_hrs = new_hrs;
- if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
- ifp->if_powerhour[0] = new_hrs & 0xFF;
- hi_water = offsetof(struct qib_flash, if_powerhour);
- }
- if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
- ifp->if_powerhour[1] = new_hrs >> 8;
- hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
- }
- }
- /*
- * There is a tiny possibility that we could somehow fail to write
- * the EEPROM after updating our shadows, but problems from holding
- * the spinlock too long are a much bigger issue.
- */
- spin_unlock_irqrestore(&dd->eep_st_lock, flags);
- if (hi_water) {
- /* we made some change to the data, uopdate cksum and write */
- csum = flash_csum(ifp, 1);
- ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
- }
- mutex_unlock(&dd->eep_lock);
- if (ret)
- qib_dev_err(dd, "Failed updating EEPROM\n");
-
-free_bail:
- vfree(buf);
-bail:
- return ret;
-}
-
-/**
- * qib_inc_eeprom_err - increment one of the four error counters
- * that are logged to EEPROM.
- * @dd: the qlogic_ib device
- * @eidx: 0..3, the counter to increment
- * @incr: how much to add
- *
- * Each counter is 8-bits, and saturates at 255 (0xFF). They
- * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
- * is called, but it can only be called in a context that allows sleep.
- * This function can be called even at interrupt level.
- */
-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
-{
- uint new_val;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->eep_st_lock, flags);
- new_val = dd->eep_st_new_errs[eidx] + incr;
- if (new_val > 255)
- new_val = 255;
- dd->eep_st_new_errs[eidx] = new_val;
- spin_unlock_irqrestore(&dd->eep_st_lock, flags);
-}
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 406fca5..a740324 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -43,6 +43,7 @@
#include <linux/jiffies.h>
#include <asm/pgtable.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include "qib.h"
#include "qib_common.h"
@@ -1284,6 +1285,7 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
ctxt_fp(fp) = rcd;
qib_stats.sps_ctxts++;
+ dd->freectxts--;
ret = 0;
goto bail;
@@ -1527,6 +1529,7 @@ done_chk_sdma:
struct qib_filedata *fd = fp->private_data;
const struct qib_ctxtdata *rcd = fd->rcd;
const struct qib_devdata *dd = rcd->dd;
+ unsigned int weight;
if (dd->flags & QIB_HAS_SEND_DMA) {
fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
@@ -1545,8 +1548,8 @@ done_chk_sdma:
* it just means that sooner or later we don't recommend
* a cpu, and let the scheduler do it's best.
*/
- if (!ret && cpus_weight(current->cpus_allowed) >=
- qib_cpulist_count) {
+ weight = cpumask_weight(tsk_cpus_allowed(current));
+ if (!ret && weight >= qib_cpulist_count) {
int cpu;
cpu = find_first_zero_bit(qib_cpulist,
qib_cpulist_count);
@@ -1554,13 +1557,13 @@ done_chk_sdma:
__set_bit(cpu, qib_cpulist);
fd->rec_cpu_num = cpu;
}
- } else if (cpus_weight(current->cpus_allowed) == 1 &&
- test_bit(first_cpu(current->cpus_allowed),
+ } else if (weight == 1 &&
+ test_bit(cpumask_first(tsk_cpus_allowed(current)),
qib_cpulist))
qib_devinfo(dd->pcidev, "%s PID %u affinity "
"set to cpu %d; already allocated\n",
current->comm, current->pid,
- first_cpu(current->cpus_allowed));
+ cpumask_first(tsk_cpus_allowed(current)));
}
mutex_unlock(&qib_mutex);
@@ -1791,6 +1794,7 @@ static int qib_close(struct inode *in, struct file *fp)
if (dd->pageshadow)
unlock_expected_tids(rcd);
qib_stats.sps_ctxts--;
+ dd->freectxts++;
}
mutex_unlock(&qib_mutex);
@@ -1904,8 +1908,9 @@ int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
struct qib_ctxtdata *rcd;
unsigned ctxt;
int ret = 0;
+ unsigned long flags;
- spin_lock(&ppd->dd->uctxt_lock);
+ spin_lock_irqsave(&ppd->dd->uctxt_lock, flags);
for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
ctxt++) {
rcd = ppd->dd->rcd[ctxt];
@@ -1924,7 +1929,7 @@ int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
ret = 1;
break;
}
- spin_unlock(&ppd->dd->uctxt_lock);
+ spin_unlock_irqrestore(&ppd->dd->uctxt_lock, flags);
return ret;
}
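
uctxt_lock is now also taken from interrupt context elsewhere in the driver, so process-context holders must use the _irqsave variant or risk deadlocking against their own CPU's IRQ handler. The general shape, as a sketch with a hypothetical walker:

static void walk_user_ctxts(struct qib_devdata *dd)
{
	unsigned long flags;
	unsigned int ctxt;

	spin_lock_irqsave(&dd->uctxt_lock, flags);      /* irqs off */
	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
		/* ... inspect dd->rcd[ctxt] safely ... */
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags); /* irqs restored */
}
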
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 65df26c..2a0f7de 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2674,8 +2674,6 @@ static void qib_get_6120_faststats(unsigned long opaque)
spin_lock_irqsave(&dd->eep_st_lock, flags);
traffic_wds -= dd->traffic_wds;
dd->traffic_wds += traffic_wds;
- if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
- atomic_add(5, &dd->active_time); /* S/B #define */
spin_unlock_irqrestore(&dd->eep_st_lock, flags);
qib_chk_6120_errormask(dd);
@@ -3275,6 +3273,8 @@ static int init_6120_variables(struct qib_devdata *dd)
/* we always allocate at least 2048 bytes for eager buffers */
ret = ib_mtu_enum_to_int(qib_ibmtu);
dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
+ BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
+ dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
qib_6120_tidtemplate(dd);
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 759bb63..4dc04e3 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -39,6 +39,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/io.h>
#include <rdma/ib_verbs.h>
@@ -2434,6 +2435,7 @@ static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
int lsb, ret = 0, setforce = 0;
u16 lcmd, licmd;
unsigned long flags;
+ u32 tmp = 0;
switch (which) {
case QIB_IB_CFG_LIDLMC:
@@ -2467,9 +2469,6 @@ static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
maskr = IBA7220_IBC_WIDTH_MASK;
lsb = IBA7220_IBC_WIDTH_SHIFT;
setforce = 1;
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
break;
case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
@@ -2643,6 +2642,28 @@ static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
goto bail;
}
qib_set_ib_7220_lstate(ppd, lcmd, licmd);
+
+ maskr = IBA7220_IBC_WIDTH_MASK;
+ lsb = IBA7220_IBC_WIDTH_SHIFT;
+ tmp = (ppd->cpspec->ibcddrctrl >> lsb) & maskr;
+ /* If the width active on the chip does not match the
+ * width in the shadow register, write the new active
+ * width to the chip.
+ * We don't have to worry about speed as the speed is taken
+ * care of by set_7220_ibspeed_fast called by ib_updown.
+ */
+ if (ppd->link_width_enabled-1 != tmp) {
+ ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
+ ppd->cpspec->ibcddrctrl |=
+ (((u64)(ppd->link_width_enabled-1) & maskr) <<
+ lsb);
+ qib_write_kreg(dd, kr_ibcddrctrl,
+ ppd->cpspec->ibcddrctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ }
goto bail;
case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
@@ -3271,8 +3292,6 @@ static void qib_get_7220_faststats(unsigned long opaque)
spin_lock_irqsave(&dd->eep_st_lock, flags);
traffic_wds -= dd->traffic_wds;
dd->traffic_wds += traffic_wds;
- if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
- atomic_add(5, &dd->active_time); /* S/B #define */
spin_unlock_irqrestore(&dd->eep_st_lock, flags);
done:
mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
@@ -4067,6 +4086,8 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
/* we always allocate at least 2048 bytes for eager buffers */
ret = ib_mtu_enum_to_int(qib_ibmtu);
dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
+ BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
+ dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
qib_7220_tidtemplate(dd);
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 49e4a58..44180c6 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -40,6 +40,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
+#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
@@ -114,6 +115,10 @@ static ushort qib_singleport;
module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
+static ushort qib_krcvq01_no_msi;
+module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
+MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");
+
/*
* Receive header queue sizes
*/
@@ -397,7 +402,6 @@ MODULE_PARM_DESC(txselect, \
#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
#define crp_txlenerr CREG_IDX(TxLenErrCnt)
-#define crp_txlenerr CREG_IDX(TxLenErrCnt)
#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
@@ -1107,9 +1111,9 @@ static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
- .msg = #fldname }
+ .msg = #fldname, .sz = sizeof(#fldname) }
#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
- fldname##Mask##_##port), .msg = #fldname }
+ fldname##Mask##_##port), .msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
@@ -1127,14 +1131,16 @@ static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
HWE_AUTO(statusValidNoEop),
HWE_AUTO(LATriggered),
- { .mask = 0 }
+ { .mask = 0, .sz = 0 }
};
#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
- .msg = #fldname }
+ .msg = #fldname, .sz = sizeof(#fldname) }
#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
- .msg = #fldname }
+ .msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
+ E_AUTO(RcvEgrFullErr),
+ E_AUTO(RcvHdrFullErr),
E_AUTO(ResetNegated),
E_AUTO(HardwareErr),
E_AUTO(InvalidAddrErr),
@@ -1147,9 +1153,7 @@ static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
E_AUTO(SendSpecialTriggerErr),
E_AUTO(SDmaWrongPortErr),
E_AUTO(SDmaBufMaskDuplicateErr),
- E_AUTO(RcvHdrFullErr),
- E_AUTO(RcvEgrFullErr),
- { .mask = 0 }
+ { .mask = 0, .sz = 0 }
};
static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
@@ -1159,7 +1163,8 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
/*
* SDmaHaltErr is not really an error, make it clearer;
*/
- {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted"},
+ {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
+ .sz = 11},
E_P_AUTO(SDmaDescAddrMisalignErr),
E_P_AUTO(SDmaUnexpDataErr),
E_P_AUTO(SDmaMissingDwErr),
@@ -1195,7 +1200,7 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
E_P_AUTO(RcvICRCErr),
E_P_AUTO(RcvVCRCErr),
E_P_AUTO(RcvFormatErr),
- { .mask = 0 }
+ { .mask = 0, .sz = 0 }
};
/*
@@ -1203,17 +1208,17 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
* context
*/
#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
- .msg = #fldname }
+ .msg = #fldname, .sz = sizeof(#fldname) }
/* Below generates "auto-message" for interrupts specific to a port */
#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
SYM_LSB(IntMask, fldname##Mask##_0), \
SYM_LSB(IntMask, fldname##Mask##_1)), \
- .msg = #fldname "_P" }
+ .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
/* For some reason, the SerDesTrimDone bits are reversed */
#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
SYM_LSB(IntMask, fldname##Mask##_1), \
SYM_LSB(IntMask, fldname##Mask##_0)), \
- .msg = #fldname "_P" }
+ .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
/*
* Below generates "auto-message" for interrupts specific to a context,
* with ctxt-number appended
@@ -1221,7 +1226,7 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
SYM_LSB(IntMask, fldname##0IntMask), \
SYM_LSB(IntMask, fldname##17IntMask)), \
- .msg = #fldname "_C"}
+ .msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
INTR_AUTO_P(SDmaInt),
@@ -1235,11 +1240,12 @@ static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
INTR_AUTO_P(SendDoneInt),
INTR_AUTO(SendBufAvailInt),
INTR_AUTO_C(RcvAvail),
- { .mask = 0 }
+ { .mask = 0, .sz = 0 }
};
#define TXSYMPTOM_AUTO_P(fldname) \
- { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), .msg = #fldname }
+ { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
+ .msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs hdrchk_msgs[] = {
TXSYMPTOM_AUTO_P(NonKeyPacket),
TXSYMPTOM_AUTO_P(GRHFail),
@@ -1248,7 +1254,7 @@ static const struct qib_hwerror_msgs hdrchk_msgs[] = {
TXSYMPTOM_AUTO_P(SLIDFail),
TXSYMPTOM_AUTO_P(RawIPV6),
TXSYMPTOM_AUTO_P(PacketTooSmall),
- { .mask = 0 }
+ { .mask = 0, .sz = 0 }
};
#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
@@ -1293,7 +1299,7 @@ static void err_decode(char *msg, size_t len, u64 errs,
u64 these, lmask;
int took, multi, n = 0;
- while (msp && msp->mask) {
+ while (errs && msp && msp->mask) {
multi = (msp->mask & (msp->mask - 1));
while (errs & msp->mask) {
these = (errs & msp->mask);
@@ -1304,9 +1310,14 @@ static void err_decode(char *msg, size_t len, u64 errs,
*msg++ = ',';
len--;
}
- took = scnprintf(msg, len, "%s", msp->msg);
+ BUG_ON(!msp->sz);
+ /* msp->sz counts the nul */
+ took = min_t(size_t, msp->sz - (size_t)1, len);
+ memcpy(msg, msp->msg, took);
len -= took;
msg += took;
+ if (len)
+ *msg = '\0';
}
errs &= ~lmask;
if (len && multi) {
@@ -1644,6 +1655,14 @@ done:
return;
}
+static void qib_error_tasklet(unsigned long data)
+{
+ struct qib_devdata *dd = (struct qib_devdata *)data;
+
+ handle_7322_errors(dd);
+ qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+}
+
static void reenable_chase(unsigned long opaque)
{
struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
@@ -2260,6 +2279,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
qib_write_kreg(dd, kr_scratch, 0ULL);
+ /* ensure previous Tx parameters are not still forced */
+ qib_write_kreg_port(ppd, krp_tx_deemph_override,
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ reset_tx_deemphasis_override));
+
if (qib_compat_ddr_negotiate) {
ppd->cpspec->ibdeltainprog = 1;
ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
@@ -2288,16 +2312,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
SYM_LSB(IBCCtrlA_0, MaxPktLen);
ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
- /* initially come up waiting for TS1, without sending anything. */
- val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
- QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
-
/*
* Reset the PCS interface to the serdes (and also ibc, which is still
* in reset from above). Writes new value of ibcctrl_a as last step.
*/
qib_7322_mini_pcs_reset(ppd);
- qib_write_kreg(dd, kr_scratch, 0ULL);
if (!ppd->cpspec->ibcctrl_b) {
unsigned lse = ppd->link_speed_enabled;
@@ -2363,17 +2382,20 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
set_vls(ppd);
+ /* initially come up DISABLED, without sending anything. */
+ val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
+ QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ /* clear the linkinit cmds */
+ ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
+
/* be paranoid against later code motion, etc. */
spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
- /* Hold the link state machine for mezz boards */
- if (IS_QMH(dd) || IS_QME(dd))
- qib_set_ib_7322_lstate(ppd, 0,
- QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
-
/* Also enable IBSTATUSCHG interrupt. */
val = qib_read_kreg_port(ppd, krp_errmask);
qib_write_kreg_port(ppd, krp_errmask,
@@ -2725,8 +2747,10 @@ static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
unknown_7322_ibits(dd, istat);
if (istat & QIB_I_GPIO)
unknown_7322_gpio_intr(dd);
- if (istat & QIB_I_C_ERROR)
- handle_7322_errors(dd);
+ if (istat & QIB_I_C_ERROR) {
+ qib_write_kreg(dd, kr_errmask, 0ULL);
+ tasklet_schedule(&dd->error_tasklet);
+ }
if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
handle_7322_p_errors(dd->rcd[0]->ppd);
if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
@@ -2833,9 +2857,8 @@ static irqreturn_t qib_7322intr(int irq, void *data)
for (i = 0; i < dd->first_user_ctxt; i++) {
if (ctxtrbits & rmask) {
ctxtrbits &= ~rmask;
- if (dd->rcd[i]) {
+ if (dd->rcd[i])
qib_kreceive(dd->rcd[i], NULL, &npkts);
- }
}
rmask <<= 1;
}
@@ -3125,6 +3148,8 @@ try_intx:
arg = dd->rcd[ctxt];
if (!arg)
continue;
+ if (qib_krcvq01_no_msi && ctxt < 2)
+ continue;
lsb = QIB_I_RCVAVAIL_LSB + ctxt;
handler = qib_7322pintr;
name = QIB_DRV_NAME " (kctx)";
@@ -3159,6 +3184,8 @@ try_intx:
for (i = 0; i < ARRAY_SIZE(redirect); i++)
qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
dd->cspec->main_int_mask = mask;
+ tasklet_init(&dd->error_tasklet, qib_error_tasklet,
+ (unsigned long)dd);
bail:;
}
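
The hard-IRQ path now just masks further error interrupts and schedules the tasklet; qib_error_tasklet() does the slow decode in softirq context and unmasks when finished. The pattern in isolation, with a hypothetical device type and register helper:

static void my_error_tasklet(unsigned long data)
{
	struct my_dev *dd = (struct my_dev *)data;      /* hypothetical */

	decode_and_clear_errors(dd);            /* slow work, softirq ctx */
	write_errmask(dd, dd->errormask);       /* re-enable error irqs */
}

/* once, at interrupt setup: */
/*   tasklet_init(&dd->error_tasklet, my_error_tasklet, (unsigned long)dd); */
/* in the hard-IRQ handler, on an error interrupt: */
/*   write_errmask(dd, 0);                    mask further errors  */
/*   tasklet_schedule(&dd->error_tasklet);    defer the decode     */
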
@@ -4766,8 +4793,6 @@ static void qib_get_7322_faststats(unsigned long opaque)
spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
traffic_wds -= ppd->dd->traffic_wds;
ppd->dd->traffic_wds += traffic_wds;
- if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
- atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
QIB_IB_QDR) &&
@@ -5208,6 +5233,8 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
QIBL_IB_AUTONEG_INPROG)))
set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
+ struct qib_qsfp_data *qd =
+ &ppd->cpspec->qsfp_data;
/* unlock the Tx settings, speed may change */
qib_write_kreg_port(ppd, krp_tx_deemph_override,
SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
@@ -5215,6 +5242,12 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
qib_cancel_sends(ppd);
/* on link down, ensure sane pcs state */
qib_7322_mini_pcs_reset(ppd);
+ /* schedule the QSFP refresh, which should turn the link off */
+ if (ppd->dd->flags & QIB_HAS_QSFP) {
+ qd->t_insert = get_jiffies_64();
+ queue_work(ib_wq, &qd->work);
+ }
spin_lock_irqsave(&ppd->sdma_lock, flags);
if (__qib_sdma_running(ppd))
__qib_sdma_process_event(ppd,
@@ -5565,43 +5598,79 @@ static void qsfp_7322_event(struct work_struct *work)
struct qib_qsfp_data *qd;
struct qib_pportdata *ppd;
u64 pwrup;
+ unsigned long flags;
int ret;
u32 le2;
qd = container_of(work, struct qib_qsfp_data, work);
ppd = qd->ppd;
- pwrup = qd->t_insert + msecs_to_jiffies(QSFP_PWR_LAG_MSEC);
+ pwrup = qd->t_insert +
+ msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
- /*
- * Some QSFP's not only do not respond until the full power-up
- * time, but may behave badly if we try. So hold off responding
- * to insertion.
- */
- while (1) {
- u64 now = get_jiffies_64();
- if (time_after64(now, pwrup))
- break;
- msleep(20);
- }
- ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
- /*
- * Need to change LE2 back to defaults if we couldn't
- * read the cable type (to handle cable swaps), so do this
- * even on failure to read cable information. We don't
- * get here for QME, so IS_QME check not needed here.
- */
- if (!ret && !ppd->dd->cspec->r1) {
- if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
- le2 = LE2_QME;
- else if (qd->cache.atten[1] >= qib_long_atten &&
- QSFP_IS_CU(qd->cache.tech))
- le2 = LE2_5m;
- else
+ /* Delay for 20 msecs to allow the ModPrs resistor to set up */
+ mdelay(QSFP_MODPRS_LAG_MSEC);
+
+ if (!qib_qsfp_mod_present(ppd)) {
+ ppd->cpspec->qsfp_data.modpresent = 0;
+ /* Set the physical link to disabled */
+ qib_set_ib_7322_lstate(ppd, 0,
+ QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags &= ~QIBL_LINKV;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ } else {
+ /*
+ * Some QSFP's not only do not respond until the full power-up
+ * time, but may behave badly if we try. So hold off responding
+ * to insertion.
+ */
+ while (1) {
+ u64 now = get_jiffies_64();
+ if (time_after64(now, pwrup))
+ break;
+ msleep(20);
+ }
+
+ ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
+
+ /*
+ * Need to change LE2 back to defaults if we couldn't
+ * read the cable type (to handle cable swaps), so do this
+ * even on failure to read cable information. We don't
+ * get here for QME, so IS_QME check not needed here.
+ */
+ if (!ret && !ppd->dd->cspec->r1) {
+ if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
+ le2 = LE2_QME;
+ else if (qd->cache.atten[1] >= qib_long_atten &&
+ QSFP_IS_CU(qd->cache.tech))
+ le2 = LE2_5m;
+ else
+ le2 = LE2_DEFAULT;
+ } else
le2 = LE2_DEFAULT;
- } else
- le2 = LE2_DEFAULT;
- ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
- init_txdds_table(ppd, 0);
+ ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
+ /*
+ * We always change parameters, since we can choose
+ * values for cables without eeproms, and the cable may have
+ * changed from a cable with full or partial eeprom content
+ * to one with partial or no content.
+ */
+ init_txdds_table(ppd, 0);
+ /* The physical link is being re-enabled only when the
+ * previous state was DISABLED and the VALID bit is not
+ * set. This should only happen when the cable has been
+ * physically pulled. */
+ if (!ppd->cpspec->qsfp_data.modpresent &&
+ (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
+ ppd->cpspec->qsfp_data.modpresent = 1;
+ qib_set_ib_7322_lstate(ppd, 0,
+ QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_LINKV;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ }
+ }
}
/*
@@ -5705,7 +5774,8 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
/* now change the IBC and serdes, overriding generic */
init_txdds_table(ppd, 1);
/* Re-enable the physical state machine on mezz boards
- * now that the correct settings have been set. */
+ * now that the correct settings have been set.
+ * QSFP boards are handled by the QSFP event handler */
if (IS_QMH(dd) || IS_QME(dd))
qib_set_ib_7322_lstate(ppd, 0,
QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
@@ -6183,6 +6253,8 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
/* we always allocate at least 2048 bytes for eager buffers */
dd->rcvegrbufsize = max(mtu, 2048);
+ BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
+ dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
qib_7322_tidtemplate(dd);
@@ -6790,6 +6862,10 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
(i >= ARRAY_SIZE(irq_table) &&
dd->rcd[i - ARRAY_SIZE(irq_table)]))
actual_cnt++;
+ /* reduce by ctxts < 2 */
+ if (qib_krcvq01_no_msi)
+ actual_cnt -= dd->num_pports;
+
tabsize = actual_cnt;
dd->cspec->msix_entries = kmalloc(tabsize *
sizeof(struct msix_entry), GFP_KERNEL);
@@ -7121,7 +7197,8 @@ static void find_best_ent(struct qib_pportdata *ppd,
}
}
- /* Lookup serdes setting by cable type and attenuation */
+ /* Active cables don't have attenuation, so we only set SERDES
+ * settings to account for the attenuation of the board traces. */
if (!override && QSFP_IS_ACTIVE(qd->tech)) {
*sdr_dds = txdds_sdr + ppd->dd->board_atten;
*ddr_dds = txdds_ddr + ppd->dd->board_atten;
@@ -7438,12 +7515,6 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd)
u32 le_val, rxcaldone;
int chan, chan_done = (1 << SERDES_CHANS) - 1;
- /*
- * Initialize the Tx DDS tables. Also done every QSFP event,
- * for adapters with QSFP
- */
- init_txdds_table(ppd, 0);
-
/* Clear cmode-override, may be set from older driver */
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
@@ -7629,6 +7700,12 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd)
/* VGA output common mode */
ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
+ /*
+ * Initialize the Tx DDS tables. Also done every QSFP event,
+ * for adapters with QSFP
+ */
+ init_txdds_table(ppd, 0);
+
return 0;
}
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index a01f3fc..9966ec2 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -37,6 +37,7 @@
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
+#include <linux/module.h>
#include "qib.h"
#include "qib_common.h"
@@ -183,6 +184,9 @@ struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt)
rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
rcd->rcvegrbufs_perchunk - 1) /
rcd->rcvegrbufs_perchunk;
+ BUG_ON(!is_power_of_2(rcd->rcvegrbufs_perchunk));
+ rcd->rcvegrbufs_perchunk_shift =
+ ilog2(rcd->rcvegrbufs_perchunk);
}
return rcd;
}
@@ -398,6 +402,7 @@ static void enable_chip(struct qib_devdata *dd)
if (rcd)
dd->f_rcvctrl(rcd->ppd, rcvmask, i);
}
+ dd->freectxts = dd->cfgctxts - dd->first_user_ctxt;
}
static void verify_interrupt(unsigned long opaque)
@@ -581,10 +586,6 @@ int qib_init(struct qib_devdata *dd, int reinit)
continue;
}
- /* let link come up, and enable IBC */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
portok++;
}
@@ -764,7 +765,6 @@ static void qib_shutdown_device(struct qib_devdata *dd)
dd->f_quiet_serdes(ppd);
}
- qib_update_eeprom_log(dd);
}
/**
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 8fd19a4..ca6e6cf 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -69,6 +69,10 @@ int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
* unrestricted LKEY.
*/
rkt->gen++;
+ /*
+ * bits are capped in qib_verbs.c to insure enough bits
+ * for generation number
+ */
mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
<< 8);
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 8fd3df5..dd2ad6d3 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -1007,7 +1007,7 @@ static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
event.event = IB_EVENT_PKEY_CHANGE;
event.device = &dd->verbs_dev.ibdev;
- event.element.port_num = 1;
+ event.element.port_num = port;
ib_dispatch_event(&event);
}
return 0;
@@ -1125,22 +1125,22 @@ static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}
-static int pma_get_classportinfo(struct ib_perf *pmp,
+static int pma_get_classportinfo(struct ib_pma_mad *pmp,
struct ib_device *ibdev)
{
- struct ib_pma_classportinfo *p =
- (struct ib_pma_classportinfo *)pmp->data;
+ struct ib_class_port_info *p =
+ (struct ib_class_port_info *)pmp->data;
struct qib_devdata *dd = dd_from_ibdev(ibdev);
memset(pmp->data, 0, sizeof(pmp->data));
- if (pmp->attr_mod != 0)
- pmp->status |= IB_SMP_INVALID_FIELD;
+ if (pmp->mad_hdr.attr_mod != 0)
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
/* Note that AllPortSelect is not valid */
p->base_version = 1;
p->class_version = 1;
- p->cap_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+ p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
/*
* Set the most significant bit of CM2 to indicate support for
* congestion statistics
@@ -1154,7 +1154,7 @@ static int pma_get_classportinfo(struct ib_perf *pmp,
return reply((struct ib_smp *) pmp);
}
-static int pma_get_portsamplescontrol(struct ib_perf *pmp,
+static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portsamplescontrol *p =
@@ -1169,8 +1169,8 @@ static int pma_get_portsamplescontrol(struct ib_perf *pmp,
memset(pmp->data, 0, sizeof(pmp->data));
p->port_select = port_select;
- if (pmp->attr_mod != 0 || port_select != port) {
- pmp->status |= IB_SMP_INVALID_FIELD;
+ if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
goto bail;
}
spin_lock_irqsave(&ibp->lock, flags);
@@ -1192,7 +1192,7 @@ bail:
return reply((struct ib_smp *) pmp);
}
-static int pma_set_portsamplescontrol(struct ib_perf *pmp,
+static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portsamplescontrol *p =
@@ -1205,8 +1205,8 @@ static int pma_set_portsamplescontrol(struct ib_perf *pmp,
u8 status, xmit_flags;
int ret;
- if (pmp->attr_mod != 0 || p->port_select != port) {
- pmp->status |= IB_SMP_INVALID_FIELD;
+ if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
ret = reply((struct ib_smp *) pmp);
goto bail;
}
@@ -1321,7 +1321,7 @@ static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
return ret;
}
-static int pma_get_portsamplesresult(struct ib_perf *pmp,
+static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portsamplesresult *p =
@@ -1360,7 +1360,7 @@ static int pma_get_portsamplesresult(struct ib_perf *pmp,
return reply((struct ib_smp *) pmp);
}
-static int pma_get_portsamplesresult_ext(struct ib_perf *pmp,
+static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portsamplesresult_ext *p =
@@ -1402,7 +1402,7 @@ static int pma_get_portsamplesresult_ext(struct ib_perf *pmp,
return reply((struct ib_smp *) pmp);
}
-static int pma_get_portcounters(struct ib_perf *pmp,
+static int pma_get_portcounters(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1436,8 +1436,8 @@ static int pma_get_portcounters(struct ib_perf *pmp,
memset(pmp->data, 0, sizeof(pmp->data));
p->port_select = port_select;
- if (pmp->attr_mod != 0 || port_select != port)
- pmp->status |= IB_SMP_INVALID_FIELD;
+ if (pmp->mad_hdr.attr_mod != 0 || port_select != port)
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
if (cntrs.symbol_error_counter > 0xFFFFUL)
p->symbol_error_counter = cpu_to_be16(0xFFFF);
@@ -1472,7 +1472,7 @@ static int pma_get_portcounters(struct ib_perf *pmp,
cntrs.local_link_integrity_errors = 0xFUL;
if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
cntrs.excessive_buffer_overrun_errors = 0xFUL;
- p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+ p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
cntrs.excessive_buffer_overrun_errors;
if (cntrs.vl15_dropped > 0xFFFFUL)
p->vl15_dropped = cpu_to_be16(0xFFFF);
@@ -1500,7 +1500,7 @@ static int pma_get_portcounters(struct ib_perf *pmp,
return reply((struct ib_smp *) pmp);
}
-static int pma_get_portcounters_cong(struct ib_perf *pmp,
+static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
/* Congestion PMA packets start at offset 24 not 64 */
@@ -1510,7 +1510,7 @@ static int pma_get_portcounters_cong(struct ib_perf *pmp,
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
struct qib_devdata *dd = dd_from_ppd(ppd);
- u32 port_select = be32_to_cpu(pmp->attr_mod) & 0xFF;
+ u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF;
u64 xmit_wait_counter;
unsigned long flags;
@@ -1519,9 +1519,9 @@ static int pma_get_portcounters_cong(struct ib_perf *pmp,
* SET method ends up calling this anyway.
*/
if (!dd->psxmitwait_supported)
- pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
if (port_select != port)
- pmp->status |= IB_SMP_INVALID_FIELD;
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
qib_get_counters(ppd, &cntrs);
spin_lock_irqsave(&ppd->ibport_data.lock, flags);
@@ -1603,7 +1603,7 @@ static int pma_get_portcounters_cong(struct ib_perf *pmp,
cntrs.local_link_integrity_errors = 0xFUL;
if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
cntrs.excessive_buffer_overrun_errors = 0xFUL;
- p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+ p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
cntrs.excessive_buffer_overrun_errors;
if (cntrs.vl15_dropped > 0xFFFFUL)
p->vl15_dropped = cpu_to_be16(0xFFFF);
@@ -1613,7 +1613,7 @@ static int pma_get_portcounters_cong(struct ib_perf *pmp,
return reply((struct ib_smp *)pmp);
}
-static int pma_get_portcounters_ext(struct ib_perf *pmp,
+static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portcounters_ext *p =
@@ -1626,8 +1626,8 @@ static int pma_get_portcounters_ext(struct ib_perf *pmp,
memset(pmp->data, 0, sizeof(pmp->data));
p->port_select = port_select;
- if (pmp->attr_mod != 0 || port_select != port) {
- pmp->status |= IB_SMP_INVALID_FIELD;
+ if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
goto bail;
}
@@ -1652,7 +1652,7 @@ bail:
return reply((struct ib_smp *) pmp);
}
-static int pma_set_portcounters(struct ib_perf *pmp,
+static int pma_set_portcounters(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1715,14 +1715,14 @@ static int pma_set_portcounters(struct ib_perf *pmp,
return pma_get_portcounters(pmp, ibdev, port);
}
-static int pma_set_portcounters_cong(struct ib_perf *pmp,
+static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
struct qib_devdata *dd = dd_from_ppd(ppd);
struct qib_verbs_counters cntrs;
- u32 counter_select = (be32_to_cpu(pmp->attr_mod) >> 24) & 0xFF;
+ u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF;
int ret = 0;
unsigned long flags;
@@ -1766,7 +1766,7 @@ static int pma_set_portcounters_cong(struct ib_perf *pmp,
return ret;
}
-static int pma_set_portcounters_ext(struct ib_perf *pmp,
+static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1959,19 +1959,19 @@ static int process_perf(struct ib_device *ibdev, u8 port,
struct ib_mad *in_mad,
struct ib_mad *out_mad)
{
- struct ib_perf *pmp = (struct ib_perf *)out_mad;
+ struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
int ret;
*out_mad = *in_mad;
- if (pmp->class_version != 1) {
- pmp->status |= IB_SMP_UNSUP_VERSION;
+ if (pmp->mad_hdr.class_version != 1) {
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
ret = reply((struct ib_smp *) pmp);
goto bail;
}
- switch (pmp->method) {
+ switch (pmp->mad_hdr.method) {
case IB_MGMT_METHOD_GET:
- switch (pmp->attr_id) {
+ switch (pmp->mad_hdr.attr_id) {
case IB_PMA_CLASS_PORT_INFO:
ret = pma_get_classportinfo(pmp, ibdev);
goto bail;
@@ -1994,13 +1994,13 @@ static int process_perf(struct ib_device *ibdev, u8 port,
ret = pma_get_portcounters_cong(pmp, ibdev, port);
goto bail;
default:
- pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply((struct ib_smp *) pmp);
goto bail;
}
case IB_MGMT_METHOD_SET:
- switch (pmp->attr_id) {
+ switch (pmp->mad_hdr.attr_id) {
case IB_PMA_PORT_SAMPLES_CONTROL:
ret = pma_set_portsamplescontrol(pmp, ibdev, port);
goto bail;
@@ -2014,7 +2014,7 @@ static int process_perf(struct ib_device *ibdev, u8 port,
ret = pma_set_portcounters_cong(pmp, ibdev, port);
goto bail;
default:
- pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply((struct ib_smp *) pmp);
goto bail;
}
@@ -2030,7 +2030,7 @@ static int process_perf(struct ib_device *ibdev, u8 port,
goto bail;
default:
- pmp->status |= IB_SMP_UNSUP_METHOD;
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
ret = reply((struct ib_smp *) pmp);
}
diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h
index 7840ab5..ecc416c 100644
--- a/drivers/infiniband/hw/qib/qib_mad.h
+++ b/drivers/infiniband/hw/qib/qib_mad.h
@@ -32,6 +32,8 @@
* SOFTWARE.
*/
+#include <rdma/ib_pma.h>
+
#define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004)
#define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008)
#define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C)
@@ -180,109 +182,8 @@ struct ib_vl_weight_elem {
#define IB_VLARB_HIGHPRI_0_31 3
#define IB_VLARB_HIGHPRI_32_63 4
-/*
- * PMA class portinfo capability mask bits
- */
-#define IB_PMA_CLASS_CAP_ALLPORTSELECT cpu_to_be16(1 << 8)
-#define IB_PMA_CLASS_CAP_EXT_WIDTH cpu_to_be16(1 << 9)
-#define IB_PMA_CLASS_CAP_XMIT_WAIT cpu_to_be16(1 << 12)
-
-#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001)
-#define IB_PMA_PORT_SAMPLES_CONTROL cpu_to_be16(0x0010)
-#define IB_PMA_PORT_SAMPLES_RESULT cpu_to_be16(0x0011)
-#define IB_PMA_PORT_COUNTERS cpu_to_be16(0x0012)
-#define IB_PMA_PORT_COUNTERS_EXT cpu_to_be16(0x001D)
-#define IB_PMA_PORT_SAMPLES_RESULT_EXT cpu_to_be16(0x001E)
#define IB_PMA_PORT_COUNTERS_CONG cpu_to_be16(0xFF00)
-struct ib_perf {
- u8 base_version;
- u8 mgmt_class;
- u8 class_version;
- u8 method;
- __be16 status;
- __be16 unused;
- __be64 tid;
- __be16 attr_id;
- __be16 resv;
- __be32 attr_mod;
- u8 reserved[40];
- u8 data[192];
-} __attribute__ ((packed));
-
-struct ib_pma_classportinfo {
- u8 base_version;
- u8 class_version;
- __be16 cap_mask;
- u8 reserved[3];
- u8 resp_time_value; /* only lower 5 bits */
- union ib_gid redirect_gid;
- __be32 redirect_tc_sl_fl; /* 8, 4, 20 bits respectively */
- __be16 redirect_lid;
- __be16 redirect_pkey;
- __be32 redirect_qp; /* only lower 24 bits */
- __be32 redirect_qkey;
- union ib_gid trap_gid;
- __be32 trap_tc_sl_fl; /* 8, 4, 20 bits respectively */
- __be16 trap_lid;
- __be16 trap_pkey;
- __be32 trap_hl_qp; /* 8, 24 bits respectively */
- __be32 trap_qkey;
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplescontrol {
- u8 opcode;
- u8 port_select;
- u8 tick;
- u8 counter_width; /* only lower 3 bits */
- __be32 counter_mask0_9; /* 2, 10 * 3, bits */
- __be16 counter_mask10_14; /* 1, 5 * 3, bits */
- u8 sample_mechanisms;
- u8 sample_status; /* only lower 2 bits */
- __be64 option_mask;
- __be64 vendor_mask;
- __be32 sample_start;
- __be32 sample_interval;
- __be16 tag;
- __be16 counter_select[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplesresult {
- __be16 tag;
- __be16 sample_status; /* only lower 2 bits */
- __be32 counter[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplesresult_ext {
- __be16 tag;
- __be16 sample_status; /* only lower 2 bits */
- __be32 extended_width; /* only upper 2 bits */
- __be64 counter[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portcounters {
- u8 reserved;
- u8 port_select;
- __be16 counter_select;
- __be16 symbol_error_counter;
- u8 link_error_recovery_counter;
- u8 link_downed_counter;
- __be16 port_rcv_errors;
- __be16 port_rcv_remphys_errors;
- __be16 port_rcv_switch_relay_errors;
- __be16 port_xmit_discards;
- u8 port_xmit_constraint_errors;
- u8 port_rcv_constraint_errors;
- u8 reserved1;
- u8 lli_ebor_errors; /* 4, 4, bits */
- __be16 reserved2;
- __be16 vl15_dropped;
- __be32 port_xmit_data;
- __be32 port_rcv_data;
- __be32 port_xmit_packets;
- __be32 port_rcv_packets;
-} __attribute__ ((packed));
-
struct ib_pma_portcounters_cong {
u8 reserved;
u8 reserved1;
@@ -297,7 +198,7 @@ struct ib_pma_portcounters_cong {
u8 port_xmit_constraint_errors;
u8 port_rcv_constraint_errors;
u8 reserved2;
- u8 lli_ebor_errors; /* 4, 4, bits */
+ u8 link_overrun_errors; /* LocalLink: 7:4, BufferOverrun: 3:0 */
__be16 reserved3;
__be16 vl15_dropped;
__be64 port_xmit_data;
@@ -316,49 +217,11 @@ struct ib_pma_portcounters_cong {
/* number of 4nsec cycles equaling 2secs */
#define QIB_CONG_TIMER_PSINTERVAL 0x1DCD64EC
-#define IB_PMA_SEL_SYMBOL_ERROR cpu_to_be16(0x0001)
-#define IB_PMA_SEL_LINK_ERROR_RECOVERY cpu_to_be16(0x0002)
-#define IB_PMA_SEL_LINK_DOWNED cpu_to_be16(0x0004)
-#define IB_PMA_SEL_PORT_RCV_ERRORS cpu_to_be16(0x0008)
-#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS cpu_to_be16(0x0010)
-#define IB_PMA_SEL_PORT_XMIT_DISCARDS cpu_to_be16(0x0040)
-#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS cpu_to_be16(0x0200)
-#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS cpu_to_be16(0x0400)
-#define IB_PMA_SEL_PORT_VL15_DROPPED cpu_to_be16(0x0800)
-#define IB_PMA_SEL_PORT_XMIT_DATA cpu_to_be16(0x1000)
-#define IB_PMA_SEL_PORT_RCV_DATA cpu_to_be16(0x2000)
-#define IB_PMA_SEL_PORT_XMIT_PACKETS cpu_to_be16(0x4000)
-#define IB_PMA_SEL_PORT_RCV_PACKETS cpu_to_be16(0x8000)
-
#define IB_PMA_SEL_CONG_ALL 0x01
#define IB_PMA_SEL_CONG_PORT_DATA 0x02
#define IB_PMA_SEL_CONG_XMIT 0x04
#define IB_PMA_SEL_CONG_ROUTING 0x08
-struct ib_pma_portcounters_ext {
- u8 reserved;
- u8 port_select;
- __be16 counter_select;
- __be32 reserved1;
- __be64 port_xmit_data;
- __be64 port_rcv_data;
- __be64 port_xmit_packets;
- __be64 port_rcv_packets;
- __be64 port_unicast_xmit_packets;
- __be64 port_unicast_rcv_packets;
- __be64 port_multicast_xmit_packets;
- __be64 port_multicast_rcv_packets;
-} __attribute__ ((packed));
-
-#define IB_PMA_SELX_PORT_XMIT_DATA cpu_to_be16(0x0001)
-#define IB_PMA_SELX_PORT_RCV_DATA cpu_to_be16(0x0002)
-#define IB_PMA_SELX_PORT_XMIT_PACKETS cpu_to_be16(0x0004)
-#define IB_PMA_SELX_PORT_RCV_PACKETS cpu_to_be16(0x0008)
-#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS cpu_to_be16(0x0010)
-#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS cpu_to_be16(0x0020)
-#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS cpu_to_be16(0x0040)
-#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS cpu_to_be16(0x0080)
-
/*
* The PortSamplesControl.CounterMasks field is an array of 3 bit fields
* which specify the N'th counter's capabilities. See ch. 16.1.3.2.
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 891cc2f..97a8bdf 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -35,6 +35,7 @@
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/aer.h>
+#include <linux/module.h>
#include "qib.h"
@@ -255,7 +256,7 @@ int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
u16 linkstat, speed;
int pos = 0, pose, ret = 1;
- pose = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
+ pose = pci_pcie_cap(dd->pcidev);
if (!pose) {
qib_dev_err(dd, "Can't find PCI Express capability!\n");
/* set up something... */
@@ -509,7 +510,7 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
qib_devinfo(dd->pcidev, "Parent not root\n");
return 1;
}
- ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+ ppos = pci_pcie_cap(parent);
if (!ppos)
return 1;
if (parent->vendor != 0x8086)
@@ -578,14 +579,14 @@ static int qib_tune_pcie_caps(struct qib_devdata *dd)
qib_devinfo(dd->pcidev, "Parent not root\n");
goto bail;
}
- ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+ ppos = pci_pcie_cap(parent);
if (ppos) {
pci_read_config_word(parent, ppos + PCI_EXP_DEVCAP, &pcaps);
pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
} else
goto bail;
/* Find out supported and configured values for endpoint (us) */
- epos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
+ epos = pci_pcie_cap(dd->pcidev);
if (epos) {
pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCAP, &ecaps);
pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, &ectl);
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index e16751f..7e7e16f 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -34,6 +34,7 @@
#include <linux/err.h>
#include <linux/vmalloc.h>
+#include <linux/jhash.h>
#include "qib.h"
@@ -204,6 +205,13 @@ static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
}
+static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
+{
+ return jhash_1word(qpn, dev->qp_rnd) &
+ (dev->qp_table_size - 1);
+}
+
/*
* Put the QP into the hash table.
* The hash table holds a reference to the QP.
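
qpn_hash() replaces the old "qpn % qp_table_size" with a seeded jhash over a power-of-two table; dev->qp_rnd is assumed to be a per-device random seed chosen at init, which keeps sequentially allocated QPNs from clustering in a few buckets. Its shape, in isolation:

#include <linux/jhash.h>

static inline unsigned my_qpn_hash(u32 qpn, u32 seed, unsigned table_size)
{
	/* table_size must be a power of two for the mask to work */
	return jhash_1word(qpn, seed) & (table_size - 1);
}
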
@@ -211,22 +219,23 @@ static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- unsigned n = qp->ibqp.qp_num % dev->qp_table_size;
unsigned long flags;
+ unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
spin_lock_irqsave(&dev->qpt_lock, flags);
+ atomic_inc(&qp->refcount);
if (qp->ibqp.qp_num == 0)
- ibp->qp0 = qp;
+ rcu_assign_pointer(ibp->qp0, qp);
else if (qp->ibqp.qp_num == 1)
- ibp->qp1 = qp;
+ rcu_assign_pointer(ibp->qp1, qp);
else {
qp->next = dev->qp_table[n];
- dev->qp_table[n] = qp;
+ rcu_assign_pointer(dev->qp_table[n], qp);
}
- atomic_inc(&qp->refcount);
spin_unlock_irqrestore(&dev->qpt_lock, flags);
+ synchronize_rcu();
}
/*
@@ -236,29 +245,32 @@ static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct qib_qp *q, **qpp;
+ unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
unsigned long flags;
- qpp = &dev->qp_table[qp->ibqp.qp_num % dev->qp_table_size];
-
spin_lock_irqsave(&dev->qpt_lock, flags);
if (ibp->qp0 == qp) {
- ibp->qp0 = NULL;
atomic_dec(&qp->refcount);
+ rcu_assign_pointer(ibp->qp0, NULL);
} else if (ibp->qp1 == qp) {
- ibp->qp1 = NULL;
atomic_dec(&qp->refcount);
- } else
+ rcu_assign_pointer(ibp->qp1, NULL);
+ } else {
+ struct qib_qp *q, **qpp;
+
+ qpp = &dev->qp_table[n];
for (; (q = *qpp) != NULL; qpp = &q->next)
if (q == qp) {
- *qpp = qp->next;
- qp->next = NULL;
atomic_dec(&qp->refcount);
+ rcu_assign_pointer(*qpp, qp->next);
+ qp->next = NULL;
break;
}
+ }
spin_unlock_irqrestore(&dev->qpt_lock, flags);
+ synchronize_rcu();
}
/**
@@ -280,21 +292,24 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
if (!qib_mcast_tree_empty(ibp))
qp_inuse++;
- if (ibp->qp0)
+ rcu_read_lock();
+ if (rcu_dereference(ibp->qp0))
qp_inuse++;
- if (ibp->qp1)
+ if (rcu_dereference(ibp->qp1))
qp_inuse++;
+ rcu_read_unlock();
}
spin_lock_irqsave(&dev->qpt_lock, flags);
for (n = 0; n < dev->qp_table_size; n++) {
qp = dev->qp_table[n];
- dev->qp_table[n] = NULL;
+ rcu_assign_pointer(dev->qp_table[n], NULL);
for (; qp; qp = qp->next)
qp_inuse++;
}
spin_unlock_irqrestore(&dev->qpt_lock, flags);
+ synchronize_rcu();
return qp_inuse;
}
@@ -309,25 +324,28 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
*/
struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
{
- struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
- unsigned long flags;
- struct qib_qp *qp;
+ struct qib_qp *qp = NULL;
- spin_lock_irqsave(&dev->qpt_lock, flags);
+ if (unlikely(qpn <= 1)) {
+ rcu_read_lock();
+ if (qpn == 0)
+ qp = rcu_dereference(ibp->qp0);
+ else
+ qp = rcu_dereference(ibp->qp1);
+ } else {
+ struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
+ unsigned n = qpn_hash(dev, qpn);
- if (qpn == 0)
- qp = ibp->qp0;
- else if (qpn == 1)
- qp = ibp->qp1;
- else
- for (qp = dev->qp_table[qpn % dev->qp_table_size]; qp;
- qp = qp->next)
+ rcu_read_lock();
+ for (qp = dev->qp_table[n]; rcu_dereference(qp); qp = qp->next)
if (qp->ibqp.qp_num == qpn)
break;
+ }
if (qp)
- atomic_inc(&qp->refcount);
+ if (unlikely(!atomic_inc_not_zero(&qp->refcount)))
+ qp = NULL;
- spin_unlock_irqrestore(&dev->qpt_lock, flags);
+ rcu_read_unlock();
return qp;
}
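qib_lookup_qpn() is now lockless: readers traverse the bucket under rcu_read_lock() and take a reference only via atomic_inc_not_zero(), so a QP whose refcount has already dropped to zero during teardown can never be revived; writers continue to serialize on qpt_lock and call synchronize_rcu() before the memory can be reused. The generic shape of the pattern, as a sketch (struct obj and obj_lookup() are illustrative, not driver code):

struct obj {
        u32 key;
        atomic_t refcount;
        struct obj __rcu *next;
};

static struct obj *obj_lookup(struct obj __rcu **head, u32 key)
{
        struct obj *o;

        rcu_read_lock();
        for (o = rcu_dereference(*head); o != NULL;
             o = rcu_dereference(o->next))
                if (o->key == key)
                        break;
        /* refuse to resurrect an object already headed for the free list */
        if (o && !atomic_inc_not_zero(&o->refcount))
                o = NULL;
        rcu_read_unlock();
        return o;
}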
@@ -765,8 +783,10 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
}
- if (attr_mask & IB_QP_PATH_MTU)
+ if (attr_mask & IB_QP_PATH_MTU) {
qp->path_mtu = pmtu;
+ qp->pmtu = ib_mtu_enum_to_int(pmtu);
+ }
if (attr_mask & IB_QP_RETRY_CNT) {
qp->s_retry_cnt = attr->retry_cnt;
@@ -781,8 +801,12 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_MIN_RNR_TIMER)
qp->r_min_rnr_timer = attr->min_rnr_timer;
- if (attr_mask & IB_QP_TIMEOUT)
+ if (attr_mask & IB_QP_TIMEOUT) {
qp->timeout = attr->timeout;
+ qp->timeout_jiffies =
+ usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
+ 1000UL);
+ }
if (attr_mask & IB_QP_QKEY)
qp->qkey = attr->qkey;
@@ -1013,6 +1037,10 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
ret = ERR_PTR(-ENOMEM);
goto bail_swq;
}
+ RCU_INIT_POINTER(qp->next, NULL);
+ qp->timeout_jiffies =
+ usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
+ 1000UL);
if (init_attr->srq)
sz = 0;
else {
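timeout_jiffies caches the IBTA local-ACK-timeout conversion, 4.096 usec * 2^timeout, at create/modify time so the RC code can re-arm its retransmit timer without repeating the multiply and divide per packet. Worked example: timeout = 14 gives 4096 * 2^14 / 1000 = 67108 usec, roughly 67 ms. The conversion as a standalone helper (the name is illustrative):

static unsigned long ib_timeout_to_jiffies(u8 timeout)
{
        /* IBTA local ACK timeout = 4.096 usec * 2^timeout (5-bit field) */
        return usecs_to_jiffies((4096UL * (1UL << timeout)) / 1000UL);
}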
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index 3374a52..fa71b1e 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -273,18 +273,12 @@ int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
int ret;
int idx;
u16 cks;
- u32 mask;
u8 peek[4];
/* ensure sane contents on invalid reads, for cable swaps */
memset(cp, 0, sizeof(*cp));
- mask = QSFP_GPIO_MOD_PRS_N;
- if (ppd->hw_pidx)
- mask <<= QSFP_GPIO_PORT2_SHIFT;
-
- ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0);
- if (ret & mask) {
+ if (!qib_qsfp_mod_present(ppd)) {
ret = -ENODEV;
goto bail;
}
@@ -444,6 +438,19 @@ const char * const qib_qsfp_devtech[16] = {
static const char *pwr_codes = "1.5W2.0W2.5W3.5W";
+int qib_qsfp_mod_present(struct qib_pportdata *ppd)
+{
+ u32 mask;
+ int ret;
+
+ mask = QSFP_GPIO_MOD_PRS_N <<
+ (ppd->hw_pidx * QSFP_GPIO_PORT2_SHIFT);
+ ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0);
+
+ return !((ret & mask) >>
+ ((ppd->hw_pidx * QSFP_GPIO_PORT2_SHIFT) + 3));
+}
+
/*
* Initialize structures that control access to QSFP. Called once per port
* on cards that support QSFP.
@@ -452,7 +459,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
void (*fevent)(struct work_struct *))
{
u32 mask, highs;
- int pins;
struct qib_devdata *dd = qd->ppd->dd;
@@ -474,19 +480,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
udelay(20); /* Generous RST dwell */
dd->f_gpio_mod(dd, mask, mask, mask);
- /* Spec says module can take up to two seconds! */
- mask = QSFP_GPIO_MOD_PRS_N;
- if (qd->ppd->hw_pidx)
- mask <<= QSFP_GPIO_PORT2_SHIFT;
-
- /* Do not try to wait here. Better to let event handle it */
- pins = dd->f_gpio_mod(dd, 0, 0, 0);
- if (pins & mask)
- goto bail;
- /* We see a module, but it may be unwise to look yet. Just schedule */
- qd->t_insert = get_jiffies_64();
- queue_work(ib_wq, &qd->work);
-bail:
return;
}
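qib_qsfp_mod_present() consolidates the open-coded ModPrsL tests removed from the two call sites above. The pin is active-low, and since QSFP_GPIO_MOD_PRS_N is a single-bit mask, shifting the masked value down before negating is equivalent to negating the masked value directly. A simplified but equivalent form, assuming the single-bit mask:

static int mod_present_simple(u32 pins, unsigned hw_pidx)
{
        u32 mask = QSFP_GPIO_MOD_PRS_N << (hw_pidx * QSFP_GPIO_PORT2_SHIFT);

        return !(pins & mask);  /* ModPrsL is active-low: low pin == module seated */
}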
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h
index c109bbd..46002a9 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.h
+++ b/drivers/infiniband/hw/qib/qib_qsfp.h
@@ -34,6 +34,7 @@
#define QSFP_DEV 0xA0
#define QSFP_PWR_LAG_MSEC 2000
+#define QSFP_MODPRS_LAG_MSEC 20
/*
* Below are masks for various QSFP signals, for Port 1.
@@ -177,10 +178,12 @@ struct qib_qsfp_data {
struct work_struct work;
struct qib_qsfp_cache cache;
u64 t_insert;
+ u8 modpresent;
};
extern int qib_refresh_qsfp_cache(struct qib_pportdata *ppd,
struct qib_qsfp_cache *cp);
+extern int qib_qsfp_mod_present(struct qib_pportdata *ppd);
extern void qib_qsfp_init(struct qib_qsfp_data *qd,
void (*fevent)(struct work_struct *));
extern void qib_qsfp_deinit(struct qib_qsfp_data *qd);
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index eca0c41..894afac 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -59,8 +59,7 @@ static void start_timer(struct qib_qp *qp)
qp->s_flags |= QIB_S_TIMER;
qp->s_timer.function = rc_timeout;
/* 4.096 usec. * (1 << qp->timeout) */
- qp->s_timer.expires = jiffies +
- usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / 1000UL);
+ qp->s_timer.expires = jiffies + qp->timeout_jiffies;
add_timer(&qp->s_timer);
}
@@ -239,7 +238,7 @@ int qib_make_rc_req(struct qib_qp *qp)
u32 len;
u32 bth0;
u32 bth2;
- u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ u32 pmtu = qp->pmtu;
char newreq;
unsigned long flags;
int ret = 0;
@@ -272,13 +271,9 @@ int qib_make_rc_req(struct qib_qp *qp)
goto bail;
}
wqe = get_swqe_ptr(qp, qp->s_last);
- while (qp->s_last != qp->s_acked) {
- qib_send_complete(qp, wqe, IB_WC_SUCCESS);
- if (++qp->s_last >= qp->s_size)
- qp->s_last = 0;
- wqe = get_swqe_ptr(qp, qp->s_last);
- }
- qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
+ IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
+ /* will get called again */
goto done;
}
@@ -1519,9 +1514,7 @@ read_middle:
* 4.096 usec. * (1 << qp->timeout)
*/
qp->s_flags |= QIB_S_TIMER;
- mod_timer(&qp->s_timer, jiffies +
- usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
- 1000UL));
+ mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
if (qp->s_flags & QIB_S_WAIT_ACK) {
qp->s_flags &= ~QIB_S_WAIT_ACK;
qib_schedule_send(qp);
@@ -1732,7 +1725,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
* same request.
*/
offset = ((psn - e->psn) & QIB_PSN_MASK) *
- ib_mtu_enum_to_int(qp->path_mtu);
+ qp->pmtu;
len = be32_to_cpu(reth->length);
if (unlikely(offset + len != e->rdma_sge.sge_length))
goto unlock_done;
@@ -1876,7 +1869,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
u32 psn;
u32 pad;
struct ib_wc wc;
- u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ u32 pmtu = qp->pmtu;
int diff;
struct ib_reth *reth;
unsigned long flags;
@@ -1892,10 +1885,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
}
opcode = be32_to_cpu(ohdr->bth[0]);
- spin_lock_irqsave(&qp->s_lock, flags);
if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
- goto sunlock;
- spin_unlock_irqrestore(&qp->s_lock, flags);
+ return;
psn = be32_to_cpu(ohdr->bth[2]);
opcode >>= 24;
@@ -1955,8 +1946,6 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
break;
}
- memset(&wc, 0, sizeof wc);
-
if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
qp->r_flags |= QIB_R_COMM_EST;
if (qp->ibqp.event_handler) {
@@ -2009,16 +1998,19 @@ send_middle:
goto rnr_nak;
qp->r_rcv_len = 0;
if (opcode == OP(SEND_ONLY))
- goto send_last;
- /* FALLTHROUGH */
+ goto no_immediate_data;
+ /* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
wc.ex.imm_data = ohdr->u.imm_data;
hdrsize += 4;
wc.wc_flags = IB_WC_WITH_IMM;
- /* FALLTHROUGH */
+ goto send_last;
case OP(SEND_LAST):
case OP(RDMA_WRITE_LAST):
+no_immediate_data:
+ wc.wc_flags = 0;
+ wc.ex.imm_data = 0;
send_last:
/* Get the number of bytes the message was padded by. */
pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
@@ -2051,6 +2043,12 @@ send_last:
wc.src_qp = qp->remote_qpn;
wc.slid = qp->remote_ah_attr.dlid;
wc.sl = qp->remote_ah_attr.sl;
+ /* zero fields that are N/A */
+ wc.vendor_err = 0;
+ wc.pkey_index = 0;
+ wc.dlid_path_bits = 0;
+ wc.port_num = 0;
+ wc.csum_ok = 0;
/* Signal completion event if the solicited bit is set. */
qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
(ohdr->bth[0] &
@@ -2089,7 +2087,7 @@ send_last:
if (opcode == OP(RDMA_WRITE_FIRST))
goto send_middle;
else if (opcode == OP(RDMA_WRITE_ONLY))
- goto send_last;
+ goto no_immediate_data;
ret = qib_get_rwqe(qp, 1);
if (ret < 0)
goto nack_op_err;
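With the up-front memset(&wc, 0, sizeof wc) gone, every path reaching send_last must leave the work completion fully defined: the new no_immediate_data label zeroes wc_flags and imm_data for opcodes without an immediate, and the remaining not-applicable fields are cleared explicitly just before qib_cq_enter(). A miniature of that discipline (demo types, not driver code):

struct demo_wc { int with_imm; u32 imm_data; u32 vendor_err; };

static void demo_deliver(struct demo_wc *wc, int has_imm, u32 imm)
{
        if (has_imm) {                  /* the send_last_imm path */
                wc->with_imm = 1;
                wc->imm_data = imm;
        } else {                        /* the no_immediate_data path */
                wc->with_imm = 0;
                wc->imm_data = 0;
        }
        wc->vendor_err = 0;             /* N/A field, zeroed once at the sink */
        /* ... enter the completion queue ... */
}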
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index eb78d93..b4b37e4 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -260,12 +260,15 @@ static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
/*
*
- * This should be called with the QP s_lock held.
+ * This should be called with the QP r_lock held.
+ *
+ * The s_lock will be acquired around the qib_migrate_qp() call.
*/
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
int has_grh, struct qib_qp *qp, u32 bth0)
{
__be64 guid;
+ unsigned long flags;
if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
if (!has_grh) {
@@ -295,7 +298,9 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
goto err;
+ spin_lock_irqsave(&qp->s_lock, flags);
qib_migrate_qp(qp);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
} else {
if (!has_grh) {
if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
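qib_ruc_check_hdr() is now entered with only r_lock held; s_lock is taken just for the brief qib_migrate_qp() window instead of across the whole header check, which is what lets qib_rc_rcv() and qib_uc_rcv() drop their s_lock wrappers elsewhere in this series. The contract as a sketch (the helper name is made up; the comment states the assumed caller responsibility):

static void migrate_if_armed(struct qib_qp *qp)
{
        unsigned long flags;

        /* caller already holds qp->r_lock (receive side) */
        spin_lock_irqsave(&qp->s_lock, flags);  /* send side, briefly */
        qib_migrate_qp(qp);
        spin_unlock_irqrestore(&qp->s_lock, flags);
}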
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index e9f9f8b..de1a4b2 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -38,6 +38,7 @@
#include <linux/pci.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/firmware.h>
#include "qib.h"
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index cad4449..12a9604 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -32,6 +32,7 @@
#include <linux/spinlock.h>
#include <linux/netdevice.h>
+#include <linux/moduleparam.h>
#include "qib.h"
#include "qib_common.h"
diff --git a/drivers/infiniband/hw/qib/qib_srq.c b/drivers/infiniband/hw/qib/qib_srq.c
index c3ec8ef..d623593 100644
--- a/drivers/infiniband/hw/qib/qib_srq.c
+++ b/drivers/infiniband/hw/qib/qib_srq.c
@@ -107,6 +107,11 @@ struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
u32 sz;
struct ib_srq *ret;
+ if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
+ ret = ERR_PTR(-ENOSYS);
+ goto done;
+ }
+
if (srq_init_attr->attr.max_sge == 0 ||
srq_init_attr->attr.max_sge > ib_qib_max_srq_sges ||
srq_init_attr->attr.max_wr == 0 ||
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index d50a33f..c97224a 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -507,6 +507,17 @@ static ssize_t show_nctxts(struct device *device,
dd->first_user_ctxt);
}
+static ssize_t show_nfreectxts(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+
+ /* Return the number of free user ports (contexts) available. */
+ return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts);
+}
+
static ssize_t show_serial(struct device *device,
struct device_attribute *attr, char *buf)
{
@@ -539,28 +550,6 @@ bail:
return ret < 0 ? ret : count;
}
-static ssize_t show_logged_errs(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
- struct qib_devdata *dd = dd_from_dev(dev);
- int idx, count;
-
- /* force consistency with actual EEPROM */
- if (qib_update_eeprom_log(dd) != 0)
- return -ENXIO;
-
- count = 0;
- for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
- count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
- dd->eep_st_errs[idx],
- idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
- }
-
- return count;
-}
-
/*
* Dump tempsense regs. in decimal, to ease shell-scripts.
*/
@@ -604,9 +593,9 @@ static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
+static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
-static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
@@ -617,9 +606,9 @@ static struct device_attribute *qib_attributes[] = {
&dev_attr_board_id,
&dev_attr_version,
&dev_attr_nctxts,
+ &dev_attr_nfreectxts,
&dev_attr_serial,
&dev_attr_boardversion,
- &dev_attr_logged_errors,
&dev_attr_tempsense,
&dev_attr_localbus_info,
&dev_attr_chip_reset,
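nfreectxts exposes dd->freectxts, the number of contexts still available for user (PSM) opens, alongside the existing nctxts total. Userspace reads it like any sysfs attribute; a hypothetical reader follows, with the device name and path assumed rather than taken from this patch:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/class/infiniband/qib0/nfreectxts", "r");
        unsigned n;

        if (f && fscanf(f, "%u", &n) == 1)
                printf("free user contexts: %u\n", n);
        if (f)
                fclose(f);
        return 0;
}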
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
index 7f36454..1bf626c 100644
--- a/drivers/infiniband/hw/qib/qib_tx.c
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -36,6 +36,7 @@
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
+#include <linux/moduleparam.h>
#include "qib.h"
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 32ccf3c..847e7af 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -51,7 +51,7 @@ int qib_make_uc_req(struct qib_qp *qp)
u32 hwords;
u32 bth0;
u32 len;
- u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ u32 pmtu = qp->pmtu;
int ret = 0;
spin_lock_irqsave(&qp->s_lock, flags);
@@ -243,13 +243,12 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
struct qib_other_headers *ohdr;
- unsigned long flags;
u32 opcode;
u32 hdrsize;
u32 psn;
u32 pad;
struct ib_wc wc;
- u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
+ u32 pmtu = qp->pmtu;
struct ib_reth *reth;
int ret;
@@ -263,14 +262,11 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
}
opcode = be32_to_cpu(ohdr->bth[0]);
- spin_lock_irqsave(&qp->s_lock, flags);
if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
- goto sunlock;
- spin_unlock_irqrestore(&qp->s_lock, flags);
+ return;
psn = be32_to_cpu(ohdr->bth[2]);
opcode >>= 24;
- memset(&wc, 0, sizeof wc);
/* Compare the PSN versus the expected PSN. */
if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
@@ -370,7 +366,7 @@ send_first:
}
qp->r_rcv_len = 0;
if (opcode == OP(SEND_ONLY))
- goto send_last;
+ goto no_immediate_data;
else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
goto send_last_imm;
/* FALLTHROUGH */
@@ -389,8 +385,11 @@ send_last_imm:
wc.ex.imm_data = ohdr->u.imm_data;
hdrsize += 4;
wc.wc_flags = IB_WC_WITH_IMM;
- /* FALLTHROUGH */
+ goto send_last;
case OP(SEND_LAST):
+no_immediate_data:
+ wc.ex.imm_data = 0;
+ wc.wc_flags = 0;
send_last:
/* Get the number of bytes the message was padded by. */
pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
@@ -418,6 +417,12 @@ last_imm:
wc.src_qp = qp->remote_qpn;
wc.slid = qp->remote_ah_attr.dlid;
wc.sl = qp->remote_ah_attr.sl;
+ /* zero fields that are N/A */
+ wc.vendor_err = 0;
+ wc.pkey_index = 0;
+ wc.dlid_path_bits = 0;
+ wc.port_num = 0;
+ wc.csum_ok = 0;
/* Signal completion event if the solicited bit is set. */
qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
(ohdr->bth[0] &
@@ -546,6 +551,4 @@ op_err:
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
return;
-sunlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
}
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 828609f..c0d93d4 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -57,13 +57,20 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
struct qib_sge *sge;
struct ib_wc wc;
u32 length;
+ enum ib_qp_type sqptype, dqptype;
qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
if (!qp) {
ibp->n_pkt_drops++;
return;
}
- if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
+
+ sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
+ IB_QPT_UD : sqp->ibqp.qp_type;
+ dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
+ IB_QPT_UD : qp->ibqp.qp_type;
+
+ if (dqptype != sqptype ||
!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
ibp->n_pkt_drops++;
goto drop;
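A GSI QP is a UD QP bound to QP1, so for the loopback type-compatibility test both ends are normalized to UD; without this, a UD send looped back to the peer's GSI QP (or vice versa) would be dropped as a type mismatch. The normalization as a one-line helper (illustrative):

static enum ib_qp_type loopback_qp_type(enum ib_qp_type t)
{
        return t == IB_QPT_GSI ? IB_QPT_UD : t;
}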
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 7689e49..2bc1d2b 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -74,7 +74,7 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
goto bail_release;
}
- current->mm->locked_vm += num_pages;
+ current->mm->pinned_vm += num_pages;
ret = 0;
goto bail;
@@ -151,7 +151,7 @@ void qib_release_user_pages(struct page **p, size_t num_pages)
__qib_release_user_pages(p, num_pages, 1);
if (current->mm) {
- current->mm->locked_vm -= num_pages;
+ current->mm->pinned_vm -= num_pages;
up_write(&current->mm->mmap_sem);
}
}
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index 8244208..573b460 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -284,8 +284,7 @@ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
int j;
int ret;
- ret = get_user_pages(current, current->mm, addr,
- npages, 0, 1, pages, NULL);
+ ret = get_user_pages_fast(addr, npages, 0, pages);
if (ret != npages) {
int i;
@@ -830,10 +829,7 @@ int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
while (dim) {
const int mxp = 8;
- down_write(&current->mm->mmap_sem);
ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
- up_write(&current->mm->mmap_sem);
-
if (ret <= 0)
goto done_unlock;
else {
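get_user_pages_fast() manages mmap_sem itself (and avoids taking it at all on its fast path), which is why the down_write()/up_write() pair around qib_user_sdma_queue_pkts() can be dropped above. The pinning call reduced to its essentials; pin_sdma_pages() is a made-up wrapper:

static int pin_sdma_pages(unsigned long addr, int npages, struct page **pages)
{
        /* write == 0: a read-only mapping suffices; no mmap_sem held here */
        return get_user_pages_fast(addr, npages, 0, pages);
}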
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 9fab404..c51a6f9 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -35,14 +35,17 @@
#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
+#include <linux/random.h>
+#include <linux/vmalloc.h>
#include "qib.h"
#include "qib_common.h"
-static unsigned int ib_qib_qp_table_size = 251;
+static unsigned int ib_qib_qp_table_size = 256;
module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");
@@ -659,17 +662,25 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
if (atomic_dec_return(&mcast->refcount) <= 1)
wake_up(&mcast->wait);
} else {
- qp = qib_lookup_qpn(ibp, qp_num);
- if (!qp)
- goto drop;
+ if (rcd->lookaside_qp) {
+ if (rcd->lookaside_qpn != qp_num) {
+ if (atomic_dec_and_test(
+ &rcd->lookaside_qp->refcount))
+ wake_up(
+ &rcd->lookaside_qp->wait);
+ rcd->lookaside_qp = NULL;
+ }
+ }
+ if (!rcd->lookaside_qp) {
+ qp = qib_lookup_qpn(ibp, qp_num);
+ if (!qp)
+ goto drop;
+ rcd->lookaside_qp = qp;
+ rcd->lookaside_qpn = qp_num;
+ } else
+ qp = rcd->lookaside_qp;
ibp->n_unicast_rcv++;
qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
- /*
- * Notify qib_destroy_qp() if it is waiting
- * for us to finish.
- */
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
}
return;
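The receive path now keeps a one-entry lookaside cache of the last QP seen per receive context: while back-to-back packets target the same QPN, both the hash lookup and the per-packet refcount round-trip are skipped, and the cached reference is dropped only when the QPN changes (or when the context drains, via the lookaside fields added to qib_ctxtdata). The cache's shape in isolation, as a sketch; struct obj, obj_get_by_key() and obj_put() are assumed ref-counting helpers (compare the lookup sketch earlier):

struct obj;                             /* opaque; refcounted */
struct obj *obj_get_by_key(u32 key);    /* assumed: returns a referenced object or NULL */
void obj_put(struct obj *o);            /* assumed: drops a reference */

struct lookaside {
        struct obj *last;
        u32 last_key;
};

static struct obj *cached_lookup(struct lookaside *c, u32 key)
{
        if (c->last && c->last_key != key) {
                obj_put(c->last);               /* drop the stale reference */
                c->last = NULL;
        }
        if (!c->last) {
                c->last = obj_get_by_key(key);  /* takes a reference */
                if (c->last)
                        c->last_key = key;
        }
        return c->last;                         /* reference stays with the cache */
}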
@@ -1974,6 +1985,8 @@ static void init_ibport(struct qib_pportdata *ppd)
ibp->z_excessive_buffer_overrun_errors =
cntrs.excessive_buffer_overrun_errors;
ibp->z_vl15_dropped = cntrs.vl15_dropped;
+ RCU_INIT_POINTER(ibp->qp0, NULL);
+ RCU_INIT_POINTER(ibp->qp1, NULL);
}
/**
@@ -1990,12 +2003,15 @@ int qib_register_ib_device(struct qib_devdata *dd)
int ret;
dev->qp_table_size = ib_qib_qp_table_size;
- dev->qp_table = kzalloc(dev->qp_table_size * sizeof *dev->qp_table,
+ get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
+ dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table,
GFP_KERNEL);
if (!dev->qp_table) {
ret = -ENOMEM;
goto err_qpt;
}
+ for (i = 0; i < dev->qp_table_size; i++)
+ RCU_INIT_POINTER(dev->qp_table[i], NULL);
for (i = 0; i < dd->num_pports; i++)
init_ibport(ppd + i);
@@ -2020,10 +2036,16 @@ int qib_register_ib_device(struct qib_devdata *dd)
* the LKEY). The remaining bits act as a generation number or tag.
*/
spin_lock_init(&dev->lk_table.lock);
+ /* ensure generation is at least 4 bits; see keys.c */
+ if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) {
+ qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
+ ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS);
+ ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS;
+ }
dev->lk_table.max = 1 << ib_qib_lkey_table_size;
lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
dev->lk_table.table = (struct qib_mregion **)
- __get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
+ vmalloc(lk_tab_size);
if (dev->lk_table.table == NULL) {
ret = -ENOMEM;
goto err_lk;
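Two related changes here: ib_qib_lkey_table_size is clamped to MAX_LKEY_TABLE_BITS (23) so the upper lkey bits keep functioning as a generation tag, and the table allocation moves from __get_free_pages() to vmalloc(). The arithmetic that motivates the allocator switch, assuming a 64-bit kernel with 4 KiB pages: 2^23 entries * 8-byte pointers = 64 MiB, i.e. get_order() == 14, far beyond what a physically contiguous allocation can reliably deliver, whereas vmalloc() needs only virtual contiguity.

static size_t max_lk_tab_size(void)
{
        /* 2^23 * sizeof(pointer) = 64 MiB on 64-bit -- hence vmalloc() */
        return (1UL << MAX_LKEY_TABLE_BITS) * sizeof(struct qib_mregion *);
}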
@@ -2193,7 +2215,7 @@ err_tx:
sizeof(struct qib_pio_header),
dev->pio_hdrs, dev->pio_hdrs_phys);
err_hdrs:
- free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
+ vfree(dev->lk_table.table);
err_lk:
kfree(dev->qp_table);
err_qpt:
@@ -2247,7 +2269,6 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
sizeof(struct qib_pio_header),
dev->pio_hdrs, dev->pio_hdrs_phys);
lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
- free_pages((unsigned long) dev->lk_table.table,
- get_order(lk_tab_size));
+ vfree(dev->lk_table.table);
kfree(dev->qp_table);
}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 95e5b47..66f7f62 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -485,6 +485,7 @@ struct qib_qp {
u8 alt_timeout; /* Alternate path timeout for this QP */
u8 port_num;
enum ib_mtu path_mtu;
+ u32 pmtu; /* decoded from path_mtu */
u32 remote_qpn;
u32 qkey; /* QKEY for this QP (for UD or RD) */
u32 s_size; /* send work queue size */
@@ -495,6 +496,7 @@ struct qib_qp {
u32 s_last; /* last completed entry */
u32 s_ssn; /* SSN of tail entry */
u32 s_lsn; /* limit sequence number (credit) */
+ unsigned long timeout_jiffies; /* computed from timeout */
struct qib_swqe *s_wq; /* send work queue */
struct qib_swqe *s_wqe;
struct qib_rq r_rq; /* receive work queue */
@@ -620,6 +622,8 @@ struct qib_qpn_table {
struct qpn_map map[QPNMAP_ENTRIES];
};
+#define MAX_LKEY_TABLE_BITS 23
+
struct qib_lkey_table {
spinlock_t lock; /* protect changes in this struct */
u32 next; /* next unused index (speeds search) */
@@ -723,7 +727,8 @@ struct qib_ibdev {
dma_addr_t pio_hdrs_phys;
/* list of QPs waiting for RNR timer */
spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
- unsigned qp_table_size; /* size of the hash table */
+ u32 qp_table_size; /* size of the hash table */
+ u32 qp_rnd; /* random bytes for hash */
spinlock_t qpt_lock;
u32 n_piowait;