/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
|
|
|
|
|
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/vmalloc.h>
|
|
|
|
|
|
|
|
#include "ipath_verbs.h"
|
|
|
|
|
|
|
|
/**
 * ipath_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is a solicited entry
 *
 * This may be called with qp->s_lock held.
 */
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
{
	struct ipath_cq_wc *wc;
	unsigned long flags;
	u32 head;
	u32 next;

	spin_lock_irqsave(&cq->lock, flags);

	/*
	 * Note that the head pointer might be writable by user processes.
	 * Take care to verify it is a sane value.
	 */
	wc = cq->queue;
	head = wc->head;
	/*
	 * The ring holds cqe + 1 slots (see the allocation in
	 * ipath_create_cq()), so index cq->ibcq.cqe is the last valid
	 * slot; clamp a corrupted head and compute the wrapped successor.
	 */
	if (head >= (unsigned) cq->ibcq.cqe) {
		head = cq->ibcq.cqe;
		next = 0;
	} else
		next = head + 1;
	if (unlikely(next == wc->tail)) {
		/* Queue is full: report a CQ error event and drop the entry. */
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return;
	}
	if (cq->ip) {
		/*
		 * User-mapped CQ: copy field by field into the uverbs wire
		 * format rather than struct-assigning the kernel ib_wc.
		 */
		wc->uqueue[head].wr_id = entry->wr_id;
		wc->uqueue[head].status = entry->status;
		wc->uqueue[head].opcode = entry->opcode;
		wc->uqueue[head].vendor_err = entry->vendor_err;
		wc->uqueue[head].byte_len = entry->byte_len;
		wc->uqueue[head].ex.imm_data = (__u32 __force) entry->ex.imm_data;
		wc->uqueue[head].qp_num = entry->qp->qp_num;
		wc->uqueue[head].src_qp = entry->src_qp;
		wc->uqueue[head].wc_flags = entry->wc_flags;
		wc->uqueue[head].pkey_index = entry->pkey_index;
		wc->uqueue[head].slid = entry->slid;
		wc->uqueue[head].sl = entry->sl;
		wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
		wc->uqueue[head].port_num = entry->port_num;
		/* Make sure entry is written before the head index. */
		smp_wmb();
	} else
		wc->kqueue[head] = *entry;
	wc->head = next;

	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		/* Disarm notification before scheduling the completion. */
		cq->notify = IB_CQ_NONE;
		cq->triggered++;
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		tasklet_hi_schedule(&cq->comptask);
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	/* Count failed work completions for the device statistics. */
	if (entry->status != IB_WC_SUCCESS)
		to_idev(cq->ibcq.device)->n_wqe_errs++;
}
|
|
|
|
|
|
|
|
/**
 * ipath_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * Returns the number of completion entries polled.
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 */
int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct ipath_cq *cq = to_icq(ibcq);
	struct ipath_cq_wc *wc;
	unsigned long flags;
	int npolled;
	u32 tail;

	/* The kernel can only poll a kernel completion queue */
	if (cq->ip) {
		npolled = -EINVAL;
		goto bail;
	}

	spin_lock_irqsave(&cq->lock, flags);

	wc = cq->queue;
	/*
	 * The tail index might be user writable; clamp it to the last
	 * valid slot (the ring has cqe + 1 slots) before using it.
	 */
	tail = wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
		/* Empty when the consumer catches up with the producer. */
		if (tail == wc->head)
			break;
		/* The kernel doesn't need a RMB since it has the lock. */
		*entry = wc->kqueue[tail];
		if (tail >= cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	wc->tail = tail;

	spin_unlock_irqrestore(&cq->lock, flags);

bail:
	return npolled;
}
|
|
|
|
|
|
|
|
static void send_complete(unsigned long data)
|
|
|
|
{
|
|
|
|
struct ipath_cq *cq = (struct ipath_cq *)data;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The completion handler will most likely rearm the notification
|
|
|
|
* and poll for all pending entries. If a new completion entry
|
|
|
|
* is added while we are in this routine, tasklet_hi_schedule()
|
|
|
|
* won't call us again until we return so we check triggered to
|
|
|
|
* see if we need to call the handler again.
|
|
|
|
*/
|
|
|
|
for (;;) {
|
|
|
|
u8 triggered = cq->triggered;
|
|
|
|
|
|
|
|
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
|
|
|
|
|
|
|
|
if (cq->triggered == triggered)
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * ipath_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @entries: the minimum size of the completion queue
 * @comp_vector: unused by the InfiniPath driver
 * @context: unused by the InfiniPath driver
 * @udata: user data; its outlen decides kernel vs. user CQ layout
 *
 * Returns a pointer to the completion queue or negative errno values
 * for failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cq *cq;
	struct ipath_cq_wc *wc;
	struct ib_cq *ret;
	u32 sz;

	if (entries < 1 || entries > ib_ipath_max_cqes) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	/* Allocate the completion queue structure. */
	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}

	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
	/*
	 * A user CQ (udata big enough to return the mmap offset) holds
	 * ib_uverbs_wc entries; a kernel CQ holds ib_wc.  Either way the
	 * ring gets entries + 1 slots so full/empty can be distinguished.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
	else
		sz += sizeof(struct ib_wc) * (entries + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_cq;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;

		cq->ip = ipath_create_mmap_info(dev, sz, context, wc);
		if (!cq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		cq->ip = NULL;

	/* Enforce the per-device limit on the number of CQs. */
	spin_lock(&dev->n_cqs_lock);
	if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
		spin_unlock(&dev->n_cqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_cqs_allocated++;
	spin_unlock(&dev->n_cqs_lock);

	/* Publish the mmap info so user space can map the queue. */
	if (cq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries should be >= the number requested or return
	 * an error.
	 */
	cq->ibcq.cqe = entries;
	cq->notify = IB_CQ_NONE;
	cq->triggered = 0;
	spin_lock_init(&cq->lock);
	tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
	wc->head = 0;
	wc->tail = 0;
	cq->queue = wc;

	ret = &cq->ibcq;

	goto done;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(wc);
bail_cq:
	kfree(cq);
done:
	return ret;
}
|
|
|
|
|
|
|
|
/**
 * ipath_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 *
 * Returns 0 for success.
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int ipath_destroy_cq(struct ib_cq *ibcq)
{
	struct ipath_ibdev *dev = to_idev(ibcq->device);
	struct ipath_cq *cq = to_icq(ibcq);

	/* Stop the completion tasklet before tearing anything down. */
	tasklet_kill(&cq->comptask);
	spin_lock(&dev->n_cqs_lock);
	dev->n_cqs_allocated--;
	spin_unlock(&dev->n_cqs_lock);
	/*
	 * A user CQ's queue is owned by the mmap info (freed via its
	 * release callback once the last mapping goes away); a kernel
	 * CQ's queue is freed directly.
	 */
	if (cq->ip)
		kref_put(&cq->ip->ref, ipath_release_mmap_info);
	else
		vfree(cq->queue);
	kfree(cq);

	return 0;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ipath_req_notify_cq - change the notification type for a completion queue
|
|
|
|
* @ibcq: the completion queue
|
IB: Return "maybe missed event" hint from ib_req_notify_cq()
The semantics defined by the InfiniBand specification say that
completion events are only generated when a completions is added to a
completion queue (CQ) after completion notification is requested. In
other words, this means that the following race is possible:
while (CQ is not empty)
ib_poll_cq(CQ);
// new completion is added after while loop is exited
ib_req_notify_cq(CQ);
// no event is generated for the existing completion
To close this race, the IB spec recommends doing another poll of the
CQ after requesting notification.
However, it is not always possible to arrange code this way (for
example, we have found that NAPI for IPoIB cannot poll after
requesting notification). Also, some hardware (eg Mellanox HCAs)
actually will generate an event for completions added before the call
to ib_req_notify_cq() -- which is allowed by the spec, since there's
no way for any upper-layer consumer to know exactly when a completion
was really added -- so the extra poll of the CQ is just a waste.
Motivated by this, we add a new flag "IB_CQ_REPORT_MISSED_EVENTS" for
ib_req_notify_cq() so that it can return a hint about whether the a
completion may have been added before the request for notification.
The return value of ib_req_notify_cq() is extended so:
< 0 means an error occurred while requesting notification
== 0 means notification was requested successfully, and if
IB_CQ_REPORT_MISSED_EVENTS was passed in, then no
events were missed and it is safe to wait for another
event.
> 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was
passed in. It means that the consumer must poll the
CQ again to make sure it is empty to avoid the race
described above.
We add a flag to enable this behavior rather than turning it on
unconditionally, because checking for missed events may incur
significant overhead for some low-level drivers, and consumers that
don't care about the results of this test shouldn't be forced to pay
for the test.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2007-05-07 00:02:48 -04:00
|
|
|
* @notify_flags: the type of notification to request
|
2006-03-29 18:23:36 -05:00
|
|
|
*
|
|
|
|
* Returns 0 for success.
|
|
|
|
*
|
|
|
|
* This may be called from interrupt context. Also called by
|
|
|
|
* ib_req_notify_cq() in the generic verbs code.
|
|
|
|
*/
|
IB: Return "maybe missed event" hint from ib_req_notify_cq()
The semantics defined by the InfiniBand specification say that
completion events are only generated when a completions is added to a
completion queue (CQ) after completion notification is requested. In
other words, this means that the following race is possible:
while (CQ is not empty)
ib_poll_cq(CQ);
// new completion is added after while loop is exited
ib_req_notify_cq(CQ);
// no event is generated for the existing completion
To close this race, the IB spec recommends doing another poll of the
CQ after requesting notification.
However, it is not always possible to arrange code this way (for
example, we have found that NAPI for IPoIB cannot poll after
requesting notification). Also, some hardware (eg Mellanox HCAs)
actually will generate an event for completions added before the call
to ib_req_notify_cq() -- which is allowed by the spec, since there's
no way for any upper-layer consumer to know exactly when a completion
was really added -- so the extra poll of the CQ is just a waste.
Motivated by this, we add a new flag "IB_CQ_REPORT_MISSED_EVENTS" for
ib_req_notify_cq() so that it can return a hint about whether the a
completion may have been added before the request for notification.
The return value of ib_req_notify_cq() is extended so:
< 0 means an error occurred while requesting notification
== 0 means notification was requested successfully, and if
IB_CQ_REPORT_MISSED_EVENTS was passed in, then no
events were missed and it is safe to wait for another
event.
> 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was
passed in. It means that the consumer must poll the
CQ again to make sure it is empty to avoid the race
described above.
We add a flag to enable this behavior rather than turning it on
unconditionally, because checking for missed events may incur
significant overhead for some low-level drivers, and consumers that
don't care about the results of this test shouldn't be forced to pay
for the test.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2007-05-07 00:02:48 -04:00
|
|
|
int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
|
2006-03-29 18:23:36 -05:00
|
|
|
{
|
|
|
|
struct ipath_cq *cq = to_icq(ibcq);
|
|
|
|
unsigned long flags;
|
IB: Return "maybe missed event" hint from ib_req_notify_cq()
The semantics defined by the InfiniBand specification say that
completion events are only generated when a completions is added to a
completion queue (CQ) after completion notification is requested. In
other words, this means that the following race is possible:
while (CQ is not empty)
ib_poll_cq(CQ);
// new completion is added after while loop is exited
ib_req_notify_cq(CQ);
// no event is generated for the existing completion
To close this race, the IB spec recommends doing another poll of the
CQ after requesting notification.
However, it is not always possible to arrange code this way (for
example, we have found that NAPI for IPoIB cannot poll after
requesting notification). Also, some hardware (eg Mellanox HCAs)
actually will generate an event for completions added before the call
to ib_req_notify_cq() -- which is allowed by the spec, since there's
no way for any upper-layer consumer to know exactly when a completion
was really added -- so the extra poll of the CQ is just a waste.
Motivated by this, we add a new flag "IB_CQ_REPORT_MISSED_EVENTS" for
ib_req_notify_cq() so that it can return a hint about whether the a
completion may have been added before the request for notification.
The return value of ib_req_notify_cq() is extended so:
< 0 means an error occurred while requesting notification
== 0 means notification was requested successfully, and if
IB_CQ_REPORT_MISSED_EVENTS was passed in, then no
events were missed and it is safe to wait for another
event.
> 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was
passed in. It means that the consumer must poll the
CQ again to make sure it is empty to avoid the race
described above.
We add a flag to enable this behavior rather than turning it on
unconditionally, because checking for missed events may incur
significant overhead for some low-level drivers, and consumers that
don't care about the results of this test shouldn't be forced to pay
for the test.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2007-05-07 00:02:48 -04:00
|
|
|
int ret = 0;
|
2006-03-29 18:23:36 -05:00
|
|
|
|
|
|
|
spin_lock_irqsave(&cq->lock, flags);
|
|
|
|
/*
|
|
|
|
* Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
|
2006-09-22 18:22:26 -04:00
|
|
|
* any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
|
2006-03-29 18:23:36 -05:00
|
|
|
*/
|
|
|
|
if (cq->notify != IB_CQ_NEXT_COMP)
|
IB: Return "maybe missed event" hint from ib_req_notify_cq()
The semantics defined by the InfiniBand specification say that
completion events are only generated when a completions is added to a
completion queue (CQ) after completion notification is requested. In
other words, this means that the following race is possible:
while (CQ is not empty)
ib_poll_cq(CQ);
// new completion is added after while loop is exited
ib_req_notify_cq(CQ);
// no event is generated for the existing completion
To close this race, the IB spec recommends doing another poll of the
CQ after requesting notification.
However, it is not always possible to arrange code this way (for
example, we have found that NAPI for IPoIB cannot poll after
requesting notification). Also, some hardware (eg Mellanox HCAs)
actually will generate an event for completions added before the call
to ib_req_notify_cq() -- which is allowed by the spec, since there's
no way for any upper-layer consumer to know exactly when a completion
was really added -- so the extra poll of the CQ is just a waste.
Motivated by this, we add a new flag "IB_CQ_REPORT_MISSED_EVENTS" for
ib_req_notify_cq() so that it can return a hint about whether the a
completion may have been added before the request for notification.
The return value of ib_req_notify_cq() is extended so:
< 0 means an error occurred while requesting notification
== 0 means notification was requested successfully, and if
IB_CQ_REPORT_MISSED_EVENTS was passed in, then no
events were missed and it is safe to wait for another
event.
> 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was
passed in. It means that the consumer must poll the
CQ again to make sure it is empty to avoid the race
described above.
We add a flag to enable this behavior rather than turning it on
unconditionally, because checking for missed events may incur
significant overhead for some low-level drivers, and consumers that
don't care about the results of this test shouldn't be forced to pay
for the test.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2007-05-07 00:02:48 -04:00
|
|
|
cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
|
|
|
|
|
|
|
|
if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
|
|
|
|
cq->queue->head != cq->queue->tail)
|
|
|
|
ret = 1;
|
|
|
|
|
2006-03-29 18:23:36 -05:00
|
|
|
spin_unlock_irqrestore(&cq->lock, flags);
|
IB: Return "maybe missed event" hint from ib_req_notify_cq()
The semantics defined by the InfiniBand specification say that
completion events are only generated when a completions is added to a
completion queue (CQ) after completion notification is requested. In
other words, this means that the following race is possible:
while (CQ is not empty)
ib_poll_cq(CQ);
// new completion is added after while loop is exited
ib_req_notify_cq(CQ);
// no event is generated for the existing completion
To close this race, the IB spec recommends doing another poll of the
CQ after requesting notification.
However, it is not always possible to arrange code this way (for
example, we have found that NAPI for IPoIB cannot poll after
requesting notification). Also, some hardware (eg Mellanox HCAs)
actually will generate an event for completions added before the call
to ib_req_notify_cq() -- which is allowed by the spec, since there's
no way for any upper-layer consumer to know exactly when a completion
was really added -- so the extra poll of the CQ is just a waste.
Motivated by this, we add a new flag "IB_CQ_REPORT_MISSED_EVENTS" for
ib_req_notify_cq() so that it can return a hint about whether the a
completion may have been added before the request for notification.
The return value of ib_req_notify_cq() is extended so:
< 0 means an error occurred while requesting notification
== 0 means notification was requested successfully, and if
IB_CQ_REPORT_MISSED_EVENTS was passed in, then no
events were missed and it is safe to wait for another
event.
> 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was
passed in. It means that the consumer must poll the
CQ again to make sure it is empty to avoid the race
described above.
We add a flag to enable this behavior rather than turning it on
unconditionally, because checking for missed events may incur
significant overhead for some low-level drivers, and consumers that
don't care about the results of this test shouldn't be forced to pay
for the test.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
2007-05-07 00:02:48 -04:00
|
|
|
|
|
|
|
return ret;
|
2006-03-29 18:23:36 -05:00
|
|
|
}
|
|
|
|
|
2006-09-28 12:00:23 -04:00
|
|
|
/**
 * ipath_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 * @cqe: the new minimum number of entries
 * @udata: user data; its outlen decides kernel vs. user CQ layout
 *
 * Returns 0 for success.
 */
int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct ipath_cq *cq = to_icq(ibcq);
	struct ipath_cq_wc *old_wc;
	struct ipath_cq_wc *wc;
	u32 head, tail, n;
	int ret;
	u32 sz;

	if (cqe < 1 || cqe > ib_ipath_max_cqes) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
	else
		sz += sizeof(struct ib_wc) * (cqe + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = -ENOMEM;
		goto bail;
	}

	/* Check that we can write the offset to mmap. */
	if (udata && udata->outlen >= sizeof(__u64)) {
		__u64 offset = 0;

		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (ret)
			goto bail_free;
	}

	spin_lock_irq(&cq->lock);
	/*
	 * Make sure head and tail are sane since they
	 * might be user writable.
	 */
	old_wc = cq->queue;
	head = old_wc->head;
	if (head > (u32) cq->ibcq.cqe)
		head = (u32) cq->ibcq.cqe;
	tail = old_wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	/* Count the entries currently in the old ring (it wraps at cqe + 1). */
	if (head < tail)
		n = cq->ibcq.cqe + 1 + head - tail;
	else
		n = head - tail;
	/* The new queue must be able to hold everything already queued. */
	if (unlikely((u32)cqe < n)) {
		ret = -EINVAL;
		goto bail_unlock;
	}
	/* Copy the pending completions over, compacting them to index 0. */
	for (n = 0; tail != head; n++) {
		if (cq->ip)
			wc->uqueue[n] = old_wc->uqueue[tail];
		else
			wc->kqueue[n] = old_wc->kqueue[tail];
		if (tail == (u32) cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	cq->ibcq.cqe = cqe;
	wc->head = n;
	wc->tail = 0;
	cq->queue = wc;
	spin_unlock_irq(&cq->lock);

	vfree(old_wc);

	if (cq->ip) {
		struct ipath_ibdev *dev = to_idev(ibcq->device);
		struct ipath_mmap_info *ip = cq->ip;

		/* Point the existing mmap info at the new queue buffer. */
		ipath_update_mmap_info(dev, ip, sz, wc);

		/*
		 * Return the offset to mmap.
		 * See ipath_mmap() for details.
		 */
		if (udata && udata->outlen >= sizeof(__u64)) {
			ret = ib_copy_to_udata(udata, &ip->offset,
					       sizeof(ip->offset));
			if (ret)
				goto bail;
		}

		spin_lock_irq(&dev->pending_lock);
		if (list_empty(&ip->pending_mmaps))
			list_add(&ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = 0;
	goto bail;

bail_unlock:
	spin_unlock_irq(&cq->lock);
bail_free:
	vfree(wc);
bail:
	return ret;
}
|