void (*callback_fcn)(struct irdma_cqp_request *cqp_request),
void *cb_param);
void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);
+bool irdma_cq_empty(struct irdma_cq *iwcq);
int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
void *ptr);
int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
	ibevent.element.qp = &iwqp->ibqp;
	iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
}
+
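+/**
+ * irdma_cq_empty - check if the CQ has no new completions
+ * @iwcq: CQ to check
+ *
+ * The CQE at the current head is valid only while its polarity bit
+ * matches the SW-tracked polarity; a mismatch means HW has not
+ * posted a new CQE, i.e. the CQ is empty.
+ */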
+bool irdma_cq_empty(struct irdma_cq *iwcq)
+{
+	struct irdma_cq_uk *ukcq;
+	u64 qword3;
+	__le64 *cqe;
+	u8 polarity;
+
+	ukcq = &iwcq->sc_cq.cq_uk;
+	cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
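+	/* The CQE valid (polarity) bit lives in qword3, at byte offset 24 */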
+	get_64bit_val(cqe, 24, &qword3);
+	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+
+	return polarity != ukcq->polarity;
+}
	struct irdma_cq *iwcq;
	struct irdma_cq_uk *ukcq;
	unsigned long flags;
-	enum irdma_cmpl_notify cq_notify = IRDMA_CQ_COMPL_EVENT;
+	enum irdma_cmpl_notify cq_notify;
+	bool promo_event = false;
+	int ret = 0;
+
+	cq_notify = notify_flags == IB_CQ_SOLICITED ?
+		    IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT;
	iwcq = to_iwcq(ibcq);
	ukcq = &iwcq->sc_cq.cq_uk;
-	if (notify_flags == IB_CQ_SOLICITED)
-		cq_notify = IRDMA_CQ_COMPL_SOLICITED;
	spin_lock_irqsave(&iwcq->lock, flags);
-	irdma_uk_cq_request_notification(ukcq, cq_notify);
+	/* Only promote to arm the CQ for any event if the last arm event was solicited. */
+	if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED)
+		promo_event = true;
+
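+	/* Arm the CQ on the first request, or to widen a solicited-only arm to any event */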
+	if (!iwcq->armed || promo_event) {
+		iwcq->armed = true;
+		iwcq->last_notify = cq_notify;
+		irdma_uk_cq_request_notification(ukcq, cq_notify);
+	}
+
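+	/* With IB_CQ_REPORT_MISSED_EVENTS, return 1 when completions are
+	 * already pending so the caller re-polls instead of waiting.
+	 */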
+	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && !irdma_cq_empty(iwcq))
+		ret = 1;
	spin_unlock_irqrestore(&iwcq->lock, flags);
-	return 0;
+	return ret;
}
static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,