 	u8 zip_cores;
 };
+struct nitrox_stats {
+	atomic64_t posted;
+	atomic64_t completed;
+	atomic64_t dropped;
+};
+
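These three counters are device-global and only ever read whole, so a consumer (e.g. a debugfs file, which the driver's CONFIG_DEBUG_FS section below would support) needs no extra locking. A minimal sketch of such a reader, assuming a seq_file whose private pointer was set to the nitrox_device at file creation; the stats_show name and the wiring are illustrative, not part of this patch:

/* Sketch only: seq_file reader for the counters added above.
 * Needs <linux/seq_file.h>; assumes s->private holds the nitrox_device.
 */
static int stats_show(struct seq_file *s, void *v)
{
	struct nitrox_device *ndev = s->private;

	seq_printf(s, "posted:    %llu\n",
		   (u64)atomic64_read(&ndev->stats.posted));
	seq_printf(s, "completed: %llu\n",
		   (u64)atomic64_read(&ndev->stats.completed));
	seq_printf(s, "dropped:   %llu\n",
		   (u64)atomic64_read(&ndev->stats.dropped));
	return 0;
}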
 #define MAX_MSIX_VECTOR_NAME	20
 /**
  * vectors for queues (64 AE, 64 SE and 64 ZIP) and
  * error condition/mailbox.
  */
 	struct nitrox_msix msix;
 	struct nitrox_bh bh;
+	struct nitrox_stats stats;
 	struct nitrox_hw hw;
 #if IS_ENABLED(CONFIG_DEBUG_FS)
 	struct dentry *debugfs_dir;
 	if (err)
 		goto pf_hw_fail;
 
+	/* clear the statistics */
+	atomic64_set(&ndev->stats.posted, 0);
+	atomic64_set(&ndev->stats.completed, 0);
+	atomic64_set(&ndev->stats.dropped, 0);
+
 	atomic_set(&ndev->state, __NDEV_READY);
 	/* barrier to sync with other cpus */
 	smp_mb__after_atomic();
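The three atomic64_set() calls run before the device is marked __NDEV_READY, so no submitter can race with the clearing. If the counters ever need resetting from more than one place (say, a future device-reset path), a helper like the following sketch would keep that in one spot; nitrox_reset_stats is a hypothetical name, not in this patch:

/* Hypothetical helper, equivalent to the three calls above. */
static inline void nitrox_reset_stats(struct nitrox_device *ndev)
{
	atomic64_set(&ndev->stats.posted, 0);
	atomic64_set(&ndev->stats.completed, 0);
	atomic64_set(&ndev->stats.dropped, 0);
}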
 	cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
 	spin_unlock_bh(&cmdq->cmdq_lock);
+
+	/* increment the posted command count */
+	atomic64_inc(&ndev->stats.posted);
 }
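Because posted is bumped exactly once per command written to the ring (and after cmdq_lock is dropped, keeping the locked section short), sampling it at intervals yields a submission rate. A sketch, with nitrox_posted_delta as a hypothetical name:

/* Sketch: how many commands were posted since the last sample.
 * *last holds the previous reading and is updated in place.
 */
static u64 nitrox_posted_delta(struct nitrox_device *ndev, u64 *last)
{
	u64 now = (u64)atomic64_read(&ndev->stats.posted);
	u64 delta = now - *last;

	*last = now;
	return delta;
}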
 static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
 	post_backlog_cmds(cmdq);
 
 	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
-		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+			/* increment drop count */
+			atomic64_inc(&ndev->stats.dropped);
 			return -ENOSPC;
+		}
 		/* add to backlog list */
 		backlog_list_add(sr, cmdq);
 		return -EBUSY;
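The counter placement defines the caller-visible contract: -ENOSPC means the request was refused and already counted in stats.dropped, while -EBUSY means it was backlogged and will be counted in stats.posted only when post_backlog_cmds() eventually writes it to the ring. A caller-side sketch under those assumptions; example_submit, the enqueue wrapper name, and the logging are illustrative rather than driver code:

/* Sketch: a submitter's view of the return codes produced above. */
static int example_submit(struct nitrox_softreq *sr)
{
	int ret = nitrox_enqueue_request(sr);	/* hypothetical wrapper */

	if (ret == -ENOSPC)
		pr_debug("refused; counted in stats.dropped\n");
	else if (ret == -EBUSY)
		pr_debug("backlogged; counted as posted once it hits the ring\n");
	return ret;
}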
 					    READ_ONCE(sr->resp.orh));
 		}
 		atomic_dec(&cmdq->pending_count);
+		atomic64_inc(&ndev->stats.completed);
 		/* sync with other cpus */
 		smp_mb__after_atomic();
 		/* remove from response list */
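With both ends instrumented, posted minus completed gives the number of requests in flight device-wide; dropped requests never reach the ring, so they do not enter this difference. A hypothetical helper:

/* Sketch: device-wide in-flight count derived from the two counters.
 * The two reads are not mutually atomic, so the result is approximate
 * under load; good enough for statistics.
 */
static inline u64 nitrox_inflight(struct nitrox_device *ndev)
{
	return (u64)atomic64_read(&ndev->stats.posted) -
	       (u64)atomic64_read(&ndev->stats.completed);
}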