static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
bool first;
bool retiring = false;
+ bool locked = false;
int num_entries = 0;
unsigned int qc_idx = 0;
unsigned long iflags;
struct sdebug_defer *sd_dp;
struct sdebug_queue *sqp;
struct sdebug_queued_cmd *sqcp;
struct scsi_cmnd *scp;
sqp = sdebug_q_arr + queue_num;
- spin_lock_irqsave(&sqp->qc_lock, iflags);
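+ /* peek at the in_use bitmap without taking the lock; an idle queue costs no lock traffic */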
+ qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
+ if (qc_idx >= sdebug_max_queue)
+ return 0;
for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
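+ /* take the spinlock lazily; it is dropped around scsi_done() below */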
+ if (!locked) {
+ spin_lock_irqsave(&sqp->qc_lock, iflags);
+ locked = true;
+ }
if (first) {
- qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
first = false;
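+ /* re-check the bit found by the unlocked peek; it may have been cleared meanwhile */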
+ if (!test_bit(qc_idx, sqp->in_use_bm))
+ continue;
} else {
qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
}
- if (unlikely(qc_idx >= sdebug_max_queue))
+ if (qc_idx >= sdebug_max_queue)
break;
sqcp = &sqp->qc_arr[qc_idx];
/* ... fetch scp and sd_dp from sqcp and clear the in_use bit (unchanged code elided) ... */
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
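+ /* never hold the spinlock across the mid-level completion callback */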
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ locked = false;
scsi_done(scp); /* callback to mid level */
- spin_lock_irqsave(&sqp->qc_lock, iflags);
num_entries++;
+ if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
+ break; /* if no more then exit without retaking spinlock */
}
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
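+ /* the early "no more entries" break exits with the lock already dropped */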
+ if (locked)
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
if (num_entries > 0)
atomic_add(num_entries, &sdeb_mq_poll_count);
return num_entries;
}
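
Taken together, the hunk implements a lock-avoidance pattern: peek at the queue's bitmap without the lock, take the lock only when there is work, drop it across the completion callback, and avoid retaking it once the queue has drained. The following is a minimal, self-contained userspace sketch of that pattern, with a pthread mutex standing in for the irqsave spinlock and a plain bitmask for in_use_bm; all names here (demo_queue, poll_queue, first_set_bit) are hypothetical, for illustration only.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_queue {
        pthread_mutex_t lock;
        unsigned int in_use;            /* one bit per pending entry */
        void (*done)(int idx);          /* completion callback, called unlocked */
};

/* index of lowest set bit, or 32 if none (stands in for find_first_bit()) */
static int first_set_bit(unsigned int bm)
{
        for (int i = 0; i < 32; i++)
                if (bm & (1u << i))
                        return i;
        return 32;
}

static int poll_queue(struct demo_queue *q)
{
        bool locked = false;
        int num_entries = 0;
        int idx;

        /* unlocked peek: an idle queue is handled without any locking */
        if (first_set_bit(q->in_use) >= 32)
                return 0;

        for (;;) {
                if (!locked) {
                        pthread_mutex_lock(&q->lock);
                        locked = true;
                }
                /* re-check under the lock; the unlocked peek may be stale */
                idx = first_set_bit(q->in_use);
                if (idx >= 32)
                        break;
                q->in_use &= ~(1u << idx);      /* claim the entry */

                /* drop the lock across the callback, as the driver does */
                pthread_mutex_unlock(&q->lock);
                locked = false;
                q->done(idx);
                num_entries++;

                /* unlocked peek again: if nothing is left, skip relocking */
                if (first_set_bit(q->in_use) >= 32)
                        break;
        }
        if (locked)
                pthread_mutex_unlock(&q->lock);
        return num_entries;
}

static void report(int idx)
{
        printf("completed entry %d\n", idx);
}

int main(void)
{
        struct demo_queue q = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .in_use = 0x15,         /* entries 0, 2 and 4 pending */
                .done = report,
        };
        printf("polled %d entries\n", poll_queue(&q));
        return 0;
}

As in the patch, the unlocked peeks are only heuristics: a stale read costs at most one extra lock round trip, or defers the entry to the next poll invocation, because every entry is re-checked and claimed under the lock before its callback runs.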