bool overflow_tx;
};
+
+/**
+ * struct iwl_trans_txqs - transport tx queues data
+ *
+ * @queue_used: bit mask of used queues
+ * @queue_stopped: bit mask of stopped queues
+ * @txq: array of TXQ data structures, indexed by hardware queue id
+ * @cmd: command queue parameters (FIFO, queue id and watchdog timeout)
+ */
+struct iwl_trans_txqs {
+ unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
+ unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
+ struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
+ struct {
+ u8 fifo;
+ u8 q_id;
+ unsigned int wdg_timeout;
+ } cmd;
+};
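+
+/*
+ * Minimal usage sketch: with this layout, common and PCIe code reach the
+ * command queue through the shared transport state rather than through
+ * trans_pcie, e.g.:
+ *
+ *	struct iwl_txq *cmd_txq = trans->txqs.txq[trans->txqs.cmd.q_id];
+ *
+ *	if (!test_bit(cmd_txq->id, trans->txqs.queue_used))
+ *		return -EINVAL;
+ */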
+
/**
* struct iwl_trans - transport common data
*
* @system_pm_mode: the system-wide power management mode in use.
* This mode is set dynamically, depending on the WoWLAN values
* configured from the userspace at runtime.
+ * @txqs: transport tx queues data.
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
enum iwl_plat_pm_mode system_pm_mode;
const char *name;
+ struct iwl_trans_txqs txqs;
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
ctxt_info_gen3->tr_idx_arr_size =
cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS);
ctxt_info_gen3->mtr_base_addr =
- cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
+ cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
ctxt_info_gen3->mcr_base_addr =
cpu_to_le64(trans_pcie->rxq->used_bd_dma);
ctxt_info_gen3->mtr_size =
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/* initialize TX command queue */
ctxt_info->hcmd_cfg.cmd_queue_addr =
- cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
+ cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
ctxt_info->hcmd_cfg.cmd_queue_size =
TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);
struct dma_pool *bc_pool;
struct iwl_txq *txq_memory;
- struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
- unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
- unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
/* PCI bus related data */
struct pci_dev *pci_dev;
u8 page_offs, dev_cmd_offs;
- u8 cmd_queue;
u8 def_rx_queue;
- u8 cmd_fifo;
- unsigned int cmd_q_wdg_timeout;
u8 n_no_reclaim_cmds;
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
u8 max_tbs;
static inline void iwl_wake_queue(struct iwl_trans *trans,
struct iwl_txq *txq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
+ if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
}
static inline void iwl_stop_queue(struct iwl_trans *trans,
struct iwl_txq *txq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
+ if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
iwl_op_mode_queue_full(trans->op_mode, txq->id);
IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
} else
int i)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
bool page_stolen = false;
int max_len = trans_pcie->rx_buf_bytes;
u32 offset = 0;
}
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
- if (!trans_pcie->txq[i])
+ if (!trans->txqs.txq[i])
continue;
- del_timer(&trans_pcie->txq[i]->stuck_timer);
+ del_timer(&trans->txqs.txq[i]->stuck_timer);
}
/* The STATUS_FW_ERROR bit is set in this function. This must happen
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
return -ENOMEM;
/* Allocate or reset and init all Tx and Command queues */
- if (iwl_pcie_gen2_tx_init(trans, trans_pcie->cmd_queue, queue_size))
+ if (iwl_pcie_gen2_tx_init(trans, trans->txqs.cmd.q_id, queue_size))
return -ENOMEM;
/* enable shadow regs in HW */
iwl_pcie_reset_ict(trans);
/* make sure all queue are not stopped/used */
- memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+ memset(trans->txqs.queue_stopped, 0,
+ sizeof(trans->txqs.queue_stopped));
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
/* now that we got alive we can free the fw image & the context info.
* paging memory cannot be freed included since FW will still use it
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- trans_pcie->cmd_queue = trans_cfg->cmd_queue;
- trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
- trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
+ trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
+ trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
+ trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
trans_pcie->n_no_reclaim_cmds = 0;
else
unsigned long txqs,
bool freeze)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int queue;
for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
- struct iwl_txq *txq = trans_pcie->txq[queue];
+ struct iwl_txq *txq = trans->txqs.txq[queue];
unsigned long now;
spin_lock_bh(&txq->lock);
static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
- struct iwl_txq *txq = trans_pcie->txq[i];
+ struct iwl_txq *txq = trans->txqs.txq[i];
- if (i == trans_pcie->cmd_queue)
+ if (i == trans->txqs.cmd.q_id)
continue;
spin_lock_bh(&txq->lock);
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
unsigned long now = jiffies;
bool overflow_tx;
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
return -ENODEV;
- if (!test_bit(txq_idx, trans_pcie->queue_used))
+ if (!test_bit(txq_idx, trans->txqs.queue_used))
return -EINVAL;
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
- txq = trans_pcie->txq[txq_idx];
+ txq = trans->txqs.txq[txq_idx];
spin_lock_bh(&txq->lock);
overflow_tx = txq->overflow_tx ||
static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int cnt;
int ret = 0;
cnt < trans->trans_cfg->base_params->num_of_queues;
cnt++) {
- if (cnt == trans_pcie->cmd_queue)
+ if (cnt == trans->txqs.cmd.q_id)
continue;
- if (!test_bit(cnt, trans_pcie->queue_used))
+ if (!test_bit(cnt, trans->txqs.queue_used))
continue;
if (!(BIT(cnt) & txq_bm))
continue;
struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
struct iwl_dbgfs_tx_queue_state *state = v;
struct iwl_trans *trans = priv->trans;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[state->pos];
+ struct iwl_txq *txq = trans->txqs.txq[state->pos];
seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
(unsigned int)state->pos,
- !!test_bit(state->pos, trans_pcie->queue_used),
- !!test_bit(state->pos, trans_pcie->queue_stopped));
+ !!test_bit(state->pos, trans->txqs.queue_used),
+ !!test_bit(state->pos, trans->txqs.queue_stopped));
if (txq)
seq_printf(seq,
"read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
else
seq_puts(seq, "(unallocated)");
- if (state->pos == trans_pcie->cmd_queue)
+ if (state->pos == trans->txqs.cmd.q_id)
seq_puts(seq, " (HCMD)");
seq_puts(seq, "\n");
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_fw_error_dump_data *data;
- struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
u32 len, num_rbs = 0, monitor_len = 0;
*/
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int txq_id;
/*
* queues. This happens when we have an rfkill interrupt.
* Since we stop Tx altogether - mark the queues as stopped.
*/
- memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+ memset(trans->txqs.queue_stopped, 0,
+ sizeof(trans->txqs.queue_stopped));
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
/* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
- if (!trans_pcie->txq[txq_id])
+ for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
+ if (!trans->txqs.txq[txq_id])
continue;
iwl_pcie_gen2_txq_unmap(trans, txq_id);
}
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_cmd_meta *out_meta;
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
u16 cmd_len;
int idx;
void *tfd;
"queue %d out of range", txq_id))
return -EINVAL;
- if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
+ if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
"TX on unused queue %d\n", txq_id))
return -EINVAL;
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
unsigned long flags;
cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
"Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
iwl_get_cmd_string(trans, cmd->id), group_id,
out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
+ cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
- struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
int cmd_idx;
int ret;
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
spin_lock_bh(&txq->lock);
while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
txq_id, txq->read_ptr);
- if (txq_id != trans_pcie->cmd_queue) {
+ if (txq_id != trans->txqs.cmd.q_id) {
int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
struct sk_buff *skb = txq->entries[idx].skb;
*/
static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
int i;
"queue %d out of range", txq_id))
return;
- txq = trans_pcie->txq[txq_id];
+ txq = trans->txqs.txq[txq_id];
if (WARN_ON(!txq))
return;
iwl_pcie_gen2_txq_unmap(trans, txq_id);
/* De-alloc array of command/tx buffers */
- if (txq_id == trans_pcie->cmd_queue)
+ if (txq_id == trans->txqs.cmd.q_id)
for (i = 0; i < txq->n_window; i++) {
kzfree(txq->entries[i].cmd);
kzfree(txq->entries[i].free_buf);
iwl_pcie_gen2_txq_free_memory(trans, txq);
- trans_pcie->txq[txq_id] = NULL;
+ trans->txqs.txq[txq_id] = NULL;
- clear_bit(txq_id, trans_pcie->queue_used);
+ clear_bit(txq_id, trans->txqs.queue_used);
}
int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
struct iwl_txq *txq,
struct iwl_host_cmd *hcmd)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_queue_cfg_rsp *rsp;
int ret, qid;
u32 wr_ptr;
qid = le16_to_cpu(rsp->queue_number);
wr_ptr = le16_to_cpu(rsp->write_pointer);
- if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
+ if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
WARN_ONCE(1, "queue index %d unsupported", qid);
ret = -EIO;
goto error_free_resp;
}
- if (test_and_set_bit(qid, trans_pcie->queue_used)) {
+ if (test_and_set_bit(qid, trans->txqs.queue_used)) {
WARN_ONCE(1, "queue %d already used", qid);
ret = -EIO;
goto error_free_resp;
}
txq->id = qid;
- trans_pcie->txq[qid] = txq;
+ trans->txqs.txq[qid] = txq;
wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
/* Place first TFD at index corresponding to start sequence number */
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
"queue %d out of range", queue))
return;
* allow the op_mode to call txq_disable after it already called
* stop_device.
*/
- if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
+ if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
"queue %d not used", queue);
return;
iwl_pcie_gen2_txq_unmap(trans, queue);
- iwl_pcie_gen2_txq_free_memory(trans, trans_pcie->txq[queue]);
- trans_pcie->txq[queue] = NULL;
+ iwl_pcie_gen2_txq_free_memory(trans, trans->txqs.txq[queue]);
+ trans->txqs.txq[queue] = NULL;
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
/* Free all TX queues */
- for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) {
- if (!trans_pcie->txq[i])
+ for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
+ if (!trans->txqs.txq[i])
continue;
iwl_pcie_gen2_txq_free(trans, i);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *queue;
int ret;
/* alloc and init the tx queue */
- if (!trans_pcie->txq[txq_id]) {
+ if (!trans->txqs.txq[txq_id]) {
queue = kzalloc(sizeof(*queue), GFP_KERNEL);
if (!queue) {
IWL_ERR(trans, "Not enough memory for tx queue\n");
return -ENOMEM;
}
- trans_pcie->txq[txq_id] = queue;
+ trans->txqs.txq[txq_id] = queue;
ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
goto error;
}
} else {
- queue = trans_pcie->txq[txq_id];
+ queue = trans->txqs.txq[txq_id];
}
ret = iwl_pcie_txq_init(trans, queue, queue_size,
- (txq_id == trans_pcie->cmd_queue));
+ (txq_id == trans->txqs.cmd.q_id));
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
}
- trans_pcie->txq[txq_id]->id = txq_id;
- set_bit(txq_id, trans_pcie->queue_used);
+ trans->txqs.txq[txq_id]->id = txq_id;
+ set_bit(txq_id, trans->txqs.queue_used);
return 0;
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
- if (txq_id != trans_pcie->cmd_queue)
+ if (txq_id != trans->txqs.cmd.q_id)
sta_id = tx_cmd->sta_id;
bc_ent = cpu_to_le16(1 | (sta_id << 12));
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
struct iwl_txq *txq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
int txq_id = txq->id;
* 3. there is a chance that the NIC is asleep
*/
if (!trans->trans_cfg->base_params->shadow_reg_enable &&
- txq_id != trans_pcie->cmd_queue &&
+ txq_id != trans->txqs.cmd.q_id &&
test_bit(STATUS_TPOWER_PMI, &trans->status)) {
/*
* wake up nic if it's powered down ...
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
- struct iwl_txq *txq = trans_pcie->txq[i];
+ struct iwl_txq *txq = trans->txqs.txq[i];
- if (!test_bit(i, trans_pcie->queue_used))
+ if (!test_bit(i, trans->txqs.queue_used))
continue;
spin_lock_bh(&txq->lock);
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
spin_lock_bh(&txq->lock);
while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
txq_id, txq->read_ptr);
- if (txq_id != trans_pcie->cmd_queue) {
+ if (txq_id != trans->txqs.cmd.q_id) {
struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
if (WARN_ON_ONCE(!skb))
unsigned long flags;
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
- if (txq_id == trans_pcie->cmd_queue)
+ if (txq_id == trans->txqs.cmd.q_id)
iwl_pcie_clear_cmd_in_flight(trans);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
struct device *dev = trans->dev;
int i;
iwl_pcie_txq_unmap(trans, txq_id);
/* De-alloc array of command/tx buffers */
- if (txq_id == trans_pcie->cmd_queue)
+ if (txq_id == trans->txqs.cmd.q_id)
for (i = 0; i < txq->n_window; i++) {
kzfree(txq->entries[i].cmd);
kzfree(txq->entries[i].free_buf);
SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);
/* make sure all queue are not stopped/used */
- memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+ memset(trans->txqs.queue_stopped, 0,
+ sizeof(trans->txqs.queue_stopped));
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
trans_pcie->scd_base_addr =
iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
if (trans->trans_cfg->base_params->scd_chain_ext_wa)
iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
- iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
- trans_pcie->cmd_fifo,
- trans_pcie->cmd_q_wdg_timeout);
+ iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
+ trans->txqs.cmd.fifo,
+ trans->txqs.cmd.wdg_timeout);
/* Activate all Tx DMA/FIFO channels */
iwl_scd_activate_fifos(trans);
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
if (trans->trans_cfg->use_tfh)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
* queues. This happens when we have an rfkill interrupt.
* Since we stop Tx altogether - mark the queues as stopped.
*/
- memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+ memset(trans->txqs.queue_stopped, 0,
+ sizeof(trans->txqs.queue_stopped));
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
/* This can happen: start_hw, stop_device */
if (!trans_pcie->txq_memory)
int txq_id;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
/* Tx queues */
if (trans_pcie->txq_memory) {
txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
iwl_pcie_txq_free(trans, txq_id);
- trans_pcie->txq[txq_id] = NULL;
+ trans->txqs.txq[txq_id] = NULL;
}
}
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
- bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
+ bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);
if (cmd_queue)
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_256_ba_txq_size);
- trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
- ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],
+ trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
+ ret = iwl_pcie_txq_alloc(trans, trans->txqs.txq[txq_id],
slots_num, cmd_queue);
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
}
- trans_pcie->txq[txq_id]->id = txq_id;
+ trans->txqs.txq[txq_id]->id = txq_id;
}
return 0;
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
- bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
+ bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);
if (cmd_queue)
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_256_ba_txq_size);
- ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
+ ret = iwl_pcie_txq_init(trans, trans->txqs.txq[txq_id],
slots_num, cmd_queue);
if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
* Circular buffer (TFD queue in DRAM) physical base address
*/
iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
- trans_pcie->txq[txq_id]->dma_addr >> 8);
+ trans->txqs.txq[txq_id]->dma_addr >> 8);
}
iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
struct sk_buff_head *skbs)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
int last_to_free;
/* This function is not meant to release cmd queue*/
- if (WARN_ON(txq_id == trans_pcie->cmd_queue))
+ if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
return;
spin_lock_bh(&txq->lock);
- if (!test_bit(txq_id, trans_pcie->queue_used)) {
+ if (!test_bit(txq_id, trans->txqs.queue_used)) {
IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
txq_id, ssn);
goto out;
iwl_pcie_txq_progress(txq);
if (iwl_queue_space(trans, txq) > txq->low_mark &&
- test_bit(txq_id, trans_pcie->queue_stopped)) {
+ test_bit(txq_id, trans->txqs.queue_stopped)) {
struct sk_buff_head overflow_skbs;
__skb_queue_head_init(&overflow_skbs);
/* Set wr_ptr of specific device and txq */
void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
spin_lock_bh(&txq->lock);
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
unsigned long flags;
int nfreed = 0;
u16 r;
if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
(!iwl_queue_used(txq, idx))) {
- WARN_ONCE(test_bit(txq_id, trans_pcie->queue_used),
+ WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, idx,
trans->trans_cfg->base_params->max_tfd_queue_size,
unsigned int wdg_timeout)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
int fifo = -1;
bool scd_bug = false;
- if (test_and_set_bit(txq_id, trans_pcie->queue_used))
+ if (test_and_set_bit(txq_id, trans->txqs.queue_used))
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
fifo = cfg->fifo;
/* Disable the scheduler prior configuring the cmd queue */
- if (txq_id == trans_pcie->cmd_queue &&
+ if (txq_id == trans->txqs.cmd.q_id &&
trans_pcie->scd_set_active)
iwl_scd_enable_set_active(trans, 0);
iwl_scd_txq_set_inactive(trans, txq_id);
/* Set this queue as a chain-building queue unless it is CMD */
- if (txq_id != trans_pcie->cmd_queue)
+ if (txq_id != trans->txqs.cmd.q_id)
iwl_scd_txq_set_chain(trans, txq_id);
if (cfg->aggregate) {
SCD_QUEUE_STTS_REG_MSK);
/* enable the scheduler for this queue (only) */
- if (txq_id == trans_pcie->cmd_queue &&
+ if (txq_id == trans->txqs.cmd.q_id &&
trans_pcie->scd_set_active)
iwl_scd_enable_set_active(trans, BIT(txq_id));
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
bool shared_mode)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
txq->ampdu = !shared_mode;
}
SCD_TX_STTS_QUEUE_OFFSET(txq_id);
static const u32 zero_val[4] = {};
- trans_pcie->txq[txq_id]->frozen_expiry_remainder = 0;
- trans_pcie->txq[txq_id]->frozen = false;
+ trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
+ trans->txqs.txq[txq_id]->frozen = false;
/*
* Upon HW Rfkill - we stop the device, and then stop the queues
* allow the op_mode to call txq_disable after it already called
* stop_device.
*/
- if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
+ if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
"queue %d not used", txq_id);
return;
}
iwl_pcie_txq_unmap(trans, txq_id);
- trans_pcie->txq[txq_id]->ampdu = false;
+ trans->txqs.txq[txq_id]->ampdu = false;
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
unsigned long flags;
sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
} else {
out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
out_cmd->hdr.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr));
out_cmd->hdr.group_id = 0;
iwl_get_cmd_string(trans, cmd->id),
group_id, out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
+ cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
/* If a Tx command is being handled and it isn't in the actual
* command queue then there a command routing bug has been introduced
* in the queue management code. */
- if (WARN(txq_id != trans_pcie->cmd_queue,
+ if (WARN(txq_id != trans->txqs.cmd.q_id,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, trans_pcie->cmd_queue, sequence, txq->read_ptr,
+ txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
txq->write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
int cmd_idx;
int ret;
u16 wifi_seq;
bool amsdu;
- txq = trans_pcie->txq[txq_id];
+ txq = trans->txqs.txq[txq_id];
- if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
+ if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
"TX on unused queue %d\n", txq_id))
return -EINVAL;