/* Check whether offsets passed to register_write are valid */
static bool ipa_cmd_register_write_valid(struct ipa *ipa)
{
+ const struct ipa_reg *reg;
const char *name;
u32 offset;
* offset will fit in a register write IPA immediate command.
*/
if (ipa_table_hash_support(ipa)) {
- offset = ipa_reg_offset(ipa, FILT_ROUT_HASH_FLUSH);
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
+ offset = ipa_reg_offset(reg);
name = "filter/route hash flush";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
return false;
* worst case (highest endpoint number) offset of that endpoint
* fits in the register write command field(s) that must hold it.
*/
- offset = ipa_reg_n_offset(ipa, ENDP_STATUS, IPA_ENDPOINT_COUNT - 1);
+ reg = ipa_reg(ipa, ENDP_STATUS);
+ offset = ipa_reg_n_offset(reg, IPA_ENDPOINT_COUNT - 1);
name = "maximal endpoint status";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
return false;
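The check above relies on an n-indexed register's offset growing linearly with n, so validating the highest endpoint number covers every endpoint. A minimal sketch of that worst-case computation, using only the offset/stride fields that ipa_reg_n_offset() dereferences; the helper name is illustrative and not part of this patch:

/* Illustrative only: worst-case ENDP_STATUS offset, mirroring
 * ipa_reg_n_offset() for the highest-numbered endpoint.
 */
static inline u32 example_max_endp_status_offset(const struct ipa_reg *reg)
{
	return reg->offset + (IPA_ENDPOINT_COUNT - 1) * reg->stride;
}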
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 offset;
bool state;
u32 mask;
else
WARN_ON(ipa->version >= IPA_VERSION_4_0);
- offset = ipa_reg_n_offset(ipa, ENDP_INIT_CTRL, endpoint->endpoint_id);
-
+ reg = ipa_reg(ipa, ENDP_INIT_CTRL);
mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
-
+ offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
val = ioread32(ipa->reg_virt + offset);
+
state = !!(val & mask);
/* Don't bother if it's already in the requested state */
{
u32 mask = BIT(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val;
WARN_ON(!(mask & ipa->available));
- val = ioread32(ipa->reg_virt + ipa_reg_offset(ipa, STATE_AGGR_ACTIVE));
+ reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
return !!(val & mask);
}
{
u32 mask = BIT(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
WARN_ON(!(mask & ipa->available));
- iowrite32(mask, ipa->reg_virt + ipa_reg_offset(ipa, AGGR_FORCE_CLOSE));
+ reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
+ iowrite32(mask, ipa->reg_virt + ipa_reg_offset(reg));
}
/**
while (initialized) {
u32 endpoint_id = __ffs(initialized);
struct ipa_endpoint *endpoint;
+ const struct ipa_reg *reg;
u32 offset;
initialized ^= BIT(endpoint_id);
if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
continue;
- offset = ipa_reg_n_offset(ipa, ENDP_STATUS, endpoint_id);
+ reg = ipa_reg(ipa, ENDP_STATUS);
+ offset = ipa_reg_n_offset(reg, endpoint_id);
/* Value written is 0, and all bits are updated. That
* means status is disabled on the endpoint, and as a
static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
enum ipa_cs_offload_en enabled;
+ const struct ipa_reg *reg;
u32 val = 0;
- u32 offset;
-
- offset = ipa_reg_n_offset(ipa, ENDP_INIT_CFG, endpoint->endpoint_id);
+ reg = ipa_reg(ipa, ENDP_INIT_CFG);
/* FRAG_OFFLOAD_EN is 0 */
if (endpoint->config.checksum) {
enum ipa_version version = ipa->version;
val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK);
/* CS_GEN_QMB_MASTER_SEL is 0 */
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
{
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
if (!endpoint->toward_ipa)
return;
- offset = ipa_reg_n_offset(ipa, ENDP_INIT_NAT, endpoint->endpoint_id);
-
+ reg = ipa_reg(ipa, ENDP_INIT_NAT);
val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK);
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static u32
*/
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
- u32 offset;
-
- offset = ipa_reg_n_offset(ipa, ENDP_INIT_HDR, endpoint->endpoint_id);
+ reg = ipa_reg(ipa, ENDP_INIT_HDR);
if (endpoint->config.qmap) {
enum ipa_version version = ipa->version;
size_t header_size;
/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
}
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
u32 pad_align = endpoint->config.rx.pad_align;
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
- u32 offset;
-
- offset = ipa_reg_n_offset(ipa, ENDP_INIT_HDR_EXT,
- endpoint->endpoint_id);
+ reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
if (endpoint->config.qmap) {
/* We have a header, so we must specify its endianness */
val |= HDR_ENDIANNESS_FMASK; /* big endian */
}
}
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
u32 offset;
if (endpoint->toward_ipa)
return; /* Register not valid for TX endpoints */
- offset = ipa_reg_n_offset(ipa, ENDP_INIT_HDR_METADATA_MASK,
- endpoint_id);
+ reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK);
+ offset = ipa_reg_n_offset(reg, endpoint_id);
/* Note that HDR_ENDIANNESS indicates big endian header fields */
if (endpoint->config.qmap)
static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 offset;
u32 val;
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
- offset = ipa_reg_n_offset(ipa, ENDP_INIT_MODE, endpoint->endpoint_id);
-
+ reg = ipa_reg(ipa, ENDP_INIT_MODE);
+ offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
if (endpoint->config.dma_mode) {
enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
u32 dma_endpoint_id;
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
- u32 offset;
-
- offset = ipa_reg_n_offset(ipa, ENDP_INIT_AGGR, endpoint->endpoint_id);
+ reg = ipa_reg(ipa, ENDP_INIT_AGGR);
if (endpoint->config.aggregation) {
if (!endpoint->toward_ipa) {
const struct ipa_endpoint_rx *rx_config;
/* other fields ignored */
}
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
/* The head-of-line blocking timer is defined as a tick count. For
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
/* This should only be changed when HOL_BLOCK_EN is disabled */
+ reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
val = hol_block_timer_encode(ipa, microseconds);
- offset = ipa_reg_n_offset(ipa, ENDP_INIT_HOL_BLOCK_TIMER, endpoint_id);
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 offset;
u32 val;
- offset = ipa_reg_n_offset(ipa, ENDP_INIT_HOL_BLOCK_EN, endpoint_id);
-
+ reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
+ offset = ipa_reg_n_offset(reg, endpoint_id);
val = enable ? HOL_BLOCK_EN_FMASK : 0;
iowrite32(val, ipa->reg_virt + offset);
static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
- u32 offset;
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
- offset = ipa_reg_n_offset(ipa, ENDP_INIT_DEAGGR, endpoint->endpoint_id);
-
+ reg = ipa_reg(ipa, ENDP_INIT_DEAGGR);
/* DEAGGR_HDR_LEN is 0 */
/* PACKET_OFFSET_VALID is 0 */
/* PACKET_OFFSET_LOCATION is ignored (not valid) */
/* MAX_PACKET_LEN is 0 (not enforced) */
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
{
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
- offset = ipa_reg_n_offset(ipa, ENDP_INIT_RSRC_GRP,
- endpoint->endpoint_id);
-
+ reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
val = rsrc_grp_encoded(ipa->version, endpoint->config.resource_group);
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
- u32 offset;
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
- offset = ipa_reg_n_offset(ipa, ENDP_INIT_SEQ, endpoint->endpoint_id);
+ reg = ipa_reg(ipa, ENDP_INIT_SEQ);
/* Low-order byte configures primary packet processing */
val |= u32_encode_bits(endpoint->config.tx.seq_type, SEQ_TYPE_FMASK);
val |= u32_encode_bits(endpoint->config.tx.seq_rep_type,
SEQ_REP_TYPE_FMASK);
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
/**
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
- u32 offset;
-
- offset = ipa_reg_n_offset(ipa, ENDP_STATUS, endpoint_id);
+ reg = ipa_reg(ipa, ENDP_STATUS);
if (endpoint->config.status_enable) {
val |= STATUS_EN_FMASK;
if (endpoint->toward_ipa) {
/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
}
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
+ const struct ipa_reg *reg;
u32 val;
+ reg = ipa_reg(ipa, ROUTE);
/* ROUTE_DIS is 0 */
val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
val |= ROUTE_DEF_HDR_TABLE_FMASK;
val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
val |= ROUTE_DEF_RETAIN_HDR_FMASK;
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(ipa, ROUTE));
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
void ipa_endpoint_default_route_clear(struct ipa *ipa)
int ipa_endpoint_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
+ const struct ipa_reg *reg;
u32 initialized;
u32 rx_base;
u32 rx_mask;
/* Find out about the endpoints supplied by the hardware, and ensure
* the highest one doesn't exceed the number we support.
*/
- val = ioread32(ipa->reg_virt + ipa_reg_offset(ipa, FLAVOR_0));
+ reg = ipa_reg(ipa, FLAVOR_0);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
/* Our RX is an IPA producer */
rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
{
bool uc_irq = ipa_interrupt_uc(interrupt, irq_id);
struct ipa *ipa = interrupt->ipa;
+ const struct ipa_reg *reg;
u32 mask = BIT(irq_id);
u32 offset;
/* For microcontroller interrupts, clear the interrupt right away,
* "to avoid clearing unhandled interrupts."
*/
- offset = ipa_reg_offset(ipa, IPA_IRQ_CLR);
+ reg = ipa_reg(ipa, IPA_IRQ_CLR);
+ offset = ipa_reg_offset(reg);
if (uc_irq)
iowrite32(mask, ipa->reg_virt + offset);
struct ipa_interrupt *interrupt = dev_id;
struct ipa *ipa = interrupt->ipa;
u32 enabled = interrupt->enabled;
+ const struct ipa_reg *reg;
struct device *dev;
u32 pending;
u32 offset;
* including conditions whose interrupt is not enabled. Handle
* only the enabled ones.
*/
- offset = ipa_reg_offset(ipa, IPA_IRQ_STTS);
+ reg = ipa_reg(ipa, IPA_IRQ_STTS);
+ offset = ipa_reg_offset(reg);
pending = ioread32(ipa->reg_virt + offset);
while ((mask = pending & enabled)) {
do {
if (pending) {
dev_dbg(dev, "clearing disabled IPA interrupts 0x%08x\n",
pending);
- offset = ipa_reg_offset(ipa, IPA_IRQ_CLR);
+ reg = ipa_reg(ipa, IPA_IRQ_CLR);
+ offset = ipa_reg_offset(reg);
iowrite32(pending, ipa->reg_virt + offset);
}
out_power_put:
{
struct ipa *ipa = interrupt->ipa;
u32 mask = BIT(endpoint_id);
+ const struct ipa_reg *reg;
u32 offset;
u32 val;
if (ipa->version == IPA_VERSION_3_0)
return;
- offset = ipa_reg_offset(ipa, IRQ_SUSPEND_EN);
+ reg = ipa_reg(ipa, IRQ_SUSPEND_EN);
+ offset = ipa_reg_offset(reg);
val = ioread32(ipa->reg_virt + offset);
if (enable)
val |= mask;
void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
{
struct ipa *ipa = interrupt->ipa;
+ const struct ipa_reg *reg;
u32 val;
- val = ioread32(ipa->reg_virt + ipa_reg_offset(ipa, IRQ_SUSPEND_INFO));
+ reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
/* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
if (ipa->version == IPA_VERSION_3_0)
return;
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(ipa, IRQ_SUSPEND_CLR));
+ reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
/* Simulate arrival of an IPA TX_SUSPEND interrupt */
enum ipa_irq_id ipa_irq, ipa_irq_handler_t handler)
{
struct ipa *ipa = interrupt->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
if (WARN_ON(ipa_irq >= IPA_IRQ_COUNT))
return;
/* Update the IPA interrupt mask to enable it */
interrupt->enabled |= BIT(ipa_irq);
- offset = ipa_reg_offset(ipa, IPA_IRQ_EN);
- iowrite32(interrupt->enabled, ipa->reg_virt + offset);
+
+ reg = ipa_reg(ipa, IPA_IRQ_EN);
+ iowrite32(interrupt->enabled, ipa->reg_virt + ipa_reg_offset(reg));
}
/* Remove the handler for an IPA interrupt type */
ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq)
{
struct ipa *ipa = interrupt->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
if (WARN_ON(ipa_irq >= IPA_IRQ_COUNT))
return;
/* Update the IPA interrupt mask to disable it */
interrupt->enabled &= ~BIT(ipa_irq);
- offset = ipa_reg_offset(ipa, IPA_IRQ_EN);
- iowrite32(interrupt->enabled, ipa->reg_virt + offset);
+
+ reg = ipa_reg(ipa, IPA_IRQ_EN);
+ iowrite32(interrupt->enabled, ipa->reg_virt + ipa_reg_offset(reg));
interrupt->handler[ipa_irq] = NULL;
}
{
struct device *dev = &ipa->pdev->dev;
struct ipa_interrupt *interrupt;
+ const struct ipa_reg *reg;
unsigned int irq;
int ret;
interrupt->irq = irq;
/* Start with all IPA interrupts disabled */
- iowrite32(0, ipa->reg_virt + ipa_reg_offset(ipa, IPA_IRQ_EN));
+ reg = ipa_reg(ipa, IPA_IRQ_EN);
+ iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
ret = request_threaded_irq(irq, NULL, ipa_isr_thread, IRQF_ONESHOT,
"ipa", interrupt);
static void
ipa_hardware_config_bcr(struct ipa *ipa, const struct ipa_data *data)
{
+ const struct ipa_reg *reg;
u32 val;
/* IPA v4.5+ has no backward compatibility register */
if (ipa->version >= IPA_VERSION_4_5)
return;
+ reg = ipa_reg(ipa, IPA_BCR);
val = data->backward_compat;
-
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(ipa, IPA_BCR));
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
static void ipa_hardware_config_tx(struct ipa *ipa)
{
enum ipa_version version = ipa->version;
+ const struct ipa_reg *reg;
u32 offset;
u32 val;
return;
/* Disable PA mask to allow HOLB drop */
- offset = ipa_reg_offset(ipa, IPA_TX_CFG);
+ reg = ipa_reg(ipa, IPA_TX_CFG);
+ offset = ipa_reg_offset(reg);
+
val = ioread32(ipa->reg_virt + offset);
val &= ~PA_MASK_EN_FMASK;
static void ipa_hardware_config_clkon(struct ipa *ipa)
{
enum ipa_version version = ipa->version;
+ const struct ipa_reg *reg;
u32 val;
- if (version < IPA_VERSION_3_1 || version >= IPA_VERSION_4_5)
+ if (version >= IPA_VERSION_4_5)
return;
- /* Implement some hardware workarounds */
+ if (version < IPA_VERSION_4_0 && version != IPA_VERSION_3_1)
+ return;
- if (version >= IPA_VERSION_4_0) {
- /* Enable open global clocks in the CLKON configuration */
- val = GLOBAL_FMASK | GLOBAL_2X_CLK_FMASK;
- } else if (version == IPA_VERSION_3_1) {
+ /* Implement some hardware workarounds */
+ reg = ipa_reg(ipa, CLKON_CFG);
+ if (version == IPA_VERSION_3_1)
val = MISC_FMASK; /* Disable MISC clock gating */
- } else {
- return;
- }
+ else /* IPA v4.0+: enable open global clocks in the CLKON configuration */
+ val = GLOBAL_FMASK | GLOBAL_2X_CLK_FMASK;
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(ipa, CLKON_CFG));
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
/* Configure bus access behavior for IPA components */
static void ipa_hardware_config_comp(struct ipa *ipa)
{
+ const struct ipa_reg *reg;
u32 offset;
u32 val;
if (ipa->version < IPA_VERSION_4_0)
return;
- offset = ipa_reg_offset(ipa, COMP_CFG);
+ reg = ipa_reg(ipa, COMP_CFG);
+ offset = ipa_reg_offset(reg);
val = ioread32(ipa->reg_virt + offset);
if (ipa->version == IPA_VERSION_4_0) {
{
const struct ipa_qsb_data *data0;
const struct ipa_qsb_data *data1;
+ const struct ipa_reg *reg;
u32 val;
/* QMB 0 represents DDR; QMB 1 (if present) represents PCIe */
data1 = &data->qsb_data[IPA_QSB_MASTER_PCIE];
/* Max outstanding write accesses for QSB masters */
+ reg = ipa_reg(ipa, QSB_MAX_WRITES);
+
val = u32_encode_bits(data0->max_writes, GEN_QMB_0_MAX_WRITES_FMASK);
if (data->qsb_count > 1)
val |= u32_encode_bits(data1->max_writes,
GEN_QMB_1_MAX_WRITES_FMASK);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(ipa, QSB_MAX_WRITES));
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
/* Max outstanding read accesses for QSB masters */
+ reg = ipa_reg(ipa, QSB_MAX_READS);
+
val = u32_encode_bits(data0->max_reads, GEN_QMB_0_MAX_READS_FMASK);
if (ipa->version >= IPA_VERSION_4_0)
val |= u32_encode_bits(data0->max_reads_beats,
val |= u32_encode_bits(data1->max_reads_beats,
GEN_QMB_1_MAX_READS_BEATS_FMASK);
}
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(ipa, QSB_MAX_READS));
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
/* The internal inactivity timer clock is used for the aggregation timer */
*/
static void ipa_qtime_config(struct ipa *ipa)
{
+ const struct ipa_reg *reg;
u32 offset;
u32 val;
/* Timer clock divider must be disabled when we change the rate */
- offset = ipa_reg_offset(ipa, TIMERS_XO_CLK_DIV_CFG);
- iowrite32(0, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
+ iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
+ reg = ipa_reg(ipa, QTIME_TIMESTAMP_CFG);
/* Set DPL time stamp resolution to use Qtime (instead of 1 msec) */
val = u32_encode_bits(DPL_TIMESTAMP_SHIFT, DPL_TIMESTAMP_LSB_FMASK);
val |= u32_encode_bits(1, DPL_TIMESTAMP_SEL_FMASK);
val |= u32_encode_bits(TAG_TIMESTAMP_SHIFT, TAG_TIMESTAMP_LSB_FMASK);
val |= u32_encode_bits(NAT_TIMESTAMP_SHIFT, NAT_TIMESTAMP_LSB_FMASK);
- offset = ipa_reg_offset(ipa, QTIME_TIMESTAMP_CFG);
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
/* Set granularity of pulse generators used for other timers */
+ reg = ipa_reg(ipa, TIMERS_PULSE_GRAN_CFG);
val = u32_encode_bits(IPA_GRAN_100_US, GRAN_0_FMASK);
val |= u32_encode_bits(IPA_GRAN_1_MS, GRAN_1_FMASK);
val |= u32_encode_bits(IPA_GRAN_1_MS, GRAN_2_FMASK);
- offset = ipa_reg_offset(ipa, TIMERS_PULSE_GRAN_CFG);
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
/* Actual divider is 1 more than value supplied here */
+ reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
+ offset = ipa_reg_offset(reg);
val = u32_encode_bits(IPA_XO_CLOCK_DIVIDER - 1, DIV_VALUE_FMASK);
- offset = ipa_reg_offset(ipa, TIMERS_XO_CLK_DIV_CFG);
iowrite32(val, ipa->reg_virt + offset);
/* Divider value is set; re-enable the common timer clock divider */
/* Before IPA v4.5 timing is controlled by a counter register */
static void ipa_hardware_config_counter(struct ipa *ipa)
{
- u32 granularity;
- u32 offset;
+ u32 granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
+ const struct ipa_reg *reg;
u32 val;
- granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
-
+ reg = ipa_reg(ipa, COUNTER_CFG);
val = u32_encode_bits(granularity, AGGR_GRANULARITY_FMASK);
-
- offset = ipa_reg_offset(ipa, COUNTER_CFG);
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
static void ipa_hardware_config_timing(struct ipa *ipa)
static void ipa_hardware_config_hashing(struct ipa *ipa)
{
- u32 offset;
+ const struct ipa_reg *reg;
if (ipa->version != IPA_VERSION_4_2)
return;
/* IPA v4.2 does not support hashed tables, so disable them */
- offset = ipa_reg_offset(ipa, FILT_ROUT_HASH_EN);
- iowrite32(0, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_EN);
+ iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
}
static void ipa_idle_indication_cfg(struct ipa *ipa,
u32 enter_idle_debounce_thresh,
bool const_non_idle_enable)
{
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
+ reg = ipa_reg(ipa, IDLE_INDICATION_CFG);
val = u32_encode_bits(enter_idle_debounce_thresh,
ENTER_IDLE_DEBOUNCE_THRESH_FMASK);
if (const_non_idle_enable)
val |= CONST_NON_IDLE_ENABLE_FMASK;
- offset = ipa_reg_offset(ipa, IDLE_INDICATION_CFG);
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
/**
int ipa_mem_setup(struct ipa *ipa)
{
dma_addr_t addr = ipa->zero_addr;
+ const struct ipa_reg *reg;
const struct ipa_mem *mem;
struct gsi_trans *trans;
u32 offset;
/* Tell the hardware where the processing context area is located */
mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
offset = ipa->mem_offset + mem->offset;
- val = proc_cntxt_base_addr_encoded(ipa->version, offset);
- offset = ipa_reg_offset(ipa, LOCAL_PKT_PROC_CNTXT);
- iowrite32(val, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, LOCAL_PKT_PROC_CNTXT);
+ val = proc_cntxt_base_addr_encoded(ipa->version, offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
return 0;
}
int ipa_mem_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
+ const struct ipa_reg *reg;
const struct ipa_mem *mem;
dma_addr_t addr;
u32 mem_size;
u32 i;
/* Check the advertised location and size of the shared memory area */
- val = ioread32(ipa->reg_virt + ipa_reg_offset(ipa, SHARED_MEM_SIZE));
+ reg = ipa_reg(ipa, SHARED_MEM_SIZE);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
/* The fields in the register are in 8 byte units */
ipa->mem_offset = 8 * u32_get_bits(val, SHARED_MEM_BADDR_FMASK);
return valid && ipa->regs->reg[reg_id];
}
-/* Assumes the endpoint transfer direction is valid; 0 is a bad offset */
-u32 __ipa_reg_offset(struct ipa *ipa, enum ipa_reg_id reg_id, u32 n)
+const struct ipa_reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id)
{
- const struct ipa_reg *reg;
-
if (WARN_ON(!ipa_reg_valid(ipa, reg_id)))
- return 0;
-
- reg = ipa->regs->reg[reg_id];
+ return NULL;
- return reg->offset + n * reg->stride;
+ return ipa->regs->reg[reg_id];
}
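ipa_reg() is just a bounds-checked lookup into a per-version table of register descriptors. A minimal sketch of the layout those lookups assume, based only on what this patch dereferences (offset, stride, and the regs->reg[] array); any further members are assumptions, not taken from the patch:

/* Sketch of the descriptor layout implied by ipa_reg_n_offset() and
 * ipa->regs->reg[reg_id]; the real structures may carry more fields.
 */
struct ipa_reg {
	u32 offset;			/* base offset of the register */
	u32 stride;			/* spacing between indexed instances */
};

struct ipa_regs {
	const struct ipa_reg **reg;	/* indexed by enum ipa_reg_id */
	u32 reg_count;			/* assumed: number of entries in reg[] */
};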
static const struct ipa_regs *ipa_regs(enum ipa_version version)
extern const struct ipa_regs ipa_regs_v4_9;
extern const struct ipa_regs ipa_regs_v4_11;
-u32 __ipa_reg_offset(struct ipa *ipa, enum ipa_reg_id reg_id, u32 n);
-
const struct ipa_reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id);
-static inline u32 ipa_reg_offset(struct ipa *ipa, enum ipa_reg_id reg_id)
+/* Returns 0 for NULL reg; warning will have already been issued */
+static inline u32 ipa_reg_offset(const struct ipa_reg *reg)
{
- return __ipa_reg_offset(ipa, reg_id, 0);
+ return reg ? reg->offset : 0;
}
-static inline u32
-ipa_reg_n_offset(struct ipa *ipa, enum ipa_reg_id reg_id, u32 n)
+/* Returns 0 for NULL reg; warning will have already been issued */
+static inline u32 ipa_reg_n_offset(const struct ipa_reg *reg, u32 n)
{
- return __ipa_reg_offset(ipa, reg_id, n);
+ return reg ? reg->offset + n * reg->stride : 0;
}
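The conversion pattern used throughout the patch is to look the register up once and derive offsets from the returned descriptor. A hypothetical caller (not part of the patch) showing the old call next to its replacement:

/* Hypothetical helper, for illustration only */
static u32 example_read_endp_init_cfg(struct ipa *ipa, u32 endpoint_id)
{
	const struct ipa_reg *reg;

	/* Was: offset = ipa_reg_n_offset(ipa, ENDP_INIT_CFG, endpoint_id); */
	reg = ipa_reg(ipa, ENDP_INIT_CFG);

	return ioread32(ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}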
int ipa_reg_init(struct ipa *ipa);
u32 group_count = data->rsrc_group_src_count;
const struct ipa_resource_limits *ylimits;
const struct ipa_resource *resource;
+ const struct ipa_reg *reg;
u32 offset;
resource = &data->resource_src[resource_type];
- offset = ipa_reg_n_offset(ipa, SRC_RSRC_GRP_01_RSRC_TYPE,
- resource_type);
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_01_RSRC_TYPE);
+ offset = ipa_reg_n_offset(reg, resource_type);
ylimits = group_count == 1 ? NULL : &resource->limits[1];
ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits);
if (group_count < 3)
return;
- offset = ipa_reg_n_offset(ipa, SRC_RSRC_GRP_23_RSRC_TYPE,
- resource_type);
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_23_RSRC_TYPE);
+ offset = ipa_reg_n_offset(reg, resource_type);
ylimits = group_count == 3 ? NULL : &resource->limits[3];
ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits);
if (group_count < 5)
return;
- offset = ipa_reg_n_offset(ipa, SRC_RSRC_GRP_45_RSRC_TYPE,
- resource_type);
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_45_RSRC_TYPE);
+ offset = ipa_reg_n_offset(reg, resource_type);
ylimits = group_count == 5 ? NULL : &resource->limits[5];
ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits);
if (group_count < 7)
return;
- offset = ipa_reg_n_offset(ipa, SRC_RSRC_GRP_67_RSRC_TYPE,
- resource_type);
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_67_RSRC_TYPE);
+ offset = ipa_reg_n_offset(reg, resource_type);
ylimits = group_count == 7 ? NULL : &resource->limits[7];
ipa_resource_config_common(ipa, offset, &resource->limits[6], ylimits);
}
u32 group_count = data->rsrc_group_dst_count;
const struct ipa_resource_limits *ylimits;
const struct ipa_resource *resource;
+ const struct ipa_reg *reg;
u32 offset;
resource = &data->resource_dst[resource_type];
- offset = ipa_reg_n_offset(ipa, DST_RSRC_GRP_01_RSRC_TYPE,
- resource_type);
+ reg = ipa_reg(ipa, DST_RSRC_GRP_01_RSRC_TYPE);
+ offset = ipa_reg_n_offset(reg, resource_type);
ylimits = group_count == 1 ? NULL : &resource->limits[1];
ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits);
if (group_count < 3)
return;
- offset = ipa_reg_n_offset(ipa, DST_RSRC_GRP_23_RSRC_TYPE,
- resource_type);
+ reg = ipa_reg(ipa, DST_RSRC_GRP_23_RSRC_TYPE);
+ offset = ipa_reg_n_offset(reg, resource_type);
ylimits = group_count == 3 ? NULL : &resource->limits[3];
ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits);
if (group_count < 5)
return;
- offset = ipa_reg_n_offset(ipa, DST_RSRC_GRP_45_RSRC_TYPE,
- resource_type);
+ reg = ipa_reg(ipa, DST_RSRC_GRP_45_RSRC_TYPE);
+ offset = ipa_reg_n_offset(reg, resource_type);
ylimits = group_count == 5 ? NULL : &resource->limits[5];
ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits);
if (group_count < 7)
return;
- offset = ipa_reg_n_offset(ipa, DST_RSRC_GRP_67_RSRC_TYPE,
- resource_type);
+ reg = ipa_reg(ipa, DST_RSRC_GRP_67_RSRC_TYPE);
+ offset = ipa_reg_n_offset(reg, resource_type);
ylimits = group_count == 7 ? NULL : &resource->limits[7];
ipa_resource_config_common(ipa, offset, &resource->limits[6], ylimits);
}
int ipa_table_hash_flush(struct ipa *ipa)
{
- u32 offset = ipa_reg_offset(ipa, FILT_ROUT_HASH_FLUSH);
+ const struct ipa_reg *reg;
struct gsi_trans *trans;
+ u32 offset;
u32 val;
if (!ipa_table_hash_support(ipa))
return -EBUSY;
}
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
+ offset = ipa_reg_offset(reg);
+
val = IPV4_FILTER_HASH_FMASK | IPV6_FILTER_HASH_FMASK;
val |= IPV6_ROUTER_HASH_FMASK | IPV4_ROUTER_HASH_FMASK;
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 offset;
u32 val;
- offset = ipa_reg_n_offset(ipa, ENDP_FILTER_ROUTER_HSH_CFG, endpoint_id);
+ reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
+ offset = ipa_reg_n_offset(reg, endpoint_id);
val = ioread32(endpoint->ipa->reg_virt + offset);
*/
static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
{
+ const struct ipa_reg *reg;
u32 offset;
u32 val;
- offset = ipa_reg_n_offset(ipa, ENDP_FILTER_ROUTER_HSH_CFG, route_id);
+ reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
+ offset = ipa_reg_n_offset(reg, route_id);
val = ioread32(ipa->reg_virt + offset);
static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
+ const struct ipa_reg *reg;
u32 val;
/* Fill in the command data */
shared->response_param = 0;
/* Use an interrupt to tell the microcontroller the command is ready */
+ reg = ipa_reg(ipa, IPA_IRQ_UC);
val = u32_encode_bits(1, UC_INTR_FMASK);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(ipa, IPA_IRQ_UC));
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
/* Tell the microcontroller the AP is shutting down */