* offset will fit in a register write IPA immediate command.
*/
if (ipa_table_hash_support(ipa)) {
- offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
+ offset = ipa_reg_offset(ipa, FILT_ROUT_HASH_FLUSH);
name = "filter/route hash flush";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
return false;
* worst case (highest endpoint number) offset of that endpoint
* fits in the register write command field(s) that must hold it.
*/
- offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT - 1);
+ offset = ipa_reg_n_offset(ipa, ENDP_STATUS, IPA_ENDPOINT_COUNT - 1);
name = "maximal endpoint status";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
return false;
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
- u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
+ u32 offset;
bool state;
u32 mask;
u32 val;
else
WARN_ON(ipa->version >= IPA_VERSION_4_0);
+ offset = ipa_reg_n_offset(ipa, ENDP_INIT_CTRL, endpoint->endpoint_id);
+
mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
val = ioread32(ipa->reg_virt + offset);
{
u32 mask = BIT(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
- u32 offset;
u32 val;
WARN_ON(!(mask & ipa->available));
- offset = ipa_reg_state_aggr_active_offset(ipa->version);
- val = ioread32(ipa->reg_virt + offset);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(ipa, STATE_AGGR_ACTIVE));
return !!(val & mask);
}
WARN_ON(!(mask & ipa->available));
- iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
+ iowrite32(mask, ipa->reg_virt + ipa_reg_offset(ipa, AGGR_FORCE_CLOSE));
}
/**
if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
continue;
- offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
+ offset = ipa_reg_n_offset(ipa, ENDP_STATUS, endpoint_id);
/* Value written is 0, and all bits are updated. That
* means status is disabled on the endpoint, and as a
static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
enum ipa_cs_offload_en enabled;
u32 val = 0;
+ u32 offset;
+
+ offset = ipa_reg_n_offset(ipa, ENDP_INIT_CFG, endpoint->endpoint_id);
/* FRAG_OFFLOAD_EN is 0 */
if (endpoint->config.checksum) {
- enum ipa_version version = endpoint->ipa->version;
+ enum ipa_version version = ipa->version;
if (endpoint->toward_ipa) {
u32 off;
val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK);
/* CS_GEN_QMB_MASTER_SEL is 0 */
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + offset);
}
static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
{
+ struct ipa *ipa = endpoint->ipa;
u32 offset;
u32 val;
if (!endpoint->toward_ipa)
return;
- offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id);
+ offset = ipa_reg_n_offset(ipa, ENDP_INIT_NAT, endpoint->endpoint_id);
+
val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK);
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + offset);
}
static u32
*/
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
u32 val = 0;
+ u32 offset;
+
+ offset = ipa_reg_n_offset(ipa, ENDP_INIT_HDR, endpoint->endpoint_id);
if (endpoint->config.qmap) {
enum ipa_version version = ipa->version;
/* Define how to fill fields in a received QMAP header */
if (!endpoint->toward_ipa) {
- u32 off; /* Field offset within header */
+ u32 off; /* Field offset within header */
/* Where IPA will write the metadata value */
off = offsetof(struct rmnet_map_header, mux_id);
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
u32 pad_align = endpoint->config.rx.pad_align;
struct ipa *ipa = endpoint->ipa;
u32 val = 0;
+ u32 offset;
+
+ offset = ipa_reg_n_offset(ipa, ENDP_INIT_HDR_EXT,
+ endpoint->endpoint_id);
if (endpoint->config.qmap) {
/* We have a header, so we must specify its endianness */
if (ipa->version >= IPA_VERSION_4_5) {
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
if (endpoint->config.qmap && !endpoint->toward_ipa) {
- u32 off;
+ u32 off; /* Field offset within header */
off = offsetof(struct rmnet_map_header, pkt_len);
off >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
}
}
+
iowrite32(val, ipa->reg_virt + offset);
}
static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
u32 val = 0;
u32 offset;
if (endpoint->toward_ipa)
return; /* Register not valid for TX endpoints */
- offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
+ offset = ipa_reg_n_offset(ipa, ENDP_INIT_HDR_METADATA_MASK,
+ endpoint_id);
/* Note that HDR_ENDIANNESS indicates big endian header fields */
if (endpoint->config.qmap)
val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + offset);
}
static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
+ u32 offset;
u32 val;
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
+ offset = ipa_reg_n_offset(ipa, ENDP_INIT_MODE, endpoint->endpoint_id);
+
if (endpoint->config.dma_mode) {
enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
u32 dma_endpoint_id;
- dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;
+ dma_endpoint_id = ipa->name_map[name]->endpoint_id;
val = u32_encode_bits(IPA_DMA, MODE_FMASK);
val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
}
/* All other bits unspecified (and 0) */
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + offset);
}
/* Encoded values for AGGR endpoint register fields */
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
- enum ipa_version version = endpoint->ipa->version;
+ struct ipa *ipa = endpoint->ipa;
u32 val = 0;
+ u32 offset;
+
+ offset = ipa_reg_n_offset(ipa, ENDP_INIT_AGGR, endpoint->endpoint_id);
if (endpoint->config.aggregation) {
if (!endpoint->toward_ipa) {
const struct ipa_endpoint_rx *rx_config;
+ enum ipa_version version = ipa->version;
u32 buffer_size;
bool close_eof;
u32 limit;
/* other fields ignored */
}
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + offset);
}
/* The head-of-line blocking timer is defined as a tick count. For
u32 val;
/* This should only be changed when HOL_BLOCK_EN is disabled */
- offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
val = hol_block_timer_encode(ipa, microseconds);
+
+ offset = ipa_reg_n_offset(ipa, ENDP_INIT_HOL_BLOCK_TIMER, endpoint_id);
iowrite32(val, ipa->reg_virt + offset);
}
ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
{
u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
u32 offset;
u32 val;
+ offset = ipa_reg_n_offset(ipa, ENDP_INIT_HOL_BLOCK_EN, endpoint_id);
+
val = enable ? HOL_BLOCK_EN_FMASK : 0;
- offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+
+ iowrite32(val, ipa->reg_virt + offset);
+
/* When enabling, the register must be written twice for IPA v4.5+ */
- if (enable && endpoint->ipa->version >= IPA_VERSION_4_5)
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ if (enable && ipa->version >= IPA_VERSION_4_5)
+ iowrite32(val, ipa->reg_virt + offset);
}
/* Assumes HOL_BLOCK is in disabled state */
static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
u32 val = 0;
+ u32 offset;
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
+ offset = ipa_reg_n_offset(ipa, ENDP_INIT_DEAGGR, endpoint->endpoint_id);
+
/* DEAGGR_HDR_LEN is 0 */
/* PACKET_OFFSET_VALID is 0 */
/* PACKET_OFFSET_LOCATION is ignored (not valid) */
/* MAX_PACKET_LEN is 0 (not enforced) */
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + offset);
}
static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
+ u32 offset;
u32 val;
+ offset = ipa_reg_n_offset(ipa, ENDP_INIT_RSRC_GRP,
+ endpoint->endpoint_id);
+
val = rsrc_grp_encoded(ipa->version, endpoint->config.resource_group);
+
iowrite32(val, ipa->reg_virt + offset);
}
static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
u32 val = 0;
+ u32 offset;
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
+ offset = ipa_reg_n_offset(ipa, ENDP_INIT_SEQ, endpoint->endpoint_id);
+
/* Low-order byte configures primary packet processing */
val |= u32_encode_bits(endpoint->config.tx.seq_type, SEQ_TYPE_FMASK);
/* Second byte (if supported) configures replicated packet processing */
- if (endpoint->ipa->version < IPA_VERSION_4_5)
+ if (ipa->version < IPA_VERSION_4_5)
val |= u32_encode_bits(endpoint->config.tx.seq_rep_type,
SEQ_REP_TYPE_FMASK);
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + offset);
}
/**
u32 val = 0;
u32 offset;
- offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
+ offset = ipa_reg_n_offset(ipa, ENDP_STATUS, endpoint_id);
if (endpoint->config.status_enable) {
val |= STATUS_EN_FMASK;
val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
val |= ROUTE_DEF_RETAIN_HDR_FMASK;
- iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(ipa, ROUTE));
}
void ipa_endpoint_default_route_clear(struct ipa *ipa)
/* Find out about the endpoints supplied by the hardware, and ensure
* the highest one doesn't exceed the number we support.
*/
- val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(ipa, FLAVOR_0));
/* Our RX is an IPA producer */
rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
/* For microcontroller interrupts, clear the interrupt right away,
* "to avoid clearing unhandled interrupts."
*/
- offset = ipa_reg_irq_clr_offset(ipa->version);
+ offset = ipa_reg_offset(ipa, IPA_IRQ_CLR);
if (uc_irq)
iowrite32(mask, ipa->reg_virt + offset);
* including conditions whose interrupt is not enabled. Handle
* only the enabled ones.
*/
- offset = ipa_reg_irq_stts_offset(ipa->version);
+ offset = ipa_reg_offset(ipa, IPA_IRQ_STTS);
pending = ioread32(ipa->reg_virt + offset);
while ((mask = pending & enabled)) {
do {
if (pending) {
dev_dbg(dev, "clearing disabled IPA interrupts 0x%08x\n",
pending);
- offset = ipa_reg_irq_clr_offset(ipa->version);
+ offset = ipa_reg_offset(ipa, IPA_IRQ_CLR);
iowrite32(pending, ipa->reg_virt + offset);
}
out_power_put:
if (ipa->version == IPA_VERSION_3_0)
return;
- offset = ipa_reg_irq_suspend_en_offset(ipa->version);
+ offset = ipa_reg_offset(ipa, IRQ_SUSPEND_EN);
val = ioread32(ipa->reg_virt + offset);
if (enable)
val |= mask;
void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
{
struct ipa *ipa = interrupt->ipa;
- u32 offset;
u32 val;
- offset = ipa_reg_irq_suspend_info_offset(ipa->version);
- val = ioread32(ipa->reg_virt + offset);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(ipa, IRQ_SUSPEND_INFO));
/* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
if (ipa->version == IPA_VERSION_3_0)
return;
- offset = ipa_reg_irq_suspend_clr_offset(ipa->version);
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(ipa, IRQ_SUSPEND_CLR));
}
/* Simulate arrival of an IPA TX_SUSPEND interrupt */
/* Update the IPA interrupt mask to enable it */
interrupt->enabled |= BIT(ipa_irq);
- offset = ipa_reg_irq_en_offset(ipa->version);
+ offset = ipa_reg_offset(ipa, IPA_IRQ_EN);
iowrite32(interrupt->enabled, ipa->reg_virt + offset);
}
/* Update the IPA interrupt mask to disable it */
interrupt->enabled &= ~BIT(ipa_irq);
- offset = ipa_reg_irq_en_offset(ipa->version);
+ offset = ipa_reg_offset(ipa, IPA_IRQ_EN);
iowrite32(interrupt->enabled, ipa->reg_virt + offset);
interrupt->handler[ipa_irq] = NULL;
struct device *dev = &ipa->pdev->dev;
struct ipa_interrupt *interrupt;
unsigned int irq;
- u32 offset;
int ret;
ret = platform_get_irq_byname(ipa->pdev, "ipa");
interrupt->irq = irq;
/* Start with all IPA interrupts disabled */
- offset = ipa_reg_irq_en_offset(ipa->version);
- iowrite32(0, ipa->reg_virt + offset);
+ iowrite32(0, ipa->reg_virt + ipa_reg_offset(ipa, IPA_IRQ_EN));
ret = request_threaded_irq(irq, NULL, ipa_isr_thread, IRQF_ONESHOT,
"ipa", interrupt);
return;
val = data->backward_compat;
- iowrite32(val, ipa->reg_virt + IPA_REG_BCR_OFFSET);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(ipa, IPA_BCR));
}
static void ipa_hardware_config_tx(struct ipa *ipa)
{
enum ipa_version version = ipa->version;
+ u32 offset;
u32 val;
if (version <= IPA_VERSION_4_0 || version >= IPA_VERSION_4_5)
return;
/* Disable PA mask to allow HOLB drop */
- val = ioread32(ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
+ offset = ipa_reg_offset(ipa, IPA_TX_CFG);
+ val = ioread32(ipa->reg_virt + offset);
val &= ~PA_MASK_EN_FMASK;
- iowrite32(val, ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
+ iowrite32(val, ipa->reg_virt + offset);
}
static void ipa_hardware_config_clkon(struct ipa *ipa)
return;
/* Implement some hardware workarounds */
+
if (version >= IPA_VERSION_4_0) {
/* Enable open global clocks in the CLKON configuration */
val = GLOBAL_FMASK | GLOBAL_2X_CLK_FMASK;
return;
}
- iowrite32(val, ipa->reg_virt + IPA_REG_CLKON_CFG_OFFSET);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(ipa, CLKON_CFG));
}
/* Configure bus access behavior for IPA components */
static void ipa_hardware_config_comp(struct ipa *ipa)
{
+ u32 offset;
u32 val;
/* Nothing to configure prior to IPA v4.0 */
if (ipa->version < IPA_VERSION_4_0)
return;
- val = ioread32(ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);
+ offset = ipa_reg_offset(ipa, COMP_CFG);
+ val = ioread32(ipa->reg_virt + offset);
if (ipa->version == IPA_VERSION_4_0) {
val &= ~IPA_QMB_SELECT_CONS_EN_FMASK;
val |= GSI_MULTI_INORDER_RD_DIS_FMASK;
val |= GSI_MULTI_INORDER_WR_DIS_FMASK;
- iowrite32(val, ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);
+ iowrite32(val, ipa->reg_virt + offset);
}
/* Configure DDR and (possibly) PCIe max read/write QSB values */
if (data->qsb_count > 1)
val |= u32_encode_bits(data1->max_writes,
GEN_QMB_1_MAX_WRITES_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_WRITES_OFFSET);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(ipa, QSB_MAX_WRITES));
/* Max outstanding read accesses for QSB masters */
val = u32_encode_bits(data0->max_reads, GEN_QMB_0_MAX_READS_FMASK);
val |= u32_encode_bits(data1->max_reads_beats,
GEN_QMB_1_MAX_READS_BEATS_FMASK);
}
- iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_READS_OFFSET);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(ipa, QSB_MAX_READS));
}
/* The internal inactivity timer clock is used for the aggregation timer */
*/
static void ipa_qtime_config(struct ipa *ipa)
{
+ u32 offset;
u32 val;
/* Timer clock divider must be disabled when we change the rate */
- iowrite32(0, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);
+ offset = ipa_reg_offset(ipa, TIMERS_XO_CLK_DIV_CFG);
+ iowrite32(0, ipa->reg_virt + offset);
/* Set DPL time stamp resolution to use Qtime (instead of 1 msec) */
val = u32_encode_bits(DPL_TIMESTAMP_SHIFT, DPL_TIMESTAMP_LSB_FMASK);
/* Configure tag and NAT Qtime timestamp resolution as well */
val |= u32_encode_bits(TAG_TIMESTAMP_SHIFT, TAG_TIMESTAMP_LSB_FMASK);
val |= u32_encode_bits(NAT_TIMESTAMP_SHIFT, NAT_TIMESTAMP_LSB_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_QTIME_TIMESTAMP_CFG_OFFSET);
+
+ offset = ipa_reg_offset(ipa, QTIME_TIMESTAMP_CFG);
+ iowrite32(val, ipa->reg_virt + offset);
/* Set granularity of pulse generators used for other timers */
val = u32_encode_bits(IPA_GRAN_100_US, GRAN_0_FMASK);
val |= u32_encode_bits(IPA_GRAN_1_MS, GRAN_1_FMASK);
val |= u32_encode_bits(IPA_GRAN_1_MS, GRAN_2_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_PULSE_GRAN_CFG_OFFSET);
+
+ offset = ipa_reg_offset(ipa, TIMERS_PULSE_GRAN_CFG);
+ iowrite32(val, ipa->reg_virt + offset);
/* Actual divider is 1 more than value supplied here */
val = u32_encode_bits(IPA_XO_CLOCK_DIVIDER - 1, DIV_VALUE_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);
+
+ offset = ipa_reg_offset(ipa, TIMERS_XO_CLK_DIV_CFG);
+ iowrite32(val, ipa->reg_virt + offset);
/* Divider value is set; re-enable the common timer clock divider */
val |= u32_encode_bits(1, DIV_ENABLE_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);
+
+ iowrite32(val, ipa->reg_virt + offset);
}
/* Before IPA v4.5 timing is controlled by a counter register */
static void ipa_hardware_config_counter(struct ipa *ipa)
{
u32 granularity;
+ u32 offset;
u32 val;
granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
val = u32_encode_bits(granularity, AGGR_GRANULARITY_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET);
+ offset = ipa_reg_offset(ipa, COUNTER_CFG);
+ iowrite32(val, ipa->reg_virt + offset);
}
static void ipa_hardware_config_timing(struct ipa *ipa)
return;
/* IPA v4.2 does not support hashed tables, so disable them */
- offset = ipa_reg_filt_rout_hash_en_offset(IPA_VERSION_4_2);
+ offset = ipa_reg_offset(ipa, FILT_ROUT_HASH_EN);
iowrite32(0, ipa->reg_virt + offset);
}
if (const_non_idle_enable)
val |= CONST_NON_IDLE_ENABLE_FMASK;
- offset = ipa_reg_idle_indication_cfg_offset(ipa->version);
+ offset = ipa_reg_offset(ipa, IDLE_INDICATION_CFG);
iowrite32(val, ipa->reg_virt + offset);
}
mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
offset = ipa->mem_offset + mem->offset;
val = proc_cntxt_base_addr_encoded(ipa->version, offset);
- iowrite32(val, ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET);
+
+ offset = ipa_reg_offset(ipa, LOCAL_PKT_PROC_CNTXT);
+ iowrite32(val, ipa->reg_virt + offset);
return 0;
}
u32 i;
/* Check the advertised location and size of the shared memory area */
- val = ioread32(ipa->reg_virt + IPA_REG_SHARED_MEM_SIZE_OFFSET);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(ipa, SHARED_MEM_SIZE));
/* The fields in the register are in 8 byte units */
ipa->mem_offset = 8 * u32_get_bits(val, SHARED_MEM_BADDR_FMASK);
return valid;
}
+/* Assumes any endpoint transfer direction has been validated by the
+ * caller; a returned offset of 0 indicates the register is not valid.
+ */
+u32 __ipa_reg_offset(struct ipa *ipa, enum ipa_reg_id reg_id, u32 n)
+{
+ enum ipa_version version;
+
+ if (!ipa_reg_valid(ipa, reg_id))
+ return 0;
+
+ version = ipa->version;
+
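+ /* An n of zero is passed for registers that take no parameter */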
+ switch (reg_id) {
+ case COMP_CFG:
+ return IPA_REG_COMP_CFG_OFFSET;
+ case CLKON_CFG:
+ return IPA_REG_CLKON_CFG_OFFSET;
+ case ROUTE:
+ return IPA_REG_ROUTE_OFFSET;
+ case SHARED_MEM_SIZE:
+ return IPA_REG_SHARED_MEM_SIZE_OFFSET;
+ case QSB_MAX_WRITES:
+ return IPA_REG_QSB_MAX_WRITES_OFFSET;
+ case QSB_MAX_READS:
+ return IPA_REG_QSB_MAX_READS_OFFSET;
+ case FILT_ROUT_HASH_EN:
+ return ipa_reg_filt_rout_hash_en_offset(version);
+ case FILT_ROUT_HASH_FLUSH:
+ return ipa_reg_filt_rout_hash_flush_offset(version);
+ case STATE_AGGR_ACTIVE:
+ return ipa_reg_state_aggr_active_offset(version);
+ case IPA_BCR:
+ return IPA_REG_BCR_OFFSET;
+ case LOCAL_PKT_PROC_CNTXT:
+ return IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET;
+ case AGGR_FORCE_CLOSE:
+ return IPA_REG_AGGR_FORCE_CLOSE_OFFSET;
+ case COUNTER_CFG:
+ return IPA_REG_COUNTER_CFG_OFFSET;
+ case IPA_TX_CFG:
+ return IPA_REG_TX_CFG_OFFSET;
+ case FLAVOR_0:
+ return IPA_REG_FLAVOR_0_OFFSET;
+ case IDLE_INDICATION_CFG:
+ return ipa_reg_idle_indication_cfg_offset(version);
+ case QTIME_TIMESTAMP_CFG:
+ return IPA_REG_QTIME_TIMESTAMP_CFG_OFFSET;
+ case TIMERS_XO_CLK_DIV_CFG:
+ return IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET;
+ case TIMERS_PULSE_GRAN_CFG:
+ return IPA_REG_TIMERS_PULSE_GRAN_CFG_OFFSET;
+ case SRC_RSRC_GRP_01_RSRC_TYPE:
+ return IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(n);
+ case SRC_RSRC_GRP_23_RSRC_TYPE:
+ return IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(n);
+ case SRC_RSRC_GRP_45_RSRC_TYPE:
+ return IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(n);
+ case SRC_RSRC_GRP_67_RSRC_TYPE:
+ return IPA_REG_SRC_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(n);
+ case DST_RSRC_GRP_01_RSRC_TYPE:
+ return IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(n);
+ case DST_RSRC_GRP_23_RSRC_TYPE:
+ return IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(n);
+ case DST_RSRC_GRP_45_RSRC_TYPE:
+ return IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(n);
+ case DST_RSRC_GRP_67_RSRC_TYPE:
+ return IPA_REG_DST_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(n);
+ case ENDP_INIT_CTRL:
+ return IPA_REG_ENDP_INIT_CTRL_N_OFFSET(n);
+ case ENDP_INIT_CFG:
+ return IPA_REG_ENDP_INIT_CFG_N_OFFSET(n);
+ case ENDP_INIT_NAT:
+ return IPA_REG_ENDP_INIT_NAT_N_OFFSET(n);
+ case ENDP_INIT_HDR:
+ return IPA_REG_ENDP_INIT_HDR_N_OFFSET(n);
+ case ENDP_INIT_HDR_EXT:
+ return IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(n);
+ case ENDP_INIT_HDR_METADATA_MASK:
+ return IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(n);
+ case ENDP_INIT_MODE:
+ return IPA_REG_ENDP_INIT_MODE_N_OFFSET(n);
+ case ENDP_INIT_AGGR:
+ return IPA_REG_ENDP_INIT_AGGR_N_OFFSET(n);
+ case ENDP_INIT_HOL_BLOCK_EN:
+ return IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(n);
+ case ENDP_INIT_HOL_BLOCK_TIMER:
+ return IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(n);
+ case ENDP_INIT_DEAGGR:
+ return IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(n);
+ case ENDP_INIT_RSRC_GRP:
+ return IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(n);
+ case ENDP_INIT_SEQ:
+ return IPA_REG_ENDP_INIT_SEQ_N_OFFSET(n);
+ case ENDP_STATUS:
+ return IPA_REG_ENDP_STATUS_N_OFFSET(n);
+ case ENDP_FILTER_ROUTER_HSH_CFG:
+ return IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(n);
+ /* The IRQ registers below are only used for GSI_EE_AP */
+ case IPA_IRQ_STTS:
+ return ipa_reg_irq_stts_offset(version);
+ case IPA_IRQ_EN:
+ return ipa_reg_irq_en_offset(version);
+ case IPA_IRQ_CLR:
+ return ipa_reg_irq_clr_offset(version);
+ case IPA_IRQ_UC:
+ return ipa_reg_irq_uc_offset(version);
+ case IRQ_SUSPEND_INFO:
+ return ipa_reg_irq_suspend_info_offset(version);
+ case IRQ_SUSPEND_EN:
+ return ipa_reg_irq_suspend_en_offset(version);
+ case IRQ_SUSPEND_CLR:
+ return ipa_reg_irq_suspend_clr_offset(version);
+ default:
+ WARN(true, "bad register id %u\n", reg_id);
+ return 0;
+ }
+}
+
int ipa_reg_init(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
struct resource *res;
- (void)ipa_reg_valid; /* Avoid a warning */
-
/* Setup IPA register memory */
res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
"ipa-reg");
return ipa_reg_irq_suspend_clr_ee_n_offset(version, GSI_EE_AP);
}
+u32 __ipa_reg_offset(struct ipa *ipa, enum ipa_reg_id reg_id, u32 n);
+
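+/* ipa_reg_offset() returns the offset of a register that takes no
+ * parameter; ipa_reg_n_offset() is used when the offset depends on
+ * an index n (for example, an endpoint ID).
+ */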
+static inline u32 ipa_reg_offset(struct ipa *ipa, enum ipa_reg_id reg_id)
+{
+ return __ipa_reg_offset(ipa, reg_id, 0);
+}
+
+static inline u32
+ipa_reg_n_offset(struct ipa *ipa, enum ipa_reg_id reg_id, u32 n)
+{
+ return __ipa_reg_offset(ipa, reg_id, n);
+}
+
int ipa_reg_init(struct ipa *ipa);
void ipa_reg_exit(struct ipa *ipa);
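For reference, both wrappers reduce to __ipa_reg_offset(), which validates
the register ID and returns 0 if the register isn't supported. The sketch
below mirrors the call pattern used in the conversions above; the two helper
functions are hypothetical, shown only to illustrate the parameterized and
non-parameterized forms:

    /* Read a register that takes no parameter */
    static u32 ipa_example_read_flavor(struct ipa *ipa)
    {
            return ioread32(ipa->reg_virt + ipa_reg_offset(ipa, FLAVOR_0));
    }

    /* Write a per-endpoint (parameterized) register */
    static void ipa_example_disable_status(struct ipa *ipa, u32 endpoint_id)
    {
            u32 offset = ipa_reg_n_offset(ipa, ENDP_STATUS, endpoint_id);

            iowrite32(0, ipa->reg_virt + offset);
    }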
resource = &data->resource_src[resource_type];
- offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource_type);
+ offset = ipa_reg_n_offset(ipa, SRC_RSRC_GRP_01_RSRC_TYPE,
+ resource_type);
ylimits = group_count == 1 ? NULL : &resource->limits[1];
ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits);
if (group_count < 3)
return;
- offset = IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource_type);
+ offset = ipa_reg_n_offset(ipa, SRC_RSRC_GRP_23_RSRC_TYPE,
+ resource_type);
ylimits = group_count == 3 ? NULL : &resource->limits[3];
ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits);
if (group_count < 5)
return;
- offset = IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource_type);
+ offset = ipa_reg_n_offset(ipa, SRC_RSRC_GRP_45_RSRC_TYPE,
+ resource_type);
ylimits = group_count == 5 ? NULL : &resource->limits[5];
ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits);
if (group_count < 7)
return;
- offset = IPA_REG_SRC_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(resource_type);
+ offset = ipa_reg_n_offset(ipa, SRC_RSRC_GRP_67_RSRC_TYPE,
+ resource_type);
ylimits = group_count == 7 ? NULL : &resource->limits[7];
ipa_resource_config_common(ipa, offset, &resource->limits[6], ylimits);
}
resource = &data->resource_dst[resource_type];
- offset = IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource_type);
+ offset = ipa_reg_n_offset(ipa, DST_RSRC_GRP_01_RSRC_TYPE,
+ resource_type);
ylimits = group_count == 1 ? NULL : &resource->limits[1];
ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits);
if (group_count < 3)
return;
- offset = IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource_type);
+ offset = ipa_reg_n_offset(ipa, DST_RSRC_GRP_23_RSRC_TYPE,
+ resource_type);
ylimits = group_count == 3 ? NULL : &resource->limits[3];
ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits);
if (group_count < 5)
return;
- offset = IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource_type);
+ offset = ipa_reg_n_offset(ipa, DST_RSRC_GRP_45_RSRC_TYPE,
+ resource_type);
ylimits = group_count == 5 ? NULL : &resource->limits[5];
ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits);
if (group_count < 7)
return;
- offset = IPA_REG_DST_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(resource_type);
+ offset = ipa_reg_n_offset(ipa, DST_RSRC_GRP_67_RSRC_TYPE,
+ resource_type);
ylimits = group_count == 7 ? NULL : &resource->limits[7];
ipa_resource_config_common(ipa, offset, &resource->limits[6], ylimits);
}
int ipa_table_hash_flush(struct ipa *ipa)
{
- u32 offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
+ u32 offset = ipa_reg_offset(ipa, FILT_ROUT_HASH_FLUSH);
struct gsi_trans *trans;
u32 val;
static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
u32 offset;
u32 val;
- offset = IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(endpoint_id);
+ offset = ipa_reg_n_offset(ipa, ENDP_FILTER_ROUTER_HSH_CFG, endpoint_id);
- val = ioread32(endpoint->ipa->reg_virt + offset);
+
+ val = ioread32(ipa->reg_virt + offset);
*/
static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
{
- u32 offset = IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(route_id);
+ u32 offset;
u32 val;
+ offset = ipa_reg_n_offset(ipa, ENDP_FILTER_ROUTER_HSH_CFG, route_id);
+
val = ioread32(ipa->reg_virt + offset);
/* Zero all route-related fields, preserving the rest */
static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
- u32 offset;
u32 val;
/* Fill in the command data */
/* Use an interrupt to tell the microcontroller the command is ready */
val = u32_encode_bits(1, UC_INTR_FMASK);
- offset = ipa_reg_irq_uc_offset(ipa->version);
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(ipa, IPA_IRQ_UC));
}
/* Tell the microcontroller the AP is shutting down */