{
struct ipa *ipa = endpoint->ipa;
- if (!endpoint->config->aggregation)
+ if (!endpoint->config.aggregation)
return;
/* Nothing to do if the endpoint doesn't have aggregation open */
if (!ipa_endpoint_aggr_active(endpoint))
return;
u32 val = 0;
/* FRAG_OFFLOAD_EN is 0 */
- if (endpoint->config->checksum) {
+ if (endpoint->config.checksum) {
enum ipa_version version = endpoint->ipa->version;
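/* For a TX endpoint the offset of the checksum header is also
 * programmed, in 4-byte units */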
if (endpoint->toward_ipa) {
u32 header_size = sizeof(struct rmnet_map_header);
/* Without checksum offload, we just have the MAP header */
- if (!endpoint->config->checksum)
+ if (!endpoint->config.checksum)
return header_size;
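/* Before IPA v4.5 a checksum header is used in place of the QMAP
 * header on TX; from v4.5 on, a v5 checksum header is used in
 * addition to it */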
if (version < IPA_VERSION_4_5) {
struct ipa *ipa = endpoint->ipa;
u32 val = 0;
- if (endpoint->config->qmap) {
+ if (endpoint->config.qmap) {
enum ipa_version version = ipa->version;
size_t header_size;
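/* Encode the QMAP header size; for an RX endpoint the offsets of
 * the mux_id metadata and the packet length field within the
 * header are programmed as well */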
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
- u32 pad_align = endpoint->config->rx.pad_align;
+ u32 pad_align = endpoint->config.rx.pad_align;
struct ipa *ipa = endpoint->ipa;
u32 val = 0;
- if (endpoint->config->qmap) {
+ if (endpoint->config.qmap) {
/* We have a header, so we must specify its endianness */
val |= HDR_ENDIANNESS_FMASK; /* big endian */
/* IPA v4.5 adds some most-significant bits to a few fields,
 * two of which are defined in the HDR (not HDR_EXT) register.
 */
if (ipa->version >= IPA_VERSION_4_5) {
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
- if (endpoint->config->qmap && !endpoint->toward_ipa) {
+ if (endpoint->config.qmap && !endpoint->toward_ipa) {
u32 offset;
offset = offsetof(struct rmnet_map_header, pkt_len);
offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
/* Note that HDR_ENDIANNESS indicates big endian header fields */
- if (endpoint->config->qmap)
+ if (endpoint->config.qmap)
val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
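/* IPA_ENDPOINT_QMAP_METADATA_MASK retains only the mux_id
 * portion of the QMAP metadata */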
iowrite32(val, endpoint->ipa->reg_virt + offset);
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
- if (endpoint->config->dma_mode) {
- enum ipa_endpoint_name name = endpoint->config->dma_endpoint;
+ if (endpoint->config.dma_mode) {
+ enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
u32 dma_endpoint_id;
dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;
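/* In DMA mode, packets arriving on this endpoint are passed
 * through unmodified to the destination endpoint named above */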
enum ipa_version version = endpoint->ipa->version;
u32 val = 0;
- if (endpoint->config->aggregation) {
+ if (endpoint->config.aggregation) {
if (!endpoint->toward_ipa) {
const struct ipa_endpoint_rx *rx_config;
u32 buffer_size;
bool close_eof;
u32 limit;
- rx_config = &endpoint->config->rx;
+ rx_config = &endpoint->config.rx;
val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
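/* Generic aggregation closes on a byte limit derived from the RX
 * buffer size, and optionally on a time limit or end-of-frame */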
struct ipa *ipa = endpoint->ipa;
u32 val;
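/* Assign the endpoint to a resource group; the encoding of the
 * group number varies by IPA version */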
- val = rsrc_grp_encoded(ipa->version, endpoint->config->resource_group);
+ val = rsrc_grp_encoded(ipa->version, endpoint->config.resource_group);
iowrite32(val, ipa->reg_virt + offset);
}
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
/* Low-order byte configures primary packet processing */
- val |= u32_encode_bits(endpoint->config->tx.seq_type, SEQ_TYPE_FMASK);
+ val |= u32_encode_bits(endpoint->config.tx.seq_type, SEQ_TYPE_FMASK);
/* Second byte configures replicated packet processing */
- val |= u32_encode_bits(endpoint->config->tx.seq_rep_type,
+ val |= u32_encode_bits(endpoint->config.tx.seq_rep_type,
SEQ_REP_TYPE_FMASK);
iowrite32(val, endpoint->ipa->reg_virt + offset);
offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
- if (endpoint->config->status_enable) {
+ if (endpoint->config.status_enable) {
val |= STATUS_EN_FMASK;
if (endpoint->toward_ipa) {
enum ipa_endpoint_name name;
u32 status_endpoint_id;
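/* Status for a TX endpoint is delivered on another (RX) endpoint */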
- name = endpoint->config->tx.status_endpoint;
+ name = endpoint->config.tx.status_endpoint;
status_endpoint_id = ipa->name_map[name]->endpoint_id;
val |= u32_encode_bits(status_endpoint_id, STATUS_ENDP_FMASK);
u32 len;
int ret;
- buffer_size = endpoint->config->rx.buffer_size;
+ buffer_size = endpoint->config.rx.buffer_size;
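/* Receive buffers are page allocations, with the order derived
 * from the configured buffer size */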
page = dev_alloc_pages(get_order(buffer_size));
if (!page)
return -ENOMEM;
static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
struct page *page, u32 len)
{
- u32 buffer_size = endpoint->config->rx.buffer_size;
+ u32 buffer_size = endpoint->config.rx.buffer_size;
struct sk_buff *skb;
/* Nothing to do if there's no netdev */
static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
struct page *page, u32 total_len)
{
- u32 buffer_size = endpoint->config->rx.buffer_size;
+ u32 buffer_size = endpoint->config.rx.buffer_size;
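/* Received data starts NET_SKB_PAD bytes into the buffer; it
 * consists of status elements, each preceding its packet data */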
void *data = page_address(page) + NET_SKB_PAD;
u32 unused = buffer_size - total_len;
u32 resid = total_len;
/* Compute the amount of buffer space consumed by the packet,
 * including the status element.  If the hardware is configured
 * to pad packet data to an aligned boundary, account for that.
 * And if checksum offload is enabled a trailer containing
 * computed checksum information will be appended.
 */
- align = endpoint->config->rx.pad_align ? : 1;
+ align = endpoint->config.rx.pad_align ? : 1;
len = le16_to_cpu(status->pkt_len);
len = sizeof(*status) + ALIGN(len, align);
- if (endpoint->config->checksum)
+ if (endpoint->config.checksum)
len += sizeof(struct rmnet_map_dl_csum_trailer);
if (!ipa_endpoint_status_drop(endpoint, status)) {
/* Parse or build a socket buffer using the actual received length */
page = trans->data;
- if (endpoint->config->status_enable)
+ if (endpoint->config.status_enable)
ipa_endpoint_status_parse(endpoint, page, trans->len);
else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
trans->data = NULL; /* Pages have been consumed */
struct page *page = trans->data;
if (page) {
- u32 buffer_size = endpoint->config->rx.buffer_size;
+ u32 buffer_size = endpoint->config.rx.buffer_size;
__free_pages(page, get_order(buffer_size));
}
/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
 * is active, we need to handle things specially to recover.
 * All other cases just need to reset the underlying GSI channel.
 */
special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
- endpoint->config->aggregation;
+ endpoint->config.aggregation;
if (special && ipa_endpoint_aggr_active(endpoint))
ret = ipa_endpoint_reset_rx_aggr(endpoint);
else
endpoint->channel_id = data->channel_id;
endpoint->endpoint_id = data->endpoint_id;
endpoint->toward_ipa = data->toward_ipa;
- endpoint->config = &data->endpoint.config;
+ endpoint->config = data->endpoint.config;
ipa->initialized |= BIT(endpoint->endpoint_id);
}
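/* For context (not part of this excerpt): the change above from
 * "config->" to "config." reflects the ipa_endpoint structure's
 * config field becoming an embedded copy rather than a pointer,
 * roughly:
 *
 *	struct ipa_endpoint {
 *		...
 *		const struct ipa_endpoint_config *config;	// before
 *		struct ipa_endpoint_config config;		// after
 *		...
 *	};
 *
 * The assignment in the final hunk then copies the default
 * configuration instead of saving a pointer to it, presumably so an
 * endpoint's configuration can later be modified at runtime without
 * touching the const configuration data.
 */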