if (mem_size > OCTEON_DDR1_SIZE) {
__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, OCTEON_DDR1_SIZE, 0);
__cvmx_bootmem_phy_free(OCTEON_DDR2_BASE,
- mem_size - OCTEON_DDR1_SIZE, 0);
+ mem_size - OCTEON_DDR2_BASE, 0);
} else {
__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, mem_size, 0);
}
addr += sizeof(struct cvmx_bootmem_named_block_desc);
}
- // test-only: DEBUG ifdef???
cvmx_bootmem_phy_list_print();
return 1;
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020-2022 Marvell International Ltd.
*
* Helper Functions for the Configuration Framework
*/
static int cvmx_cfg_max_pko_engines; /* # of PKO DMA engines allocated */
static int cvmx_pko_queue_alloc(u64 port, int count);
static void cvmx_init_port_cfg(void);
-static const int dbg;
int __cvmx_helper_cfg_pknd(int xiface, int index)
{
return cvmx_cfg_max_pko_engines;
}
-int cvmx_helper_cfg_opt_set(cvmx_helper_cfg_option_t opt, uint64_t val)
-{
- if (opt >= CVMX_HELPER_CFG_OPT_MAX)
- return -1;
-
- cvmx_cfg_opts[opt] = val;
-
- return 0;
-}
-
uint64_t cvmx_helper_cfg_opt_get(cvmx_helper_cfg_option_t opt)
{
if (opt >= CVMX_HELPER_CFG_OPT_MAX)
return 0;
}
-int cvmx_helper_pko_queue_config_get(int node, cvmx_user_static_pko_queue_config_t *cfg)
-{
- *cfg = __cvmx_pko_queue_static_config[node];
- return 0;
-}
-
-int cvmx_helper_pko_queue_config_set(int node, cvmx_user_static_pko_queue_config_t *cfg)
-{
- __cvmx_pko_queue_static_config[node] = *cfg;
- return 0;
-}
-
static int queue_range_init;
int init_cvmx_pko_que_range(void)
return 0;
}
-/*
- * return the queues for "port"
- *
- * @param port the port for which the queues are returned
- *
- * Return: 0 on success
- * -1 on failure
- */
-int cvmx_pko_queue_free(uint64_t port)
-{
- int ret_val = -1;
-
- init_cvmx_pko_que_range();
- if (port >= CVMX_HELPER_CFG_MAX_PKO_QUEUES) {
- debug("ERROR: %s port=%d > %d", __func__, (int)port,
- CVMX_HELPER_CFG_MAX_PKO_QUEUES);
- return -1;
- }
-
- ret_val = cvmx_free_global_resource_range_with_base(
- CVMX_GR_TAG_PKO_QUEUES, cvmx_pko_queue_table[port].ccppp_queue_base,
- cvmx_pko_queue_table[port].ccppp_num_queues);
- if (ret_val != 0)
- return ret_val;
-
- cvmx_pko_queue_table[port].ccppp_num_queues = 0;
- cvmx_pko_queue_table[port].ccppp_queue_base = CVMX_HELPER_CFG_INVALID_VALUE;
- ret_val = 0;
- return ret_val;
-}
-
-void cvmx_pko_queue_free_all(void)
-{
- int i;
-
- for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++)
- if (cvmx_pko_queue_table[i].ccppp_queue_base !=
- CVMX_HELPER_CFG_INVALID_VALUE)
- cvmx_pko_queue_free(i);
-}
-
-void cvmx_pko_queue_show(void)
-{
- int i;
-
- cvmx_show_global_resource_range(CVMX_GR_TAG_PKO_QUEUES);
- for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++)
- if (cvmx_pko_queue_table[i].ccppp_queue_base !=
- CVMX_HELPER_CFG_INVALID_VALUE)
- debug("port=%d que_base=%d que_num=%d\n", i,
- (int)cvmx_pko_queue_table[i].ccppp_queue_base,
- (int)cvmx_pko_queue_table[i].ccppp_num_queues);
-}
-
-void cvmx_helper_cfg_show_cfg(void)
-{
- int i, j;
-
- for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
- debug("%s: interface%d mode %10s nports%4d\n", __func__, i,
- cvmx_helper_interface_mode_to_string(cvmx_helper_interface_get_mode(i)),
- cvmx_helper_interface_enumerate(i));
-
- for (j = 0; j < cvmx_helper_interface_enumerate(i); j++) {
- debug("\tpknd[%i][%d]%d", i, j,
- __cvmx_helper_cfg_pknd(i, j));
- debug(" pko_port_base[%i][%d]%d", i, j,
- __cvmx_helper_cfg_pko_port_base(i, j));
- debug(" pko_port_num[%i][%d]%d\n", i, j,
- __cvmx_helper_cfg_pko_port_num(i, j));
- }
- }
-
- for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++) {
- if (__cvmx_helper_cfg_pko_queue_base(i) !=
- CVMX_HELPER_CFG_INVALID_VALUE) {
- debug("%s: pko_port%d qbase%d nqueues%d interface%d index%d\n",
- __func__, i, __cvmx_helper_cfg_pko_queue_base(i),
- __cvmx_helper_cfg_pko_queue_num(i),
- __cvmx_helper_cfg_pko_port_interface(i),
- __cvmx_helper_cfg_pko_port_index(i));
- }
- }
-}
-
/*
* initialize cvmx_cfg_pko_port_map
*/
cvmx_cfg_max_pko_engines = pko_eid;
}
-void cvmx_helper_cfg_set_jabber_and_frame_max(void)
-{
- int interface, port;
- /*Set the frame max size and jabber size to 65535. */
- const unsigned int max_frame = 65535;
-
- // FIXME: should support node argument for remote node init
- if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
- int ipd_port;
- int node = cvmx_get_node_num();
-
- for (interface = 0;
- interface < cvmx_helper_get_number_of_interfaces();
- interface++) {
- int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
- cvmx_helper_interface_mode_t imode = cvmx_helper_interface_get_mode(xiface);
- int num_ports = cvmx_helper_ports_on_interface(xiface);
-
- // FIXME: should be an easier way to determine
- // that an interface is Ethernet/BGX
- switch (imode) {
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- case CVMX_HELPER_INTERFACE_MODE_RXAUI:
- case CVMX_HELPER_INTERFACE_MODE_XLAUI:
- case CVMX_HELPER_INTERFACE_MODE_XFI:
- case CVMX_HELPER_INTERFACE_MODE_10G_KR:
- case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
- for (port = 0; port < num_ports; port++) {
- ipd_port = cvmx_helper_get_ipd_port(xiface, port);
- cvmx_pki_set_max_frm_len(ipd_port, max_frame);
- cvmx_helper_bgx_set_jabber(xiface, port, max_frame);
- }
- break;
- default:
- break;
- }
- }
- } else {
- /*Set the frame max size and jabber size to 65535. */
- for (interface = 0; interface < cvmx_helper_get_number_of_interfaces();
- interface++) {
- int xiface = cvmx_helper_node_interface_to_xiface(cvmx_get_node_num(),
- interface);
- /*
- * Set the frame max size and jabber size to 65535, as the defaults
- * are too small.
- */
- cvmx_helper_interface_mode_t imode = cvmx_helper_interface_get_mode(xiface);
- int num_ports = cvmx_helper_ports_on_interface(xiface);
-
- switch (imode) {
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- case CVMX_HELPER_INTERFACE_MODE_QSGMII:
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- case CVMX_HELPER_INTERFACE_MODE_RXAUI:
- for (port = 0; port < num_ports; port++)
- csr_wr(CVMX_GMXX_RXX_JABBER(port, interface), 65535);
- /* Set max and min value for frame check */
- cvmx_pip_set_frame_check(interface, -1);
- break;
-
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
- case CVMX_HELPER_INTERFACE_MODE_GMII:
- /* Set max and min value for frame check */
- cvmx_pip_set_frame_check(interface, -1);
- for (port = 0; port < num_ports; port++) {
- csr_wr(CVMX_GMXX_RXX_FRM_MAX(port, interface), 65535);
- csr_wr(CVMX_GMXX_RXX_JABBER(port, interface), 65535);
- }
- break;
- case CVMX_HELPER_INTERFACE_MODE_ILK:
- /* Set max and min value for frame check */
- cvmx_pip_set_frame_check(interface, -1);
- for (port = 0; port < num_ports; port++) {
- int ipd_port = cvmx_helper_get_ipd_port(interface, port);
-
- cvmx_ilk_enable_la_header(ipd_port, 0);
- }
- break;
- case CVMX_HELPER_INTERFACE_MODE_SRIO:
- /* Set max and min value for frame check */
- cvmx_pip_set_frame_check(interface, -1);
- break;
- case CVMX_HELPER_INTERFACE_MODE_AGL:
- /* Set max and min value for frame check */
- cvmx_pip_set_frame_check(interface, -1);
- csr_wr(CVMX_AGL_GMX_RXX_FRM_MAX(0), 65535);
- csr_wr(CVMX_AGL_GMX_RXX_JABBER(0), 65535);
- break;
- default:
- break;
- }
- }
- }
-}
-
-/**
- * Enable storing short packets only in the WQE
- * unless NO_WPTR is set, which already has the same effect
- */
-void cvmx_helper_cfg_store_short_packets_in_wqe(void)
-{
- int interface, port;
- cvmx_ipd_ctl_status_t ipd_ctl_status;
- unsigned int dyn_rs = 1;
-
- if (octeon_has_feature(OCTEON_FEATURE_PKI))
- return;
-
- /* NO_WPTR combines WQE with 1st MBUF, RS is redundant */
- ipd_ctl_status.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
- if (ipd_ctl_status.s.no_wptr) {
- dyn_rs = 0;
- /* Note: consider also setting 'ignrs' wtn NO_WPTR is set */
- }
-
- for (interface = 0; interface < cvmx_helper_get_number_of_interfaces(); interface++) {
- int num_ports = cvmx_helper_ports_on_interface(interface);
-
- for (port = 0; port < num_ports; port++) {
- cvmx_pip_port_cfg_t port_cfg;
- int pknd = port;
-
- if (octeon_has_feature(OCTEON_FEATURE_PKND))
- pknd = cvmx_helper_get_pknd(interface, port);
- else
- pknd = cvmx_helper_get_ipd_port(interface, port);
- port_cfg.u64 = csr_rd(CVMX_PIP_PRT_CFGX(pknd));
- port_cfg.s.dyn_rs = dyn_rs;
- csr_wr(CVMX_PIP_PRT_CFGX(pknd), port_cfg.u64);
- }
- }
-}
-
int __cvmx_helper_cfg_pko_port_interface(int pko_port)
{
return cvmx_cfg_pko_port_map[pko_port].ccppl_interface;
return ipd2pko_port_cache[ipd_y][ipd_x].ccppp_base_port;
}
-int cvmx_helper_cfg_ipd2pko_port_num(int ipd_port)
-{
- int ipd_y, ipd_x;
-
- ipd_y = IPD2PKO_CACHE_Y(ipd_port);
- ipd_x = __cvmx_helper_cfg_ipd2pko_cachex(ipd_port);
-
- return ipd2pko_port_cache[ipd_y][ipd_x].ccppp_nports;
-}
-
/**
* Return the number of queues to be assigned to this pko_port
*
rc = __cvmx_helper_parse_bgx_dt(fdt_addr);
if (!rc)
rc = __cvmx_fdt_parse_vsc7224(fdt_addr);
- if (!rc)
- rc = __cvmx_fdt_parse_avsp5410(fdt_addr);
if (!rc && octeon_has_feature(OCTEON_FEATURE_BGX_XCV))
rc = __cvmx_helper_parse_bgx_rgmii_dt(fdt_addr);
return 0;
}
-typedef int (*cvmx_import_config_t)(void);
-cvmx_import_config_t cvmx_import_app_config;
-
-int __cvmx_helper_init_port_config_data_local(void)
-{
- int rv = 0;
- int dbg = 0;
-
- if (!port_cfg_data_initialized)
- cvmx_init_port_cfg();
-
- if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
- if (cvmx_import_app_config) {
- rv = (*cvmx_import_app_config)();
- if (rv != 0) {
- debug("failed to import config\n");
- return -1;
- }
- }
-
- cvmx_helper_cfg_init_pko_port_map();
- __cvmx_helper_cfg_init_ipd2pko_cache();
- } else {
- if (cvmx_import_app_config) {
- rv = (*cvmx_import_app_config)();
- if (rv != 0) {
- debug("failed to import config\n");
- return -1;
- }
- }
- }
- if (dbg) {
- cvmx_helper_cfg_show_cfg();
- cvmx_pko_queue_show();
- }
- return rv;
-}
-
/*
* This call is made from Linux octeon_ethernet driver
* to setup the PKO with a specific queue count and
{
int rv, p, port_start, cnt;
- if (dbg)
- debug("%s: intf %d/%d pcnt %d qcnt %d\n", __func__, interface, port, port_cnt,
- queue_cnt);
+ debug("%s: intf %d/%d pcnt %d qcnt %d\n", __func__, interface, port, port_cnt,
+ queue_cnt);
if (!port_cfg_data_initialized)
cvmx_init_port_cfg();
struct cvmx_srio_port_param *sr;
pcfg = &cvmx_cfg_port[node][i][j];
+
memset(pcfg, 0, sizeof(*pcfg));
pcfg->port_fdt_node = CVMX_HELPER_CFG_INVALID_VALUE;
int pknd = 0, bpid = 0;
const int use_static_config = 1;
- if (dbg)
- printf("%s:\n", __func__);
+ debug("%s:\n", __func__);
if (!port_cfg_data_initialized)
cvmx_init_port_cfg();
__cvmx_helper_cfg_init_ipd2pko_cache();
}
- if (dbg) {
- cvmx_helper_cfg_show_cfg();
- cvmx_pko_queue_show();
- }
+#ifdef DEBUG
+ cvmx_helper_cfg_show_cfg();
+ cvmx_pko_queue_show();
+#endif
+
return rv;
}
return cvmx_cfg_port[xi.node][xi.interface][index].port_fdt_node;
}
-/**
- * Search for a port based on its FDT node offset
- *
- * @param of_offset Node offset of port to search for
- * @param[out] xiface xinterface of match
- * @param[out] index port index of match
- *
- * Return: 0 if found, -1 if not found
- */
-int cvmx_helper_cfg_get_xiface_index_by_fdt_node_offset(int of_offset, int *xiface, int *index)
-{
- int iface;
- int i;
- int node;
- struct cvmx_cfg_port_param *pcfg = NULL;
- *xiface = -1;
- *index = -1;
-
- for (node = 0; node < CVMX_MAX_NODES; node++) {
- for (iface = 0; iface < CVMX_HELPER_MAX_IFACE; iface++) {
- for (i = 0; i < CVMX_HELPER_CFG_MAX_PORT_PER_IFACE; i++) {
- pcfg = &cvmx_cfg_port[node][iface][i];
- if (pcfg->valid && pcfg->port_fdt_node == of_offset) {
- *xiface = cvmx_helper_node_interface_to_xiface(node, iface);
- *index = i;
- return 0;
- }
- }
- }
- }
- return -1;
-}
-
/**
* @INTERNAL
* Store the FDT node offset in the device tree of a phy
return !cvmx_cfg_port[xi.node][xi.interface][index].disable_an;
}
-/**
- * @INTERNAL
- * Override default forward error correction for a port
- *
- * @param xiface node and interface
- * @param index port index
- * @param enable true to enable fec, false to disable it
- */
-void cvmx_helper_set_port_fec(int xiface, int index, bool enable)
-{
- struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-
- if (!port_cfg_data_initialized)
- cvmx_init_port_cfg();
- cvmx_cfg_port[xi.node][xi.interface][index].enable_fec = enable;
-}
-
/**
* @INTERNAL
* Returns if forward error correction is enabled or not.
return cvmx_cfg_port[xi.node][xi.interface][index].enable_fec;
}
-/**
- * @INTERNAL
- * Configure the SRIO RX interface AGC settings for host mode
- *
- * @param xiface node and interface
- * @param index lane
- * @param long_run true for long run, false for short run
- * @param agc_override true to put AGC in manual mode
- * @param ctle_zero RX equalizer peaking control (default 0x6)
- * @param agc_pre_ctle AGC pre-CTLE gain (default 0x5)
- * @param agc_post_ctle AGC post-CTLE gain (default 0x4)
- *
- * NOTE: This must be called before SRIO is initialized to take effect
- */
-void cvmx_helper_set_srio_rx(int xiface, int index, bool long_run, bool ctle_zero_override,
- u8 ctle_zero, bool agc_override, uint8_t agc_pre_ctle,
- uint8_t agc_post_ctle)
-{
- struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
- struct cvmx_cfg_port_param *pcfg = &cvmx_cfg_port[xi.node][xi.interface][index];
- struct cvmx_srio_port_param *sr = long_run ? &pcfg->srio_long : &pcfg->srio_short;
-
- if (!port_cfg_data_initialized)
- cvmx_init_port_cfg();
- sr->srio_rx_ctle_zero_override = ctle_zero_override;
- sr->srio_rx_ctle_zero = ctle_zero;
- sr->srio_rx_ctle_agc_override = agc_override;
- sr->srio_rx_agc_pre_ctle = agc_pre_ctle;
- sr->srio_rx_agc_post_ctle = agc_post_ctle;
-}
-
-/**
- * @INTERNAL
- * Get the SRIO RX interface AGC settings for host mode
- *
- * @param xiface node and interface
- * @param index lane
- * @param long_run true for long run, false for short run
- * @param[out] agc_override true to put AGC in manual mode
- * @param[out] ctle_zero RX equalizer peaking control (default 0x6)
- * @param[out] agc_pre_ctle AGC pre-CTLE gain (default 0x5)
- * @param[out] agc_post_ctle AGC post-CTLE gain (default 0x4)
- */
-void cvmx_helper_get_srio_rx(int xiface, int index, bool long_run, bool *ctle_zero_override,
- u8 *ctle_zero, bool *agc_override, uint8_t *agc_pre_ctle,
- uint8_t *agc_post_ctle)
-{
- struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
- struct cvmx_cfg_port_param *pcfg = &cvmx_cfg_port[xi.node][xi.interface][index];
- struct cvmx_srio_port_param *sr = long_run ? &pcfg->srio_long : &pcfg->srio_short;
-
- if (!port_cfg_data_initialized)
- cvmx_init_port_cfg();
- if (ctle_zero_override)
- *ctle_zero_override = sr->srio_rx_ctle_zero_override;
- if (ctle_zero)
- *ctle_zero = sr->srio_rx_ctle_zero;
- if (agc_override)
- *agc_override = sr->srio_rx_ctle_agc_override;
- if (agc_pre_ctle)
- *agc_pre_ctle = sr->srio_rx_agc_pre_ctle;
- if (agc_post_ctle)
- *agc_post_ctle = sr->srio_rx_agc_post_ctle;
-}
-
-/**
- * @INTERNAL
- * Configure the SRIO TX interface for host mode
- *
- * @param xiface node and interface
- * @param index lane
- * @param long_run true for long run, false for short run
- * @param tx_swing tx swing value to use (default 0x7), -1 to not
- * override.
- * @param tx_gain PCS SDS TX gain (default 0x3), -1 to not
- * override
- * @param tx_premptap_override true to override preemphasis control
- * @param tx_premptap_pre preemphasis pre tap value (default 0x0)
- * @param tx_premptap_post preemphasis post tap value (default 0xF)
- * @param tx_vboost vboost enable (1 = enable, -1 = don't override)
- * hardware default is 1.
- *
- * NOTE: This must be called before SRIO is initialized to take effect
- */
-void cvmx_helper_set_srio_tx(int xiface, int index, bool long_run, int tx_swing, int tx_gain,
- bool tx_premptap_override, uint8_t tx_premptap_pre,
- u8 tx_premptap_post, int tx_vboost)
-{
- struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
- struct cvmx_cfg_port_param *pcfg = &cvmx_cfg_port[xi.node][xi.interface][index];
- struct cvmx_srio_port_param *sr = long_run ? &pcfg->srio_long : &pcfg->srio_short;
-
- if (!port_cfg_data_initialized)
- cvmx_init_port_cfg();
-
- sr->srio_tx_swing_override = (tx_swing != -1);
- sr->srio_tx_swing = tx_swing != -1 ? tx_swing : 0x7;
- sr->srio_tx_gain_override = (tx_gain != -1);
- sr->srio_tx_gain = tx_gain != -1 ? tx_gain : 0x3;
- sr->srio_tx_premptap_override = tx_premptap_override;
- sr->srio_tx_premptap_pre = tx_premptap_override ? tx_premptap_pre : 0;
- sr->srio_tx_premptap_post = tx_premptap_override ? tx_premptap_post : 0xF;
- sr->srio_tx_vboost_override = tx_vboost != -1;
- sr->srio_tx_vboost = (tx_vboost != -1) ? tx_vboost : 1;
-}
-
-/**
- * @INTERNAL
- * Get the SRIO TX interface settings for host mode
- *
- * @param xiface node and interface
- * @param index lane
- * @param long_run true for long run, false for short run
- * @param[out] tx_swing_override true to override pcs_sds_txX_swing
- * @param[out] tx_swing tx swing value to use (default 0x7)
- * @param[out] tx_gain_override true to override default gain
- * @param[out] tx_gain PCS SDS TX gain (default 0x3)
- * @param[out] tx_premptap_override true to override preemphasis control
- * @param[out] tx_premptap_pre preemphasis pre tap value (default 0x0)
- * @param[out] tx_premptap_post preemphasis post tap value (default 0xF)
- * @param[out] tx_vboost_override override vboost setting
- * @param[out] tx_vboost vboost enable (default true)
- */
-void cvmx_helper_get_srio_tx(int xiface, int index, bool long_run, bool *tx_swing_override,
- u8 *tx_swing, bool *tx_gain_override, uint8_t *tx_gain,
- bool *tx_premptap_override, uint8_t *tx_premptap_pre,
- u8 *tx_premptap_post, bool *tx_vboost_override, bool *tx_vboost)
-{
- struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
- struct cvmx_cfg_port_param *pcfg = &cvmx_cfg_port[xi.node][xi.interface][index];
- struct cvmx_srio_port_param *sr = long_run ? &pcfg->srio_long : &pcfg->srio_short;
-
- if (!port_cfg_data_initialized)
- cvmx_init_port_cfg();
-
- if (tx_swing_override)
- *tx_swing_override = sr->srio_tx_swing_override;
- if (tx_swing)
- *tx_swing = sr->srio_tx_swing;
- if (tx_gain_override)
- *tx_gain_override = sr->srio_tx_gain_override;
- if (tx_gain)
- *tx_gain = sr->srio_tx_gain;
- if (tx_premptap_override)
- *tx_premptap_override = sr->srio_tx_premptap_override;
- if (tx_premptap_pre)
- *tx_premptap_pre = sr->srio_tx_premptap_pre;
- if (tx_premptap_post)
- *tx_premptap_post = sr->srio_tx_premptap_post;
- if (tx_vboost_override)
- *tx_vboost_override = sr->srio_tx_vboost_override;
- if (tx_vboost)
- *tx_vboost = sr->srio_tx_vboost;
-}
-
/**
* @INTERNAL
* Sets the PHY info data structure
return cvmx_cfg_port[xi.node][xi.interface][index].gpio_leds;
}
-/**
- * @INTERNAL
- * Sets a pointer to the PHY LED configuration (if local GPIOs drive them)
- *
- * @param xiface node and interface
- * @param index portindex
- * @param leds pointer to led data structure
- */
-void cvmx_helper_set_port_phy_leds(int xiface, int index, struct cvmx_phy_gpio_leds *leds)
-{
- struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-
- if (!port_cfg_data_initialized)
- cvmx_init_port_cfg();
- cvmx_cfg_port[xi.node][xi.interface][index].gpio_leds = leds;
-}
-
/**
* @INTERNAL
* Disables RGMII TX clock bypass and sets delay value
*clk_delay = cvmx_cfg_port[xi.node][xi.interface][index].rgmii_tx_clk_delay;
}
-/**
- * @INTERNAL
- * Retrieve the SFP node offset in the device tree
- *
- * @param xiface node and interface
- * @param index port index
- *
- * Return: offset in device tree or -1 if error or not defined.
- */
-int cvmx_helper_cfg_get_sfp_fdt_offset(int xiface, int index)
-{
- struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-
- if (!port_cfg_data_initialized)
- cvmx_init_port_cfg();
- return cvmx_cfg_port[xi.node][xi.interface][index].sfp_of_offset;
-}
-
-/**
- * @INTERNAL
- * Sets the SFP node offset
- *
- * @param xiface node and interface
- * @param index port index
- * @param sfp_of_offset Offset of SFP node in device tree
- */
-void cvmx_helper_cfg_set_sfp_fdt_offset(int xiface, int index, int sfp_of_offset)
-{
- struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-
- if (!port_cfg_data_initialized)
- cvmx_init_port_cfg();
- cvmx_cfg_port[xi.node][xi.interface][index].sfp_of_offset = sfp_of_offset;
-}
-
-/**
- * Get data structure defining the Microsemi VSC7224 channel info
- * or NULL if not present
- *
- * @param xiface node and interface
- * @param index port index
- *
- * Return: pointer to vsc7224 data structure or NULL if not present
- */
-struct cvmx_vsc7224_chan *cvmx_helper_cfg_get_vsc7224_chan_info(int xiface, int index)
-{
- struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-
- if (!port_cfg_data_initialized)
- cvmx_init_port_cfg();
- return cvmx_cfg_port[xi.node][xi.interface][index].vsc7224_chan;
-}
-
/**
* Sets the Microsemi VSC7224 channel info data structure
*
cvmx_cfg_port[xi.node][xi.interface][index].vsc7224_chan = vsc7224_chan_info;
}
-/**
- * Get data structure defining the Avago AVSP5410 phy info
- * or NULL if not present
- *
- * @param xiface node and interface
- * @param index port index
- *
- * Return: pointer to avsp5410 data structure or NULL if not present
- */
-struct cvmx_avsp5410 *cvmx_helper_cfg_get_avsp5410_info(int xiface, int index)
-{
- struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-
- if (!port_cfg_data_initialized)
- cvmx_init_port_cfg();
- return cvmx_cfg_port[xi.node][xi.interface][index].avsp5410;
-}
-
-/**
- * Sets the Avago AVSP5410 phy info data structure
- *
- * @param xiface node and interface
- * @param index port index
- * @param[in] avsp5410_info Avago AVSP5410 data structure
- */
-void cvmx_helper_cfg_set_avsp5410_info(int xiface, int index, struct cvmx_avsp5410 *avsp5410_info)
-{
- struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-
- if (!port_cfg_data_initialized)
- cvmx_init_port_cfg();
- cvmx_cfg_port[xi.node][xi.interface][index].avsp5410 = avsp5410_info;
-}
-
/**
* Gets the SFP data associated with a port
*
cvmx_init_port_cfg();
cvmx_cfg_port[xi.node][xi.interface][index].sfp_info = sfp_info;
}
-
-/**
- * Returns a pointer to the phy device associated with a port
- *
- * @param xiface node and interface
- * @param index port index
- *
- * return pointer to phy device or NULL if none
- */
-struct phy_device *cvmx_helper_cfg_get_phy_device(int xiface, int index)
-{
- struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-
- if (!port_cfg_data_initialized)
- cvmx_init_port_cfg();
- return cvmx_cfg_port[xi.node][xi.interface][index].phydev;
-}
-
-/**
- * Sets the phy device associated with a port
- *
- * @param xiface node and interface
- * @param index port index
- * @param[in] phydev phy device to assiciate
- */
-void cvmx_helper_cfg_set_phy_device(int xiface, int index, struct phy_device *phydev)
-{
- struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
-
- if (!port_cfg_data_initialized)
- cvmx_init_port_cfg();
- cvmx_cfg_port[xi.node][xi.interface][index].phydev = phydev;
-}
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020-2022 Marvell International Ltd.
*
* FDT Helper functions similar to those provided to U-Boot.
*/
+#include <dm.h>
+#include <i2c.h>
#include <log.h>
#include <malloc.h>
#include <net.h>
#include <linux/delay.h>
+#include <asm-generic/gpio.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-fdt.h>
-#include <mach/cvmx-helper-gpio.h>
-
-/** Structure used to get type of GPIO from device tree */
-struct gpio_compat {
- char *compatible; /** Compatible string */
- enum cvmx_gpio_type type; /** Type */
- int8_t size; /** (max) Number of pins */
-};
-
-#define GPIO_REG_PCA953X_IN 0
-#define GPIO_REG_PCA953X_OUT 1
-#define GPIO_REG_PCA953X_INVERT 2
-#define GPIO_REG_PCA953X_DIR 3
-
-#define GPIO_REG_PCA957X_IN 0
-#define GPIO_REG_PCA957X_INVERT 1
-#define GPIO_REG_PCA957X_CFG 4
-#define GPIO_REG_PCA957X_OUT 5
-
-enum cvmx_i2c_mux_type { I2C_MUX, I2C_SWITCH };
-
-/** Structure used to get type of GPIO from device tree */
-struct mux_compat {
- char *compatible; /** Compatible string */
- enum cvmx_i2c_bus_type type; /** Mux chip type */
- enum cvmx_i2c_mux_type mux_type; /** Type of mux */
- u8 enable; /** Enable bit for mux */
- u8 size; /** (max) Number of channels */
-};
/**
* Local allocator to handle both SE and U-Boot that also zeroes out memory
* Return: pointer to allocated memory or NULL if out of memory.
* Alignment is set to 8-bytes.
*/
-void *__cvmx_fdt_alloc(size_t size)
+static void *cvmx_fdt_alloc(size_t size)
{
return calloc(size, 1);
}
-/**
- * Free allocated memory.
- *
- * @param ptr pointer to memory to free
- *
- * NOTE: This only works in U-Boot since SE does not really have a freeing
- * mechanism. In SE the memory is zeroed out.
- */
-void __cvmx_fdt_free(void *ptr, size_t size)
-{
- free(ptr);
-}
-
-/**
- * Look up a phandle and follow it to its node then return the offset of that
- * node.
- *
- * @param[in] fdt_addr pointer to FDT blob
- * @param node node to read phandle from
- * @param[in] prop_name name of property to find
- * @param[in,out] lenp Number of phandles, input max number
- * @param[out] nodes Array of phandle nodes
- *
- * Return: -ve error code on error or 0 for success
- */
-int cvmx_fdt_lookup_phandles(const void *fdt_addr, int node,
- const char *prop_name, int *lenp,
- int *nodes)
+int cvmx_ofnode_lookup_phandles(ofnode node, const char *prop_name, int *lenp,
+ ofnode *nodes)
{
const u32 *phandles;
int count;
int i;
- phandles = fdt_getprop(fdt_addr, node, prop_name, &count);
+ phandles = ofnode_get_property(node, prop_name, &count);
if (!phandles || count < 0)
return -FDT_ERR_NOTFOUND;
count = *lenp;
for (i = 0; i < count; i++)
- nodes[i] = fdt_node_offset_by_phandle(fdt_addr,
- fdt32_to_cpu(phandles[i]));
+ nodes[i] = ofnode_get_by_phandle(fdt32_to_cpu(phandles[i]));
+
*lenp = count;
return 0;
}
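/*
 * Illustrative usage only (not part of this patch; "parent", "sfp-slots" and
 * do_something_with() are hypothetical): the caller passes the capacity of
 * the nodes[] array in *lenp; the helper resolves the phandles to ofnodes and
 * returns 0, or -FDT_ERR_NOTFOUND if the property is absent.
 *
 *	ofnode slots[4];
 *	int n = ARRAY_SIZE(slots);
 *	int i;
 *
 *	if (!cvmx_ofnode_lookup_phandles(parent, "sfp-slots", &n, slots))
 *		for (i = 0; i < n; i++)
 *			do_something_with(slots[i]);
 */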
return fdt32_to_cpu(ranges[2]) / 0x10;
}
-/**
- * Get the total size of the flat device tree
- *
- * @param[in] fdt_addr Address of FDT
- *
- * Return: Size of flat device tree in bytes or error if negative.
- */
-int cvmx_fdt_get_fdt_size(const void *fdt_addr)
-{
- int rc;
-
- rc = fdt_check_header(fdt_addr);
- if (rc)
- return rc;
- return fdt_totalsize(fdt_addr);
-}
-
-/**
- * Returns if a node is compatible with one of the items in the string list
- *
- * @param[in] fdt_addr Pointer to flat device tree
- * @param node Node offset to check
- * @param[in] strlist Array of FDT device compatibility strings,
- * must end with NULL or empty string.
- *
- * Return: 0 if at least one item matches, 1 if no matches
- */
-int cvmx_fdt_node_check_compatible_list(const void *fdt_addr, int node, const char *const *strlist)
-{
- while (*strlist && **strlist) {
- if (!fdt_node_check_compatible(fdt_addr, node, *strlist))
- return 0;
- strlist++;
- }
- return 1;
-}
-
-/**
- * Given a FDT node, return the next compatible node.
- *
- * @param[in] fdt_addr Pointer to flat device tree
- * @param start_offset Starting node offset or -1 to find the first
- * @param strlist Array of FDT device compatibility strings, must
- * end with NULL or empty string.
- *
- * Return: next matching node or -1 if no more matches.
- */
-int cvmx_fdt_node_offset_by_compatible_list(const void *fdt_addr, int startoffset,
- const char *const *strlist)
-{
- int offset;
-
- for (offset = fdt_next_node(fdt_addr, startoffset, NULL); offset >= 0;
- offset = fdt_next_node(fdt_addr, offset, NULL)) {
- if (!cvmx_fdt_node_check_compatible_list(fdt_addr, offset, strlist))
- return offset;
- }
- return -1;
-}
-
-/**
- * Attaches a PHY to a SFP or QSFP.
- *
- * @param sfp sfp to attach PHY to
- * @param phy_info phy descriptor to attach or NULL to detach
- */
-void cvmx_sfp_attach_phy(struct cvmx_fdt_sfp_info *sfp, struct cvmx_phy_info *phy_info)
-{
- sfp->phy_info = phy_info;
- if (phy_info)
- phy_info->sfp_info = sfp;
-}
-
-/**
- * Assigns an IPD port to a SFP slot
- *
- * @param sfp Handle to SFP data structure
- * @param ipd_port Port to assign it to
- *
- * Return: 0 for success, -1 on error
- */
-int cvmx_sfp_set_ipd_port(struct cvmx_fdt_sfp_info *sfp, int ipd_port)
-{
- int i;
-
- if (sfp->is_qsfp) {
- int xiface;
- cvmx_helper_interface_mode_t mode;
-
- xiface = cvmx_helper_get_interface_num(ipd_port);
- mode = cvmx_helper_interface_get_mode(xiface);
- sfp->ipd_port[0] = ipd_port;
-
- switch (mode) {
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- case CVMX_HELPER_INTERFACE_MODE_XFI:
- case CVMX_HELPER_INTERFACE_MODE_10G_KR:
- for (i = 1; i < 4; i++)
- sfp->ipd_port[i] = cvmx_helper_get_ipd_port(xiface, i);
- break;
- case CVMX_HELPER_INTERFACE_MODE_XLAUI:
- case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
- sfp->ipd_port[0] = ipd_port;
- for (i = 1; i < 4; i++)
- sfp->ipd_port[i] = -1;
- break;
- default:
- debug("%s: Interface mode %s for interface 0x%x, ipd_port %d not supported for QSFP\n",
- __func__, cvmx_helper_interface_mode_to_string(mode), xiface,
- ipd_port);
- return -1;
- }
- } else {
- sfp->ipd_port[0] = ipd_port;
- for (i = 1; i < 4; i++)
- sfp->ipd_port[i] = -1;
- }
- return 0;
-}
-
/**
* Parses all of the channels assigned to a VSC7224 device
*
*
* Return: 0 for success, -1 on error
*/
-static int cvmx_fdt_parse_vsc7224_channels(const void *fdt_addr, int of_offset,
+static int cvmx_fdt_parse_vsc7224_channels(ofnode node,
struct cvmx_vsc7224 *vsc7224)
{
- int parent_offset = of_offset;
+ struct ofnode_phandle_args phandle;
int err = 0;
int reg;
int num_chan = 0;
bool is_tx;
bool is_qsfp;
const char *mac_str;
+ ofnode node_chan;
- debug("%s(%p, %d, %s)\n", __func__, fdt_addr, of_offset, vsc7224->name);
- do {
- /* Walk through all channels */
- of_offset = fdt_node_offset_by_compatible(fdt_addr, of_offset,
- "vitesse,vsc7224-channel");
- if (of_offset == -FDT_ERR_NOTFOUND) {
- break;
- } else if (of_offset < 0) {
- debug("%s: Failed finding compatible channel\n",
- __func__);
- err = -1;
+ debug("%s(%x, %s)\n", __func__, ofnode_to_offset(node), vsc7224->name);
+ ofnode_for_each_compatible_node(node_chan, "vitesse,vsc7224-channel") {
+ if (!ofnode_valid(node_chan)) {
+ debug("%s: Error parsing FDT node %s\n",
+ __func__, ofnode_get_name(node));
break;
}
- if (fdt_parent_offset(fdt_addr, of_offset) != parent_offset)
+
+ if (ofnode_to_offset(ofnode_get_parent(node_chan)) !=
+ ofnode_to_offset(node))
break;
- reg = cvmx_fdt_get_int(fdt_addr, of_offset, "reg", -1);
+
+ reg = ofnode_get_addr(node_chan);
if (reg < 0 || reg > 3) {
debug("%s: channel reg is either not present or out of range\n",
__func__);
err = -1;
break;
}
- is_tx = cvmx_fdt_get_bool(fdt_addr, of_offset, "direction-tx");
+ is_tx = ofnode_read_bool(node_chan, "direction-tx");
debug("%s(%s): Adding %cx channel %d\n",
__func__, vsc7224->name, is_tx ? 't' : 'r',
reg);
- tap_values = (const uint32_t *)fdt_getprop(fdt_addr, of_offset, "taps", &len);
+ tap_values = ofnode_get_property(node_chan, "taps", &len);
if (!tap_values) {
debug("%s: Error: no taps defined for vsc7224 channel %d\n",
__func__, reg);
num_taps = len / 16;
debug("%s: Adding %d taps\n", __func__, num_taps);
- channel = __cvmx_fdt_alloc(sizeof(*channel) +
- num_taps * sizeof(struct cvmx_vsc7224_tap));
+ channel = cvmx_fdt_alloc(sizeof(*channel) +
+ num_taps * sizeof(struct cvmx_vsc7224_tap));
if (!channel) {
debug("%s: Out of memory\n", __func__);
err = -1;
vsc7224->channel[reg] = channel;
channel->num_taps = num_taps;
channel->lane = reg;
- channel->of_offset = of_offset;
+ channel->of_offset = ofnode_to_offset(node_chan);
channel->is_tx = is_tx;
- channel->pretap_disable = cvmx_fdt_get_bool(fdt_addr, of_offset, "pretap-disable");
- channel->posttap_disable =
- cvmx_fdt_get_bool(fdt_addr, of_offset, "posttap-disable");
+ channel->pretap_disable = ofnode_read_bool(node_chan,
+ "pretap-disable");
+ channel->posttap_disable = ofnode_read_bool(node_chan,
+ "posttap-disable");
channel->vsc7224 = vsc7224;
/* Read all the tap values */
for (i = 0; i < num_taps; i++) {
channel->ipd_port = -1;
mac_str = "sfp-mac";
- if (fdt_getprop(fdt_addr, of_offset, mac_str, NULL)) {
+ if (ofnode_get_property(node_chan, mac_str, NULL)) {
is_qsfp = false;
- } else if (fdt_getprop(fdt_addr, of_offset, "qsfp-mac", NULL)) {
+ } else if (ofnode_get_property(node_chan, "qsfp-mac", NULL)) {
is_qsfp = true;
mac_str = "qsfp-mac";
} else {
vsc7224->name, reg);
return -1;
}
- of_mac = cvmx_fdt_lookup_phandle(fdt_addr, of_offset, mac_str);
- if (of_mac < 0) {
+
+ err = ofnode_parse_phandle_with_args(node_chan, mac_str, NULL,
+ 0, 0, &phandle);
+ if (err) {
debug("%s: Error %d with MAC %s phandle for %s\n", __func__, of_mac,
mac_str, vsc7224->name);
return -1;
}
- debug("%s: Found mac at offset %d\n", __func__, of_mac);
- err = cvmx_helper_cfg_get_xiface_index_by_fdt_node_offset(of_mac, &xiface, &index);
- if (!err) {
- channel->xiface = xiface;
- channel->index = index;
- channel->ipd_port = cvmx_helper_get_ipd_port(xiface, index);
-
- debug("%s: Found MAC, xiface: 0x%x, index: %d, ipd port: %d\n", __func__,
- xiface, index, channel->ipd_port);
- if (channel->ipd_port >= 0) {
- cvmx_helper_cfg_set_vsc7224_chan_info(xiface, index, channel);
- debug("%s: Storing config channel for xiface 0x%x, index %d\n",
- __func__, xiface, index);
- }
- sfp_info = cvmx_helper_cfg_get_sfp_info(xiface, index);
- if (!sfp_info) {
- debug("%s: Warning: no (Q)SFP+ slot found for xinterface 0x%x, index %d for channel %d\n",
- __func__, xiface, index, channel->lane);
- continue;
- }
+ debug("%s: Found mac at %s\n", __func__,
+ ofnode_get_name(phandle.node));
+
+ xiface = (ofnode_get_addr(ofnode_get_parent(phandle.node))
+ >> 24) & 0x0f;
+ index = ofnode_get_addr(phandle.node);
+ channel->xiface = xiface;
+ channel->index = index;
+ channel->ipd_port = cvmx_helper_get_ipd_port(xiface, index);
+
+ debug("%s: Found MAC, xiface: 0x%x, index: %d, ipd port: %d\n", __func__,
+ xiface, index, channel->ipd_port);
+ if (channel->ipd_port >= 0) {
+ cvmx_helper_cfg_set_vsc7224_chan_info(xiface, index, channel);
+ debug("%s: Storing config channel for xiface 0x%x, index %d\n",
+ __func__, xiface, index);
+ }
+ sfp_info = cvmx_helper_cfg_get_sfp_info(xiface, index);
+ if (!sfp_info) {
+ debug("%s: Warning: no (Q)SFP+ slot found for xinterface 0x%x, index %d for channel %d\n",
+ __func__, xiface, index, channel->lane);
+ continue;
+ }
- /* Link it */
- channel->next = sfp_info->vsc7224_chan;
- if (sfp_info->vsc7224_chan)
- sfp_info->vsc7224_chan->prev = channel;
- sfp_info->vsc7224_chan = channel;
- sfp_info->is_vsc7224 = true;
- debug("%s: Registering VSC7224 %s channel %d with SFP %s\n", __func__,
- vsc7224->name, channel->lane, sfp_info->name);
- if (!sfp_info->mod_abs_changed) {
- debug("%s: Registering cvmx_sfp_vsc7224_mod_abs_changed at %p for xinterface 0x%x, index %d\n",
- __func__, &cvmx_sfp_vsc7224_mod_abs_changed, xiface, index);
- cvmx_sfp_register_mod_abs_changed(
- sfp_info,
- &cvmx_sfp_vsc7224_mod_abs_changed,
- NULL);
- }
+ /* Link it */
+ channel->next = sfp_info->vsc7224_chan;
+ if (sfp_info->vsc7224_chan)
+ sfp_info->vsc7224_chan->prev = channel;
+ sfp_info->vsc7224_chan = channel;
+ sfp_info->is_vsc7224 = true;
+ debug("%s: Registering VSC7224 %s channel %d with SFP %s\n", __func__,
+ vsc7224->name, channel->lane, sfp_info->name);
+ if (!sfp_info->mod_abs_changed) {
+ debug("%s: Registering cvmx_sfp_vsc7224_mod_abs_changed at %p for xinterface 0x%x, index %d\n",
+ __func__, &cvmx_sfp_vsc7224_mod_abs_changed, xiface, index);
+ cvmx_sfp_register_mod_abs_changed(
+ sfp_info,
+ &cvmx_sfp_vsc7224_mod_abs_changed,
+ NULL);
}
- } while (!err && num_chan < 4);
+
+ if (num_chan >= 4)
+ break;
+ }
return err;
}
*/
int __cvmx_fdt_parse_vsc7224(const void *fdt_addr)
{
- int of_offset = -1;
struct cvmx_vsc7224 *vsc7224 = NULL;
- struct cvmx_fdt_gpio_info *gpio_info = NULL;
+ ofnode node;
int err = 0;
- int of_parent;
static bool parsed;
+ const int *init_array;
+ struct udevice *dev;
+ u16 value;
+ int reg;
+ int len;
+ int ret;
+ int i;
debug("%s(%p)\n", __func__, fdt_addr);
debug("%s: Already parsed\n", __func__);
return 0;
}
- do {
- of_offset = fdt_node_offset_by_compatible(fdt_addr, of_offset,
- "vitesse,vsc7224");
- debug("%s: of_offset: %d\n", __func__, of_offset);
- if (of_offset == -FDT_ERR_NOTFOUND) {
- break;
- } else if (of_offset < 0) {
- err = -1;
- debug("%s: Error %d parsing FDT\n",
- __func__, of_offset);
+
+ ofnode_for_each_compatible_node(node, "vitesse,vsc7224") {
+ if (!ofnode_valid(node)) {
+ debug("%s: Error parsing FDT node %s\n",
+ __func__, ofnode_get_name(node));
break;
}
- vsc7224 = __cvmx_fdt_alloc(sizeof(*vsc7224));
-
+ vsc7224 = cvmx_fdt_alloc(sizeof(*vsc7224));
if (!vsc7224) {
debug("%s: Out of memory!\n", __func__);
return -1;
}
- vsc7224->of_offset = of_offset;
- vsc7224->i2c_addr = cvmx_fdt_get_int(fdt_addr, of_offset,
- "reg", -1);
- of_parent = fdt_parent_offset(fdt_addr, of_offset);
- vsc7224->i2c_bus = cvmx_fdt_get_i2c_bus(fdt_addr, of_parent);
+
+ vsc7224->of_offset = ofnode_to_offset(node);
+ vsc7224->i2c_addr = ofnode_get_addr(node);
+ vsc7224->i2c_bus = cvmx_ofnode_get_i2c_bus(ofnode_get_parent(node));
if (vsc7224->i2c_addr < 0) {
debug("%s: Error: reg field missing\n", __func__);
err = -1;
err = -1;
break;
}
- vsc7224->name = fdt_get_name(fdt_addr, of_offset, NULL);
+ vsc7224->name = ofnode_get_name(node);
debug("%s: Adding %s\n", __func__, vsc7224->name);
- if (fdt_getprop(fdt_addr, of_offset, "reset", NULL)) {
- gpio_info = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "reset");
- vsc7224->reset_gpio = gpio_info;
- }
- if (fdt_getprop(fdt_addr, of_offset, "los", NULL)) {
- gpio_info = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "los");
- vsc7224->los_gpio = gpio_info;
- }
- debug("%s: Parsing channels\n", __func__);
- err = cvmx_fdt_parse_vsc7224_channels(fdt_addr, of_offset, vsc7224);
+
+ err = gpio_request_by_name_nodev(node, "reset", 0,
+ &vsc7224->reset_gpio,
+ GPIOD_IS_OUT);
if (err) {
- debug("%s: Error parsing VSC7224 channels\n", __func__);
- break;
+ printf("%s: reset GPIO not found in DT!\n", __func__);
+ return -ENODEV;
}
- } while (of_offset > 0);
- if (err) {
- debug("%s(): Error\n", __func__);
- if (vsc7224) {
- if (vsc7224->reset_gpio)
- __cvmx_fdt_free(vsc7224->reset_gpio, sizeof(*vsc7224->reset_gpio));
- if (vsc7224->los_gpio)
- __cvmx_fdt_free(vsc7224->los_gpio, sizeof(*vsc7224->los_gpio));
- if (vsc7224->i2c_bus)
- cvmx_fdt_free_i2c_bus(vsc7224->i2c_bus);
- __cvmx_fdt_free(vsc7224, sizeof(*vsc7224));
+ err = gpio_request_by_name_nodev(node, "los", 0,
+ &vsc7224->los_gpio,
+ GPIOD_IS_IN);
+ if (err) {
+ printf("%s: los GPIO not found in DT!\n", __func__);
+ return -ENODEV;
}
- }
- if (!err)
- parsed = true;
-
- return err;
-}
-
-/**
- * @INTERNAL
- * Parses all instances of the Avago AVSP5410 gearbox phy
- *
- * @param[in] fdt_addr Address of flat device tree
- *
- * Return: 0 for success, error otherwise
- */
-int __cvmx_fdt_parse_avsp5410(const void *fdt_addr)
-{
- int of_offset = -1;
- struct cvmx_avsp5410 *avsp5410 = NULL;
- struct cvmx_fdt_sfp_info *sfp_info;
- int err = 0;
- int of_parent;
- static bool parsed;
- int of_mac;
- int xiface, index;
- bool is_qsfp;
- const char *mac_str;
-
- debug("%s(%p)\n", __func__, fdt_addr);
- if (parsed) {
- debug("%s: Already parsed\n", __func__);
- return 0;
- }
-
- do {
- of_offset = fdt_node_offset_by_compatible(fdt_addr, of_offset,
- "avago,avsp-5410");
- debug("%s: of_offset: %d\n", __func__, of_offset);
- if (of_offset == -FDT_ERR_NOTFOUND) {
- break;
- } else if (of_offset < 0) {
- err = -1;
- debug("%s: Error %d parsing FDT\n", __func__, of_offset);
- break;
+ /*
+ * This code was taken from the NIC23 board specific code
+ * but should be better placed here in the common code
+ */
+ debug("%s: Putting device in reset\n", __func__);
+ dm_gpio_set_value(&vsc7224->reset_gpio, 1);
+ mdelay(10);
+ debug("%s: Taking device out of reset\n", __func__);
+ dm_gpio_set_value(&vsc7224->reset_gpio, 0);
+ mdelay(50);
+
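+ /*
+ * Illustrative note (not part of this patch): "vitesse,reg-init" is read
+ * below as a flat list of (register, value) u32 cell pairs, hence the
+ * len % 8 check, and each value is written to the chip MSB first as two
+ * bytes. A device tree entry could therefore look like (register numbers
+ * and values here are placeholders only):
+ *
+ * vitesse,reg-init = <0x01 0x0000>, <0x7f 0x0000>;
+ */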
+ init_array = ofnode_get_property(node, "vitesse,reg-init",
+ &len);
+ if (!init_array) {
+ debug("%s: No initialization array\n", __func__);
+ continue;
}
-
- avsp5410 = __cvmx_fdt_alloc(sizeof(*avsp5410));
-
- if (!avsp5410) {
- debug("%s: Out of memory!\n", __func__);
+ if ((len % 8) != 0) {
+ printf("%s: Error: register init string should be an array of reg number followed by value\n",
+ __func__);
return -1;
}
- avsp5410->of_offset = of_offset;
- avsp5410->i2c_addr = cvmx_fdt_get_int(fdt_addr, of_offset,
- "reg", -1);
- of_parent = fdt_parent_offset(fdt_addr, of_offset);
- avsp5410->i2c_bus = cvmx_fdt_get_i2c_bus(fdt_addr, of_parent);
- if (avsp5410->i2c_addr < 0) {
- debug("%s: Error: reg field missing\n", __func__);
- err = -1;
- break;
- }
- if (!avsp5410->i2c_bus) {
- debug("%s: Error getting i2c bus\n", __func__);
- err = -1;
- break;
- }
- avsp5410->name = fdt_get_name(fdt_addr, of_offset, NULL);
- debug("%s: Adding %s\n", __func__, avsp5410->name);
-
- /* Now find out which interface it's mapped to */
- avsp5410->ipd_port = -1;
- mac_str = "sfp-mac";
- if (fdt_getprop(fdt_addr, of_offset, mac_str, NULL)) {
- is_qsfp = false;
- } else if (fdt_getprop(fdt_addr, of_offset, "qsfp-mac", NULL)) {
- is_qsfp = true;
- mac_str = "qsfp-mac";
- } else {
- debug("%s: Error: MAC not found for %s\n", __func__, avsp5410->name);
- return -1;
- }
- of_mac = cvmx_fdt_lookup_phandle(fdt_addr, of_offset, mac_str);
- if (of_mac < 0) {
- debug("%s: Error %d with MAC %s phandle for %s\n", __func__, of_mac,
- mac_str, avsp5410->name);
+ ret = i2c_get_chip(vsc7224->i2c_bus->i2c_bus,
+ vsc7224->i2c_addr, 1, &dev);
+ if (ret) {
+ debug("Cannot find I2C device: %d\n", ret);
return -1;
}
- debug("%s: Found mac at offset %d\n", __func__, of_mac);
- err = cvmx_helper_cfg_get_xiface_index_by_fdt_node_offset(of_mac, &xiface, &index);
- if (!err) {
- avsp5410->xiface = xiface;
- avsp5410->index = index;
- avsp5410->ipd_port = cvmx_helper_get_ipd_port(xiface, index);
-
- debug("%s: Found MAC, xiface: 0x%x, index: %d, ipd port: %d\n", __func__,
- xiface, index, avsp5410->ipd_port);
- if (avsp5410->ipd_port >= 0) {
- cvmx_helper_cfg_set_avsp5410_info(xiface, index, avsp5410);
- debug("%s: Storing config phy for xiface 0x%x, index %d\n",
- __func__, xiface, index);
- }
- sfp_info = cvmx_helper_cfg_get_sfp_info(xiface, index);
- if (!sfp_info) {
- debug("%s: Warning: no (Q)SFP+ slot found for xinterface 0x%x, index %d\n",
- __func__, xiface, index);
- continue;
+ for (i = 0; i < len / sizeof(int); i += 2) {
+ u8 buffer[2];
+
+ reg = fdt32_to_cpu(init_array[i]);
+ value = fdt32_to_cpu(init_array[i + 1]);
+ buffer[0] = value >> 8;
+ buffer[1] = value & 0xff;
+ ret = dm_i2c_write(dev, reg, buffer, 2);
+ if (ret) {
+ debug("Cannot write I2C device: %d\n", ret);
+ return -1;
}
- sfp_info->is_avsp5410 = true;
- sfp_info->avsp5410 = avsp5410;
- debug("%s: Registering AVSP5410 %s with SFP %s\n", __func__, avsp5410->name,
- sfp_info->name);
- if (!sfp_info->mod_abs_changed) {
- debug("%s: Registering cvmx_sfp_avsp5410_mod_abs_changed at %p for xinterface 0x%x, index %d\n",
- __func__, &cvmx_sfp_avsp5410_mod_abs_changed, xiface, index);
- cvmx_sfp_register_mod_abs_changed(
- sfp_info,
- &cvmx_sfp_avsp5410_mod_abs_changed,
- NULL);
- }
+ debug(" Wrote 0x%02x <= 0x%02x%02x\n", reg,
+ buffer[0], buffer[1]);
+ }
+
+ debug("%s: Parsing channels\n", __func__);
+ err = cvmx_fdt_parse_vsc7224_channels(node, vsc7224);
+ if (err) {
+ debug("%s: Error parsing VSC7224 channels\n", __func__);
+ break;
}
- } while (of_offset > 0);
+ }
if (err) {
debug("%s(): Error\n", __func__);
- if (avsp5410) {
- if (avsp5410->i2c_bus)
- cvmx_fdt_free_i2c_bus(avsp5410->i2c_bus);
- __cvmx_fdt_free(avsp5410, sizeof(*avsp5410));
+ if (vsc7224) {
+ dm_gpio_free(vsc7224->reset_gpio.dev,
+ &vsc7224->reset_gpio);
+ dm_gpio_free(vsc7224->los_gpio.dev,
+ &vsc7224->los_gpio);
+ if (vsc7224->i2c_bus)
+ cvmx_fdt_free_i2c_bus(vsc7224->i2c_bus);
+ free(vsc7224);
}
}
if (!err)
}
/**
- * Parse QSFP GPIOs for SFP
- *
- * @param[in] fdt_addr Pointer to flat device tree
- * @param of_offset Offset of QSFP node
- * @param[out] sfp_info Pointer to sfp info to fill in
+ * Given the parent node of an i2c device, build up a list describing the
+ * bus, which can contain i2c muxes and switches.
*
- * Return: 0 for success
- */
-static int cvmx_parse_qsfp(const void *fdt_addr, int of_offset, struct cvmx_fdt_sfp_info *sfp_info)
-{
- sfp_info->select = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "select");
- sfp_info->mod_abs = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "mod_prs");
- sfp_info->reset = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "reset");
- sfp_info->interrupt = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "interrupt");
- sfp_info->lp_mode = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "lp_mode");
- return 0;
-}
-
-/**
- * Parse SFP GPIOs for SFP
+ * @param[in] node ofnode of the parent node of an i2c device in
+ * the device tree.
*
- * @param[in] fdt_addr Pointer to flat device tree
- * @param of_offset Offset of SFP node
- * @param[out] sfp_info Pointer to sfp info to fill in
+ * @return pointer to list of i2c devices starting from the root which
+ * can include i2c muxes and switches or NULL if error. Note that
+ * all entries are allocated on the heap.
*
- * Return: 0 for success
+ * @see cvmx_fdt_free_i2c_bus()
*/
-static int cvmx_parse_sfp(const void *fdt_addr, int of_offset, struct cvmx_fdt_sfp_info *sfp_info)
+struct cvmx_fdt_i2c_bus_info *cvmx_ofnode_get_i2c_bus(ofnode node)
{
- sfp_info->mod_abs = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "mod_abs");
- sfp_info->rx_los = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "rx_los");
- sfp_info->tx_disable = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "tx_disable");
- sfp_info->tx_error = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "tx_error");
- return 0;
-}
-
-/**
- * Parse SFP/QSFP EEPROM and diag
- *
- * @param[in] fdt_addr Pointer to flat device tree
- * @param of_offset Offset of SFP node
- * @param[out] sfp_info Pointer to sfp info to fill in
- *
- * Return: 0 for success, -1 on error
- */
-static int cvmx_parse_sfp_eeprom(const void *fdt_addr, int of_offset,
- struct cvmx_fdt_sfp_info *sfp_info)
-{
- int of_eeprom;
- int of_diag;
-
- debug("%s(%p, %d, %s)\n", __func__, fdt_addr, of_offset, sfp_info->name);
- of_eeprom = cvmx_fdt_lookup_phandle(fdt_addr, of_offset, "eeprom");
- if (of_eeprom < 0) {
- debug("%s: Missing \"eeprom\" from device tree for %s\n", __func__, sfp_info->name);
- return -1;
- }
-
- sfp_info->i2c_bus = cvmx_fdt_get_i2c_bus(fdt_addr, fdt_parent_offset(fdt_addr, of_eeprom));
- sfp_info->i2c_eeprom_addr = cvmx_fdt_get_int(fdt_addr, of_eeprom, "reg", 0x50);
-
- debug("%s(%p, %d, %s, %d)\n", __func__, fdt_addr, of_offset, sfp_info->name,
- sfp_info->i2c_eeprom_addr);
-
- if (!sfp_info->i2c_bus) {
- debug("%s: Error: could not determine i2c bus for eeprom for %s\n", __func__,
- sfp_info->name);
- return -1;
+ struct cvmx_fdt_i2c_bus_info *businfo = NULL;
+ struct udevice *bus;
+ int ret;
+
+ businfo = cvmx_fdt_alloc(sizeof(*businfo));
+ if (!businfo) {
+ debug("Out of memory\n");
+ return NULL;
}
- of_diag = cvmx_fdt_lookup_phandle(fdt_addr, of_offset, "diag");
- if (of_diag >= 0)
- sfp_info->i2c_diag_addr = cvmx_fdt_get_int(fdt_addr, of_diag, "reg", 0x51);
- else
- sfp_info->i2c_diag_addr = 0x51;
- return 0;
-}
-
-/**
- * Parse SFP information from device tree
- *
- * @param[in] fdt_addr Address of flat device tree
- *
- * Return: pointer to sfp info or NULL if error
- */
-struct cvmx_fdt_sfp_info *cvmx_helper_fdt_parse_sfp_info(const void *fdt_addr, int of_offset)
-{
- struct cvmx_fdt_sfp_info *sfp_info = NULL;
- int err = -1;
- bool is_qsfp;
- if (!fdt_node_check_compatible(fdt_addr, of_offset, "ethernet,sfp-slot")) {
- is_qsfp = false;
- } else if (!fdt_node_check_compatible(fdt_addr, of_offset, "ethernet,qsfp-slot")) {
- is_qsfp = true;
- } else {
- debug("%s: Error: incompatible sfp/qsfp slot, compatible=%s\n", __func__,
- (char *)fdt_getprop(fdt_addr, of_offset, "compatible", NULL));
- goto error_exit;
- }
+ debug("%s: Found node %s\n", __func__, ofnode_get_name(node));
+ businfo->of_offset = ofnode_to_offset(node);
- debug("%s: %ssfp module found at offset %d\n", __func__, is_qsfp ? "q" : "", of_offset);
- sfp_info = __cvmx_fdt_alloc(sizeof(*sfp_info));
- if (!sfp_info) {
- debug("%s: Error: out of memory\n", __func__);
- goto error_exit;
- }
- sfp_info->name = fdt_get_name(fdt_addr, of_offset, NULL);
- sfp_info->of_offset = of_offset;
- sfp_info->is_qsfp = is_qsfp;
- sfp_info->last_mod_abs = -1;
- sfp_info->last_rx_los = -1;
-
- if (is_qsfp)
- err = cvmx_parse_qsfp(fdt_addr, of_offset, sfp_info);
- else
- err = cvmx_parse_sfp(fdt_addr, of_offset, sfp_info);
- if (err) {
- debug("%s: Error in %s parsing %ssfp GPIO info\n", __func__, sfp_info->name,
- is_qsfp ? "q" : "");
- goto error_exit;
- }
- debug("%s: Parsing %ssfp module eeprom\n", __func__, is_qsfp ? "q" : "");
- err = cvmx_parse_sfp_eeprom(fdt_addr, of_offset, sfp_info);
- if (err) {
- debug("%s: Error parsing eeprom info for %s\n", __func__, sfp_info->name);
- goto error_exit;
+ /*
+ * Get I2C bus and probe it automatically - needed for later use
+ */
+ ret = device_get_global_by_ofnode(node, &bus);
+ if (!bus || ret) {
+ printf("Cannot find a I2C bus\n");
+ return NULL;
}
- /* Register default check for mod_abs changed */
- if (!err)
- cvmx_sfp_register_check_mod_abs(sfp_info, cvmx_sfp_check_mod_abs, NULL);
+ businfo->i2c_bus = bus;
-error_exit:
- /* Note: we don't free any data structures on error since it gets
- * rather complicated with i2c buses and whatnot.
- */
- return err ? NULL : sfp_info;
+ return businfo;
}
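/*
 * Illustrative usage only (not part of this patch; "dev_node" is
 * hypothetical): the returned bus descriptor is heap-allocated, so a caller
 * that owns it should release it with cvmx_fdt_free_i2c_bus() when done.
 *
 *	struct cvmx_fdt_i2c_bus_info *bus;
 *
 *	bus = cvmx_ofnode_get_i2c_bus(ofnode_get_parent(dev_node));
 *	if (bus) {
 *		... access the chip through bus->i2c_bus (a struct udevice *) ...
 *		cvmx_fdt_free_i2c_bus(bus);
 *	}
 */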
/**
- * @INTERNAL
- * Parse a slice of the Inphi/Cortina CS4343 in the device tree
+ * Return the Octeon bus number for a bus descriptor
*
- * @param[in] fdt_addr Address of flat device tree
- * @param of_offset fdt offset of slice
- * @param phy_info phy_info data structure
+ * @param[in] bus bus descriptor
*
- * Return: slice number if non-negative, otherwise error
+ * @return Octeon twsi bus number or -1 on error
*/
-static int cvmx_fdt_parse_cs4343_slice(const void *fdt_addr, int of_offset,
- struct cvmx_phy_info *phy_info)
+int cvmx_fdt_i2c_get_root_bus(const struct cvmx_fdt_i2c_bus_info *bus)
{
- struct cvmx_cs4343_slice_info *slice;
- int reg;
- int reg_offset;
-
- reg = cvmx_fdt_get_int(fdt_addr, of_offset, "reg", -1);
- reg_offset = cvmx_fdt_get_int(fdt_addr, of_offset, "slice_offset", -1);
-
- if (reg < 0 || reg >= 4) {
- debug("%s(%p, %d, %p): Error: reg %d undefined or out of range\n", __func__,
- fdt_addr, of_offset, phy_info, reg);
- return -1;
- }
- if (reg_offset % 0x1000 || reg_offset > 0x3000 || reg_offset < 0) {
- debug("%s(%p, %d, %p): Error: reg_offset 0x%x undefined or out of range\n",
- __func__, fdt_addr, of_offset, phy_info, reg_offset);
- return -1;
- }
- if (!phy_info->cs4343_info) {
- debug("%s: Error: phy info cs4343 datastructure is NULL\n", __func__);
+ if (bus->type != CVMX_I2C_BUS_OCTEON)
return -1;
- }
- debug("%s(%p, %d, %p): %s, reg: %d, slice offset: 0x%x\n", __func__, fdt_addr, of_offset,
- phy_info, fdt_get_name(fdt_addr, of_offset, NULL), reg, reg_offset);
- slice = &phy_info->cs4343_info->slice[reg];
- slice->name = fdt_get_name(fdt_addr, of_offset, NULL);
- slice->mphy = phy_info->cs4343_info;
- slice->phy_info = phy_info;
- slice->of_offset = of_offset;
- slice->slice_no = reg;
- slice->reg_offset = reg_offset;
- /* SR settings */
- slice->sr_stx_cmode_res = cvmx_fdt_get_int(fdt_addr, of_offset, "sr-stx-cmode-res", 3);
- slice->sr_stx_drv_lower_cm =
- cvmx_fdt_get_int(fdt_addr, of_offset, "sr-stx-drv-lower-cm", 8);
- slice->sr_stx_level = cvmx_fdt_get_int(fdt_addr, of_offset, "sr-stx-level", 0x1c);
- slice->sr_stx_pre_peak = cvmx_fdt_get_int(fdt_addr, of_offset, "sr-stx-pre-peak", 1);
- slice->sr_stx_muxsubrate_sel =
- cvmx_fdt_get_int(fdt_addr, of_offset, "sr-stx-muxsubrate-sel", 0);
- slice->sr_stx_post_peak = cvmx_fdt_get_int(fdt_addr, of_offset, "sr-stx-post-peak", 8);
- /* CX settings */
- slice->cx_stx_cmode_res = cvmx_fdt_get_int(fdt_addr, of_offset, "cx-stx-cmode-res", 3);
- slice->cx_stx_drv_lower_cm =
- cvmx_fdt_get_int(fdt_addr, of_offset, "cx-stx-drv-lower-cm", 8);
- slice->cx_stx_level = cvmx_fdt_get_int(fdt_addr, of_offset, "cx-stx-level", 0x1c);
- slice->cx_stx_pre_peak = cvmx_fdt_get_int(fdt_addr, of_offset, "cx-stx-pre-peak", 1);
- slice->cx_stx_muxsubrate_sel =
- cvmx_fdt_get_int(fdt_addr, of_offset, "cx-stx-muxsubrate-sel", 0);
- slice->cx_stx_post_peak = cvmx_fdt_get_int(fdt_addr, of_offset, "cx-stx-post-peak", 0xC);
- /* 1000Base-X settings */
- /* CX settings */
- slice->basex_stx_cmode_res =
- cvmx_fdt_get_int(fdt_addr, of_offset, "basex-stx-cmode-res", 3);
- slice->basex_stx_drv_lower_cm =
- cvmx_fdt_get_int(fdt_addr, of_offset, "basex-stx-drv-lower-cm", 8);
- slice->basex_stx_level = cvmx_fdt_get_int(fdt_addr, of_offset,
- "basex-stx-level", 0x1c);
- slice->basex_stx_pre_peak = cvmx_fdt_get_int(fdt_addr, of_offset,
- "basex-stx-pre-peak", 1);
- slice->basex_stx_muxsubrate_sel =
- cvmx_fdt_get_int(fdt_addr, of_offset,
- "basex-stx-muxsubrate-sel", 0);
- slice->basex_stx_post_peak =
- cvmx_fdt_get_int(fdt_addr, of_offset, "basex-stx-post-peak", 8);
- /* Get the link LED gpio pin */
- slice->link_gpio = cvmx_fdt_get_int(fdt_addr, of_offset,
- "link-led-gpio", -1);
- slice->error_gpio = cvmx_fdt_get_int(fdt_addr, of_offset,
- "error-led-gpio", -1);
- slice->los_gpio = cvmx_fdt_get_int(fdt_addr, of_offset,
- "los-input-gpio", -1);
- slice->link_inverted = cvmx_fdt_get_bool(fdt_addr, of_offset,
- "link-led-gpio-inverted");
- slice->error_inverted = cvmx_fdt_get_bool(fdt_addr, of_offset,
- "error-led-gpio-inverted");
- slice->los_inverted = cvmx_fdt_get_bool(fdt_addr, of_offset,
- "los-input-gpio-inverted");
- /* Convert GPIOs to be die based if they're not already */
- if (slice->link_gpio > 4 && slice->link_gpio <= 8)
- slice->link_gpio -= 4;
- if (slice->error_gpio > 4 && slice->error_gpio <= 8)
- slice->error_gpio -= 4;
- if (slice->los_gpio > 4 && slice->los_gpio <= 8)
- slice->los_gpio -= 4;
-
- return reg;
+ return bus->channel;
}
/**
- * @INTERNAL
- * Parses either a CS4343 phy or a slice of the phy from the device tree
- * @param[in] fdt_addr Address of FDT
- * @param of_offset offset of slice or phy in device tree
- * @param phy_info phy_info data structure to fill in
+ * Frees all entries for an i2c bus descriptor
*
- * Return: 0 for success, -1 on error
+ * @param bus bus to free
+ *
+ * @return 0
*/
-int cvmx_fdt_parse_cs4343(const void *fdt_addr, int of_offset, struct cvmx_phy_info *phy_info)
+int cvmx_fdt_free_i2c_bus(struct cvmx_fdt_i2c_bus_info *bus)
{
- int of_slice = -1;
- struct cvmx_cs4343_info *cs4343;
- int err = -1;
- int reg;
-
- debug("%s(%p, %d, %p): %s (%s)\n", __func__,
- fdt_addr, of_offset, phy_info,
- fdt_get_name(fdt_addr, of_offset, NULL),
- (const char *)fdt_getprop(fdt_addr, of_offset, "compatible", NULL));
+ struct cvmx_fdt_i2c_bus_info *last;
- if (!phy_info->cs4343_info)
- phy_info->cs4343_info = __cvmx_fdt_alloc(sizeof(struct cvmx_cs4343_info));
- if (!phy_info->cs4343_info) {
- debug("%s: Error: out of memory!\n", __func__);
- return -1;
- }
- cs4343 = phy_info->cs4343_info;
- /* If we're passed to a slice then process only that slice */
- if (!fdt_node_check_compatible(fdt_addr, of_offset, "cortina,cs4343-slice")) {
- err = 0;
- of_slice = of_offset;
- of_offset = fdt_parent_offset(fdt_addr, of_offset);
- reg = cvmx_fdt_parse_cs4343_slice(fdt_addr, of_slice, phy_info);
- if (reg >= 0)
- phy_info->cs4343_slice_info = &cs4343->slice[reg];
- else
- err = reg;
- } else if (!fdt_node_check_compatible(fdt_addr, of_offset,
- "cortina,cs4343")) {
- /* Walk through and process all of the slices */
- of_slice =
- fdt_node_offset_by_compatible(fdt_addr, of_offset, "cortina,cs4343-slice");
- while (of_slice > 0 && fdt_parent_offset(fdt_addr, of_slice) ==
- of_offset) {
- debug("%s: Parsing slice %s\n", __func__,
- fdt_get_name(fdt_addr, of_slice, NULL));
- err = cvmx_fdt_parse_cs4343_slice(fdt_addr, of_slice,
- phy_info);
- if (err < 0)
- break;
- of_slice = fdt_node_offset_by_compatible(fdt_addr,
- of_slice,
- "cortina,cs4343-slice");
- }
- } else {
- debug("%s: Error: unknown compatible string %s for %s\n", __func__,
- (const char *)fdt_getprop(fdt_addr, of_offset,
- "compatible", NULL),
- fdt_get_name(fdt_addr, of_offset, NULL));
+ while (bus) {
+ last = bus;
+ bus = bus->child;
+ free(last);
}
-
- if (err >= 0) {
- cs4343->name = fdt_get_name(fdt_addr, of_offset, NULL);
- cs4343->phy_info = phy_info;
- cs4343->of_offset = of_offset;
- }
-
- return err < 0 ? -1 : 0;
+ return 0;
}
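A minimal usage sketch for the new cvmx_fdt_free_i2c_bus() helper, assuming __cvmx_fdt_alloc() (used elsewhere in this file) hands back heap memory that free() may release and that nested buses hang off the child pointer, as the loop above implies; illustrative only, not applied by this patch:

/* Sketch: build a two-entry i2c bus chain and release it (error checking omitted). */
struct cvmx_fdt_i2c_bus_info *root = __cvmx_fdt_alloc(sizeof(*root));
struct cvmx_fdt_i2c_bus_info *mux_bus = __cvmx_fdt_alloc(sizeof(*mux_bus));

root->child = mux_bus; /* bus behind an i2c mux channel */
mux_bus->child = NULL;

cvmx_fdt_free_i2c_bus(root); /* frees both descriptors, returns 0 */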
return "UNKNOWN";
}
-/**
- * Debug routine to dump the packet structure to the console
- *
- * @param work Work queue entry containing the packet to dump
- * @return
- */
-int cvmx_helper_dump_packet(cvmx_wqe_t *work)
-{
- u64 count;
- u64 remaining_bytes;
- union cvmx_buf_ptr buffer_ptr;
- cvmx_buf_ptr_pki_t bptr;
- cvmx_wqe_78xx_t *wqe = (void *)work;
- u64 start_of_buffer;
- u8 *data_address;
- u8 *end_of_data;
-
- if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
- cvmx_pki_dump_wqe(wqe);
- cvmx_wqe_pki_errata_20776(work);
- } else {
- debug("WORD0 = %lx\n", (unsigned long)work->word0.u64);
- debug("WORD1 = %lx\n", (unsigned long)work->word1.u64);
- debug("WORD2 = %lx\n", (unsigned long)work->word2.u64);
- debug("Packet Length: %u\n", cvmx_wqe_get_len(work));
- debug(" Input Port: %u\n", cvmx_wqe_get_port(work));
- debug(" QoS: %u\n", cvmx_wqe_get_qos(work));
- debug(" Buffers: %u\n", cvmx_wqe_get_bufs(work));
- }
-
- if (cvmx_wqe_get_bufs(work) == 0) {
- int wqe_pool;
-
- if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
- debug("%s: ERROR: Unexpected bufs==0 in WQE\n", __func__);
- return -1;
- }
- wqe_pool = (int)cvmx_fpa_get_wqe_pool();
- buffer_ptr.u64 = 0;
- buffer_ptr.s.pool = wqe_pool;
-
- buffer_ptr.s.size = 128;
- buffer_ptr.s.addr = cvmx_ptr_to_phys(work->packet_data);
- if (cvmx_likely(!work->word2.s.not_IP)) {
- union cvmx_pip_ip_offset pip_ip_offset;
-
- pip_ip_offset.u64 = csr_rd(CVMX_PIP_IP_OFFSET);
- buffer_ptr.s.addr +=
- (pip_ip_offset.s.offset << 3) - work->word2.s.ip_offset;
- buffer_ptr.s.addr += (work->word2.s.is_v6 ^ 1) << 2;
- } else {
- /*
- * WARNING: This code assume that the packet
- * is not RAW. If it was, we would use
- * PIP_GBL_CFG[RAW_SHF] instead of
- * PIP_GBL_CFG[NIP_SHF].
- */
- union cvmx_pip_gbl_cfg pip_gbl_cfg;
-
- pip_gbl_cfg.u64 = csr_rd(CVMX_PIP_GBL_CFG);
- buffer_ptr.s.addr += pip_gbl_cfg.s.nip_shf;
- }
- } else {
- buffer_ptr = work->packet_ptr;
- }
-
- remaining_bytes = cvmx_wqe_get_len(work);
-
- while (remaining_bytes) {
- /* native cn78xx buffer format, unless legacy-translated */
- if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE) && !wqe->pki_wqe_translated) {
- bptr.u64 = buffer_ptr.u64;
- /* XXX- assumes cache-line aligned buffer */
- start_of_buffer = (bptr.addr >> 7) << 7;
- debug(" Buffer Start:%llx\n", (unsigned long long)start_of_buffer);
- debug(" Buffer Data: %llx\n", (unsigned long long)bptr.addr);
- debug(" Buffer Size: %u\n", bptr.size);
- data_address = (uint8_t *)cvmx_phys_to_ptr(bptr.addr);
- end_of_data = data_address + bptr.size;
- } else {
- start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
- debug(" Buffer Start:%llx\n", (unsigned long long)start_of_buffer);
- debug(" Buffer I : %u\n", buffer_ptr.s.i);
- debug(" Buffer Back: %u\n", buffer_ptr.s.back);
- debug(" Buffer Pool: %u\n", buffer_ptr.s.pool);
- debug(" Buffer Data: %llx\n", (unsigned long long)buffer_ptr.s.addr);
- debug(" Buffer Size: %u\n", buffer_ptr.s.size);
- data_address = (uint8_t *)cvmx_phys_to_ptr(buffer_ptr.s.addr);
- end_of_data = data_address + buffer_ptr.s.size;
- }
-
- debug("\t\t");
- count = 0;
- while (data_address < end_of_data) {
- if (remaining_bytes == 0)
- break;
-
- remaining_bytes--;
- debug("%02x", (unsigned int)*data_address);
- data_address++;
- if (remaining_bytes && count == 7) {
- debug("\n\t\t");
- count = 0;
- } else {
- count++;
- }
- }
- debug("\n");
-
- if (remaining_bytes) {
- if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE) &&
- !wqe->pki_wqe_translated)
- buffer_ptr.u64 = *(uint64_t *)cvmx_phys_to_ptr(bptr.addr - 8);
- else
- buffer_ptr.u64 =
- *(uint64_t *)cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
- }
- }
- return 0;
-}
-
/**
* @INTERNAL
*
}
}
-void cvmx_helper_setup_legacy_red(int pass_thresh, int drop_thresh)
-{
- unsigned int node = cvmx_get_node_num();
- int aura, bpid;
- int buf_cnt;
- bool ena_red = 0, ena_drop = 0, ena_bp = 0;
-
-#define FPA_RED_AVG_DLY 1
-#define FPA_RED_LVL_DLY 3
-#define FPA_QOS_AVRG 0
- /* Trying to make it backward compatible with older chips */
-
- /* Setting up avg_dly and prb_dly, enable bits */
- if (octeon_has_feature(OCTEON_FEATURE_FPA3)) {
- cvmx_fpa3_config_red_params(node, FPA_QOS_AVRG,
- FPA_RED_LVL_DLY, FPA_RED_AVG_DLY);
- }
-
- /* Disable backpressure on queued buffers which is aura in 78xx*/
- /*
- * Assumption is that all packets from all interface and ports goes
- * in same poolx/aurax for backward compatibility
- */
- aura = cvmx_fpa_get_packet_pool();
- buf_cnt = cvmx_fpa_get_packet_pool_buffer_count();
- pass_thresh = buf_cnt - pass_thresh;
- drop_thresh = buf_cnt - drop_thresh;
- /* Map aura to bpid 0*/
- bpid = 0;
- cvmx_pki_write_aura_bpid(node, aura, bpid);
- /* Don't enable back pressure */
- ena_bp = 0;
- /* enable RED */
- ena_red = 1;
- /*
- * This will enable RED on all interfaces since
- * they all have packet buffer coming from same aura
- */
- cvmx_helper_setup_aura_qos(node, aura, ena_red, ena_drop, pass_thresh,
- drop_thresh, ena_bp, 0);
-}
-
-/**
- * Setup Random Early Drop to automatically begin dropping packets.
- *
- * @param pass_thresh
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
- * @param drop_thresh
- * All incoming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
- * Return: Zero on success. Negative on failure
- */
-int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
-{
- if (octeon_has_feature(OCTEON_FEATURE_PKI))
- cvmx_helper_setup_legacy_red(pass_thresh, drop_thresh);
- else
- cvmx_ipd_setup_red(pass_thresh, drop_thresh);
- return 0;
-}
-
/**
* @INTERNAL
* Setup the common GMX settings that determine the number of
return CVMX_INVALID_BPID;
}
-/**
- * Display interface statistics.
- *
- * @param port IPD/PKO port number
- *
- * Return: none
- */
-void cvmx_helper_show_stats(int port)
-{
- cvmx_pip_port_status_t status;
- cvmx_pko_port_status_t pko_status;
-
- /* ILK stats */
- if (octeon_has_feature(OCTEON_FEATURE_ILK))
- __cvmx_helper_ilk_show_stats();
-
- /* PIP stats */
- cvmx_pip_get_port_stats(port, 0, &status);
- debug("port %d: the number of packets - ipd: %d\n", port,
- (int)status.packets);
-
- /* PKO stats */
- cvmx_pko_get_port_status(port, 0, &pko_status);
- debug("port %d: the number of packets - pko: %d\n", port,
- (int)pko_status.packets);
-
- /* TODO: other stats */
-}
-
/**
* Returns the interface number for an IPD/PKO port number.
*
return -1;
}
-
-/**
- * Prints out a buffer with the address, hex bytes, and ASCII
- *
- * @param addr Start address to print on the left
- * @param[in] buffer array of bytes to print
- * @param count Number of bytes to print
- */
-void cvmx_print_buffer_u8(unsigned int addr, const uint8_t *buffer,
- size_t count)
-{
- uint i;
-
- while (count) {
- unsigned int linelen = count < 16 ? count : 16;
-
- debug("%08x:", addr);
-
- for (i = 0; i < linelen; i++)
- debug(" %0*x", 2, buffer[i]);
-
- while (i++ < 17)
- debug(" ");
-
- for (i = 0; i < linelen; i++) {
- if (buffer[i] >= 0x20 && buffer[i] < 0x7f)
- debug("%c", buffer[i]);
- else
- debug(".");
- }
- debug("\n");
- addr += linelen;
- buffer += linelen;
- count -= linelen;
- }
-}
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) 2020 Marvell International Ltd.
+ * Copyright (C) 2020-2022 Marvell International Ltd.
*
* Helper functions for common, but complicated tasks.
*/
.enable = __cvmx_helper_npi_enable,
};
-/**
- * @INTERNAL
- * This structure specifies the interface methods used by interfaces
- * configured as srio.
- */
-static const struct iface_ops iface_ops_srio = {
- .mode = CVMX_HELPER_INTERFACE_MODE_SRIO,
- .enumerate = __cvmx_helper_srio_probe,
- .probe = __cvmx_helper_srio_probe,
- .enable = __cvmx_helper_srio_enable,
- .link_get = __cvmx_helper_srio_link_get,
- .link_set = __cvmx_helper_srio_link_set,
-};
-
/**
* @INTERNAL
* This structure specifies the interface methods used by interfaces
return 0;
}
-/*
- * Shut down the interfaces; free the resources.
- * @INTERNAL
- */
-void __cvmx_helper_shutdown_interfaces_node(unsigned int node)
-{
- int i;
- int nifaces; /* number of interfaces */
- struct cvmx_iface *piface;
-
- nifaces = cvmx_helper_get_number_of_interfaces();
- for (i = 0; i < nifaces; i++) {
- piface = &cvmx_interfaces[node][i];
-
- /*
- * For SE apps, bootmem was meant to be allocated and never
- * freed.
- */
- piface->cvif_ipd_port_link_info = 0;
- }
-}
-
-void __cvmx_helper_shutdown_interfaces(void)
-{
- unsigned int node = cvmx_get_node_num();
-
- __cvmx_helper_shutdown_interfaces_node(node);
-}
-
int __cvmx_helper_set_link_info(int xiface, int index, cvmx_helper_link_info_t link_info)
{
struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
typedef int (*cvmx_export_config_t)(void);
cvmx_export_config_t cvmx_export_app_config;
-void cvmx_rgmii_set_back_pressure(uint64_t backpressure_dis)
-{
- cvmx_rgmii_backpressure_dis = backpressure_dis;
-}
-
/*
* internal functions that are not exported in the .h file but must be
* declared to make gcc happy.
*/
extern cvmx_helper_link_info_t __cvmx_helper_get_link_info(int interface, int port);
-/**
- * cvmx_override_iface_phy_mode(int interface, int index) is a function pointer.
- * It is meant to allow customization of interfaces which do not have a PHY.
- *
- * @returns 0 if MAC decides TX_CONFIG_REG or 1 if PHY decides TX_CONFIG_REG.
- *
- * If this function pointer is NULL then it defaults to the MAC.
- */
-int (*cvmx_override_iface_phy_mode)(int interface, int index);
-
/**
* cvmx_override_ipd_port_setup(int ipd_port) is a function
* pointer. It is meant to allow customization of the IPD
* chip and configuration, this can be 1-16. A value of 0
* specifies that the interface doesn't exist or isn't usable.
*
- * @param xiface xiface to get the port count for
+ * @param xiface Interface to get the port count for
*
* Return: Number of ports on interface. Can be Zero.
*/
break;
}
} else if ((interface < 3) && OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
- cvmx_sriox_status_reg_t sriox_status_reg;
- int srio_port = interface - 1;
-
- sriox_status_reg.u64 = csr_rd(CVMX_SRIOX_STATUS_REG(srio_port));
-
- if (sriox_status_reg.s.srio)
- iface_ops[interface] = &iface_ops_srio;
- else
- iface_ops[interface] = &iface_ops_dis;
+ /* SRIO is disabled for now */
+ printf("SRIO disabled for now!\n");
+ iface_ops[interface] = &iface_ops_dis;
} else if (interface == 3) { /* DPI */
iface_ops[interface] = &iface_ops_npi;
} else if (interface == 4) { /* LOOP */
(OCTEON_IS_MODEL(OCTEON_CN66XX) && interface >= 4 &&
interface <= 7)) {
/* Only present in CN63XX & CN66XX Octeon model */
- union cvmx_sriox_status_reg sriox_status_reg;
/* cn66xx pass1.0 has only 2 SRIO interfaces. */
if ((interface == 5 || interface == 7) &&
*/
iface_ops[interface] = &iface_ops_dis;
} else {
- sriox_status_reg.u64 =
- csr_rd(CVMX_SRIOX_STATUS_REG(interface - 4));
- if (sriox_status_reg.s.srio)
- iface_ops[interface] = &iface_ops_srio;
- else
- iface_ops[interface] = &iface_ops_dis;
+ /* SRIO is disabled for now */
+ printf("SRIO disabled for now!\n");
+ iface_ops[interface] = &iface_ops_dis;
}
} else if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
union cvmx_mio_qlmx_cfg mio_qlm_cfg;
return 0;
}
-/**
- * @INTERNAL
- * Verify the per port IPD backpressure is aligned properly.
- * Return: Zero if working, non zero if misaligned
- */
-int __cvmx_helper_backpressure_is_misaligned(void)
-{
- return 0;
-}
-
/**
* @INTERNAL
* Enable packet input/output from the hardware. This function is
if (iface_node_ops[xi.node][xi.interface]->enable)
result = iface_node_ops[xi.node][xi.interface]->enable(xiface);
- result |= __cvmx_helper_board_hardware_enable(xiface);
+
return result;
}
/* Skip invalid/disabled interfaces */
if (cvmx_helper_ports_on_interface(xiface) <= 0)
continue;
- printf("Node %d Interface %d has %d ports (%s)\n", node, interface,
+ debug("Node %d Interface %d has %d ports (%s)\n",
+ node, interface,
cvmx_helper_ports_on_interface(xiface),
cvmx_helper_interface_mode_to_string(
cvmx_helper_interface_get_mode(xiface)));
return 0;
}
-/**
- * Helper function for global packet IO shutdown
- */
-int cvmx_helper_shutdown_packet_io_global_cn78xx(int node)
-{
- int num_interfaces = cvmx_helper_get_number_of_interfaces();
- cvmx_wqe_t *work;
- int interface;
- int result = 0;
-
- /* Shut down all interfaces and disable TX and RX on all ports */
- for (interface = 0; interface < num_interfaces; interface++) {
- int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
- int index;
- int num_ports = cvmx_helper_ports_on_interface(xiface);
-
- if (num_ports > 4)
- num_ports = 4;
-
- cvmx_bgx_set_backpressure_override(xiface, 0);
- for (index = 0; index < num_ports; index++) {
- cvmx_helper_link_info_t link_info;
-
- if (!cvmx_helper_is_port_valid(xiface, index))
- continue;
-
- cvmx_helper_bgx_shutdown_port(xiface, index);
-
- /* Turn off link LEDs */
- link_info.u64 = 0;
- cvmx_helper_update_link_led(xiface, index, link_info);
- }
- }
-
- /* Stop input first */
- cvmx_helper_pki_shutdown(node);
-
- /* Retrieve all packets from the SSO and free them */
- result = 0;
- while ((work = cvmx_pow_work_request_sync(CVMX_POW_WAIT))) {
- cvmx_helper_free_pki_pkt_data(work);
- cvmx_wqe_pki_free(work);
- result++;
- }
-
- if (result > 0)
- debug("%s: Purged %d packets from SSO\n", __func__, result);
-
- /*
- * No need to wait for PKO queues to drain,
- * dq_close() drains the queues to NULL.
- */
-
- /* Shutdown PKO interfaces */
- for (interface = 0; interface < num_interfaces; interface++) {
- int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
-
- cvmx_helper_pko3_shut_interface(xiface);
- }
-
- /* Disable MAC address filtering */
- for (interface = 0; interface < num_interfaces; interface++) {
- int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
-
- switch (cvmx_helper_interface_get_mode(xiface)) {
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- case CVMX_HELPER_INTERFACE_MODE_RXAUI:
- case CVMX_HELPER_INTERFACE_MODE_XLAUI:
- case CVMX_HELPER_INTERFACE_MODE_XFI:
- case CVMX_HELPER_INTERFACE_MODE_10G_KR:
- case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- case CVMX_HELPER_INTERFACE_MODE_MIXED: {
- int index;
- int num_ports = cvmx_helper_ports_on_interface(xiface);
-
- for (index = 0; index < num_ports; index++) {
- if (!cvmx_helper_is_port_valid(xiface, index))
- continue;
-
- /* Reset MAC filtering */
- cvmx_helper_bgx_rx_adr_ctl(node, interface, index, 0, 0, 0);
- }
- break;
- }
- default:
- break;
- }
- }
-
- for (interface = 0; interface < num_interfaces; interface++) {
- int index;
- int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
- int num_ports = cvmx_helper_ports_on_interface(xiface);
-
- for (index = 0; index < num_ports; index++) {
- /* Doing this twice should clear it since no packets
- * can be received.
- */
- cvmx_update_rx_activity_led(xiface, index, false);
- cvmx_update_rx_activity_led(xiface, index, false);
- }
- }
-
- /* Shutdown the PKO unit */
- result = cvmx_helper_pko3_shutdown(node);
-
- /* Release interface structures */
- __cvmx_helper_shutdown_interfaces();
-
- return result;
-}
-
-/**
- * Undo the initialization performed in
- * cvmx_helper_initialize_packet_io_global(). After calling this routine and the
- * local version on each core, packet IO for Octeon will be disabled and placed
- * in the initial reset state. It will then be safe to call the initialize
- * later on. Note that this routine does not empty the FPA pools. It frees all
- * buffers used by the packet IO hardware to the FPA so a function emptying the
- * FPA after shutdown should find all packet buffers in the FPA.
- *
- * Return: Zero on success, negative on failure.
- */
-int cvmx_helper_shutdown_packet_io_global(void)
-{
- const int timeout = 5; /* Wait up to 5 seconds for timeouts */
- int result = 0;
- int num_interfaces = cvmx_helper_get_number_of_interfaces();
- int interface;
- int num_ports;
- int index;
- struct cvmx_buffer_list *pool0_buffers;
- struct cvmx_buffer_list *pool0_buffers_tail;
- cvmx_wqe_t *work;
- union cvmx_ipd_ctl_status ipd_ctl_status;
- int wqe_pool = (int)cvmx_fpa_get_wqe_pool();
- int node = cvmx_get_node_num();
- cvmx_pcsx_mrx_control_reg_t control_reg;
-
- if (octeon_has_feature(OCTEON_FEATURE_BGX))
- return cvmx_helper_shutdown_packet_io_global_cn78xx(node);
-
- /* Step 1: Disable all backpressure */
- for (interface = 0; interface < num_interfaces; interface++) {
- cvmx_helper_interface_mode_t mode =
- cvmx_helper_interface_get_mode(interface);
-
- if (mode == CVMX_HELPER_INTERFACE_MODE_AGL)
- cvmx_agl_set_backpressure_override(interface, 0x1);
- else if (mode != CVMX_HELPER_INTERFACE_MODE_DISABLED)
- cvmx_gmx_set_backpressure_override(interface, 0xf);
- }
-
- /* Step 2: Wait for the PKO queues to drain */
- result = __cvmx_helper_pko_drain();
- if (result < 0) {
- debug("WARNING: %s: Failed to drain some PKO queues\n",
- __func__);
- }
-
- /* Step 3: Disable TX and RX on all ports */
- for (interface = 0; interface < num_interfaces; interface++) {
- int xiface = cvmx_helper_node_interface_to_xiface(node,
- interface);
-
- switch (cvmx_helper_interface_get_mode(interface)) {
- case CVMX_HELPER_INTERFACE_MODE_DISABLED:
- case CVMX_HELPER_INTERFACE_MODE_PCIE:
- /* Not a packet interface */
- break;
- case CVMX_HELPER_INTERFACE_MODE_NPI:
- case CVMX_HELPER_INTERFACE_MODE_SRIO:
- case CVMX_HELPER_INTERFACE_MODE_ILK:
- /*
- * We don't handle the NPI/NPEI/SRIO packet
- * engines. The caller must know these are
- * idle.
- */
- break;
- case CVMX_HELPER_INTERFACE_MODE_LOOP:
- /*
- * Nothing needed. Once PKO is idle, the
- * loopback devices must be idle.
- */
- break;
- case CVMX_HELPER_INTERFACE_MODE_SPI:
- /*
- * SPI cannot be disabled from Octeon. It is
- * the responsibility of the caller to make
- * sure SPI is idle before doing shutdown.
- *
- * Fall through and do the same processing as
- * RGMII/GMII.
- */
- fallthrough;
- case CVMX_HELPER_INTERFACE_MODE_GMII:
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
- /* Disable outermost RX at the ASX block */
- csr_wr(CVMX_ASXX_RX_PRT_EN(interface), 0);
- num_ports = cvmx_helper_ports_on_interface(xiface);
- if (num_ports > 4)
- num_ports = 4;
- for (index = 0; index < num_ports; index++) {
- union cvmx_gmxx_prtx_cfg gmx_cfg;
-
- if (!cvmx_helper_is_port_valid(interface, index))
- continue;
- gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
- gmx_cfg.s.en = 0;
- csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
- /* Poll the GMX state machine waiting for it to become idle */
- csr_wr(CVMX_NPI_DBG_SELECT,
- interface * 0x800 + index * 0x100 + 0x880);
- if (CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data,
- data & 7, ==, 0, timeout * 1000000)) {
- debug("GMX RX path timeout waiting for idle\n");
- result = -1;
- }
- if (CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data,
- data & 0xf, ==, 0, timeout * 1000000)) {
- debug("GMX TX path timeout waiting for idle\n");
- result = -1;
- }
- }
- /* Disable outermost TX at the ASX block */
- csr_wr(CVMX_ASXX_TX_PRT_EN(interface), 0);
- /* Disable interrupts for interface */
- csr_wr(CVMX_ASXX_INT_EN(interface), 0);
- csr_wr(CVMX_GMXX_TX_INT_EN(interface), 0);
- break;
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- case CVMX_HELPER_INTERFACE_MODE_RXAUI:
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- case CVMX_HELPER_INTERFACE_MODE_QSGMII:
- case CVMX_HELPER_INTERFACE_MODE_PICMG:
- num_ports = cvmx_helper_ports_on_interface(xiface);
- if (num_ports > 4)
- num_ports = 4;
- for (index = 0; index < num_ports; index++) {
- union cvmx_gmxx_prtx_cfg gmx_cfg;
-
- if (!cvmx_helper_is_port_valid(interface, index))
- continue;
- gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
- gmx_cfg.s.en = 0;
- csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
- if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
- union cvmx_gmxx_prtx_cfg, rx_idle, ==, 1,
- timeout * 1000000)) {
- debug("GMX RX path timeout waiting for idle\n");
- result = -1;
- }
- if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
- union cvmx_gmxx_prtx_cfg, tx_idle, ==, 1,
- timeout * 1000000)) {
- debug("GMX TX path timeout waiting for idle\n");
- result = -1;
- }
- /* For SGMII some PHYs require that the PCS
- * interface be powered down and reset (i.e.
- * Atheros/Qualcomm PHYs).
- */
- if (cvmx_helper_interface_get_mode(interface) ==
- CVMX_HELPER_INTERFACE_MODE_SGMII) {
- u64 reg;
-
- reg = CVMX_PCSX_MRX_CONTROL_REG(index, interface);
- /* Power down the interface */
- control_reg.u64 = csr_rd(reg);
- control_reg.s.pwr_dn = 1;
- csr_wr(reg, control_reg.u64);
- csr_rd(reg);
- }
- }
- break;
- case CVMX_HELPER_INTERFACE_MODE_AGL: {
- int port = cvmx_helper_agl_get_port(interface);
- union cvmx_agl_gmx_prtx_cfg agl_gmx_cfg;
-
- agl_gmx_cfg.u64 = csr_rd(CVMX_AGL_GMX_PRTX_CFG(port));
- agl_gmx_cfg.s.en = 0;
- csr_wr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_cfg.u64);
- if (CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port),
- union cvmx_agl_gmx_prtx_cfg, rx_idle, ==, 1,
- timeout * 1000000)) {
- debug("AGL RX path timeout waiting for idle\n");
- result = -1;
- }
- if (CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port),
- union cvmx_agl_gmx_prtx_cfg, tx_idle, ==, 1,
- timeout * 1000000)) {
- debug("AGL TX path timeout waiting for idle\n");
- result = -1;
- }
- } break;
- default:
- break;
- }
- }
-
- /* Step 4: Retrieve all packets from the POW and free them */
- while ((work = cvmx_pow_work_request_sync(CVMX_POW_WAIT))) {
- cvmx_helper_free_packet_data(work);
- cvmx_fpa1_free(work, wqe_pool, 0);
- }
-
- /* Step 5 */
- cvmx_ipd_disable();
-
- /*
- * Step 6: Drain all prefetched buffers from IPD/PIP. Note that IPD/PIP
- * have not been reset yet
- */
- __cvmx_ipd_free_ptr();
-
- /* Step 7: Free the PKO command buffers and put PKO in reset */
- cvmx_pko_shutdown();
-
- /* Step 8: Disable MAC address filtering */
- for (interface = 0; interface < num_interfaces; interface++) {
- int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
-
- switch (cvmx_helper_interface_get_mode(interface)) {
- case CVMX_HELPER_INTERFACE_MODE_DISABLED:
- case CVMX_HELPER_INTERFACE_MODE_PCIE:
- case CVMX_HELPER_INTERFACE_MODE_SRIO:
- case CVMX_HELPER_INTERFACE_MODE_ILK:
- case CVMX_HELPER_INTERFACE_MODE_NPI:
- case CVMX_HELPER_INTERFACE_MODE_LOOP:
- break;
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- case CVMX_HELPER_INTERFACE_MODE_RXAUI:
- case CVMX_HELPER_INTERFACE_MODE_GMII:
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
- case CVMX_HELPER_INTERFACE_MODE_SPI:
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- case CVMX_HELPER_INTERFACE_MODE_QSGMII:
- case CVMX_HELPER_INTERFACE_MODE_PICMG:
- num_ports = cvmx_helper_ports_on_interface(xiface);
- if (num_ports > 4)
- num_ports = 4;
- for (index = 0; index < num_ports; index++) {
- if (!cvmx_helper_is_port_valid(interface, index))
- continue;
- csr_wr(CVMX_GMXX_RXX_ADR_CTL(index, interface), 1);
- csr_wr(CVMX_GMXX_RXX_ADR_CAM_EN(index, interface), 0);
- csr_wr(CVMX_GMXX_RXX_ADR_CAM0(index, interface), 0);
- csr_wr(CVMX_GMXX_RXX_ADR_CAM1(index, interface), 0);
- csr_wr(CVMX_GMXX_RXX_ADR_CAM2(index, interface), 0);
- csr_wr(CVMX_GMXX_RXX_ADR_CAM3(index, interface), 0);
- csr_wr(CVMX_GMXX_RXX_ADR_CAM4(index, interface), 0);
- csr_wr(CVMX_GMXX_RXX_ADR_CAM5(index, interface), 0);
- }
- break;
- case CVMX_HELPER_INTERFACE_MODE_AGL: {
- int port = cvmx_helper_agl_get_port(interface);
-
- csr_wr(CVMX_AGL_GMX_RXX_ADR_CTL(port), 1);
- csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 0);
- csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), 0);
- csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), 0);
- csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), 0);
- csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), 0);
- csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), 0);
- csr_wr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), 0);
- } break;
- default:
- break;
- }
- }
-
- /*
- * Step 9: Drain all FPA buffers out of pool 0 before we reset
- * IPD/PIP. This is needed to keep IPD_QUE0_FREE_PAGE_CNT in
- * sync. We temporarily keep the buffers in the pool0_buffers
- * list.
- */
- pool0_buffers = NULL;
- pool0_buffers_tail = NULL;
- while (1) {
- struct cvmx_buffer_list *buffer = cvmx_fpa1_alloc(0);
-
- if (buffer) {
- buffer->next = NULL;
-
- if (!pool0_buffers)
- pool0_buffers = buffer;
- else
- pool0_buffers_tail->next = buffer;
-
- pool0_buffers_tail = buffer;
- } else {
- break;
- }
- }
-
- /* Step 10: Reset IPD and PIP */
- ipd_ctl_status.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
- ipd_ctl_status.s.reset = 1;
- csr_wr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
-
- /* Make sure IPD has finished reset. */
- if (OCTEON_IS_OCTEON2() || OCTEON_IS_MODEL(OCTEON_CN70XX)) {
- if (CVMX_WAIT_FOR_FIELD64(CVMX_IPD_CTL_STATUS, union cvmx_ipd_ctl_status, rst_done,
- ==, 0, 1000)) {
- debug("IPD reset timeout waiting for idle\n");
- result = -1;
- }
- }
-
- /* Step 11: Restore the FPA buffers into pool 0 */
- while (pool0_buffers) {
- struct cvmx_buffer_list *n = pool0_buffers->next;
-
- cvmx_fpa1_free(pool0_buffers, 0, 0);
- pool0_buffers = n;
- }
-
- /* Step 12: Release interface structures */
- __cvmx_helper_shutdown_interfaces();
-
- return result;
-}
-
-/**
- * Does core local shutdown of packet io
- *
- * Return: Zero on success, non-zero on failure
- */
-int cvmx_helper_shutdown_packet_io_local(void)
-{
- /*
- * Currently there is nothing to do per core. This may change
- * in the future.
- */
- return 0;
-}
-
/**
* Auto configure an IPD/PKO port link state and speed. This
* function basically does the equivalent of:
return result;
}
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to sent out again.
- *
- * @param xipd_port IPD/PKO port to loopback.
- * @param enable_internal
- * Non zero if you want internal loopback
- * @param enable_external
- * Non zero if you want external loopback
- *
- * Return: Zero on success, negative on failure.
- */
-int cvmx_helper_configure_loopback(int xipd_port, int enable_internal, int enable_external)
-{
- int result = -1;
- int xiface = cvmx_helper_get_interface_num(xipd_port);
- struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
- int index = cvmx_helper_get_interface_index_num(xipd_port);
-
- if (index >= cvmx_helper_ports_on_interface(xiface))
- return -1;
-
- cvmx_helper_interface_get_mode(xiface);
- if (iface_node_ops[xi.node][xi.interface]->loopback)
- result = iface_node_ops[xi.node][xi.interface]->loopback(xipd_port, enable_internal,
- enable_external);
-
- return result;
-}
-
-void cvmx_helper_setup_simulator_io_buffer_counts(int node, int num_packet_buffers, int pko_buffers)
-{
- if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
- cvmx_helper_pki_set_dflt_pool_buffer(node, num_packet_buffers);
- cvmx_helper_pki_set_dflt_aura_buffer(node, num_packet_buffers);
-
- } else {
- cvmx_ipd_set_packet_pool_buffer_count(num_packet_buffers);
- cvmx_ipd_set_wqe_pool_buffer_count(num_packet_buffers);
- cvmx_pko_set_cmd_queue_pool_buffer_count(pko_buffers);
- }
-}
-
void *cvmx_helper_mem_alloc(int node, uint64_t alloc_size, uint64_t align)
{
s64 paddr;
}
return cvmx_phys_to_ptr(paddr);
}
-
-void cvmx_helper_mem_free(void *buffer, uint64_t size)
-{
- __cvmx_bootmem_phy_free(cvmx_ptr_to_phys(buffer), size, 0);
-}
-
-int cvmx_helper_qos_config_init(cvmx_qos_proto_t qos_proto, cvmx_qos_config_t *qos_cfg)
-{
- int i;
-
- memset(qos_cfg, 0, sizeof(cvmx_qos_config_t));
- qos_cfg->pkt_mode = CVMX_QOS_PKT_MODE_HWONLY; /* Process PAUSEs in hardware only.*/
- qos_cfg->pool_mode = CVMX_QOS_POOL_PER_PORT; /* One Pool per BGX:LMAC.*/
- qos_cfg->pktbuf_size = 2048; /* Fit WQE + MTU in one buffer.*/
- qos_cfg->aura_size = 1024; /* 1K buffers typically enough for any application.*/
- qos_cfg->pko_pfc_en = 1; /* Enable PKO layout for PFC feature. */
- qos_cfg->vlan_num = 1; /* For Stacked VLAN, use 2nd VLAN in the QPG algorithm.*/
- qos_cfg->qos_proto = qos_proto; /* Use PFC flow-control protocol.*/
- qos_cfg->qpg_base = -1; /* QPG Table index is undefined.*/
- qos_cfg->p_time = 0x60; /* PAUSE packets time window.*/
- qos_cfg->p_interval = 0x10; /* PAUSE packets interval.*/
- for (i = 0; i < CVMX_QOS_NUM; i++) {
- qos_cfg->groups[i] = i; /* SSO Groups = 0...7 */
- qos_cfg->group_prio[i] = i; /* SSO Group priority = QOS. */
- qos_cfg->drop_thresh[i] = 99; /* 99% of the Aura size.*/
- qos_cfg->red_thresh[i] = 90; /* 90% of the Aura size.*/
- qos_cfg->bp_thresh[i] = 70; /* 70% of the Aura size.*/
- }
- return 0;
-}
-
-int cvmx_helper_qos_port_config_update(int xipdport, cvmx_qos_config_t *qos_cfg)
-{
- cvmx_user_static_pko_queue_config_t pkocfg;
- cvmx_xport_t xp = cvmx_helper_ipd_port_to_xport(xipdport);
- int xiface = cvmx_helper_get_interface_num(xipdport);
- cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
-
- /* Configure PKO port for PFC SQ layout: */
- cvmx_helper_pko_queue_config_get(xp.node, &pkocfg);
- pkocfg.pknd.pko_cfg_iface[xi.interface].pfc_enable = 1;
- cvmx_helper_pko_queue_config_set(xp.node, &pkocfg);
- return 0;
-}
-
-int cvmx_helper_qos_port_setup(int xipdport, cvmx_qos_config_t *qos_cfg)
-{
- const int channles = CVMX_QOS_NUM;
- int bufsize = qos_cfg->pktbuf_size;
- int aura_size = qos_cfg->aura_size;
- cvmx_xport_t xp = cvmx_helper_ipd_port_to_xport(xipdport);
- int node = xp.node;
- int ipdport = xp.port;
- int port = cvmx_helper_get_interface_index_num(xp.port);
- int xiface = cvmx_helper_get_interface_num(xipdport);
- cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
- cvmx_fpa3_pool_t gpool;
- cvmx_fpa3_gaura_t gaura;
- cvmx_bgxx_cmr_rx_ovr_bp_t ovrbp;
- struct cvmx_pki_qpg_config qpgcfg;
- struct cvmx_pki_style_config stcfg, stcfg_dflt;
- struct cvmx_pki_pkind_config pkcfg;
- int chan, bpid, group, qpg;
- int bpen, reden, dropen, passthr, dropthr, bpthr;
- int nbufs, pkind, style;
- char name[32];
-
- if (qos_cfg->pool_mode == CVMX_QOS_POOL_PER_PORT) {
- /* Allocate and setup packet Pool: */
- nbufs = aura_size * channles;
- sprintf(name, "QOS.P%d", ipdport);
- gpool = cvmx_fpa3_setup_fill_pool(node, -1 /*auto*/, name, bufsize, nbufs, NULL);
- if (!__cvmx_fpa3_pool_valid(gpool)) {
- printf("%s: Failed to setup FPA Pool\n", __func__);
- return -1;
- }
- for (chan = 0; chan < channles; chan++)
- qos_cfg->gpools[chan] = gpool;
- } else {
- printf("%s: Invalid pool_mode %d\n", __func__, qos_cfg->pool_mode);
- return -1;
- }
- /* Allocate QPG entries: */
- qos_cfg->qpg_base = cvmx_pki_qpg_entry_alloc(node, -1 /*auto*/, channles);
- if (qos_cfg->qpg_base < 0) {
- printf("%s: Failed to allocate QPG entry\n", __func__);
- return -1;
- }
- for (chan = 0; chan < channles; chan++) {
- /* Allocate and setup Aura, setup BP threshold: */
- gpool = qos_cfg->gpools[chan];
- sprintf(name, "QOS.A%d", ipdport + chan);
- gaura = cvmx_fpa3_set_aura_for_pool(gpool, -1 /*auto*/, name, bufsize, aura_size);
- if (!__cvmx_fpa3_aura_valid(gaura)) {
- printf("%s: Failed to setup FPA Aura for Channel %d\n", __func__, chan);
- return -1;
- }
- qos_cfg->gauras[chan] = gaura;
- bpen = 1;
- reden = 1;
- dropen = 1;
- dropthr = (qos_cfg->drop_thresh[chan] * 10 * aura_size) / 1000;
- passthr = (qos_cfg->red_thresh[chan] * 10 * aura_size) / 1000;
- bpthr = (qos_cfg->bp_thresh[chan] * 10 * aura_size) / 1000;
- cvmx_fpa3_setup_aura_qos(gaura, reden, passthr, dropthr, bpen, bpthr);
- cvmx_pki_enable_aura_qos(node, gaura.laura, reden, dropen, bpen);
-
- /* Allocate BPID, link Aura and Channel using BPID: */
- bpid = cvmx_pki_bpid_alloc(node, -1 /*auto*/);
- if (bpid < 0) {
- printf("%s: Failed to allocate BPID for channel %d\n",
- __func__, chan);
- return -1;
- }
- qos_cfg->bpids[chan] = bpid;
- cvmx_pki_write_aura_bpid(node, gaura.laura, bpid);
- cvmx_pki_write_channel_bpid(node, ipdport + chan, bpid);
-
- /* Setup QPG entries: */
- group = qos_cfg->groups[chan];
- qpg = qos_cfg->qpg_base + chan;
- cvmx_pki_read_qpg_entry(node, qpg, &qpgcfg);
- qpgcfg.port_add = chan;
- qpgcfg.aura_num = gaura.laura;
- qpgcfg.grp_ok = (node << CVMX_WQE_GRP_NODE_SHIFT) | group;
- qpgcfg.grp_bad = (node << CVMX_WQE_GRP_NODE_SHIFT) | group;
- qpgcfg.grptag_ok = (node << CVMX_WQE_GRP_NODE_SHIFT) | 0;
- qpgcfg.grptag_bad = (node << CVMX_WQE_GRP_NODE_SHIFT) | 0;
- cvmx_pki_write_qpg_entry(node, qpg, &qpgcfg);
- }
- /* Allocate and setup STYLE: */
- cvmx_helper_pki_get_dflt_style(node, &stcfg_dflt);
- style = cvmx_pki_style_alloc(node, -1 /*auto*/);
- cvmx_pki_read_style_config(node, style, CVMX_PKI_CLUSTER_ALL, &stcfg);
- stcfg.tag_cfg = stcfg_dflt.tag_cfg;
- stcfg.parm_cfg.tag_type = CVMX_POW_TAG_TYPE_ORDERED;
- stcfg.parm_cfg.qpg_qos = CVMX_PKI_QPG_QOS_VLAN;
- stcfg.parm_cfg.qpg_base = qos_cfg->qpg_base;
- stcfg.parm_cfg.qpg_port_msb = 0;
- stcfg.parm_cfg.qpg_port_sh = 0;
- stcfg.parm_cfg.qpg_dis_grptag = 1;
- stcfg.parm_cfg.fcs_strip = 1;
- stcfg.parm_cfg.mbuff_size = bufsize - 64; /* Do not use 100% of the buffer. */
- stcfg.parm_cfg.force_drop = 0;
- stcfg.parm_cfg.nodrop = 0;
- stcfg.parm_cfg.rawdrp = 0;
- stcfg.parm_cfg.cache_mode = 2; /* 1st buffer in L2 */
- stcfg.parm_cfg.wqe_vs = qos_cfg->vlan_num;
- cvmx_pki_write_style_config(node, style, CVMX_PKI_CLUSTER_ALL, &stcfg);
-
- /* Setup PKIND: */
- pkind = cvmx_helper_get_pknd(xiface, port);
- cvmx_pki_read_pkind_config(node, pkind, &pkcfg);
- pkcfg.cluster_grp = 0; /* OCTEON3 has only one cluster group = 0 */
- pkcfg.initial_style = style;
- pkcfg.initial_parse_mode = CVMX_PKI_PARSE_LA_TO_LG;
- cvmx_pki_write_pkind_config(node, pkind, &pkcfg);
-
- /* Setup parameters of the QOS packet and enable QOS flow-control: */
- cvmx_bgx_set_pause_pkt_param(xipdport, 0, 0x0180c2000001, 0x8808, qos_cfg->p_time,
- qos_cfg->p_interval);
- cvmx_bgx_set_flowctl_mode(xipdport, qos_cfg->qos_proto, qos_cfg->pkt_mode);
-
- /* Enable PKI channel backpressure in the BGX: */
- ovrbp.u64 = csr_rd_node(node, CVMX_BGXX_CMR_RX_OVR_BP(xi.interface));
- ovrbp.s.en &= ~(1 << port);
- ovrbp.s.ign_fifo_bp &= ~(1 << port);
- csr_wr_node(node, CVMX_BGXX_CMR_RX_OVR_BP(xi.interface), ovrbp.u64);
- return 0;
-}
-
-int cvmx_helper_qos_sso_setup(int xipdport, cvmx_qos_config_t *qos_cfg)
-{
- const int channels = CVMX_QOS_NUM;
- cvmx_sso_grpx_pri_t grppri;
- int chan, qos, group;
- cvmx_xport_t xp = cvmx_helper_ipd_port_to_xport(xipdport);
- int node = xp.node;
-
- for (chan = 0; chan < channels; chan++) {
- qos = cvmx_helper_qos2prio(chan);
- group = qos_cfg->groups[qos];
- grppri.u64 = csr_rd_node(node, CVMX_SSO_GRPX_PRI(group));
- grppri.s.pri = qos_cfg->group_prio[chan];
- csr_wr_node(node, CVMX_SSO_GRPX_PRI(group), grppri.u64);
- }
- return 0;
-}
-
-int cvmx_helper_get_chan_e_name(int chan, char *namebuf, int buflen)
-{
- int n, dpichans;
-
- if ((unsigned int)chan >= CVMX_PKO3_IPD_NUM_MAX) {
- printf("%s: Channel %d is out of range (0..4095)\n", __func__, chan);
- return -1;
- }
- if (OCTEON_IS_MODEL(OCTEON_CN78XX))
- dpichans = 64;
- else
- dpichans = 128;
-
- if (chan >= 0 && chan < 64)
- n = snprintf(namebuf, buflen, "LBK%d", chan);
- else if (chan >= 0x100 && chan < (0x100 + dpichans))
- n = snprintf(namebuf, buflen, "DPI%d", chan - 0x100);
- else if (chan == 0x200)
- n = snprintf(namebuf, buflen, "NQM");
- else if (chan >= 0x240 && chan < (0x240 + (1 << 1) + 2))
- n = snprintf(namebuf, buflen, "SRIO%d:%d", (chan - 0x240) >> 1,
- (chan - 0x240) & 0x1);
- else if (chan >= 0x400 && chan < (0x400 + (1 << 8) + 256))
- n = snprintf(namebuf, buflen, "ILK%d:%d", (chan - 0x400) >> 8,
- (chan - 0x400) & 0xFF);
- else if (chan >= 0x800 && chan < (0x800 + (5 << 8) + (3 << 4) + 16))
- n = snprintf(namebuf, buflen, "BGX%d:%d:%d", (chan - 0x800) >> 8,
- ((chan - 0x800) >> 4) & 0x3, (chan - 0x800) & 0xF);
- else
- n = snprintf(namebuf, buflen, "--");
- return n;
-}
-
-#ifdef CVMX_DUMP_DIAGNOSTICS
-void cvmx_helper_dump_for_diagnostics(int node)
-{
- if (!(OCTEON_IS_OCTEON3() && !OCTEON_IS_MODEL(OCTEON_CN70XX))) {
- printf("Diagnostics are not implemented for this model\n");
- return;
- }
-#ifdef CVMX_DUMP_GSER
- {
- int qlm, num_qlms;
-
- num_qlms = cvmx_qlm_get_num();
- for (qlm = 0; qlm < num_qlms; qlm++) {
- cvmx_dump_gser_config_node(node, qlm);
- cvmx_dump_gser_status_node(node, qlm);
- }
- }
-#endif
-#ifdef CVMX_DUMP_BGX
- {
- int bgx;
-
- for (bgx = 0; bgx < CVMX_HELPER_MAX_GMX; bgx++) {
- cvmx_dump_bgx_config_node(node, bgx);
- cvmx_dump_bgx_status_node(node, bgx);
- }
- }
-#endif
-#ifdef CVMX_DUMP_PKI
- cvmx_pki_config_dump(node);
- cvmx_pki_stats_dump(node);
-#endif
-#ifdef CVMX_DUMP_PKO
- cvmx_helper_pko3_config_dump(node);
- cvmx_helper_pko3_stats_dump(node);
-#endif
-#ifdef CVMX_DUMO_SSO
- cvmx_sso_config_dump(node);
-#endif
-}
-#endif