/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _QED_HSI_H
#define _QED_HSI_H
#include <linux/types.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/storage_common.h>
#include <linux/qed/tcp_common.h>
#include <linux/qed/fcoe_common.h>
#include <linux/qed/eth_common.h>
#include <linux/qed/iscsi_common.h>
#include <linux/qed/iwarp_common.h>
#include <linux/qed/rdma_common.h>
#include <linux/qed/roce_common.h>
#include <linux/qed/qed_fcoe_if.h>
struct qed_hwfn;
struct qed_ptt;
/* Opcodes for the event ring */
enum common_event_opcode {
COMMON_EVENT_PF_START,
COMMON_EVENT_PF_STOP,
COMMON_EVENT_VF_START,
COMMON_EVENT_VF_STOP,
COMMON_EVENT_VF_PF_CHANNEL,
COMMON_EVENT_VF_FLR,
COMMON_EVENT_PF_UPDATE,
COMMON_EVENT_MALICIOUS_VF,
COMMON_EVENT_RL_UPDATE,
COMMON_EVENT_EMPTY,
MAX_COMMON_EVENT_OPCODE
};
/* Common Ramrod Command IDs */
enum common_ramrod_cmd_id {
COMMON_RAMROD_UNUSED,
COMMON_RAMROD_PF_START,
COMMON_RAMROD_PF_STOP,
COMMON_RAMROD_VF_START,
COMMON_RAMROD_VF_STOP,
COMMON_RAMROD_PF_UPDATE,
COMMON_RAMROD_RL_UPDATE,
COMMON_RAMROD_EMPTY,
MAX_COMMON_RAMROD_CMD_ID
};
/* How LL2 should deal with a packet upon errors */
enum core_error_handle {
LL2_DROP_PACKET,
LL2_DO_NOTHING,
LL2_ASSERT,
MAX_CORE_ERROR_HANDLE
};
/* Opcodes for the core (LL2) event ring */
enum core_event_opcode {
CORE_EVENT_TX_QUEUE_START,
CORE_EVENT_TX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_START,
CORE_EVENT_RX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_FLUSH,
CORE_EVENT_TX_QUEUE_UPDATE,
CORE_EVENT_QUEUE_STATS_QUERY,
MAX_CORE_EVENT_OPCODE
};
/* The L4 pseudo checksum mode for Core */
enum core_l4_pseudo_checksum_mode {
CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
};
/* Light-L2 per-port statistics (GSI errors) */
struct core_ll2_port_stats {
struct regpair gsi_invalid_hdr;
struct regpair gsi_invalid_pkt_length;
struct regpair gsi_unsupported_pkt_typ;
struct regpair gsi_crcchksm_error;
};
/* LL2 TX Per Queue Stats */
struct core_ll2_pstorm_per_queue_stat {
struct regpair sent_ucast_bytes;
struct regpair sent_mcast_bytes;
struct regpair sent_bcast_bytes;
struct regpair sent_ucast_pkts;
struct regpair sent_mcast_pkts;
struct regpair sent_bcast_pkts;
struct regpair error_drop_pkts;
};
/* Light-L2 RX Producers in Tstorm RAM */
struct core_ll2_rx_prod {
__le16 bd_prod;
__le16 cqe_prod;
};
struct core_ll2_tstorm_per_queue_stat {
struct regpair packet_too_big_discard;
struct regpair no_buff_discard;
};
struct core_ll2_ustorm_per_queue_stat {
struct regpair rcv_ucast_bytes;
struct regpair rcv_mcast_bytes;
struct regpair rcv_bcast_bytes;
struct regpair rcv_ucast_pkts;
struct regpair rcv_mcast_pkts;
struct regpair rcv_bcast_pkts;
};
/* Structure for doorbell data, in PWM mode, for RX producers update. */
struct core_pwm_prod_update_data {
__le16 icid; /* internal CID */
u8 reserved0;
u8 params;
#define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_MASK 0x3
#define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_SHIFT 0
#define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_MASK 0x3F /* Set 0 */
#define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_SHIFT 2
struct core_ll2_rx_prod prod; /* Producers */
};
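/* The MASK/SHIFT pairs throughout this file describe sub-fields packed into
 * the storage member declared just above them. They are intended for use
 * with the GET_FIELD()/SET_FIELD() helpers from <linux/qed/common_hsi.h>.
 * Usage sketch (illustrative only; "data" is a hypothetical local):
 *
 *	struct core_pwm_prod_update_data data = { 0 };
 *
 *	SET_FIELD(data.params, CORE_PWM_PROD_UPDATE_DATA_AGG_CMD, 0x1);
 *	WARN_ON(GET_FIELD(data.params,
 *			  CORE_PWM_PROD_UPDATE_DATA_AGG_CMD) != 0x1);
 */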
/* Core Ramrod Command IDs (light L2) */
enum core_ramrod_cmd_id {
CORE_RAMROD_UNUSED,
CORE_RAMROD_RX_QUEUE_START,
CORE_RAMROD_TX_QUEUE_START,
CORE_RAMROD_RX_QUEUE_STOP,
CORE_RAMROD_TX_QUEUE_STOP,
CORE_RAMROD_RX_QUEUE_FLUSH,
CORE_RAMROD_TX_QUEUE_UPDATE,
CORE_RAMROD_QUEUE_STATS_QUERY,
MAX_CORE_RAMROD_CMD_ID
};
/* Core RoCE flavor type */
enum core_roce_flavor_type {
CORE_ROCE,
CORE_RROCE,
MAX_CORE_ROCE_FLAVOR_TYPE
};
/* Specifies how LL2 should deal with packet errors: packet_too_big and
 * no_buff.
 */
struct core_rx_action_on_error {
u8 error_type;
#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3
#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3
#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2
#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF
#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
};
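/* Usage sketch (illustrative only): each 2-bit sub-field of error_type takes
 * a value from enum core_error_handle, composed with SET_FIELD():
 *
 *	u8 err_type = 0;
 *
 *	SET_FIELD(err_type, CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
 *		  LL2_DROP_PACKET);
 *	SET_FIELD(err_type, CORE_RX_ACTION_ON_ERROR_NO_BUFF, LL2_DO_NOTHING);
 */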
/* Core RX BD for Light L2 */
struct core_rx_bd {
struct regpair addr;
__le16 reserved[4];
};
/* Core RX CM offload BD for Light L2 */
struct core_rx_bd_with_buff_len {
struct regpair addr;
__le16 buff_length;
__le16 reserved[3];
};
/* Core RX BD union for Light L2 */
union core_rx_bd_union {
struct core_rx_bd rx_bd;
struct core_rx_bd_with_buff_len rx_bd_with_len;
};
/* Opaque Data for Light L2 RX CQE */
struct core_rx_cqe_opaque_data {
__le32 data[2];
};
/* Core RX CQE Type for Light L2 */
enum core_rx_cqe_type {
CORE_RX_CQE_ILLEGAL_TYPE,
CORE_RX_CQE_TYPE_REGULAR,
CORE_RX_CQE_TYPE_GSI_OFFLOAD,
CORE_RX_CQE_TYPE_SLOW_PATH,
MAX_CORE_RX_CQE_TYPE
};
/* Core RX CQE for Light L2 */
struct core_rx_fast_path_cqe {
u8 type;
u8 placement_offset;
struct parsing_and_err_flags parse_flags;
__le16 packet_length;
__le16 vlan;
struct core_rx_cqe_opaque_data opaque_data;
struct parsing_err_flags err_flags;
__le16 reserved0;
__le32 reserved1[3];
};
/* Core Rx CM offload CQE */
struct core_rx_gsi_offload_cqe {
u8 type;
u8 data_length_error;
struct parsing_and_err_flags parse_flags;
__le16 data_length;
__le16 vlan;
__le32 src_mac_addrhi;
__le16 src_mac_addrlo;
__le16 qp_id;
__le32 src_qp;
struct core_rx_cqe_opaque_data opaque_data;
__le32 reserved;
};
/* Core RX CQE for Light L2 */
struct core_rx_slow_path_cqe {
u8 type;
u8 ramrod_cmd_id;
__le16 echo;
struct core_rx_cqe_opaque_data opaque_data;
__le32 reserved1[5];
};
/* Core RX CQE union for Light L2 */
union core_rx_cqe_union {
struct core_rx_fast_path_cqe rx_cqe_fp;
struct core_rx_gsi_offload_cqe rx_cqe_gsi;
struct core_rx_slow_path_cqe rx_cqe_sp;
};
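/* Usage sketch (illustrative only): every CQE flavor begins with a u8 type
 * holding an enum core_rx_cqe_type value, so a completion handler can
 * dispatch on it before touching flavor-specific fields; the handle_*()
 * callees are hypothetical:
 *
 *	switch (cqe->rx_cqe_sp.type) {
 *	case CORE_RX_CQE_TYPE_REGULAR:
 *		handle_fp(&cqe->rx_cqe_fp);
 *		break;
 *	case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
 *		handle_gsi(&cqe->rx_cqe_gsi);
 *		break;
 *	case CORE_RX_CQE_TYPE_SLOW_PATH:
 *		handle_sp(&cqe->rx_cqe_sp);
 *		break;
 *	default:
 *		break;
 *	}
 */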
/* Ramrod data for rx queue start ramrod */
struct core_rx_start_ramrod_data {
struct regpair bd_base;
struct regpair cqe_pbl_addr;
__le16 mtu;
__le16 sb_id;
u8 sb_index;
u8 complete_cqe_flg;
u8 complete_event_flg;
u8 drop_ttl0_flg;
__le16 num_of_pbl_pages;
u8 inner_vlan_stripping_en;
u8 report_outer_vlan;
u8 queue_id;
u8 main_func_queue;
u8 mf_si_bcast_accept_all;
u8 mf_si_mcast_accept_all;
struct core_rx_action_on_error action_on_error;
u8 gsi_offload_flag;
u8 vport_id_valid;
u8 vport_id;
u8 zero_prod_flg;
u8 wipe_inner_vlan_pri_en;
u8 reserved[2];
};
/* Ramrod data for rx queue stop ramrod */
struct core_rx_stop_ramrod_data {
u8 complete_cqe_flg;
u8 complete_event_flg;
u8 queue_id;
u8 reserved1;
__le16 reserved2[2];
};
/* Flags for Core TX BD */
struct core_tx_bd_data {
__le16 as_bitfield;
#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
#define CORE_TX_BD_DATA_START_BD_MASK 0x1
#define CORE_TX_BD_DATA_START_BD_SHIFT 2
#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
#define CORE_TX_BD_DATA_NBDS_MASK 0xF
#define CORE_TX_BD_DATA_NBDS_SHIFT 8
#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK 0x1
#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT 14
#define CORE_TX_BD_DATA_RESERVED0_MASK 0x1
#define CORE_TX_BD_DATA_RESERVED0_SHIFT 15
};
/* Core TX BD for Light L2 */
struct core_tx_bd {
struct regpair addr;
__le16 nbytes;
__le16 nw_vlan_or_lb_echo;
struct core_tx_bd_data bd_data;
__le16 bitfield1;
#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
#define CORE_TX_BD_TX_DST_MASK 0x3
#define CORE_TX_BD_TX_DST_SHIFT 14
};
/* Light L2 TX Destination */
enum core_tx_dest {
CORE_TX_DEST_NW,
CORE_TX_DEST_LB,
CORE_TX_DEST_RESERVED,
CORE_TX_DEST_DROP,
MAX_CORE_TX_DEST
};
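/* Usage sketch (illustrative only): filling a single-BD packet destined to
 * the network wire, using SET_FIELD() and DMA_REGPAIR_LE() from
 * common_hsi.h; "bd", "dma" and "len" are hypothetical locals:
 *
 *	u16 bd_flags = 0, bf1 = 0;
 *
 *	DMA_REGPAIR_LE(bd->addr, dma);
 *	bd->nbytes = cpu_to_le16(len);
 *	SET_FIELD(bd_flags, CORE_TX_BD_DATA_START_BD, 0x1);
 *	SET_FIELD(bd_flags, CORE_TX_BD_DATA_NBDS, 1);
 *	bd->bd_data.as_bitfield = cpu_to_le16(bd_flags);
 *	SET_FIELD(bf1, CORE_TX_BD_TX_DST, CORE_TX_DEST_NW);
 *	bd->bitfield1 = cpu_to_le16(bf1);
 */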
/* Ramrod data for tx queue start ramrod */
struct core_tx_start_ramrod_data {
struct regpair pbl_base_addr;
__le16 mtu;
__le16 sb_id;
u8 sb_index;
u8 stats_en;
u8 stats_id;
u8 conn_type;
__le16 pbl_size;
__le16 qm_pq_id;
u8 gsi_offload_flag;
u8 ctx_stats_en;
u8 vport_id_valid;
u8 vport_id;
u8 enforce_security_flag;
u8 reserved[7];
};
/* Ramrod data for tx queue stop ramrod */
struct core_tx_stop_ramrod_data {
__le32 reserved0[2];
};
/* Ramrod data for tx queue update ramrod */
struct core_tx_update_ramrod_data {
u8 update_qm_pq_id_flg;
u8 reserved0;
__le16 qm_pq_id;
__le32 reserved1[1];
};
/* Enum flag for which type of DCB/DSCP data to update */
enum dcb_dscp_update_mode {
DONT_UPDATE_DCB_DSCP,
UPDATE_DCB,
UPDATE_DSCP,
UPDATE_DCB_DSCP,
MAX_DCB_DSCP_UPDATE_MODE
};
/* The core storm context for the Ystorm */
struct ystorm_core_conn_st_ctx {
__le32 reserved[4];
};
/* The core storm context for the Pstorm */
struct pstorm_core_conn_st_ctx {
__le32 reserved[20];
};
/* Core Slowpath Connection storm context of Xstorm */
struct xstorm_core_conn_st_ctx {
__le32 spq_base_lo;
__le32 spq_base_hi;
struct regpair consolid_base_addr;
__le16 spq_cons;
__le16 consolid_cons;
__le32 reserved0[55];
};
struct e4_xstorm_core_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 consolid_prod;
__le16 reserved16;
__le16 tx_bd_cons;
__le16 tx_bd_or_spq_prod;
__le16 updated_qm_pq_id;
__le16 conn_dpi;
u8 byte3;
u8 byte4;
u8 byte5;
u8 byte6;
__le32 reg0;
__le32 reg1;
__le32 reg2;
__le32 reg3;
__le32 reg4;
__le32 reg5;
__le32 reg6;
__le16 word7;
__le16 word8;
__le16 word9;
__le16 word10;
__le32 reg7;
__le32 reg8;
__le32 reg9;
u8 byte7;
u8 byte8;
u8 byte9;
u8 byte10;
u8 byte11;
u8 byte12;
u8 byte13;
u8 byte14;
u8 byte15;
u8 e5_reserved;
__le16 word11;
__le32 reg10;
__le32 reg11;
__le32 reg12;
__le32 reg13;
__le32 reg14;
__le32 reg15;
__le32 reg16;
__le32 reg17;
__le32 reg18;
__le32 reg19;
__le16 word12;
__le16 word13;
__le16 word14;
__le16 word15;
};
struct e4_tstorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
__le32 reg3;
__le32 reg4;
__le32 reg5;
__le32 reg6;
__le32 reg7;
__le32 reg8;
u8 byte2;
u8 byte3;
__le16 word0;
u8 byte4;
u8 byte5;
__le16 word1;
__le16 word2;
__le16 word3;
__le32 ll2_rx_prod;
__le32 reg10;
};
struct e4_ustorm_core_conn_ag_ctx {
u8 reserved;
u8 byte1;
u8 flags0;
#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
__le16 word1;
__le32 rx_producers;
__le32 reg1;
__le32 reg2;
__le32 reg3;
__le16 word2;
__le16 word3;
};
/* The core storm context for the Mstorm */
struct mstorm_core_conn_st_ctx {
__le32 reserved[40];
};
/* The core storm context for the Ustorm */
struct ustorm_core_conn_st_ctx {
__le32 reserved[20];
};
/* The core storm context for the Tstorm */
struct tstorm_core_conn_st_ctx {
__le32 reserved[4];
};
/* core connection context */
struct e4_core_conn_context {
struct ystorm_core_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct pstorm_core_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct xstorm_core_conn_st_ctx xstorm_st_context;
struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context;
struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context;
struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context;
struct mstorm_core_conn_st_ctx mstorm_st_context;
struct ustorm_core_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
struct tstorm_core_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
};
struct eth_mstorm_per_pf_stat {
struct regpair gre_discard_pkts;
struct regpair vxlan_discard_pkts;
struct regpair geneve_discard_pkts;
struct regpair lb_discard_pkts;
};
struct eth_mstorm_per_queue_stat {
struct regpair ttl0_discard;
struct regpair packet_too_big_discard;
struct regpair no_buff_discard;
struct regpair not_active_discard;
struct regpair tpa_coalesced_pkts;
struct regpair tpa_coalesced_events;
struct regpair tpa_aborts_num;
struct regpair tpa_coalesced_bytes;
};
/* Ethernet TX Per PF */
struct eth_pstorm_per_pf_stat {
struct regpair sent_lb_ucast_bytes;
struct regpair sent_lb_mcast_bytes;
struct regpair sent_lb_bcast_bytes;
struct regpair sent_lb_ucast_pkts;
struct regpair sent_lb_mcast_pkts;
struct regpair sent_lb_bcast_pkts;
struct regpair sent_gre_bytes;
struct regpair sent_vxlan_bytes;
struct regpair sent_geneve_bytes;
struct regpair sent_mpls_bytes;
struct regpair sent_gre_mpls_bytes;
struct regpair sent_udp_mpls_bytes;
struct regpair sent_gre_pkts;
struct regpair sent_vxlan_pkts;
struct regpair sent_geneve_pkts;
struct regpair sent_mpls_pkts;
struct regpair sent_gre_mpls_pkts;
struct regpair sent_udp_mpls_pkts;
struct regpair gre_drop_pkts;
struct regpair vxlan_drop_pkts;
struct regpair geneve_drop_pkts;
struct regpair mpls_drop_pkts;
struct regpair gre_mpls_drop_pkts;
struct regpair udp_mpls_drop_pkts;
};
/* Ethernet TX Per Queue Stats */
struct eth_pstorm_per_queue_stat {
struct regpair sent_ucast_bytes;
struct regpair sent_mcast_bytes;
struct regpair sent_bcast_bytes;
struct regpair sent_ucast_pkts;
struct regpair sent_mcast_pkts;
struct regpair sent_bcast_pkts;
struct regpair error_drop_pkts;
};
/* ETH RX rate limiting data */
struct eth_rx_rate_limit {
__le16 mult;
__le16 cnst;
u8 add_sub_cnst;
u8 reserved0;
__le16 reserved1;
};
/* Update RSS indirection table entry command */
struct eth_tstorm_rss_update_data {
u8 valid;
u8 vport_id;
u8 ind_table_index;
u8 reserved;
__le16 ind_table_value;
__le16 reserved1;
};
struct eth_ustorm_per_pf_stat {
struct regpair rcv_lb_ucast_bytes;
struct regpair rcv_lb_mcast_bytes;
struct regpair rcv_lb_bcast_bytes;
struct regpair rcv_lb_ucast_pkts;
struct regpair rcv_lb_mcast_pkts;
struct regpair rcv_lb_bcast_pkts;
struct regpair rcv_gre_bytes;
struct regpair rcv_vxlan_bytes;
struct regpair rcv_geneve_bytes;
struct regpair rcv_gre_pkts;
struct regpair rcv_vxlan_pkts;
struct regpair rcv_geneve_pkts;
};
struct eth_ustorm_per_queue_stat {
struct regpair rcv_ucast_bytes;
struct regpair rcv_mcast_bytes;
struct regpair rcv_bcast_bytes;
struct regpair rcv_ucast_pkts;
struct regpair rcv_mcast_pkts;
struct regpair rcv_bcast_pkts;
};
/* Event Ring VF-PF Channel data */
struct vf_pf_channel_eqe_data {
struct regpair msg_addr;
};
/* Event Ring malicious VF data */
struct malicious_vf_eqe_data {
u8 vf_id;
u8 err_id;
__le16 reserved[3];
};
/* Event Ring initial cleanup data */
struct initial_cleanup_eqe_data {
u8 vf_id;
u8 reserved[7];
};
/* Event Data Union */
union event_ring_data {
u8 bytes[8];
struct vf_pf_channel_eqe_data vf_pf_channel;
struct iscsi_eqe_data iscsi_info;
struct iscsi_connect_done_results iscsi_conn_done_info;
union rdma_eqe_data rdma_data;
struct malicious_vf_eqe_data malicious_vf;
struct initial_cleanup_eqe_data vf_init_cleanup;
};
/* Event Ring Entry */
struct event_ring_entry {
u8 protocol_id;
u8 opcode;
u8 reserved0;
u8 vf_id;
__le16 echo;
u8 fw_return_code;
u8 flags;
#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
union event_ring_data data;
};
/* Event Ring Next Page Address */
struct event_ring_next_addr {
struct regpair addr;
__le32 reserved[2];
};
/* Event Ring Element */
union event_ring_element {
struct event_ring_entry entry;
struct event_ring_next_addr next_addr;
};
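/* Usage sketch (illustrative only): an event-queue consumer reads elements
 * through the qed_chain abstraction and dispatches on protocol_id/opcode;
 * "p_eq" and handle_vf_flr() are hypothetical, PROTOCOLID_COMMON comes from
 * common_hsi.h:
 *
 *	union event_ring_element *elem = qed_chain_consume(&p_eq->chain);
 *	struct event_ring_entry *e = &elem->entry;
 *
 *	if (e->protocol_id == PROTOCOLID_COMMON &&
 *	    e->opcode == COMMON_EVENT_VF_FLR)
 *		handle_vf_flr(e->vf_id, &e->data);
 */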
/* Flow control mode */
enum fw_flow_ctrl_mode {
flow_ctrl_pause,
flow_ctrl_pfc,
MAX_FW_FLOW_CTRL_MODE
};
/* GFT profile type */
enum gft_profile_type {
GFT_PROFILE_TYPE_4_TUPLE,
GFT_PROFILE_TYPE_L4_DST_PORT,
GFT_PROFILE_TYPE_IP_DST_ADDR,
GFT_PROFILE_TYPE_IP_SRC_ADDR,
GFT_PROFILE_TYPE_TUNNEL_TYPE,
MAX_GFT_PROFILE_TYPE
};
/* Major and minor HSI versions */
struct hsi_fp_ver_struct {
u8 minor_ver_arr[2];
u8 major_ver_arr[2];
};
enum iwarp_ll2_tx_queues {
IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
IWARP_LL2_ALIGNED_TX_QUEUE,
IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE,
IWARP_LL2_ERROR,
MAX_IWARP_LL2_TX_QUEUES
};
/* Malicious VF error ID */
enum malicious_vf_error_id {
MALICIOUS_VF_NO_ERROR,
VF_PF_CHANNEL_NOT_READY,
VF_ZONE_MSG_NOT_VALID,
VF_ZONE_FUNC_NOT_ENABLED,
ETH_PACKET_TOO_SMALL,
ETH_ILLEGAL_VLAN_MODE,
ETH_MTU_VIOLATION,
ETH_ILLEGAL_INBAND_TAGS,
ETH_VLAN_INSERT_AND_INBAND_VLAN,
ETH_ILLEGAL_NBDS,
ETH_FIRST_BD_WO_SOP,
ETH_INSUFFICIENT_BDS,
ETH_ILLEGAL_LSO_HDR_NBDS,
ETH_ILLEGAL_LSO_MSS,
ETH_ZERO_SIZE_BD,
ETH_ILLEGAL_LSO_HDR_LEN,
ETH_INSUFFICIENT_PAYLOAD,
ETH_EDPM_OUT_OF_SYNC,
ETH_TUNN_IPV6_EXT_NBD_ERR,
ETH_CONTROL_PACKET_VIOLATION,
ETH_ANTI_SPOOFING_ERR,
ETH_PACKET_SIZE_TOO_LARGE,
CORE_ILLEGAL_VLAN_MODE,
CORE_ILLEGAL_NBDS,
CORE_FIRST_BD_WO_SOP,
CORE_INSUFFICIENT_BDS,
CORE_PACKET_TOO_SMALL,
CORE_ILLEGAL_INBAND_TAGS,
CORE_VLAN_INSERT_AND_INBAND_VLAN,
CORE_MTU_VIOLATION,
CORE_CONTROL_PACKET_VIOLATION,
CORE_ANTI_SPOOFING_ERR,
CORE_PACKET_SIZE_TOO_LARGE,
CORE_ILLEGAL_BD_FLAGS,
CORE_GSI_PACKET_VIOLATION,
MAX_MALICIOUS_VF_ERROR_ID,
};
/* Mstorm non-triggering VF zone */
struct mstorm_non_trigger_vf_zone {
struct eth_mstorm_per_queue_stat eth_queue_stat;
struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
};
/* Mstorm VF zone */
struct mstorm_vf_zone {
struct mstorm_non_trigger_vf_zone non_trigger;
};
/* VLAN header including TPID and TCI fields */
struct vlan_header {
__le16 tpid;
__le16 tci;
};
/* outer tag configurations */
struct outer_tag_config_struct {
u8 enable_stag_pri_change;
u8 pri_map_valid;
u8 reserved[2];
struct vlan_header outer_tag;
u8 inner_to_outer_pri_map[8];
};
/* personality per PF */
enum personality_type {
BAD_PERSONALITY_TYP,
PERSONALITY_ISCSI,
PERSONALITY_FCOE,
PERSONALITY_RDMA_AND_ETH,
PERSONALITY_RDMA,
PERSONALITY_CORE,
PERSONALITY_ETH,
PERSONALITY_RESERVED,
MAX_PERSONALITY_TYPE
};
/* tunnel configuration */
struct pf_start_tunnel_config {
u8 set_vxlan_udp_port_flg;
u8 set_geneve_udp_port_flg;
u8 set_no_inner_l2_vxlan_udp_port_flg;
u8 tunnel_clss_vxlan;
u8 tunnel_clss_l2geneve;
u8 tunnel_clss_ipgeneve;
u8 tunnel_clss_l2gre;
u8 tunnel_clss_ipgre;
__le16 vxlan_udp_port;
__le16 geneve_udp_port;
__le16 no_inner_l2_vxlan_udp_port;
__le16 reserved[3];
};
/* Ramrod data for PF start ramrod */
struct pf_start_ramrod_data {
struct regpair event_ring_pbl_addr;
struct regpair consolid_q_pbl_addr;
struct pf_start_tunnel_config tunnel_config;
__le16 event_ring_sb_id;
u8 base_vf_id;
u8 num_vfs;
u8 event_ring_num_pages;
u8 event_ring_sb_index;
u8 path_id;
u8 warning_as_error;
u8 dont_log_ramrods;
u8 personality;
__le16 log_type_mask;
u8 mf_mode;
u8 integ_phase;
u8 allow_npar_tx_switching;
u8 reserved0;
struct hsi_fp_ver_struct hsi_fp_ver;
struct outer_tag_config_struct outer_tag_config;
};
/* Data for port update ramrod */
struct protocol_dcb_data {
u8 dcb_enable_flag;
u8 dscp_enable_flag;
u8 dcb_priority;
u8 dcb_tc;
u8 dscp_val;
u8 dcb_dont_add_vlan0;
};
/* Update tunnel configuration */
struct pf_update_tunnel_config {
u8 update_rx_pf_clss;
u8 update_rx_def_ucast_clss;
u8 update_rx_def_non_ucast_clss;
u8 set_vxlan_udp_port_flg;
u8 set_geneve_udp_port_flg;
u8 set_no_inner_l2_vxlan_udp_port_flg;
u8 tunnel_clss_vxlan;
u8 tunnel_clss_l2geneve;
u8 tunnel_clss_ipgeneve;
u8 tunnel_clss_l2gre;
u8 tunnel_clss_ipgre;
u8 reserved;
__le16 vxlan_udp_port;
__le16 geneve_udp_port;
__le16 no_inner_l2_vxlan_udp_port;
__le16 reserved1[3];
};
/* Data for port update ramrod */
struct pf_update_ramrod_data {
u8 update_eth_dcb_data_mode;
u8 update_fcoe_dcb_data_mode;
u8 update_iscsi_dcb_data_mode;
u8 update_roce_dcb_data_mode;
u8 update_rroce_dcb_data_mode;
u8 update_iwarp_dcb_data_mode;
u8 update_mf_vlan_flag;
u8 update_enable_stag_pri_change;
struct protocol_dcb_data eth_dcb_data;
struct protocol_dcb_data fcoe_dcb_data;
struct protocol_dcb_data iscsi_dcb_data;
struct protocol_dcb_data roce_dcb_data;
struct protocol_dcb_data rroce_dcb_data;
struct protocol_dcb_data iwarp_dcb_data;
__le16 mf_vlan;
u8 enable_stag_pri_change;
u8 reserved;
struct pf_update_tunnel_config tunnel_config;
};
/* Ports mode */
enum ports_mode {
ENGX2_PORTX1,
ENGX2_PORTX2,
ENGX1_PORTX1,
ENGX1_PORTX2,
ENGX1_PORTX4,
MAX_PORTS_MODE
};
/* Used to index the hsi_fp_ver_struct [major|minor]_ver_arr per protocol */
enum protocol_version_array_key {
ETH_VER_KEY = 0,
ROCE_VER_KEY,
MAX_PROTOCOL_VERSION_ARRAY_KEY
};
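/* Usage sketch (illustrative only): advertising the ETH fastpath HSI version
 * in the PF start ramrod; ETH_HSI_VER_MAJOR/MINOR are assumed from
 * common_hsi.h and "p_ramrod" is a hypothetical pointer:
 *
 *	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
 *	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
 */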
/* RDMA TX Stats */
struct rdma_sent_stats {
struct regpair sent_bytes;
struct regpair sent_pkts;
};
/* Pstorm non-triggering VF zone */
struct pstorm_non_trigger_vf_zone {
struct eth_pstorm_per_queue_stat eth_queue_stat;
struct rdma_sent_stats rdma_stats;
};
/* Pstorm VF zone */
struct pstorm_vf_zone {
struct pstorm_non_trigger_vf_zone non_trigger;
struct regpair reserved[7];
};
/* Ramrod Header of SPQE */
struct ramrod_header {
__le32 cid;
u8 cmd_id;
u8 protocol_id;
__le16 echo;
};
/* RDMA RX Stats */
struct rdma_rcv_stats {
struct regpair rcv_bytes;
struct regpair rcv_pkts;
};
/* Data for update QCN/DCQCN RL ramrod */
struct rl_update_ramrod_data {
u8 qcn_update_param_flg;
u8 dcqcn_update_param_flg;
u8 rl_init_flg;
u8 rl_start_flg;
u8 rl_stop_flg;
u8 rl_id_first;
u8 rl_id_last;
u8 rl_dc_qcn_flg;
u8 dcqcn_reset_alpha_on_idle;
u8 rl_bc_stage_th;
u8 rl_timer_stage_th;
u8 reserved1;
__le32 rl_bc_rate;
__le16 rl_max_rate;
__le16 rl_r_ai;
__le16 rl_r_hai;
__le16 dcqcn_g;
__le32 dcqcn_k_us;
__le32 dcqcn_timeuot_us;
__le32 qcn_timeuot_us;
__le32 reserved2;
};
/* Slowpath Element (SPQE) */
struct slow_path_element {
struct ramrod_header hdr;
struct regpair data_ptr;
};
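/* Usage sketch (illustrative only): a slowpath element pairs the ramrod
 * header with a pointer to the ramrod-specific data; "elem", "cid", "echo"
 * and "ramrod_dma_addr" are hypothetical locals:
 *
 *	elem->hdr.cid = cpu_to_le32(cid);
 *	elem->hdr.cmd_id = COMMON_RAMROD_PF_START;
 *	elem->hdr.protocol_id = PROTOCOLID_COMMON;
 *	elem->hdr.echo = cpu_to_le16(echo);
 *	DMA_REGPAIR_LE(elem->data_ptr, ramrod_dma_addr);
 */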
/* Tstorm non-triggering VF zone */
struct tstorm_non_trigger_vf_zone {
struct rdma_rcv_stats rdma_stats;
};
struct tstorm_per_port_stat {
struct regpair trunc_error_discard;
struct regpair mac_error_discard;
struct regpair mftag_filter_discard;
struct regpair eth_mac_filter_discard;
struct regpair ll2_mac_filter_discard;
struct regpair ll2_conn_disabled_discard;
struct regpair iscsi_irregular_pkt;
struct regpair fcoe_irregular_pkt;
struct regpair roce_irregular_pkt;
struct regpair iwarp_irregular_pkt;
struct regpair eth_irregular_pkt;
struct regpair toe_irregular_pkt;
struct regpair preroce_irregular_pkt;
struct regpair eth_gre_tunn_filter_discard;
struct regpair eth_vxlan_tunn_filter_discard;
struct regpair eth_geneve_tunn_filter_discard;
struct regpair eth_gft_drop_pkt;
};
/* Tstorm VF zone */
struct tstorm_vf_zone {
struct tstorm_non_trigger_vf_zone non_trigger;
};
/* Tunnel classification scheme */
enum tunnel_clss {
TUNNEL_CLSS_MAC_VLAN = 0,
TUNNEL_CLSS_MAC_VNI,
TUNNEL_CLSS_INNER_MAC_VLAN,
TUNNEL_CLSS_INNER_MAC_VNI,
TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE,
MAX_TUNNEL_CLSS
};
/* Ustorm non-triggering VF zone */
struct ustorm_non_trigger_vf_zone {
struct eth_ustorm_per_queue_stat eth_queue_stat;
struct regpair vf_pf_msg_addr;
};
/* Ustorm triggering VF zone */
struct ustorm_trigger_vf_zone {
u8 vf_pf_msg_valid;
u8 reserved[7];
};
/* Ustorm VF zone */
struct ustorm_vf_zone {
struct ustorm_non_trigger_vf_zone non_trigger;
struct ustorm_trigger_vf_zone trigger;
};
/* VF-PF channel data */
struct vf_pf_channel_data {
__le32 ready;
u8 valid;
u8 reserved0;
__le16 reserved1;
};
/* Ramrod data for VF start ramrod */
struct vf_start_ramrod_data {
u8 vf_id;
u8 enable_flr_ack;
__le16 opaque_fid;
u8 personality;
u8 reserved[7];
struct hsi_fp_ver_struct hsi_fp_ver;
};
/* Ramrod data for VF stop ramrod */
struct vf_stop_ramrod_data {
u8 vf_id;
u8 reserved0;
__le16 reserved1;
__le32 reserved2;
};
/* VF zone size mode */
enum vf_zone_size_mode {
VF_ZONE_SIZE_MODE_DEFAULT,
VF_ZONE_SIZE_MODE_DOUBLE,
VF_ZONE_SIZE_MODE_QUAD,
MAX_VF_ZONE_SIZE_MODE
};
/* Xstorm non-triggering VF zone */
struct xstorm_non_trigger_vf_zone {
struct regpair non_edpm_ack_pkts;
};
/* Xstorm VF zone */
struct xstorm_vf_zone {
struct xstorm_non_trigger_vf_zone non_trigger;
};
/* Attentions status block */
struct atten_status_block {
__le32 atten_bits;
__le32 atten_ack;
__le16 reserved0;
__le16 sb_index;
__le32 reserved1;
};
/* DMAE command */
struct dmae_cmd {
__le32 opcode;
#define DMAE_CMD_SRC_MASK 0x1
#define DMAE_CMD_SRC_SHIFT 0
#define DMAE_CMD_DST_MASK 0x3
#define DMAE_CMD_DST_SHIFT 1
#define DMAE_CMD_C_DST_MASK 0x1
#define DMAE_CMD_C_DST_SHIFT 3
#define DMAE_CMD_CRC_RESET_MASK 0x1
#define DMAE_CMD_CRC_RESET_SHIFT 4
#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1
#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5
#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1
#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6
#define DMAE_CMD_COMP_FUNC_MASK 0x1
#define DMAE_CMD_COMP_FUNC_SHIFT 7
#define DMAE_CMD_COMP_WORD_EN_MASK 0x1
#define DMAE_CMD_COMP_WORD_EN_SHIFT 8
#define DMAE_CMD_COMP_CRC_EN_MASK 0x1
#define DMAE_CMD_COMP_CRC_EN_SHIFT 9
#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7
#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
#define DMAE_CMD_RESERVED1_MASK 0x1
#define DMAE_CMD_RESERVED1_SHIFT 13
#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3
#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14
#define DMAE_CMD_ERR_HANDLING_MASK 0x3
#define DMAE_CMD_ERR_HANDLING_SHIFT 16
#define DMAE_CMD_PORT_ID_MASK 0x3
#define DMAE_CMD_PORT_ID_SHIFT 18
#define DMAE_CMD_SRC_PF_ID_MASK 0xF
#define DMAE_CMD_SRC_PF_ID_SHIFT 20
#define DMAE_CMD_DST_PF_ID_MASK 0xF
#define DMAE_CMD_DST_PF_ID_SHIFT 24
#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1
#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1
#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
#define DMAE_CMD_RESERVED2_MASK 0x3
#define DMAE_CMD_RESERVED2_SHIFT 30
__le32 src_addr_lo;
__le32 src_addr_hi;
__le32 dst_addr_lo;
__le32 dst_addr_hi;
__le16 length_dw;
__le16 opcode_b;
#define DMAE_CMD_SRC_VF_ID_MASK 0xFF
#define DMAE_CMD_SRC_VF_ID_SHIFT 0
#define DMAE_CMD_DST_VF_ID_MASK 0xFF
#define DMAE_CMD_DST_VF_ID_SHIFT 8
__le32 comp_addr_lo;
__le32 comp_addr_hi;
__le32 comp_val;
__le32 crc32;
__le32 crc_32_c;
__le16 crc16;
__le16 crc16_c;
__le16 crc10;
__le16 error_bit_reserved;
#define DMAE_CMD_ERROR_BIT_MASK 0x1
#define DMAE_CMD_ERROR_BIT_SHIFT 0
#define DMAE_CMD_RESERVED_MASK 0x7FFF
#define DMAE_CMD_RESERVED_SHIFT 1
__le16 xsum16;
__le16 xsum8;
};
enum dmae_cmd_comp_crc_en_enum {
dmae_cmd_comp_crc_disabled,
dmae_cmd_comp_crc_enabled,
MAX_DMAE_CMD_COMP_CRC_EN_ENUM
};
enum dmae_cmd_comp_func_enum {
dmae_cmd_comp_func_to_src,
dmae_cmd_comp_func_to_dst,
MAX_DMAE_CMD_COMP_FUNC_ENUM
};
enum dmae_cmd_comp_word_en_enum {
dmae_cmd_comp_word_disabled,
dmae_cmd_comp_word_enabled,
MAX_DMAE_CMD_COMP_WORD_EN_ENUM
};
enum dmae_cmd_c_dst_enum {
dmae_cmd_c_dst_pcie,
dmae_cmd_c_dst_grc,
MAX_DMAE_CMD_C_DST_ENUM
};
enum dmae_cmd_dst_enum {
dmae_cmd_dst_none_0,
dmae_cmd_dst_pcie,
dmae_cmd_dst_grc,
dmae_cmd_dst_none_3,
MAX_DMAE_CMD_DST_ENUM
};
enum dmae_cmd_error_handling_enum {
dmae_cmd_error_handling_send_regular_comp,
dmae_cmd_error_handling_send_comp_with_err,
dmae_cmd_error_handling_dont_send_comp,
MAX_DMAE_CMD_ERROR_HANDLING_ENUM
};
enum dmae_cmd_src_enum {
dmae_cmd_src_pcie,
dmae_cmd_src_grc,
MAX_DMAE_CMD_SRC_ENUM
};
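/* Usage sketch (illustrative only): composing a DMAE opcode for a
 * PCIe-to-GRC copy that posts a regular completion even on error; "cmd" is
 * a hypothetical pointer to a struct dmae_cmd:
 *
 *	u32 opcode = 0;
 *
 *	SET_FIELD(opcode, DMAE_CMD_SRC, dmae_cmd_src_pcie);
 *	SET_FIELD(opcode, DMAE_CMD_DST, dmae_cmd_dst_grc);
 *	SET_FIELD(opcode, DMAE_CMD_ERR_HANDLING,
 *		  dmae_cmd_error_handling_send_regular_comp);
 *	cmd->opcode = cpu_to_le32(opcode);
 */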
struct e4_mstorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
struct e4_ystorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
__le32 reg0;
__le32 reg1;
__le16 word1;
__le16 word2;
__le16 word3;
__le16 word4;
__le32 reg2;
__le32 reg3;
};
/* DMAE parameters */
struct qed_dmae_params {
u32 flags;
/* If the QED_DMAE_PARAMS_RW_REPL_SRC flag is set, the source is a
 * block of length DMAE_MAX_RW_SIZE, and the destination is larger,
 * then the source block is duplicated as many times as required to
 * fill the destination block. This is used mostly to write a zeroed
 * buffer to a destination address using DMA.
 */
#define QED_DMAE_PARAMS_RW_REPL_SRC_MASK 0x1
#define QED_DMAE_PARAMS_RW_REPL_SRC_SHIFT 0
#define QED_DMAE_PARAMS_SRC_VF_VALID_MASK 0x1
#define QED_DMAE_PARAMS_SRC_VF_VALID_SHIFT 1
#define QED_DMAE_PARAMS_DST_VF_VALID_MASK 0x1
#define QED_DMAE_PARAMS_DST_VF_VALID_SHIFT 2
#define QED_DMAE_PARAMS_COMPLETION_DST_MASK 0x1
#define QED_DMAE_PARAMS_COMPLETION_DST_SHIFT 3
#define QED_DMAE_PARAMS_PORT_VALID_MASK 0x1
#define QED_DMAE_PARAMS_PORT_VALID_SHIFT 4
#define QED_DMAE_PARAMS_SRC_PF_VALID_MASK 0x1
#define QED_DMAE_PARAMS_SRC_PF_VALID_SHIFT 5
#define QED_DMAE_PARAMS_DST_PF_VALID_MASK 0x1
#define QED_DMAE_PARAMS_DST_PF_VALID_SHIFT 6
#define QED_DMAE_PARAMS_RESERVED_MASK 0x1FFFFFF
#define QED_DMAE_PARAMS_RESERVED_SHIFT 7
u8 src_vfid;
u8 dst_vfid;
u8 port_id;
u8 src_pfid;
u8 dst_pfid;
u8 reserved1;
__le16 reserved2;
};
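/* Usage sketch (illustrative only): requesting a source-replicating DMAE
 * write toward a specific port; "port_id" is a hypothetical local:
 *
 *	struct qed_dmae_params params = { 0 };
 *
 *	SET_FIELD(params.flags, QED_DMAE_PARAMS_RW_REPL_SRC, 0x1);
 *	SET_FIELD(params.flags, QED_DMAE_PARAMS_PORT_VALID, 0x1);
 *	params.port_id = port_id;
 */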
/* IGU cleanup command */
struct igu_cleanup {
__le32 sb_id_and_flags;
#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF
#define IGU_CLEANUP_RESERVED0_SHIFT 0
#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1
#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27
#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
__le32 reserved1;
};
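/* Usage sketch (illustrative only): composing the cleanup command word in a
 * host-endian local before storing it; "cleanup_cmd" is a hypothetical
 * struct igu_cleanup local and the field values are firmware-defined:
 *
 *	u32 data = 0;
 *
 *	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, 1);
 *	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
 *	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, 1);
 *	cleanup_cmd.sb_id_and_flags = cpu_to_le32(data);
 */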
/* IGU firmware driver command */
union igu_command {
struct igu_prod_cons_update prod_cons_update;
struct igu_cleanup cleanup;
};
/* IGU firmware driver command */
struct igu_command_reg_ctrl {
__le16 opaque_fid;
__le16 igu_command_reg_ctrl_fields;
#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF
#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7
#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12
#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
};
/* IGU mapping line structure */
struct igu_mapping_line {
__le32 igu_mapping_line_fields;
#define IGU_MAPPING_LINE_VALID_MASK 0x1
#define IGU_MAPPING_LINE_VALID_SHIFT 0
#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF
#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1
#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF
#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1
#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17
#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F
#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18
#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF
#define IGU_MAPPING_LINE_RESERVED_SHIFT 24
};
/* IGU MSIX line structure */
struct igu_msix_vector {
struct regpair address;
__le32 data;
__le32 msix_vector_fields;
#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1
#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0
#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF
#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1
#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF
#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
};
/* per encapsulation type enabling flags */
struct prs_reg_encapsulation_type_en {
u8 flags;
#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0
#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1
#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2
#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3
#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5
#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3
#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
};
enum pxp_tph_st_hint {
TPH_ST_HINT_BIDIR,
TPH_ST_HINT_REQUESTER,
TPH_ST_HINT_TARGET,
TPH_ST_HINT_TARGET_PRIO,
MAX_PXP_TPH_ST_HINT
};
/* QM hardware structure of enable bypass credit mask */
struct qm_rf_bypass_mask {
u8 flags;
#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1
#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0
#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1
#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1
#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2
#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1
#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3
#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1
#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4
#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1
#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5
#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1
#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6
#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1
#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
};
/* QM hardware structure of opportunistic credit mask */
struct qm_rf_opportunistic_mask {
__le16 flags;
#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1
#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0
#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1
#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1
#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1
#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2
#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1
#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3
#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1
#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4
#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1
#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5
#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1
#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6
#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1
#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7
#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1
#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F
#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
};
/* QM hardware structure of QM map memory */
struct qm_rf_pq_map_e4 {
__le32 reg;
#define QM_RF_PQ_MAP_E4_PQ_VALID_MASK 0x1
#define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT 0
#define QM_RF_PQ_MAP_E4_RL_ID_MASK 0xFF
#define QM_RF_PQ_MAP_E4_RL_ID_SHIFT 1
#define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK 0x1FF
#define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT 9
#define QM_RF_PQ_MAP_E4_VOQ_MASK 0x1F
#define QM_RF_PQ_MAP_E4_VOQ_SHIFT 18
#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK 0x3
#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT 23
#define QM_RF_PQ_MAP_E4_RL_VALID_MASK 0x1
#define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT 25
#define QM_RF_PQ_MAP_E4_RESERVED_MASK 0x3F
#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT 26
};
/* Completion params for aggregated interrupt completion */
struct sdm_agg_int_comp_params {
__le16 params;
#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F
#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0
#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1
#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF
#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7
};
/* SDM operation gen command (generate aggregative interrupt) */
struct sdm_op_gen {
__le32 command;
#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF
#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
#define SDM_OP_GEN_COMP_TYPE_MASK 0xF
#define SDM_OP_GEN_COMP_TYPE_SHIFT 16
#define SDM_OP_GEN_RESERVED_MASK 0xFFF
#define SDM_OP_GEN_RESERVED_SHIFT 20
};
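/* Usage sketch (illustrative only): generating an aggregative interrupt
 * completion; SDM_COMP_TYPE_AGG_INT is assumed from common_hsi.h and
 * "comp_param" is a hypothetical local:
 *
 *	struct sdm_op_gen op_gen = { 0 };
 *	u32 cmd = 0;
 *
 *	SET_FIELD(cmd, SDM_OP_GEN_COMP_TYPE, SDM_COMP_TYPE_AGG_INT);
 *	SET_FIELD(cmd, SDM_OP_GEN_COMP_PARAM, comp_param);
 *	op_gen.command = cpu_to_le32(cmd);
 */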
/* Physical memory descriptor */
struct phys_mem_desc {
dma_addr_t phys_addr;
void *virt_addr;
u32 size; /* In bytes */
};
/* Virtual memory descriptor */
struct virt_mem_desc {
void *ptr;
u32 size; /* In bytes */
};
/****************************************/
/* Debug Tools HSI constants and macros */
/****************************************/
enum block_id {
BLOCK_GRC,
BLOCK_MISCS,
BLOCK_MISC,
BLOCK_DBU,
BLOCK_PGLUE_B,
BLOCK_CNIG,
BLOCK_CPMU,
BLOCK_NCSI,
BLOCK_OPTE,
BLOCK_BMB,
BLOCK_PCIE,
BLOCK_MCP,
BLOCK_MCP2,
BLOCK_PSWHST,
BLOCK_PSWHST2,
BLOCK_PSWRD,
BLOCK_PSWRD2,
BLOCK_PSWWR,
BLOCK_PSWWR2,
BLOCK_PSWRQ,
BLOCK_PSWRQ2,
BLOCK_PGLCS,
BLOCK_DMAE,
BLOCK_PTU,
BLOCK_TCM,
BLOCK_MCM,
BLOCK_UCM,
BLOCK_XCM,
BLOCK_YCM,
BLOCK_PCM,
BLOCK_QM,
BLOCK_TM,
BLOCK_DORQ,
BLOCK_BRB,
BLOCK_SRC,
BLOCK_PRS,
BLOCK_TSDM,
BLOCK_MSDM,
BLOCK_USDM,
BLOCK_XSDM,
BLOCK_YSDM,
BLOCK_PSDM,
BLOCK_TSEM,
BLOCK_MSEM,
BLOCK_USEM,
BLOCK_XSEM,
BLOCK_YSEM,
BLOCK_PSEM,
BLOCK_RSS,
BLOCK_TMLD,
BLOCK_MULD,
BLOCK_YULD,
BLOCK_XYLD,
BLOCK_PRM,
BLOCK_PBF_PB1,
BLOCK_PBF_PB2,
BLOCK_RPB,
BLOCK_BTB,
BLOCK_PBF,
BLOCK_RDIF,
BLOCK_TDIF,
BLOCK_CDU,
BLOCK_CCFC,
BLOCK_TCFC,
BLOCK_IGU,
BLOCK_CAU,
BLOCK_UMAC,
BLOCK_XMAC,
BLOCK_MSTAT,
BLOCK_DBG,
BLOCK_NIG,
BLOCK_WOL,
BLOCK_BMBN,
BLOCK_IPC,
BLOCK_NWM,
BLOCK_NWS,
BLOCK_MS,
BLOCK_PHY_PCIE,
BLOCK_LED,
BLOCK_AVS_WRAP,
BLOCK_PXPREQBUS,
BLOCK_BAR0_MAP,
BLOCK_MCP_FIO,
BLOCK_LAST_INIT,
BLOCK_PRS_FC,
BLOCK_PBF_FC,
BLOCK_NIG_LB_FC,
BLOCK_NIG_LB_FC_PLLH,
BLOCK_NIG_TX_FC_PLLH,
BLOCK_NIG_TX_FC,
BLOCK_NIG_RX_FC_PLLH,
BLOCK_NIG_RX_FC,
MAX_BLOCK_ID
};
/* binary debug buffer types */
enum bin_dbg_buffer_type {
BIN_BUF_DBG_MODE_TREE,
BIN_BUF_DBG_DUMP_REG,
BIN_BUF_DBG_DUMP_MEM,
BIN_BUF_DBG_IDLE_CHK_REGS,
BIN_BUF_DBG_IDLE_CHK_IMMS,
BIN_BUF_DBG_IDLE_CHK_RULES,
BIN_BUF_DBG_IDLE_CHK_PARSING_DATA,
BIN_BUF_DBG_ATTN_BLOCKS,
BIN_BUF_DBG_ATTN_REGS,
BIN_BUF_DBG_ATTN_INDEXES,
BIN_BUF_DBG_ATTN_NAME_OFFSETS,
BIN_BUF_DBG_BLOCKS,
BIN_BUF_DBG_BLOCKS_CHIP_DATA,
BIN_BUF_DBG_BUS_LINES,
BIN_BUF_DBG_BLOCKS_USER_DATA,
BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA,
BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS,
BIN_BUF_DBG_RESET_REGS,
BIN_BUF_DBG_PARSING_STRINGS,
MAX_BIN_DBG_BUFFER_TYPE
};
/* Attention bit mapping */
struct dbg_attn_bit_mapping {
u16 data;
#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF
#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1
#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15
};
/* Attention block per-type data */
struct dbg_attn_block_type_data {
u16 names_offset;
u16 reserved1;
u8 num_regs;
u8 reserved2;
u16 regs_offset;
};
/* Block attentions */
struct dbg_attn_block {
struct dbg_attn_block_type_data per_type_data[2];
};
/* Attention register result */
struct dbg_attn_reg_result {
u32 data;
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
u16 block_attn_offset;
u16 reserved;
u32 sts_val;
u32 mask_val;
};
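/* Illustrative sketch (not part of the HSI): decoding the packed fields of a
 * dbg_attn_reg_result with the GET_FIELD() helper from common_hsi.h. The
 * helper name and pr_debug() formatting are placeholders.
 */
static inline void
qed_example_print_attn_reg(const struct dbg_attn_reg_result *res)
{
	u32 sts_addr = GET_FIELD(res->data, DBG_ATTN_REG_RESULT_STS_ADDRESS);
	u32 num_attn = GET_FIELD(res->data, DBG_ATTN_REG_RESULT_NUM_REG_ATTN);

	pr_debug("sts addr 0x%x, %u attentions, sts 0x%08x, mask 0x%08x\n",
		 sts_addr, num_attn, res->sts_val, res->mask_val);
}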
/* Attention block result */
struct dbg_attn_block_result {
u8 block_id;
u8 data;
#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3
#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F
#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
u16 names_offset;
struct dbg_attn_reg_result reg_results[15];
};
/* Mode header */
struct dbg_mode_hdr {
u16 data;
#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0
#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF
#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1
};
/* Attention register */
struct dbg_attn_reg {
struct dbg_mode_hdr mode;
u16 block_attn_offset;
u32 data;
#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
u32 sts_clr_address;
u32 mask_address;
};
/* Attention types */
enum dbg_attn_type {
ATTN_TYPE_INTERRUPT,
ATTN_TYPE_PARITY,
MAX_DBG_ATTN_TYPE
};
/* Block debug data */
struct dbg_block {
u8 name[15];
u8 associated_storm_letter;
};
/* Chip-specific block debug data */
struct dbg_block_chip {
u8 flags;
#define DBG_BLOCK_CHIP_IS_REMOVED_MASK 0x1
#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT 0
#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK 0x1
#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT 1
#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK 0x1
#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2
#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK 0x1
#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT 3
#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK 0x1
#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT 4
#define DBG_BLOCK_CHIP_RESERVED0_MASK 0x7
#define DBG_BLOCK_CHIP_RESERVED0_SHIFT 5
u8 dbg_client_id;
u8 reset_reg_id;
u8 reset_reg_bit_offset;
struct dbg_mode_hdr dbg_bus_mode;
u16 reserved1;
u8 reserved2;
u8 num_of_dbg_bus_lines;
u16 dbg_bus_lines_offset;
u32 dbg_select_reg_addr;
u32 dbg_dword_enable_reg_addr;
u32 dbg_shift_reg_addr;
u32 dbg_force_valid_reg_addr;
u32 dbg_force_frame_reg_addr;
};
/* Chip-specific block user debug data */
struct dbg_block_chip_user {
u8 num_of_dbg_bus_lines;
u8 has_latency_events;
u16 names_offset;
};
/* Block user debug data */
struct dbg_block_user {
u8 name[16];
};
/* Block Debug line data */
struct dbg_bus_line {
u8 data;
#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
#define DBG_BUS_LINE_IS_256B_MASK 0x1
#define DBG_BUS_LINE_IS_256B_SHIFT 4
#define DBG_BUS_LINE_RESERVED_MASK 0x7
#define DBG_BUS_LINE_RESERVED_SHIFT 5
u8 group_sizes;
};
/* Condition header for registers dump */
struct dbg_dump_cond_hdr {
struct dbg_mode_hdr mode; /* Mode header */
u8 block_id; /* block ID */
u8 data_size; /* size in dwords of the data following this header */
};
/* Memory data for registers dump */
struct dbg_dump_mem {
u32 dword0;
#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF
#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
u32 dword1;
#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
#define DBG_DUMP_MEM_LENGTH_SHIFT 0
#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
#define DBG_DUMP_MEM_RESERVED_SHIFT 25
};
/* Register data for registers dump */
struct dbg_dump_reg {
u32 data;
#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_DUMP_REG_ADDRESS_SHIFT 0
#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1
#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
#define DBG_DUMP_REG_LENGTH_MASK 0xFF
#define DBG_DUMP_REG_LENGTH_SHIFT 24
};
/* Split header for registers dump */
struct dbg_dump_split_hdr {
u32 hdr;
#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF
#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
};
/* Condition header for idle check */
struct dbg_idle_chk_cond_hdr {
struct dbg_mode_hdr mode; /* Mode header */
u16 data_size; /* size in dwords of the data following this header */
};
/* Idle Check condition register */
struct dbg_idle_chk_cond_reg {
u32 data;
#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
u16 num_entries;
u8 entry_size;
u8 start_entry;
};
/* Idle Check info register */
struct dbg_idle_chk_info_reg {
u32 data;
#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
u16 size; /* register size in dwords */
struct dbg_mode_hdr mode; /* Mode header */
};
/* Idle Check register */
union dbg_idle_chk_reg {
struct dbg_idle_chk_cond_reg cond_reg; /* condition register */
struct dbg_idle_chk_info_reg info_reg; /* info register */
};
/* Idle Check result header */
struct dbg_idle_chk_result_hdr {
u16 rule_id; /* Failing rule index */
u16 mem_entry_id; /* Failing memory entry index */
u8 num_dumped_cond_regs; /* number of dumped condition registers */
u8 num_dumped_info_regs; /* number of dumped info registers */
u8 severity; /* from dbg_idle_chk_severity_types enum */
u8 reserved;
};
/* Idle Check result register header */
struct dbg_idle_chk_result_reg_hdr {
u8 data;
#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1
#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0
#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F
#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
u8 start_entry; /* index of the first checked entry */
u16 size; /* register size in dwords */
};
/* Idle Check rule */
struct dbg_idle_chk_rule {
u16 rule_id; /* Idle Check rule ID */
u8 severity; /* value from dbg_idle_chk_severity_types enum */
u8 cond_id; /* Condition ID */
u8 num_cond_regs; /* number of condition registers */
u8 num_info_regs; /* number of info registers */
u8 num_imms; /* number of immediates in the condition */
u8 reserved1;
u16 reg_offset; /* offset of this rule's registers in the idle check
* register array (in dbg_idle_chk_reg units).
*/
u16 imm_offset; /* offset of this rule's immediate values in the
* immediate values array (in dwords).
*/
};
/* Idle Check rule parsing data */
struct dbg_idle_chk_rule_parsing_data {
u32 data;
#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
};
/* Idle check severity types */
enum dbg_idle_chk_severity_types {
/* idle check failure should cause an error */
IDLE_CHK_SEVERITY_ERROR,
/* idle check failure should cause an error only if there's no traffic */
IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
/* idle check failure should cause a warning */
IDLE_CHK_SEVERITY_WARNING,
MAX_DBG_IDLE_CHK_SEVERITY_TYPES
};
/* Reset register */
struct dbg_reset_reg {
u32 data;
#define DBG_RESET_REG_ADDR_MASK 0xFFFFFF
#define DBG_RESET_REG_ADDR_SHIFT 0
#define DBG_RESET_REG_IS_REMOVED_MASK 0x1
#define DBG_RESET_REG_IS_REMOVED_SHIFT 24
#define DBG_RESET_REG_RESERVED_MASK 0x7F
#define DBG_RESET_REG_RESERVED_SHIFT 25
};
/* Debug Bus block data */
struct dbg_bus_block_data {
u8 enable_mask;
u8 right_shift;
u8 force_valid_mask;
u8 force_frame_mask;
u8 dword_mask;
u8 line_num;
u8 hw_id;
u8 flags;
#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK 0x1
#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0
#define DBG_BUS_BLOCK_DATA_RESERVED_MASK 0x7F
#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT 1
};
enum dbg_bus_clients {
DBG_BUS_CLIENT_RBCN,
DBG_BUS_CLIENT_RBCP,
DBG_BUS_CLIENT_RBCR,
DBG_BUS_CLIENT_RBCT,
DBG_BUS_CLIENT_RBCU,
DBG_BUS_CLIENT_RBCF,
DBG_BUS_CLIENT_RBCX,
DBG_BUS_CLIENT_RBCS,
DBG_BUS_CLIENT_RBCH,
DBG_BUS_CLIENT_RBCZ,
DBG_BUS_CLIENT_OTHER_ENGINE,
DBG_BUS_CLIENT_TIMESTAMP,
DBG_BUS_CLIENT_CPU,
DBG_BUS_CLIENT_RBCY,
DBG_BUS_CLIENT_RBCQ,
DBG_BUS_CLIENT_RBCM,
DBG_BUS_CLIENT_RBCB,
DBG_BUS_CLIENT_RBCW,
DBG_BUS_CLIENT_RBCV,
MAX_DBG_BUS_CLIENTS
};
/* Debug Bus constraint operation types */
enum dbg_bus_constraint_ops {
DBG_BUS_CONSTRAINT_OP_EQ,
DBG_BUS_CONSTRAINT_OP_NE,
DBG_BUS_CONSTRAINT_OP_LT,
DBG_BUS_CONSTRAINT_OP_LTC,
DBG_BUS_CONSTRAINT_OP_LE,
DBG_BUS_CONSTRAINT_OP_LEC,
DBG_BUS_CONSTRAINT_OP_GT,
DBG_BUS_CONSTRAINT_OP_GTC,
DBG_BUS_CONSTRAINT_OP_GE,
DBG_BUS_CONSTRAINT_OP_GEC,
MAX_DBG_BUS_CONSTRAINT_OPS
};
/* Debug Bus trigger state data */
struct dbg_bus_trigger_state_data {
u8 msg_len;
u8 constraint_dword_mask;
u8 storm_id;
u8 reserved;
};
/* Debug Bus memory address */
struct dbg_bus_mem_addr {
u32 lo;
u32 hi;
};
/* Debug Bus PCI buffer data */
struct dbg_bus_pci_buf_data {
struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */
struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */
u32 size; /* PCI buffer size in bytes */
};
/* Debug Bus Storm EID range filter params */
struct dbg_bus_storm_eid_range_params {
u8 min; /* Minimal event ID to filter on */
u8 max; /* Maximal event ID to filter on */
};
/* Debug Bus Storm EID mask filter params */
struct dbg_bus_storm_eid_mask_params {
u8 val; /* Event ID value */
u8 mask; /* Event ID mask. 1s in the mask = don't-care bits. */
};
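/* Illustrative sketch (not part of the HSI): with the mask filter, a set bit
 * means "don't care", so an event ID matches when every non-masked bit equals
 * the programmed value. The helper name is a placeholder.
 */
static inline bool
qed_example_eid_mask_match(const struct dbg_bus_storm_eid_mask_params *p,
			   u8 eid)
{
	return ((eid ^ p->val) & ~p->mask) == 0;
}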
/* Debug Bus Storm EID filter params */
union dbg_bus_storm_eid_params {
struct dbg_bus_storm_eid_range_params range;
struct dbg_bus_storm_eid_mask_params mask;
};
/* Debug Bus Storm data */
struct dbg_bus_storm_data {
u8 enabled;
u8 mode;
u8 hw_id;
u8 eid_filter_en;
u8 eid_range_not_mask;
u8 cid_filter_en;
union dbg_bus_storm_eid_params eid_filter_params;
u32 cid;
};
/* Debug Bus data */
struct dbg_bus_data {
u32 app_version;
u8 state;
u8 mode_256b_en;
u8 num_enabled_blocks;
u8 num_enabled_storms;
u8 target;
u8 one_shot_en;
u8 grc_input_en;
u8 timestamp_input_en;
u8 filter_en;
u8 adding_filter;
u8 filter_pre_trigger;
u8 filter_post_trigger;
u8 trigger_en;
u8 filter_constraint_dword_mask;
u8 next_trigger_state;
u8 next_constraint_id;
struct dbg_bus_trigger_state_data trigger_states[3];
u8 filter_msg_len;
u8 rcv_from_other_engine;
u8 blocks_dword_mask;
u8 blocks_dword_overlap;
u32 hw_id_mask;
struct dbg_bus_pci_buf_data pci_buf;
struct dbg_bus_block_data blocks[132];
struct dbg_bus_storm_data storms[6];
};
/* Debug bus states */
enum dbg_bus_states {
DBG_BUS_STATE_IDLE,
DBG_BUS_STATE_READY,
DBG_BUS_STATE_RECORDING,
DBG_BUS_STATE_STOPPED,
MAX_DBG_BUS_STATES
};
/* Debug Bus Storm modes */
enum dbg_bus_storm_modes {
DBG_BUS_STORM_MODE_PRINTF,
DBG_BUS_STORM_MODE_PRAM_ADDR,
DBG_BUS_STORM_MODE_DRA_RW,
DBG_BUS_STORM_MODE_DRA_W,
DBG_BUS_STORM_MODE_LD_ST_ADDR,
DBG_BUS_STORM_MODE_DRA_FSM,
DBG_BUS_STORM_MODE_FAST_DBGMUX,
DBG_BUS_STORM_MODE_RH,
DBG_BUS_STORM_MODE_RH_WITH_STORE,
DBG_BUS_STORM_MODE_FOC,
DBG_BUS_STORM_MODE_EXT_STORE,
MAX_DBG_BUS_STORM_MODES
};
/* Debug bus target IDs */
enum dbg_bus_targets {
DBG_BUS_TARGET_ID_INT_BUF,
DBG_BUS_TARGET_ID_NIG,
DBG_BUS_TARGET_ID_PCI,
MAX_DBG_BUS_TARGETS
};
/* GRC Dump data */
struct dbg_grc_data {
u8 params_initialized;
u8 reserved1;
u16 reserved2;
u32 param_val[48];
};
/* Debug GRC params */
enum dbg_grc_params {
DBG_GRC_PARAM_DUMP_TSTORM,
DBG_GRC_PARAM_DUMP_MSTORM,
DBG_GRC_PARAM_DUMP_USTORM,
DBG_GRC_PARAM_DUMP_XSTORM,
DBG_GRC_PARAM_DUMP_YSTORM,
DBG_GRC_PARAM_DUMP_PSTORM,
DBG_GRC_PARAM_DUMP_REGS,
DBG_GRC_PARAM_DUMP_RAM,
DBG_GRC_PARAM_DUMP_PBUF,
DBG_GRC_PARAM_DUMP_IOR,
DBG_GRC_PARAM_DUMP_VFC,
DBG_GRC_PARAM_DUMP_CM_CTX,
DBG_GRC_PARAM_DUMP_PXP,
DBG_GRC_PARAM_DUMP_RSS,
DBG_GRC_PARAM_DUMP_CAU,
DBG_GRC_PARAM_DUMP_QM,
DBG_GRC_PARAM_DUMP_MCP,
DBG_GRC_PARAM_DUMP_DORQ,
DBG_GRC_PARAM_DUMP_CFC,
DBG_GRC_PARAM_DUMP_IGU,
DBG_GRC_PARAM_DUMP_BRB,
DBG_GRC_PARAM_DUMP_BTB,
DBG_GRC_PARAM_DUMP_BMB,
DBG_GRC_PARAM_RESERVD1,
DBG_GRC_PARAM_DUMP_MULD,
DBG_GRC_PARAM_DUMP_PRS,
DBG_GRC_PARAM_DUMP_DMAE,
DBG_GRC_PARAM_DUMP_TM,
DBG_GRC_PARAM_DUMP_SDM,
DBG_GRC_PARAM_DUMP_DIF,
DBG_GRC_PARAM_DUMP_STATIC,
DBG_GRC_PARAM_UNSTALL,
DBG_GRC_PARAM_RESERVED2,
DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
DBG_GRC_PARAM_EXCLUDE_ALL,
DBG_GRC_PARAM_CRASH,
DBG_GRC_PARAM_PARITY_SAFE,
DBG_GRC_PARAM_DUMP_CM,
DBG_GRC_PARAM_DUMP_PHY,
DBG_GRC_PARAM_NO_MCP,
DBG_GRC_PARAM_NO_FW_VER,
DBG_GRC_PARAM_RESERVED3,
DBG_GRC_PARAM_DUMP_MCP_HW_DUMP,
DBG_GRC_PARAM_DUMP_ILT_CDUC,
DBG_GRC_PARAM_DUMP_ILT_CDUT,
DBG_GRC_PARAM_DUMP_CAU_EXT,
MAX_DBG_GRC_PARAMS
};
/* Debug status codes */
enum dbg_status {
DBG_STATUS_OK,
DBG_STATUS_APP_VERSION_NOT_SET,
DBG_STATUS_UNSUPPORTED_APP_VERSION,
DBG_STATUS_DBG_BLOCK_NOT_RESET,
DBG_STATUS_INVALID_ARGS,
DBG_STATUS_OUTPUT_ALREADY_SET,
DBG_STATUS_INVALID_PCI_BUF_SIZE,
DBG_STATUS_PCI_BUF_ALLOC_FAILED,
DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS,
DBG_STATUS_NO_MATCHING_FRAMING_MODE,
DBG_STATUS_VFC_READ_ERROR,
DBG_STATUS_STORM_ALREADY_ENABLED,
DBG_STATUS_STORM_NOT_ENABLED,
DBG_STATUS_BLOCK_ALREADY_ENABLED,
DBG_STATUS_BLOCK_NOT_ENABLED,
DBG_STATUS_NO_INPUT_ENABLED,
DBG_STATUS_NO_FILTER_TRIGGER_256B,
DBG_STATUS_FILTER_ALREADY_ENABLED,
DBG_STATUS_TRIGGER_ALREADY_ENABLED,
DBG_STATUS_TRIGGER_NOT_ENABLED,
DBG_STATUS_CANT_ADD_CONSTRAINT,
DBG_STATUS_TOO_MANY_TRIGGER_STATES,
DBG_STATUS_TOO_MANY_CONSTRAINTS,
DBG_STATUS_RECORDING_NOT_STARTED,
DBG_STATUS_DATA_DIDNT_TRIGGER,
DBG_STATUS_NO_DATA_RECORDED,
DBG_STATUS_DUMP_BUF_TOO_SMALL,
DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
DBG_STATUS_UNKNOWN_CHIP,
DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
DBG_STATUS_BLOCK_IN_RESET,
DBG_STATUS_INVALID_TRACE_SIGNATURE,
DBG_STATUS_INVALID_NVRAM_BUNDLE,
DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
DBG_STATUS_NVRAM_READ_FAILED,
DBG_STATUS_IDLE_CHK_PARSE_FAILED,
DBG_STATUS_MCP_TRACE_BAD_DATA,
DBG_STATUS_MCP_TRACE_NO_META,
DBG_STATUS_MCP_COULD_NOT_HALT,
DBG_STATUS_MCP_COULD_NOT_RESUME,
DBG_STATUS_RESERVED0,
DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
DBG_STATUS_IGU_FIFO_BAD_DATA,
DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
DBG_STATUS_REG_FIFO_BAD_DATA,
DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
DBG_STATUS_DBG_ARRAY_NOT_SET,
DBG_STATUS_RESERVED1,
DBG_STATUS_NON_MATCHING_LINES,
DBG_STATUS_INSUFFICIENT_HW_IDS,
DBG_STATUS_DBG_BUS_IN_USE,
DBG_STATUS_INVALID_STORM_DBG_MODE,
DBG_STATUS_OTHER_ENGINE_BB_ONLY,
DBG_STATUS_FILTER_SINGLE_HW_ID,
DBG_STATUS_TRIGGER_SINGLE_HW_ID,
DBG_STATUS_MISSING_TRIGGER_STATE_STORM,
MAX_DBG_STATUS
};
/* Debug Storms IDs */
enum dbg_storms {
DBG_TSTORM_ID,
DBG_MSTORM_ID,
DBG_USTORM_ID,
DBG_XSTORM_ID,
DBG_YSTORM_ID,
DBG_PSTORM_ID,
MAX_DBG_STORMS
};
/* Idle Check data */
struct idle_chk_data {
u32 buf_size;
u8 buf_size_set;
u8 reserved1;
u16 reserved2;
};
struct pretend_params {
u8 split_type;
u8 reserved;
u16 split_id;
};
/* Debug Tools data (per HW function) */
struct dbg_tools_data {
struct dbg_grc_data grc;
struct dbg_bus_data bus;
struct idle_chk_data idle_chk;
u8 mode_enable[40];
u8 block_in_reset[132];
u8 chip_id;
u8 hw_type;
u8 num_ports;
u8 num_pfs_per_port;
u8 num_vfs;
u8 initialized;
u8 use_dmae;
u8 reserved;
struct pretend_params pretend;
u32 num_regs_read;
};
/* ILT Clients */
enum ilt_clients {
ILT_CLI_CDUC,
ILT_CLI_CDUT,
ILT_CLI_QM,
ILT_CLI_TM,
ILT_CLI_SRC,
ILT_CLI_TSDM,
ILT_CLI_RGFS,
ILT_CLI_TGFS,
MAX_ILT_CLIENTS
};
/********************************/
/* HSI Init Functions constants */
/********************************/
/* Number of VLAN priorities */
#define NUM_OF_VLAN_PRIORITIES 8
/* BRB RAM init requirements */
struct init_brb_ram_req {
u32 guranteed_per_tc;
u32 headroom_per_tc;
u32 min_pkt_size;
u32 max_ports_per_engine;
u8 num_active_tcs[MAX_NUM_PORTS];
};
/* ETS per-TC init requirements */
struct init_ets_tc_req {
u8 use_sp;
u8 use_wfq;
u16 weight;
};
/* ETS init requirements */
struct init_ets_req {
u32 mtu;
struct init_ets_tc_req tc_req[NUM_OF_TCS];
};
/* NIG LB RL init requirements */
struct init_nig_lb_rl_req {
u16 lb_mac_rate;
u16 lb_rate;
u32 mtu;
u16 tc_rate[NUM_OF_PHYS_TCS];
};
/* NIG TC mapping for each priority */
struct init_nig_pri_tc_map_entry {
u8 tc_id;
u8 valid;
};
/* NIG priority to TC map init requirements */
struct init_nig_pri_tc_map_req {
struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
};
/* QM per global RL init parameters */
struct init_qm_global_rl_params {
u32 rate_limit;
};
/* QM per-port init parameters */
struct init_qm_port_params {
u16 active_phys_tcs;
u16 num_pbf_cmd_lines;
u16 num_btb_blocks;
u8 active;
u8 reserved;
};
/* QM per-PQ init parameters */
struct init_qm_pq_params {
u8 vport_id;
u8 tc_id;
u8 wrr_group;
u8 rl_valid;
u16 rl_id;
u8 port_id;
u8 reserved;
};
/* QM per-vport init parameters */
struct init_qm_vport_params {
u16 wfq;
u16 first_tx_pq_id[NUM_OF_TCS];
};
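/* Illustrative sketch (not part of the HSI): populating a single PQ's init
 * parameters. All values below are arbitrary placeholders; real values come
 * from the driver's QM resource allocation logic.
 */
static inline void qed_example_fill_pq_params(struct init_qm_pq_params *p)
{
	p->vport_id = 0;	/* VPORT this PQ is associated with */
	p->tc_id = 0;		/* traffic class */
	p->wrr_group = 1;	/* WRR weight group */
	p->rl_valid = 0;	/* no rate limiter attached */
	p->rl_id = 0;
	p->port_id = 0;
}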
/**************************************/
/* Init Tool HSI constants and macros */
/**************************************/
/* Width of GRC address in bits (addresses are specified in dwords) */
#define GRC_ADDR_BITS 23
#define MAX_GRC_ADDR (BIT(GRC_ADDR_BITS) - 1)
/* indicates an init that should be applied to any phase ID */
#define ANY_PHASE_ID 0xffff
/* Max size in dwords of a zipped array */
#define MAX_ZIPPED_SIZE 8192
enum chip_ids {
CHIP_BB,
CHIP_K2,
MAX_CHIP_IDS
};
struct fw_asserts_ram_section {
__le16 section_ram_line_offset;
__le16 section_ram_line_size;
u8 list_dword_offset;
u8 list_element_dword_size;
u8 list_num_elements;
u8 list_next_index_dword_offset;
};
struct fw_ver_num {
u8 major;
u8 minor;
u8 rev;
u8 eng;
};
struct fw_ver_info {
__le16 tools_ver;
u8 image_id;
u8 reserved1;
struct fw_ver_num num;
__le32 timestamp;
__le32 reserved2;
};
struct fw_info {
struct fw_ver_info ver;
struct fw_asserts_ram_section fw_asserts_section;
};
struct fw_info_location {
__le32 grc_addr;
__le32 size;
};
enum init_modes {
MODE_RESERVED,
MODE_BB,
MODE_K2,
MODE_ASIC,
MODE_RESERVED2,
MODE_RESERVED3,
MODE_RESERVED4,
MODE_RESERVED5,
MODE_SF,
MODE_MF_SD,
MODE_MF_SI,
MODE_PORTS_PER_ENG_1,
MODE_PORTS_PER_ENG_2,
MODE_PORTS_PER_ENG_4,
MODE_100G,
MODE_RESERVED6,
MODE_RESERVED7,
MAX_INIT_MODES
};
enum init_phases {
PHASE_ENGINE,
PHASE_PORT,
PHASE_PF,
PHASE_VF,
PHASE_QM_PF,
MAX_INIT_PHASES
};
enum init_split_types {
SPLIT_TYPE_NONE,
SPLIT_TYPE_PORT,
SPLIT_TYPE_PF,
SPLIT_TYPE_PORT_PF,
SPLIT_TYPE_VF,
MAX_INIT_SPLIT_TYPES
};
/* Binary buffer header */
struct bin_buffer_hdr {
u32 offset;
u32 length;
};
/* Binary init buffer types */
enum bin_init_buffer_type {
BIN_BUF_INIT_FW_VER_INFO,
BIN_BUF_INIT_CMD,
BIN_BUF_INIT_VAL,
BIN_BUF_INIT_MODE_TREE,
BIN_BUF_INIT_IRO,
BIN_BUF_INIT_OVERLAYS,
MAX_BIN_INIT_BUFFER_TYPE
};
/* FW overlay buffer header */
struct fw_overlay_buf_hdr {
u32 data;
#define FW_OVERLAY_BUF_HDR_STORM_ID_MASK 0xFF
#define FW_OVERLAY_BUF_HDR_STORM_ID_SHIFT 0
#define FW_OVERLAY_BUF_HDR_BUF_SIZE_MASK 0xFFFFFF
#define FW_OVERLAY_BUF_HDR_BUF_SIZE_SHIFT 8
};
/* init array header: raw */
struct init_array_raw_hdr {
u32 data;
#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF
#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
};
/* init array header: standard */
struct init_array_standard_hdr {
u32 data;
#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF
#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
};
/* init array header: zipped */
struct init_array_zipped_hdr {
u32 data;
#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
};
/* init array header: pattern */
struct init_array_pattern_hdr {
u32 data;
#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF
#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF
#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8
};
/* init array header union */
union init_array_hdr {
struct init_array_raw_hdr raw;
struct init_array_standard_hdr standard;
struct init_array_zipped_hdr zipped;
struct init_array_pattern_hdr pattern;
};
/* init array types */
enum init_array_types {
INIT_ARR_STANDARD,
INIT_ARR_ZIPPED,
INIT_ARR_PATTERN,
MAX_INIT_ARRAY_TYPES
};
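/* Illustrative sketch (not part of the HSI): the low 4 bits of every init
 * array header hold the array type, so the raw view of the union above can be
 * used to dispatch before interpreting the type-specific fields. The helper
 * name is a placeholder.
 */
static inline enum init_array_types
qed_example_init_array_type(const union init_array_hdr *hdr)
{
	return (enum init_array_types)GET_FIELD(hdr->raw.data,
						INIT_ARRAY_RAW_HDR_TYPE);
}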
/* init operation: callback */
struct init_callback_op {
u32 op_data;
#define INIT_CALLBACK_OP_OP_MASK 0xF
#define INIT_CALLBACK_OP_OP_SHIFT 0
#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
u16 callback_id;
u16 block_id;
};
/* init operation: delay */
struct init_delay_op {
u32 op_data;
#define INIT_DELAY_OP_OP_MASK 0xF
#define INIT_DELAY_OP_OP_SHIFT 0
#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
#define INIT_DELAY_OP_RESERVED_SHIFT 4
u32 delay;
};
/* init operation: if_mode */
struct init_if_mode_op {
u32 op_data;
#define INIT_IF_MODE_OP_OP_MASK 0xF
#define INIT_IF_MODE_OP_OP_SHIFT 0
#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
u16 reserved2;
u16 modes_buf_offset;
};
/* init operation: if_phase */
struct init_if_phase_op {
u32 op_data;
#define INIT_IF_PHASE_OP_OP_MASK 0xF
#define INIT_IF_PHASE_OP_OP_SHIFT 0
#define INIT_IF_PHASE_OP_RESERVED1_MASK 0xFFF
#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 4
#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
u32 phase_data;
#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF
#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8
#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF
#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
};
/* init mode operators */
enum init_mode_ops {
INIT_MODE_OP_NOT,
INIT_MODE_OP_OR,
INIT_MODE_OP_AND,
MAX_INIT_MODE_OPS
};
/* init operation: raw */
struct init_raw_op {
u32 op_data;
#define INIT_RAW_OP_OP_MASK 0xF
#define INIT_RAW_OP_OP_SHIFT 0
#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF
#define INIT_RAW_OP_PARAM1_SHIFT 4
u32 param2;
};
/* init array params */
struct init_op_array_params {
u16 size;
u16 offset;
};
/* Write init operation arguments */
union init_write_args {
u32 inline_val;
u32 zeros_count;
u32 array_offset;
struct init_op_array_params runtime;
};
/* init operation: write */
struct init_write_op {
u32 data;
#define INIT_WRITE_OP_OP_MASK 0xF
#define INIT_WRITE_OP_OP_SHIFT 0
#define INIT_WRITE_OP_SOURCE_MASK 0x7
#define INIT_WRITE_OP_SOURCE_SHIFT 4
#define INIT_WRITE_OP_RESERVED_MASK 0x1
#define INIT_WRITE_OP_RESERVED_SHIFT 7
#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1
#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF
#define INIT_WRITE_OP_ADDRESS_SHIFT 9
union init_write_args args;
};
/* init operation: read */
struct init_read_op {
u32 op_data;
#define INIT_READ_OP_OP_MASK 0xF
#define INIT_READ_OP_OP_SHIFT 0
#define INIT_READ_OP_POLL_TYPE_MASK 0xF
#define INIT_READ_OP_POLL_TYPE_SHIFT 4
#define INIT_READ_OP_RESERVED_MASK 0x1
#define INIT_READ_OP_RESERVED_SHIFT 8
#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
#define INIT_READ_OP_ADDRESS_SHIFT 9
u32 expected_val;
};
/* Init operations union */
union init_op {
struct init_raw_op raw;
struct init_write_op write;
struct init_read_op read;
struct init_if_mode_op if_mode;
struct init_if_phase_op if_phase;
struct init_callback_op callback;
struct init_delay_op delay;
};
/* Init command operation types */
enum init_op_types {
INIT_OP_READ,
INIT_OP_WRITE,
INIT_OP_IF_MODE,
INIT_OP_IF_PHASE,
INIT_OP_DELAY,
INIT_OP_CALLBACK,
MAX_INIT_OP_TYPES
};
/* init polling types */
enum init_poll_types {
INIT_POLL_NONE,
INIT_POLL_EQ,
INIT_POLL_OR,
INIT_POLL_AND,
MAX_INIT_POLL_TYPES
};
/* init source types */
enum init_source_types {
INIT_SRC_INLINE,
INIT_SRC_ZEROS,
INIT_SRC_ARRAY,
INIT_SRC_RUNTIME,
MAX_INIT_SOURCE_TYPES
};
/* Internal RAM Offsets macro data */
struct iro {
u32 base;
u16 m1;
u16 m2;
u16 m3;
u16 size;
};
/***************************** Public Functions *******************************/
/**
* @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
* arrays.
*
* @param p_hwfn - HW device data
* @param bin_ptr - a pointer to the binary data with debug arrays.
*/
enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
const u8 * const bin_ptr);
/**
* @brief qed_read_regs - Reads registers into a buffer (using GRC).
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for reading the registers.
* @param buf - Destination buffer.
* @param addr - Source GRC address in dwords.
* @param len - Number of registers to read.
*/
void qed_read_regs(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len);
/**
* @brief qed_read_fw_info - Reads FW info from the chip.
*
* The FW info contains FW-related information, such as the FW version,
* FW image (main/L2B/kuku), FW timestamp, etc.
* The FW info is read from the internal RAM of the first Storm that is not in
* reset.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param fw_info - Out: a pointer to write the FW info into.
*
* @return true if the FW info was read successfully from one of the Storms,
* or false if all Storms are in reset.
*/
bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct fw_info *fw_info);
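/* Illustrative sketch (not part of this header's API): a typical call pattern
 * for qed_read_fw_info(). The helper name and log wording are placeholders.
 */
static inline void qed_example_log_fw_ver(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt)
{
	struct fw_info fw_info;

	if (!qed_read_fw_info(p_hwfn, p_ptt, &fw_info)) {
		pr_warn("all Storms are in reset, FW info unavailable\n");
		return;
	}

	pr_info("FW %d.%d.%d.%d\n", fw_info.ver.num.major,
		fw_info.ver.num.minor, fw_info.ver.num.rev,
		fw_info.ver.num.eng);
}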
/**
* @brief qed_dbg_grc_config - Sets the value of a GRC parameter.
*
* @param p_hwfn - HW device data
* @param grc_param - GRC parameter
* @param val - Value to set.
*
* @return error if one of the following holds:
* - the version wasn't set
* - grc_param is invalid
* - val is outside the allowed boundaries
*/
enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
enum dbg_grc_params grc_param, u32 val);
/**
* @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
* default value.
*
* @param p_hwfn - HW device data
*/
void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
/**
* @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for
* GRC Dump.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param buf_size - OUT: required buffer size (in dwords) for the GRC Dump
* data.
*
* @return error if one of the following holds:
* - the version wasn't set
* Otherwise, returns ok.
*/
enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size);
/**
* @brief qed_dbg_grc_dump - Dumps GRC data into the specified buffer.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param dump_buf - Pointer to write the collected GRC data into.
* @param buf_size_in_dwords - Size of the specified buffer in dwords.
* @param num_dumped_dwords - OUT: number of dumped dwords.
*
* @return error if one of the following holds:
* - the version wasn't set
* - the specified dump buffer is too small
* Otherwise, returns ok.
*/
enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
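/* Illustrative sketch (not part of this header's API): the dump functions in
 * this file all follow the same two-step pattern - query the required size,
 * then allocate and dump. kcalloc() is used here for brevity; real GRC dumps
 * can be large and may need a vmalloc-based buffer instead.
 */
static inline enum dbg_status qed_example_grc_dump(struct qed_hwfn *p_hwfn,
						   struct qed_ptt *p_ptt)
{
	u32 buf_size_dwords, num_dumped;
	enum dbg_status rc;
	u32 *buf;

	rc = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &buf_size_dwords);
	if (rc != DBG_STATUS_OK)
		return rc;

	buf = kcalloc(buf_size_dwords, sizeof(u32), GFP_KERNEL);
	if (!buf)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

	rc = qed_dbg_grc_dump(p_hwfn, p_ptt, buf, buf_size_dwords,
			      &num_dumped);

	/* ... on success, consume the first num_dumped dwords of 'buf' ... */
	kfree(buf);
	return rc;
}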
/**
* @brief qed_dbg_idle_chk_get_dump_buf_size - Returns the required buffer size
* for idle check results.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param buf_size - OUT: required buffer size (in dwords) for the idle check
* data.
*
* @return error if one of the following holds:
* - the version wasn't set
* Otherwise, returns ok.
*/
enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size);
/**
* @brief qed_dbg_idle_chk_dump - Performs idle check and writes the results
* into the specified buffer.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param dump_buf - Pointer to write the idle check data into.
* @param buf_size_in_dwords - Size of the specified buffer in dwords.
* @param num_dumped_dwords - OUT: number of dumped dwords.
*
* @return error if one of the following holds:
* - the version wasn't set
* - the specified buffer is too small
* Otherwise, returns ok.
*/
enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
/**
* @brief qed_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer size
* for mcp trace results.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param buf_size - OUT: required buffer size (in dwords) for mcp trace data.
*
* @return error if one of the following holds:
* - the version wasn't set
* - the trace data in the MCP scratchpad contains an invalid signature
* - the bundle ID in NVRAM is invalid
* - the trace meta data cannot be found (in NVRAM or image file)
* Otherwise, returns ok.
*/
enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size);
/**
* @brief qed_dbg_mcp_trace_dump - Performs mcp trace and writes the results
* into the specified buffer.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param dump_buf - Pointer to write the mcp trace data into.
* @param buf_size_in_dwords - Size of the specified buffer in dwords.
* @param num_dumped_dwords - OUT: number of dumped dwords.
*
* @return error if one of the following holds:
* - the version wasn't set
* - the specified buffer is too small
* - the trace data in the MCP scratchpad contains an invalid signature
* - the bundle ID in NVRAM is invalid
* - the trace meta data cannot be found (in NVRAM or image file)
* - the trace meta data cannot be read (from NVRAM or image file)
* Otherwise, returns ok.
*/
enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
/**
* @brief qed_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer size
* for reg fifo results.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param buf_size - OUT: required buffer size (in dwords) for reg fifo data.
*
* @return error if one of the following holds:
* - the version wasn't set
* Otherwise, returns ok.
*/
enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size);
/**
* @brief qed_dbg_reg_fifo_dump - Reads the reg fifo and writes the results into
* the specified buffer.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param dump_buf - Pointer to write the reg fifo data into.
* @param buf_size_in_dwords - Size of the specified buffer in dwords.
* @param num_dumped_dwords - OUT: number of dumped dwords.
*
* @return error if one of the following holds:
* - the version wasn't set
* - the specified buffer is too small
* - DMAE transaction failed
* Otherwise, returns ok.
*/
enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
/**
* @brief qed_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer size
* for the IGU fifo results.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param buf_size - OUT: required buffer size (in dwords) for the IGU fifo
* data.
*
* @return error if one of the following holds:
* - the version wasn't set
* Otherwise, returns ok.
*/
enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size);
/**
* @brief qed_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results into
* the specified buffer.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param dump_buf - Pointer to write the IGU fifo data into.
* @param buf_size_in_dwords - Size of the specified buffer in dwords.
* @param num_dumped_dwords - OUT: number of dumped dwords.
*
* @return error if one of the following holds:
* - the version wasn't set
* - the specified buffer is too small
* - DMAE transaction failed
* Otherwise, returns ok.
*/
enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
/**
* @brief qed_dbg_protection_override_get_dump_buf_size - Returns the required
* buffer size for protection override window results.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param buf_size - OUT: required buffer size (in dwords) for protection
* override data.
*
* @return error if one of the following holds:
* - the version wasn't set
* Otherwise, returns ok.
*/
enum dbg_status
qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size);
/**
* @brief qed_dbg_protection_override_dump - Reads protection override window
* entries and writes the results into the specified buffer.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param dump_buf - Pointer to write the protection override data into.
* @param buf_size_in_dwords - Size of the specified buffer in dwords.
* @param num_dumped_dwords - OUT: number of dumped dwords.
*
* @return error if one of the following holds:
* - the version wasn't set
* - the specified buffer is too small
* - DMAE transaction failed
* Otherwise, returns ok.
*/
enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
/**
* @brief qed_dbg_fw_asserts_get_dump_buf_size - Returns the required buffer
* size for FW Asserts results.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param buf_size - OUT: required buffer size (in dwords) for FW Asserts data.
*
* @return error if one of the following holds:
* - the version wasn't set
* Otherwise, returns ok.
*/
enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size);
/**
* @brief qed_dbg_fw_asserts_dump - Reads the FW Asserts and writes the results
* into the specified buffer.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param dump_buf - Pointer to write the FW Asserts data into.
* @param buf_size_in_dwords - Size of the specified buffer in dwords.
* @param num_dumped_dwords - OUT: number of dumped dwords.
*
* @return error if one of the following holds:
* - the version wasn't set
* - the specified buffer is too small
* Otherwise, returns ok.
*/
enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
/**
* @brief qed_dbg_read_attn - Reads the attention registers of the specified
* block and type, and writes the results into the specified buffer.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param block - Block ID.
* @param attn_type - Attention type.
* @param clear_status - Indicates if the attention status should be cleared.
* @param results - OUT: Pointer to write the read results into
*
* @return error if one of the following holds:
* - the version wasn't set
* Otherwise, returns ok.