MLX5DV_QUERY_DEVICE(3) | Library Functions Manual | MLX5DV_QUERY_DEVICE(3) |
mlx5dv_query_device - Query device capabilities specific to mlx5
#include <infiniband/mlx5dv.h>

int mlx5dv_query_device(struct ibv_context *ctx_in,
                        struct mlx5dv_context *attrs_out);
mlx5dv_query_device() queries hardware device-specific information that is important for the data path but is not provided by ibv_query_device(3).

This function returns the version, flags and compatibility mask. The version represents the format of the internal hardware structures that mlx5dv.h exposes. Additions of new fields to the existing structures are handled via the comp_mask field.
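The comp_mask field is an in/out field: the caller sets the bits for the optional capability sections it wants to query, and on return only the bits the driver supports remain set. Below is a minimal sketch of such a query; the helper name query_mlx5_caps and the particular mask bits chosen are illustrative only, not part of the API:

#include <stdio.h>
#include <string.h>
#include <infiniband/mlx5dv.h>

static int query_mlx5_caps(struct ibv_context *ctx)
{
        struct mlx5dv_context attrs;
        int ret;

        memset(&attrs, 0, sizeof(attrs));
        /* Request the optional capability sections of interest. */
        attrs.comp_mask = MLX5DV_CONTEXT_MASK_SWP |
                          MLX5DV_CONTEXT_MASK_CQE_COMPRESION;

        ret = mlx5dv_query_device(ctx, &attrs);
        if (ret)
                return ret; /* ret is an errno value */

        /* Only the bits the driver supports remain set on return. */
        if (attrs.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
                printf("SW parsing offloads: 0x%x\n",
                       attrs.sw_parsing_caps.sw_parsing_offloads);

        return 0;
}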
struct mlx5dv_sw_parsing_caps {
        uint32_t sw_parsing_offloads; /* Use enum mlx5dv_sw_parsing_offloads */
        uint32_t supported_qpts;
};
struct mlx5dv_striding_rq_caps {
        uint32_t min_single_stride_log_num_of_bytes; /* min log size of each stride */
        uint32_t max_single_stride_log_num_of_bytes; /* max log size of each stride */
        uint32_t min_single_wqe_log_num_of_strides; /* min log number of strides per WQE */
        uint32_t max_single_wqe_log_num_of_strides; /* max log number of strides per WQE */
        uint32_t supported_qpts;
};
struct mlx5dv_dci_streams_caps {
        uint8_t max_log_num_concurent; /* max log number of parallel different streams that can be handled by HW */
        uint8_t max_log_num_errored; /* max DCI error stream channels supported per DCI before the DCI moves to an error state */
};
struct mlx5dv_context {
        uint8_t         version;
        uint64_t        flags;
        uint64_t        comp_mask;      /* Use enum mlx5dv_context_comp_mask */
        struct mlx5dv_cqe_comp_caps     cqe_comp_caps;
        struct mlx5dv_sw_parsing_caps   sw_parsing_caps;
        uint32_t        tunnel_offloads_caps;
        uint32_t        max_dynamic_bfregs; /* max blue-flame registers that can be dynamically allocated */
        uint64_t        max_clock_info_update_nsec;
        uint32_t        flow_action_flags; /* use enum mlx5dv_flow_action_cap_flags */
        uint32_t        dc_odp_caps; /* use enum ibv_odp_transport_cap_bits */
        void            *hca_core_clock; /* points to a memory location that is mapped to the HCA's core clock */
        struct mlx5dv_sig_caps sig_caps;
        size_t          max_wr_memcpy_length; /* max length supported by the DMA memcpy WR */
        struct mlx5dv_crypto_caps crypto_caps;
};

enum mlx5dv_context_flags {
        /*
         * This flag indicates if CQE version 0 or 1 is needed.
         */
        MLX5DV_CONTEXT_FLAGS_CQE_V1 = (1 << 0),
        MLX5DV_CONTEXT_FLAGS_OBSOLETE = (1 << 1), /* Obsolete, don't use */
        MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED = (1 << 2), /* Multi packet WQE is allowed */
        MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW = (1 << 3), /* Enhanced multi packet WQE is supported */
        MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP = (1 << 4), /* Support CQE 128B compression */
        MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD = (1 << 5), /* Support CQE 128B padding */
        MLX5DV_CONTEXT_FLAGS_PACKET_BASED_CREDIT_MODE = (1 << 6), /* Support packet-based credit mode in RC QP */
        /*
         * If the CQ was created with IBV_WC_EX_WITH_COMPLETION_TIMESTAMP_WALLCLOCK,
         * CQE timestamps will be in real-time format.
         */
        MLX5DV_CONTEXT_FLAGS_REAL_TIME_TS = (1 << 7),
};
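For example, data-path code can branch on the returned flags after a successful query; a short hedged sketch, where attrs is the mlx5dv_context filled in by mlx5dv_query_device() as in the earlier example:

/* attrs.flags is valid after mlx5dv_query_device() returned 0 */
if (attrs.flags & MLX5DV_CONTEXT_FLAGS_CQE_V1)
        printf("CQEs use version-1 format\n");
if (attrs.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW)
        printf("enhanced multi packet WQEs are supported\n");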
enum mlx5dv_context_comp_mask {
        MLX5DV_CONTEXT_MASK_CQE_COMPRESION = 1 << 0,
        MLX5DV_CONTEXT_MASK_SWP = 1 << 1,
        MLX5DV_CONTEXT_MASK_STRIDING_RQ = 1 << 2,
        MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS = 1 << 3,
        MLX5DV_CONTEXT_MASK_DYN_BFREGS = 1 << 4,
        MLX5DV_CONTEXT_MASK_CLOCK_INFO_UPDATE = 1 << 5,
        MLX5DV_CONTEXT_MASK_FLOW_ACTION_FLAGS = 1 << 6,
        MLX5DV_CONTEXT_MASK_DC_ODP_CAPS = 1 << 7,
        MLX5DV_CONTEXT_MASK_HCA_CORE_CLOCK = 1 << 8,
        MLX5DV_CONTEXT_MASK_NUM_LAG_PORTS = 1 << 9,
        MLX5DV_CONTEXT_MASK_SIGNATURE_OFFLOAD = 1 << 10,
        MLX5DV_CONTEXT_MASK_DCI_STREAMS = 1 << 11,
        MLX5DV_CONTEXT_MASK_WR_MEMCPY_LENGTH = 1 << 12,
        MLX5DV_CONTEXT_MASK_CRYPTO_OFFLOAD = 1 << 13,
};
enum mlx5dv_sw_parsing_offloads {
        MLX5DV_SW_PARSING = 1 << 0,
        MLX5DV_SW_PARSING_CSUM = 1 << 1,
        MLX5DV_SW_PARSING_LSO = 1 << 2,
};
enum mlx5dv_tunnel_offloads {
        MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN = 1 << 0,
        MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE = 1 << 1,
        MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE = 1 << 2,
};
enum mlx5dv_flow_action_cap_flags {
        /* Flow action ESP (with AES_GCM keymat) is supported */
        MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM = 1 << 0,
        /* Flow action ESP always returns metadata in the payload */
        MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA = 1 << 1,
        /* ESP (with AES_GCM keymat) supports matching by SPI (rather than hashing against SPI) */
        MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING = 1 << 2,
        /* Flow action ESP supports full offload (with AES_GCM keymat) */
        MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD = 1 << 3,
        /* Flow action ESP (with AES_GCM keymat), ESN comes implicitly from IV */
        MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4,
};
struct mlx5dv_sig_caps {
        uint64_t block_size; /* use enum mlx5dv_block_size_caps */
        uint32_t block_prot; /* use enum mlx5dv_sig_prot_caps */
        uint16_t t10dif_bg; /* use enum mlx5dv_sig_t10dif_bg_caps */
        uint16_t crc_type; /* use enum mlx5dv_sig_crc_type_caps */
};

enum mlx5dv_sig_prot_caps {
        MLX5DV_SIG_PROT_CAP_T10DIF = 1 << MLX5DV_SIG_TYPE_T10DIF,
        MLX5DV_SIG_PROT_CAP_CRC = 1 << MLX5DV_SIG_TYPE_CRC,
};

enum mlx5dv_sig_t10dif_bg_caps {
        MLX5DV_SIG_T10DIF_BG_CAP_CRC = 1 << MLX5DV_SIG_T10DIF_CRC,
        MLX5DV_SIG_T10DIF_BG_CAP_CSUM = 1 << MLX5DV_SIG_T10DIF_CSUM,
};

enum mlx5dv_sig_crc_type_caps {
        MLX5DV_SIG_CRC_TYPE_CAP_CRC32 = 1 << MLX5DV_SIG_CRC_TYPE_CRC32,
        MLX5DV_SIG_CRC_TYPE_CAP_CRC32C = 1 << MLX5DV_SIG_CRC_TYPE_CRC32C,
        MLX5DV_SIG_CRC_TYPE_CAP_CRC64_XP10 = 1 << MLX5DV_SIG_CRC_TYPE_CRC64_XP10,
};

enum mlx5dv_block_size_caps {
        MLX5DV_BLOCK_SIZE_CAP_512 = 1 << MLX5DV_BLOCK_SIZE_512,
        MLX5DV_BLOCK_SIZE_CAP_520 = 1 << MLX5DV_BLOCK_SIZE_520,
        MLX5DV_BLOCK_SIZE_CAP_4048 = 1 << MLX5DV_BLOCK_SIZE_4048,
        MLX5DV_BLOCK_SIZE_CAP_4096 = 1 << MLX5DV_BLOCK_SIZE_4096,
        MLX5DV_BLOCK_SIZE_CAP_4160 = 1 << MLX5DV_BLOCK_SIZE_4160,
};
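Combining the signature bitmaps above, an application can test whether a specific signature configuration is available before creating signature-enabled resources. A hedged sketch follows; the CRC type and block size picked here are arbitrary examples, and attrs is the struct returned by mlx5dv_query_device():

/* comp_mask must have included MLX5DV_CONTEXT_MASK_SIGNATURE_OFFLOAD
 * before the query for sig_caps to be valid. */
if ((attrs.comp_mask & MLX5DV_CONTEXT_MASK_SIGNATURE_OFFLOAD) &&
    (attrs.sig_caps.block_prot & MLX5DV_SIG_PROT_CAP_CRC) &&
    (attrs.sig_caps.crc_type & MLX5DV_SIG_CRC_TYPE_CAP_CRC32C) &&
    (attrs.sig_caps.block_size & MLX5DV_BLOCK_SIZE_CAP_4096))
        printf("CRC32C signature over 4096-byte blocks is supported\n");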
struct mlx5dv_crypto_caps {
        /*
         * If failed_selftests != 0, some self-tests have failed, which
         * may render specific crypto engines unusable. Consult NVIDIA
         * for the exact meaning of the error codes.
         */
        uint16_t failed_selftests;
        uint8_t crypto_engines; /* use enum mlx5dv_crypto_engines_caps */
        uint8_t wrapped_import_method; /* use enum mlx5dv_crypto_wrapped_import_method_caps */
        uint8_t log_max_num_deks;
        uint32_t flags; /* use enum mlx5dv_crypto_caps_flags */
};

/* This bitmap indicates which crypto engines are supported by the device. */
enum mlx5dv_crypto_engines_caps {
        /* Deprecated, replaced by MLX5DV_CRYPTO_ENGINES_CAP_AES_XTS_SINGLE_BLOCK */
        MLX5DV_CRYPTO_ENGINES_CAP_AES_XTS = 1 << 0,
        /*
         * Indicates that AES-XTS only supports encrypting a single block
         * at a time.
         */
        MLX5DV_CRYPTO_ENGINES_CAP_AES_XTS_SINGLE_BLOCK = 1 << 1,
        /* Indicates that AES-XTS supports multi-block encryption. */
        MLX5DV_CRYPTO_ENGINES_CAP_AES_XTS_MULTI_BLOCK = 1 << 2,
};

/*
* This bitmap indicates the import method of each crypto engine.
*
* If a bit is not set, the corresponding crypto engine is in plaintext import method and the
* application must use plaintext DEKs for this crypto engine.
*
* Otherwise, the corresponding crypto engine is in wrapped import method and the application
* must use wrapped DEKs for this crypto engine. To load wrapped DEKs the application must perform
* crypto login, which in turn requires MLX5DV_CRYPTO_CAPS_WRAPPED_CRYPTO_OPERATIONAL below to be set.
 */
enum mlx5dv_crypto_wrapped_import_method_caps {
MLX5DV_CRYPTO_WRAPPED_IMPORT_METHOD_CAP_AES_XTS = 1 << 0,
};

enum mlx5dv_crypto_caps_flags {
        /* Indicates whether crypto capabilities are enabled on the device. */
        MLX5DV_CRYPTO_CAPS_CRYPTO = 1 << 0,
        /* Indicates whether crypto engines that are in wrapped import method are operational. */
        MLX5DV_CRYPTO_CAPS_WRAPPED_CRYPTO_OPERATIONAL = 1 << 1,
        /*
         * If set, indicates that after the next FW reset the device will go back to
         * commissioning mode, meaning that MLX5DV_CRYPTO_CAPS_WRAPPED_CRYPTO_OPERATIONAL
         * will be set to 0.
         */
        MLX5DV_CRYPTO_CAPS_WRAPPED_CRYPTO_GOING_TO_COMMISSIONING = 1 << 2,
};
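The comment blocks above imply the following decision logic for AES-XTS DEKs. A hedged sketch, assuming attrs was queried with MLX5DV_CONTEXT_MASK_CRYPTO_OFFLOAD set in comp_mask:

if ((attrs.comp_mask & MLX5DV_CONTEXT_MASK_CRYPTO_OFFLOAD) &&
    (attrs.crypto_caps.flags & MLX5DV_CRYPTO_CAPS_CRYPTO)) {
        if (attrs.crypto_caps.wrapped_import_method &
            MLX5DV_CRYPTO_WRAPPED_IMPORT_METHOD_CAP_AES_XTS) {
                /* Wrapped import method: crypto login is required, and
                 * the wrapped crypto engines must be operational. */
                if (attrs.crypto_caps.flags &
                    MLX5DV_CRYPTO_CAPS_WRAPPED_CRYPTO_OPERATIONAL)
                        printf("AES-XTS usable with wrapped DEKs\n");
        } else {
                /* Plaintext import method: plaintext DEKs can be loaded
                 * without a crypto login. */
                printf("AES-XTS usable with plaintext DEKs\n");
        }
}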
mlx5dv_query_device() returns 0 on success, or the value of errno on failure (which indicates the failure reason).
* The compatibility mask (comp_mask) is an in/out field: the caller sets the bits it wants to query, and on return only the bits the driver supports remain set.