DPDK: update to 18.11.5.

dev
fengbojiang 2019-11-23 16:13:38 +08:00 committed by jfb8856606
parent 7b0de5a004
commit 4b05018ffe
384 changed files with 4029 additions and 2615 deletions

View File

@ -13,9 +13,6 @@ lib_execinfo = cc.find_library('execinfo', required: false)
default_cflags = machine_args
# specify -D_GNU_SOURCE unconditionally
default_cflags += '-D_GNU_SOURCE'
foreach app:apps
build = true
name = app

View File

@ -128,7 +128,7 @@ cperf_latency_test_runner(void *arg)
uint8_t burst_size_idx = 0;
uint32_t imix_idx = 0;
static int only_once;
static rte_atomic16_t display_once = RTE_ATOMIC16_INIT(0);
if (ctx == NULL)
return 0;
@ -310,7 +310,7 @@ cperf_latency_test_runner(void *arg)
time_min = tunit*(double)(tsc_min) / tsc_hz;
if (ctx->options->csv) {
if (!only_once)
if (rte_atomic16_test_and_set(&display_once))
printf("\n# lcore, Buffer Size, Burst Size, Pakt Seq #, "
"Packet Size, cycles, time (us)");
@ -325,7 +325,6 @@ cperf_latency_test_runner(void *arg)
/ tsc_hz);
}
only_once = 1;
} else {
printf("\n# Device %d on lcore %u\n", ctx->dev_id,
ctx->lcore_id);
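
These hunks (and the matching ones in the cyclecount, throughput and verify runners below) replace a plain only_once flag with an atomic, so that exactly one lcore wins the right to print the header even when several runners race. A minimal sketch of the pattern, with a hypothetical function name:

#include <stdio.h>
#include <rte_atomic.h>

static void
print_header_once(void)
{
    static rte_atomic16_t display_once = RTE_ATOMIC16_INIT(0);

    /* rte_atomic16_test_and_set() returns non-zero only for the single
     * caller that flips the value from 0 to 1 */
    if (rte_atomic16_test_and_set(&display_once))
        printf("# lcore, Buffer Size, Burst Size, ...\n");
}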

View File

@ -16,7 +16,7 @@
#define PRETTY_HDR_FMT "%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n"
#define PRETTY_LINE_FMT "%12u%12u%12u%12u%12u%12u%12u%12.0f%12.0f%12.0f\n"
#define CSV_HDR_FMT "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n"
#define CSV_LINE_FMT "%10u;%10u;%u;%u;%u;%u;%u;%.f3;%.f3;%.f3\n"
#define CSV_LINE_FMT "%10u;%10u;%u;%u;%u;%u;%u;%.3f;%.3f;%.3f\n"
struct cperf_pmd_cyclecount_ctx {
uint8_t dev_id;
@ -390,7 +390,7 @@ cperf_pmd_cyclecount_test_runner(void *test_ctx)
state.lcore = rte_lcore_id();
state.linearize = 0;
static int only_once;
static rte_atomic16_t display_once = RTE_ATOMIC16_INIT(0);
static bool warmup = true;
/*
@ -436,13 +436,12 @@ cperf_pmd_cyclecount_test_runner(void *test_ctx)
}
if (!opts->csv) {
if (!only_once)
if (rte_atomic16_test_and_set(&display_once))
printf(PRETTY_HDR_FMT, "lcore id", "Buf Size",
"Burst Size", "Enqueued",
"Dequeued", "Enq Retries",
"Deq Retries", "Cycles/Op",
"Cycles/Enq", "Cycles/Deq");
only_once = 1;
printf(PRETTY_LINE_FMT, state.ctx->lcore_id,
opts->test_buffer_size, test_burst_size,
@ -453,13 +452,12 @@ cperf_pmd_cyclecount_test_runner(void *test_ctx)
state.cycles_per_enq,
state.cycles_per_deq);
} else {
if (!only_once)
if (rte_atomic16_test_and_set(&display_once))
printf(CSV_HDR_FMT, "# lcore id", "Buf Size",
"Burst Size", "Enqueued",
"Dequeued", "Enq Retries",
"Deq Retries", "Cycles/Op",
"Cycles/Enq", "Cycles/Deq");
only_once = 1;
printf(CSV_LINE_FMT, state.ctx->lcore_id,
opts->test_buffer_size, test_burst_size,
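
The CSV format fix above is more than cosmetic: in "%.f3" the precision digits are missing, so the conversion ends at the "f" (precision taken as zero) and the trailing "3" is printed literally. A standalone illustration:

#include <stdio.h>

int main(void)
{
    double v = 1.23456;

    printf("%.f3\n", v);  /* precision 0 plus a literal '3': prints "13" */
    printf("%.3f\n", v);  /* three decimal places: prints "1.235" */
    return 0;
}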

View File

@ -94,7 +94,7 @@ cperf_throughput_test_runner(void *test_ctx)
uint8_t burst_size_idx = 0;
uint32_t imix_idx = 0;
static int only_once;
static rte_atomic16_t display_once = RTE_ATOMIC16_INIT(0);
struct rte_crypto_op *ops[ctx->options->max_burst_size];
struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
@ -261,13 +261,12 @@ cperf_throughput_test_runner(void *test_ctx)
ctx->options->total_ops);
if (!ctx->options->csv) {
if (!only_once)
if (rte_atomic16_test_and_set(&display_once))
printf("%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
"lcore id", "Buf Size", "Burst Size",
"Enqueued", "Dequeued", "Failed Enq",
"Failed Deq", "MOps", "Gbps",
"Cycles/Buf");
only_once = 1;
printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
"%12"PRIu64"%12.4f%12.4f%12.2f\n",
@ -282,12 +281,11 @@ cperf_throughput_test_runner(void *test_ctx)
throughput_gbps,
cycles_per_packet);
} else {
if (!only_once)
if (rte_atomic16_test_and_set(&display_once))
printf("#lcore id,Buffer Size(B),"
"Burst Size,Enqueued,Dequeued,Failed Enq,"
"Failed Deq,Ops(Millions),Throughput(Gbps),"
"Cycles/Buf\n\n");
only_once = 1;
printf("%u;%u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
"%.3f;%.3f;%.3f\n",

View File

@ -232,7 +232,7 @@ cperf_verify_test_runner(void *test_ctx)
uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
uint64_t ops_failed = 0;
static int only_once;
static rte_atomic16_t display_once = RTE_ATOMIC16_INIT(0);
uint64_t i;
uint16_t ops_unused = 0;
@ -375,12 +375,11 @@ cperf_verify_test_runner(void *test_ctx)
}
if (!ctx->options->csv) {
if (!only_once)
if (rte_atomic16_test_and_set(&display_once))
printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
"lcore id", "Buf Size", "Burst size",
"Enqueued", "Dequeued", "Failed Enq",
"Failed Deq", "Failed Ops");
only_once = 1;
printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
"%12"PRIu64"%12"PRIu64"\n",
@ -393,11 +392,10 @@ cperf_verify_test_runner(void *test_ctx)
ops_deqd_failed,
ops_failed);
} else {
if (!only_once)
if (rte_atomic16_test_and_set(&display_once))
printf("\n# lcore id, Buffer Size(B), "
"Burst Size,Enqueued,Dequeued,Failed Enq,"
"Failed Deq,Failed Ops\n");
only_once = 1;
printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
"%"PRIu64"\n",

View File

@ -627,9 +627,12 @@ main(int argc, char **argv)
if (i == total_nb_qps)
break;
rte_eal_wait_lcore(lcore_id);
ret |= rte_eal_wait_lcore(lcore_id);
i++;
}
if (ret != EXIT_SUCCESS)
goto err;
} else {
/* Get next size from range or list */
@ -654,10 +657,13 @@ main(int argc, char **argv)
if (i == total_nb_qps)
break;
rte_eal_wait_lcore(lcore_id);
ret |= rte_eal_wait_lcore(lcore_id);
i++;
}
if (ret != EXIT_SUCCESS)
goto err;
/* Get next size from range or list */
if (opts.inc_buffer_size != 0)
opts.test_buffer_size += opts.inc_buffer_size;
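
OR-ing the return values of rte_eal_wait_lcore() is what lets the main lcore detect a failed worker job instead of silently discarding its exit status. A minimal sketch of the launch/collect pattern, with a hypothetical worker_fn (launch errors ignored for brevity):

#include <rte_launch.h>
#include <rte_lcore.h>

/* hypothetical per-lcore job: returns non-zero on failure */
static int
worker_fn(void *arg)
{
    (void)arg;
    /* ... run the measurement ... */
    return 0;
}

static int
launch_and_collect(void)
{
    unsigned int lcore_id;
    int ret = 0;

    RTE_LCORE_FOREACH_SLAVE(lcore_id)
        rte_eal_remote_launch(worker_fn, NULL, lcore_id);

    RTE_LCORE_FOREACH_SLAVE(lcore_id)
        ret |= rte_eal_wait_lcore(lcore_id); /* 0 only if every job returned 0 */

    return ret;
}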

View File

@ -67,6 +67,11 @@ order_producer(void *arg)
int
order_opt_check(struct evt_options *opt)
{
if (opt->prod_type != EVT_PROD_TYPE_SYNT) {
evt_err("Invalid producer type");
return -EINVAL;
}
/* 1 producer + N workers + 1 master */
if (rte_lcore_count() < 3) {
evt_err("test need minimum 3 lcores");
@ -298,12 +303,23 @@ order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
int ret;
uint8_t port;
struct test_order *t = evt_test_priv(test);
struct rte_event_dev_info dev_info;
memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
if (ret) {
evt_err("failed to get eventdev info %d", opt->dev_id);
return ret;
}
if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;
/* port configuration */
const struct rte_event_port_conf wkr_p_conf = {
const struct rte_event_port_conf p_conf = {
.dequeue_depth = opt->wkr_deq_dep,
.enqueue_depth = 64,
.new_event_threshold = 4096,
.enqueue_depth = dev_info.max_event_port_dequeue_depth,
.new_event_threshold = dev_info.max_num_events,
};
/* setup one port per worker, linking to all queues */
@ -314,7 +330,7 @@ order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
w->port_id = port;
w->t = t;
ret = rte_event_port_setup(opt->dev_id, port, &wkr_p_conf);
ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
if (ret) {
evt_err("failed to setup port %d", port);
return ret;
@ -326,12 +342,6 @@ order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
return -EINVAL;
}
}
/* port for producer, no links */
const struct rte_event_port_conf prod_conf = {
.dequeue_depth = 8,
.enqueue_depth = 32,
.new_event_threshold = 1200,
};
struct prod_data *p = &t->prod;
p->dev_id = opt->dev_id;
@ -339,7 +349,7 @@ order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
p->queue_id = 0;
p->t = t;
ret = rte_event_port_setup(opt->dev_id, port, &prod_conf);
ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
if (ret) {
evt_err("failed to setup producer port %d", port);
return ret;
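
The reworked setup sizes a single port configuration from the device capabilities instead of hard-coding depths that a given eventdev may not support. A minimal sketch of the pattern under assumed inputs; note the patch above reuses max_event_port_dequeue_depth for the enqueue depth, while this sketch uses the corresponding enqueue capability:

#include <rte_eventdev.h>

static int
setup_port_within_limits(uint8_t dev_id, uint8_t port, uint16_t deq_depth)
{
    struct rte_event_dev_info info;
    int ret;

    ret = rte_event_dev_info_get(dev_id, &info);
    if (ret)
        return ret;

    /* clamp the requested depth to what the device advertises */
    if (deq_depth > info.max_event_port_dequeue_depth)
        deq_depth = info.max_event_port_dequeue_depth;

    const struct rte_event_port_conf conf = {
        .dequeue_depth = deq_depth,
        .enqueue_depth = info.max_event_port_enqueue_depth,
        .new_event_threshold = info.max_num_events,
    };

    return rte_event_port_setup(dev_id, port, &conf);
}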

View File

@ -583,7 +583,8 @@ perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
return -1;
}
if (opt->prod_type == EVT_PROD_TYPE_SYNT) {
if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
/* Validate producer lcores */
if (evt_lcores_has_overlap(opt->plcores,
rte_get_master_lcore())) {

View File

@ -2042,6 +2042,7 @@ cmd_config_rx_mode_flag_parsed(void *parsed_result,
{
struct cmd_config_rx_mode_flag *res = parsed_result;
portid_t pid;
int k;
if (!all_ports_stopped()) {
printf("Please stop all ports first\n");
@ -2142,6 +2143,10 @@ cmd_config_rx_mode_flag_parsed(void *parsed_result,
return;
}
port->dev_conf.rxmode.offloads = rx_offloads;
/* Apply Rx offloads configuration */
for (k = 0; k < port->dev_info.max_rx_queues; k++)
port->rx_conf[k].offloads =
port->dev_conf.rxmode.offloads;
}
init_port_config();
@ -4354,6 +4359,17 @@ csum_show(int port_id)
}
}
static void
cmd_config_queue_tx_offloads(struct rte_port *port)
{
int k;
/* Apply queue tx offloads configuration */
for (k = 0; k < port->dev_info.max_rx_queues; k++)
port->tx_conf[k].offloads =
port->dev_conf.txmode.offloads;
}
static void
cmd_csum_parsed(void *parsed_result,
__attribute__((unused)) struct cmdline *cl,
@ -4438,6 +4454,7 @@ cmd_csum_parsed(void *parsed_result,
ports[res->port_id].dev_conf.txmode.offloads &=
(~csum_offloads);
}
cmd_config_queue_tx_offloads(&ports[res->port_id]);
}
csum_show(res->port_id);
@ -4589,6 +4606,7 @@ cmd_tso_set_parsed(void *parsed_result,
printf("TSO segment size for non-tunneled packets is %d\n",
ports[res->port_id].tso_segsz);
}
cmd_config_queue_tx_offloads(&ports[res->port_id]);
/* display warnings if configuration is not supported by the NIC */
rte_eth_dev_info_get(res->port_id, &dev_info);
@ -4744,6 +4762,7 @@ cmd_tunnel_tso_set_parsed(void *parsed_result,
"if outer L3 is IPv4; not necessary for IPv6\n");
}
cmd_config_queue_tx_offloads(&ports[res->port_id]);
cmd_reconfig_device_queue(res->port_id, 1, 1);
}
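
The new cmd_config_queue_tx_offloads() helper and the Rx loop above exist because per-queue offload settings must track the port-level ones: queues configured before a port-level change would otherwise keep stale flags. A minimal sketch of the pattern, assuming a testpmd-style struct rte_port (the Tx loop here iterates max_tx_queues, whereas the helper above reuses max_rx_queues):

static void
apply_port_offloads_to_queues(struct rte_port *port)
{
    int k;

    for (k = 0; k < port->dev_info.max_rx_queues; k++)
        port->rx_conf[k].offloads = port->dev_conf.rxmode.offloads;
    for (k = 0; k < port->dev_info.max_tx_queues; k++)
        port->tx_conf[k].offloads = port->dev_conf.txmode.offloads;
}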
@ -8348,19 +8367,19 @@ cmd_set_vf_rxmode_parsed(void *parsed_result,
__attribute__((unused)) void *data)
{
int ret = -ENOTSUP;
uint16_t rx_mode = 0;
uint16_t vf_rxmode = 0;
struct cmd_set_vf_rxmode *res = parsed_result;
int is_on = (strcmp(res->on, "on") == 0) ? 1 : 0;
if (!strcmp(res->what,"rxmode")) {
if (!strcmp(res->mode, "AUPE"))
rx_mode |= ETH_VMDQ_ACCEPT_UNTAG;
vf_rxmode |= ETH_VMDQ_ACCEPT_UNTAG;
else if (!strcmp(res->mode, "ROPE"))
rx_mode |= ETH_VMDQ_ACCEPT_HASH_UC;
vf_rxmode |= ETH_VMDQ_ACCEPT_HASH_UC;
else if (!strcmp(res->mode, "BAM"))
rx_mode |= ETH_VMDQ_ACCEPT_BROADCAST;
vf_rxmode |= ETH_VMDQ_ACCEPT_BROADCAST;
else if (!strncmp(res->mode, "MPE",3))
rx_mode |= ETH_VMDQ_ACCEPT_MULTICAST;
vf_rxmode |= ETH_VMDQ_ACCEPT_MULTICAST;
}
RTE_SET_USED(is_on);
@ -8368,12 +8387,12 @@ cmd_set_vf_rxmode_parsed(void *parsed_result,
#ifdef RTE_LIBRTE_IXGBE_PMD
if (ret == -ENOTSUP)
ret = rte_pmd_ixgbe_set_vf_rxmode(res->port_id, res->vf_id,
rx_mode, (uint8_t)is_on);
vf_rxmode, (uint8_t)is_on);
#endif
#ifdef RTE_LIBRTE_BNXT_PMD
if (ret == -ENOTSUP)
ret = rte_pmd_bnxt_set_vf_rxmode(res->port_id, res->vf_id,
rx_mode, (uint8_t)is_on);
vf_rxmode, (uint8_t)is_on);
#endif
if (ret < 0)
printf("bad VF receive mode parameter, return code = %d \n",

View File

@ -3378,6 +3378,7 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
{
static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
struct action_rss_data *action_rss_data;
const struct arg *arg;
int ret;
int i;
@ -3393,10 +3394,10 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
}
if (i >= ACTION_RSS_QUEUE_NUM)
return -1;
if (push_args(ctx,
ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
i * sizeof(action_rss_data->queue[i]),
sizeof(action_rss_data->queue[i]))))
arg = ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
i * sizeof(action_rss_data->queue[i]),
sizeof(action_rss_data->queue[i]));
if (push_args(ctx, arg))
return -1;
ret = parse_int(ctx, token, str, len, NULL, 0);
if (ret < 0) {
@ -3746,6 +3747,8 @@ parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
.src_addr = mplsogre_encap_conf.ipv4_src,
.dst_addr = mplsogre_encap_conf.ipv4_dst,
.next_proto_id = IPPROTO_GRE,
.version_ihl = IPV4_VHL_DEF,
.time_to_live = IPDEFTTL,
},
};
struct rte_flow_item_ipv6 ipv6 = {
@ -3847,6 +3850,7 @@ parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
struct rte_flow_item_ipv6 ipv6 = {
.hdr = {
.proto = IPPROTO_GRE,
.hop_limits = IPDEFTTL,
},
};
struct rte_flow_item_gre gre = {
@ -3934,6 +3938,8 @@ parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
.src_addr = mplsoudp_encap_conf.ipv4_src,
.dst_addr = mplsoudp_encap_conf.ipv4_dst,
.next_proto_id = IPPROTO_UDP,
.version_ihl = IPV4_VHL_DEF,
.time_to_live = IPDEFTTL,
},
};
struct rte_flow_item_ipv6 ipv6 = {
@ -4038,6 +4044,7 @@ parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
struct rte_flow_item_ipv6 ipv6 = {
.hdr = {
.proto = IPPROTO_UDP,
.hop_limits = IPDEFTTL,
},
};
struct rte_flow_item_udp udp = {

View File

@ -510,6 +510,10 @@ port_infos_display(portid_t port_id)
printf("Min possible number of TXDs per queue: %hu\n",
dev_info.tx_desc_lim.nb_min);
printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
printf("Max segment number per packet: %hu\n",
dev_info.tx_desc_lim.nb_seg_max);
printf("Max segment number per MTU/TSO: %hu\n",
dev_info.tx_desc_lim.nb_mtu_seg_max);
/* Show switch info only if valid switch domain and port id is set */
if (dev_info.switch_info.domain_id !=

View File

@ -2290,7 +2290,7 @@ attach_port(char *identifier)
return;
}
if (rte_dev_probe(identifier) != 0) {
if (rte_dev_probe(identifier) < 0) {
TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
return;
}
@ -2360,7 +2360,7 @@ detach_port_device(portid_t port_id)
port_flow_flush(port_id);
}
if (rte_dev_remove(dev) != 0) {
if (rte_dev_remove(dev) < 0) {
TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
return;
}
@ -2735,9 +2735,13 @@ static void
rxtx_port_config(struct rte_port *port)
{
uint16_t qid;
uint64_t offloads;
for (qid = 0; qid < nb_rxq; qid++) {
offloads = port->rx_conf[qid].offloads;
port->rx_conf[qid] = port->dev_info.default_rxconf;
if (offloads != 0)
port->rx_conf[qid].offloads = offloads;
/* Check if any Rx parameters have been passed */
if (rx_pthresh != RTE_PMD_PARAM_UNSET)
@ -2759,7 +2763,10 @@ rxtx_port_config(struct rte_port *port)
}
for (qid = 0; qid < nb_txq; qid++) {
offloads = port->tx_conf[qid].offloads;
port->tx_conf[qid] = port->dev_info.default_txconf;
if (offloads != 0)
port->tx_conf[qid].offloads = offloads;
/* Check if any Tx parameters have been passed */
if (tx_pthresh != RTE_PMD_PARAM_UNSET)
@ -3071,7 +3078,8 @@ signal_handler(int signum)
rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
rte_latencystats_uninit();
if (latencystats_enabled != 0)
rte_latencystats_uninit();
#endif
force_quit();
/* Set flag to indicate the force termination. */

View File

@ -14,7 +14,7 @@
#include "testpmd.h"
static inline void
print_ether_addr(const char *what, struct ether_addr *eth_addr)
print_ether_addr(const char *what, const struct ether_addr *eth_addr)
{
char buf[ETHER_ADDR_FMT_SIZE];
ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
@ -26,7 +26,8 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
uint16_t nb_pkts, int is_rx)
{
struct rte_mbuf *mb;
struct ether_hdr *eth_hdr;
const struct ether_hdr *eth_hdr;
struct ether_hdr _eth_hdr;
uint16_t eth_type;
uint64_t ol_flags;
uint16_t i, packet_type;
@ -45,7 +46,7 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
(unsigned int) nb_pkts);
for (i = 0; i < nb_pkts; i++) {
mb = pkts[i];
eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
eth_hdr = rte_pktmbuf_read(mb, 0, sizeof(_eth_hdr), &_eth_hdr);
eth_type = RTE_BE_TO_CPU_16(eth_hdr->ether_type);
ol_flags = mb->ol_flags;
packet_type = mb->packet_type;
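
Switching from rte_pktmbuf_mtod() to rte_pktmbuf_read() makes the dump safe for small or segmented first buffers: the header bytes are copied into a caller-supplied buffer when they are not contiguous in the first segment. A minimal sketch with a hypothetical helper:

#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_mbuf.h>

static uint16_t
peek_ether_type(const struct rte_mbuf *m)
{
    struct ether_hdr copy;
    const struct ether_hdr *eth;

    /* returns a pointer into the mbuf when the bytes are contiguous,
     * otherwise copies them into 'copy'; NULL means the packet is
     * shorter than the requested length */
    eth = rte_pktmbuf_read(m, 0, sizeof(copy), &copy);
    if (eth == NULL)
        return 0;
    return rte_be_to_cpu_16(eth->ether_type);
}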

View File

@ -9,7 +9,7 @@ arm_force_native_march = false
arm_force_default_march = (machine == 'default')
machine_args_generic = [
['default', ['-march=armv8-a+crc+crypto']],
['default', ['-march=armv8-a+crc']],
['native', ['-march=native']],
['0xd03', ['-mcpu=cortex-a53']],
['0xd04', ['-mcpu=cortex-a35']],
@ -90,7 +90,7 @@ impl_dpaa2 = ['NXP DPAA2', flags_dpaa2, machine_args_generic]
dpdk_conf.set('RTE_FORCE_INTRINSICS', 1)
if cc.sizeof('void *') != 8
if not dpdk_conf.get('RTE_ARCH_64')
dpdk_conf.set('RTE_CACHE_LINE_SIZE', 64)
dpdk_conf.set('RTE_ARCH_ARM', 1)
dpdk_conf.set('RTE_ARCH_ARMv7', 1)
@ -100,7 +100,6 @@ if cc.sizeof('void *') != 8
else
dpdk_conf.set('RTE_CACHE_LINE_SIZE', 128)
dpdk_conf.set('RTE_ARCH_ARM64', 1)
dpdk_conf.set('RTE_ARCH_64', 1)
machine = []
cmd_generic = ['generic', '', '', 'default', '']

View File

@ -50,6 +50,8 @@ toolchain = cc.get_id()
dpdk_conf.set_quoted('RTE_TOOLCHAIN', toolchain)
dpdk_conf.set('RTE_TOOLCHAIN_' + toolchain.to_upper(), 1)
dpdk_conf.set('RTE_ARCH_64', cc.sizeof('void *') == 8)
add_project_link_arguments('-Wl,--no-as-needed', language: 'c')
dpdk_extra_ldflags += '-Wl,--no-as-needed'
@ -97,7 +99,7 @@ warning_flags = [
'-Wcast-qual',
'-Wno-address-of-packed-member'
]
if cc.sizeof('void *') == 4
if not dpdk_conf.get('RTE_ARCH_64')
# for 32-bit, don't warn about casting a 32-bit pointer to 64-bit int - it's fine!!
warning_flags += '-Wno-pointer-to-int-cast'
endif
@ -117,6 +119,7 @@ dpdk_conf.set('RTE_MAX_VFIO_GROUPS', 64)
dpdk_conf.set('RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB', 64)
dpdk_conf.set('RTE_LIBRTE_DPAA2_USE_PHYS_IOVA', true)
compile_time_cpuflags = []
if host_machine.cpu_family().startswith('x86')
arch_subdir = 'x86'
@ -135,3 +138,11 @@ install_headers('rte_config.h', subdir: get_option('include_subdir_arch'))
# enable VFIO only if it is linux OS
dpdk_conf.set('RTE_EAL_VFIO', host_machine.system() == 'linux')
# specify -D_GNU_SOURCE unconditionally
add_project_arguments('-D_GNU_SOURCE', language: 'c')
# specify -D__BSD_VISIBLE for FreeBSD
if host_machine.system() == 'freebsd'
add_project_arguments('-D__BSD_VISIBLE', language: 'c')
endif

View File

@ -1,9 +1,11 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
if not dpdk_conf.get('RTE_ARCH_64')
error('Only 64-bit compiles are supported for this platform type')
endif
dpdk_conf.set('RTE_ARCH', 'ppc_64')
dpdk_conf.set('RTE_ARCH_PPC_64', 1)
dpdk_conf.set('RTE_ARCH_64', 1)
# overrides specific to ppc64
dpdk_conf.set('RTE_MAX_LCORE', 256)

View File

@ -29,10 +29,9 @@ foreach f:base_flags
endforeach
dpdk_conf.set('RTE_ARCH_X86', 1)
if (host_machine.cpu_family() == 'x86_64')
if dpdk_conf.get('RTE_ARCH_64')
dpdk_conf.set('RTE_ARCH_X86_64', 1)
dpdk_conf.set('RTE_ARCH', 'x86_64')
dpdk_conf.set('RTE_ARCH_64', 1)
else
dpdk_conf.set('RTE_ARCH_I686', 1)
dpdk_conf.set('RTE_ARCH', 'i686')

View File

@ -68,11 +68,13 @@ common_sources()
linux_sources()
{
find_sources "lib/librte_eal/linuxapp" '*.[chS]'
find_sources "kernel/linux" '*.[chS]'
}
bsd_sources()
{
find_sources "lib/librte_eal/bsdapp" '*.[chS]'
find_sources "kernel/freebsd" '*.[chS]'
}
arm_common()

View File

@ -26,7 +26,7 @@ if doxygen.found()
command: [generate_examples, '@INPUT@', '@OUTPUT@'],
install: get_option('enable_docs'),
install_dir: htmldir,
build_by_default: false)
build_by_default: get_option('enable_docs'))
cdata = configuration_data()
cdata.set('VERSION', meson.project_version())
@ -48,7 +48,7 @@ if doxygen.found()
command: [generate_doxygen, '@INPUT@', '@OUTPUT@', generate_css],
install: get_option('enable_docs'),
install_dir: htmldir,
build_by_default: false)
build_by_default: get_option('enable_docs'))
doc_targets += doxy_build
doc_target_names += 'Doxygen_API'

View File

@ -64,9 +64,13 @@ latex_documents = [
custom_latex_preamble = r"""
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{textalpha}
\usepackage{helvet}
\renewcommand{\familydefault}{\sfdefault}
\RecustomVerbatimEnvironment{Verbatim}{Verbatim}{xleftmargin=5mm}
\usepackage{etoolbox}
\robustify\(
\robustify\)
"""
# Configuration for the latex/pdf docs.

View File

@ -200,10 +200,10 @@ The main required packages can be installed as follows:
.. code-block:: console
# Ubuntu/Debian.
sudo apt-get -y install texlive-latex-extra
sudo apt-get -y install texlive-latex-extra texlive-lang-greek
# Red Hat/Fedora, selective install.
sudo dnf -y install texlive-collection-latexextra
sudo dnf -y install texlive-collection-latexextra texlive-greek-fontenc
`Latexmk <http://personal.psu.edu/jcc8/software/latexmk-jcc/>`_ is a perl script
for running LaTeX for resolving cross references,

View File

@ -637,12 +637,3 @@ patch accepted. The general cycle for patch review and acceptance is:
than rework of the original.
* Trivial patches may be merged sooner than described above at the tree committer's
discretion.
DPDK Maintainers
----------------
The following are the DPDK maintainers as listed in the ``MAINTAINERS`` file
in the DPDK root directory.
.. literalinclude:: ../../../MAINTAINERS
:lines: 3-

View File

@ -59,7 +59,6 @@ User can use app/test application to check how to use this PMD and to verify
crypto processing.
Test name is cryptodev_sw_armv8_autotest.
For performance test cryptodev_sw_armv8_perftest can be used.
Limitations
-----------

View File

@ -87,7 +87,6 @@ User can use app/test application to check how to use this pmd and to verify
crypto processing.
Test name is cryptodev_openssl_autotest.
For performance test cryptodev_openssl_perftest can be used.
For asymmetric crypto operations testing, run cryptodev_openssl_asym_autotest.
To verify real traffic l2fwd-crypto example can be used with this command:

View File

@ -45,7 +45,7 @@ Code
pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
pattern[0].spec = &eth;
/* set the vlan to pas all packets */
/* set the vlan to pass all packets */
pattern[1] = RTE_FLOW_ITEM_TYPE_VLAN;
pattern[1].spec = &vlan;
@ -141,7 +141,7 @@ Code
pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
pattern[0].spec = &eth;
/* set the vlan to pas all packets */
/* set the vlan to pass all packets */
pattern[1] = RTE_FLOW_ITEM_TYPE_VLAN;
pattern[1].spec = &vlan;

View File

@ -71,6 +71,7 @@ Copy the NUMA header files and lib to the cross compiler's directories:
cp <numa_install_dir>/include/numa*.h <cross_install_dir>/gcc-arm-8.2-2019.01-x86_64-aarch64-linux-gnu/bin/../aarch64-linux-gnu/libc/usr/include/
cp <numa_install_dir>/lib/libnuma.a <cross_install_dir>/gcc-arm-8.2-2019.01-x86_64-aarch64-linux-gnu/lib/gcc/aarch64-linux-gnu/8.2/
cp <numa_install_dir>/lib/libnuma.so <cross_install_dir>/gcc-arm-8.2-2019.01-x86_64-aarch64-linux-gnu/lib/gcc/aarch64-linux-gnu/8.2/
.. _configure_and_cross_compile_dpdk_build:

View File

@ -103,7 +103,7 @@ Such model has the following benefits:
More about the bifurcated driver can be found in
`Mellanox Bifurcated DPDK PMD
<https://dpdksummit.com/Archive/pdf/2016Userspace/Day02-Session04-RonyEfraim-Userspace2016.pdf>`__.
<https://www.dpdk.org/wp-content/uploads/sites/35/2016/10/Day02-Session04-RonyEfraim-Userspace2016.pdf>`__.
.. _linux_gsg_binding_kernel:

View File

@ -11,7 +11,7 @@ if sphinx.found()
command: [sphinx, '-b', 'html',
'-d', meson.current_build_dir() + '/.doctrees',
'@INPUT@', meson.current_build_dir() + '/guides'],
build_by_default: false,
build_by_default: get_option('enable_docs'),
install: get_option('enable_docs'),
install_dir: htmldir)

View File

@ -34,7 +34,7 @@ BNX2X Poll Mode Driver
The BNX2X poll mode driver library (**librte_pmd_bnx2x**) implements support
for **QLogic 578xx** 10/20 Gbps family of adapters as well as their virtual
functions (VF) in SR-IOV context. It is supported on several standard Linux
distros like Red Hat 7.x and SLES12 OS. It is compile-tested under FreeBSD OS.
distros like RHEL and SLES. It is compile-tested under FreeBSD OS.
More information can be found at `QLogic Corporation's Official Website
<http://www.qlogic.com>`_.
@ -65,14 +65,26 @@ The features not yet supported include:
Co-existence considerations
---------------------------
- BCM578xx being a CNA can have both NIC and Storage personalities.
However, coexistence with storage protocol drivers (cnic, bnx2fc and
bnx2fi) is not supported on the same adapter. So storage personality
has to be disabled on that adapter when used in DPDK applications.
- QLogic 578xx CNAs support Ethernet, iSCSI and FCoE functionalities.
These functionalities are supported using QLogic Linux kernel
drivers bnx2x, cnic, bnx2i and bnx2fc. DPDK is supported on these
adapters using bnx2x PMD.
- For SR-IOV case, bnx2x PMD will be used to bind to SR-IOV VF device and
Linux native kernel driver (bnx2x) will be attached to SR-IOV PF.
- When SR-IOV is not enabled on the adapter,
QLogic Linux kernel drivers (bnx2x, cnic, bnx2i and bnx2fc) and the bnx2x
PMD cannot be attached to different PFs on a given QLogic 578xx
adapter.
A given adapter needs to be used completely by either DPDK or Linux drivers.
Before binding DPDK driver to one or more PFs on the adapter,
please make sure to unbind Linux drivers from all PFs of the adapter.
If there are multiple adapters on the system, one or more adapters
can be used by DPDK driver completely and other adapters can be used
by Linux drivers completely.
- When SR-IOV is enabled on the adapter,
Linux kernel drivers (bnx2x, cnic, bnx2i and bnx2fc) can be bound
to the PFs of a given adapter, and either the bnx2x PMD or the Linux
bnx2x driver can be bound to the VFs of the adapter.
Supported QLogic NICs
---------------------

View File

@ -260,12 +260,6 @@ Generic Flow API is supported. The baseline support is:
- Selectors: 'is', 'spec' and 'mask'. 'last' is not supported
- In total, up to 64 bytes of mask is allowed across all headers
- **1400 and later series VICS with advanced filters enabled**
All the above plus:
- Action: count
The VIC performs packet matching after applying VLAN strip. If VLAN
stripping is enabled, EtherType in the ETH item corresponds to the
stripped VLAN header's EtherType. Stripping does not affect the VLAN

View File

@ -21,6 +21,9 @@ Multicast MAC filter = Y
RSS hash = Y
SR-IOV = Y
VLAN filter = Y
Flow control = Y
Flow API = Y
CRC offload = Y
L3 checksum offload = Y
L4 checksum offload = Y
Inner L3 checksum = Y

View File

@ -25,6 +25,7 @@ Inner RSS = Y
SR-IOV = Y
VLAN filter = Y
Flow director = Y
Flow control = Y
Flow API = Y
CRC offload = Y
VLAN offload = Y

View File

@ -82,6 +82,31 @@ To guarantee the constraint, capabilities in dev_conf.rxmode.offloads will be ch
fdir_conf->mode will also be checked.
VF Runtime Options
^^^^^^^^^^^^^^^^^^
The following ``devargs`` options can be enabled at runtime. They must
be passed as part of EAL arguments. For example,
.. code-block:: console
testpmd -w af:10.0,pflink_fullchk=1 -- -i
- ``pflink_fullchk`` (default **0**)
When calling ``rte_eth_link_get_nowait()`` to get VF link status,
this option controls how the VF synchronizes its status with the
PF. If set, the VF checks not only the PF's physical link status
by reading the related register, but also the mailbox status; this
"full checking" triggers a PF mailbox interrupt. If unset, the
application can get the VF's link status quickly by reading only the
PF's link status register, which avoids generating mailbox interrupts
in the whole system.
``rte_eth_link_get()`` will still use the mailbox method regardless
of the pflink_fullchk setting.
RX Burst Size
^^^^^^^^^^^^^

View File

@ -56,6 +56,7 @@ Features
- Several RSS hash keys, one for each flow type.
- Default RSS operation with no hash key specification.
- Configurable RETA table.
- Link flow control (pause frame).
- Support for multiple MAC addresses.
- VLAN filtering.
- RX VLAN stripping.
@ -177,7 +178,7 @@ Statistics
MLX5 supports various methods of reporting statistics:
Port statistics can be queried using ``rte_eth_stats_get()``. The port statistics are through SW only and counts the number of packets received or sent successfully by the PMD.
Port statistics can be queried using ``rte_eth_stats_get()``. The received and sent statistics are counted in SW only and count the number of packets received or sent successfully by the PMD. The imissed counter is the number of packets that could not be delivered to SW because a queue was full. Packets not received due to congestion on the bus or on the NIC can be queried via the rx_discards_phy xstats counter.
Extended statistics can be queried using ``rte_eth_xstats_get()``. The extended statistics expose a wider set of counters counted by the device. The extended port statistics counts the number of packets received or sent successfully by the port. As Mellanox NICs are using the :ref:`Bifurcated Linux Driver <linux_gsg_linux_drivers>` those counters counts also packet received or sent by the Linux kernel. The counters with ``_phy`` suffix counts the total events on the physical port, therefore not valid for VF.

View File

@ -7,7 +7,7 @@ QEDE Poll Mode Driver
The QEDE poll mode driver library (**librte_pmd_qede**) implements support
for **QLogic FastLinQ QL4xxxx 10G/25G/40G/50G/100G Intelligent Ethernet Adapters (IEA) and Converged Network Adapters (CNA)** family of adapters as well as SR-IOV virtual functions (VF). It is supported on
several standard Linux distros like RHEL7.x, SLES12.x and Ubuntu.
several standard Linux distros like RHEL, SLES, Ubuntu etc.
It is compile-tested under FreeBSD OS.
More information can be found at `QLogic Corporation's Website
@ -47,8 +47,27 @@ Non-supported Features
Co-existence considerations
---------------------------
- QLogic FastLinQ QL4xxxx CNAs can have both NIC and Storage personalities. However, coexistence with storage protocol drivers (qedi and qedf) is not supported on the same adapter. So storage personality has to be disabled on that adapter when used in DPDK applications.
- For SR-IOV case, qede PMD will be used to bind to SR-IOV VF device and Linux native kernel driver (qede) will be attached to SR-IOV PF.
- QLogic FastLinQ QL4xxxx CNAs support Ethernet, RDMA, iSCSI and FCoE
functionalities. These functionalities are supported using
QLogic Linux kernel drivers qed, qede, qedr, qedi and qedf. DPDK is
supported on these adapters using qede PMD.
- When SR-IOV is not enabled on the adapter,
QLogic Linux kernel drivers (qed, qede, qedr, qedi and qedf) and the qede
PMD cannot be attached to different PFs on a given QLogic FastLinQ
QL4xxx adapter.
A given adapter needs to be used completely by either DPDK or Linux drivers.
Before binding the DPDK driver to one or more PFs on the adapter,
please make sure to unbind Linux drivers from all PFs of the adapter.
If there are multiple adapters on the system, one or more adapters
can be used by DPDK driver completely and other adapters can be used
by Linux drivers completely.
- When SR-IOV is enabled on the adapter,
Linux kernel drivers (qed, qede, qedr, qedi and qedf) can be bound
to the PFs of a given adapter, and either the qede PMD or the Linux
drivers (qed and qede) can be bound to the VFs of the adapter.
Supported QLogic Adapters
-------------------------

View File

@ -44,7 +44,7 @@ From the command line using the --vdev EAL option
--vdev 'baseband_turbo_sw,max_nb_queues=8,socket_id=0'
Our using the rte_vdev_init API within the application code.
Or using the rte_vdev_init API within the application code.
.. code-block:: c
@ -284,7 +284,7 @@ baseband operations is usually completed during the enqueue call to the bbdev
device. The dequeue burst API will retrieve any processed operations available
from the queue on the bbdev device, from physical devices this is usually
directly from the device's processed queue, and for virtual device's from a
``rte_ring`` where processed operations are place after being processed on the
``rte_ring`` where processed operations are placed after being processed on the
enqueue call.

View File

@ -201,7 +201,7 @@ for stateful processing of ops.
Operation Status
~~~~~~~~~~~~~~~~
Each operation carries a status information updated by PMD after it is processed.
following are currently supported status:
Following are currently supported:
- RTE_COMP_OP_STATUS_SUCCESS,
Operation is successfully completed
@ -227,14 +227,24 @@ following are currently supported status:
is not an error case. Output data up to op.produced can be used and
next op in the stream should continue on from op.consumed+1.
Operation status after enqueue / dequeue
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Some of the above values may arise in an op after an
``rte_compressdev_enqueue_burst()``. If the number of ops enqueued is less than the
number requested, the app should check the status of op nb_enqd+1. If the status is
RTE_COMP_OP_STATUS_NOT_PROCESSED, it likely indicates a full queue on a hardware
device, and a retry after dequeuing some ops is likely to succeed. If the op holds any
other status, e.g. RTE_COMP_OP_STATUS_INVALID_ARGS, a retry with the same op is
unlikely to be successful.
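
A minimal sketch of that check, with dev_id, qp_id and the ops array assumed to be prepared elsewhere:

#include <rte_comp.h>
#include <rte_compressdev.h>

static void
enqueue_with_status_check(uint8_t dev_id, uint16_t qp_id,
                          struct rte_comp_op **ops, uint16_t nb_ops)
{
    uint16_t nb_enq = rte_compressdev_enqueue_burst(dev_id, qp_id,
                                                    ops, nb_ops);

    if (nb_enq < nb_ops) {
        if (ops[nb_enq]->status == RTE_COMP_OP_STATUS_NOT_PROCESSED) {
            /* likely a full hardware queue: dequeue some processed
             * ops, then retry from ops[nb_enq] */
        } else {
            /* e.g. RTE_COMP_OP_STATUS_INVALID_ARGS: retrying the
             * same op will not help; fix or drop it */
        }
    }
}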
Produced, Consumed And Operation Status
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- If status is RTE_COMP_OP_STATUS_SUCCESS,
consumed = amount of data read from input buffer, and
produced = amount of data written in destination buffer
- If status is RTE_COMP_OP_STATUS_FAILURE,
consumed = produced = 0 or undefined
- If status is RTE_COMP_OP_STATUS_ERROR,
consumed = produced = undefined
- If status is RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED,
consumed = 0 and
produced = usually 0, but in decompression cases a PMD may return > 0
@ -568,7 +578,7 @@ operations is usually completed during the enqueue call to the compression
device. The dequeue burst API will retrieve any processed operations available
from the queue pair on the compression device, from physical devices this is usually
directly from the devices processed queue, and for virtual device's from a
``rte_ring`` where processed operations are place after being processed on the
``rte_ring`` where processed operations are placed after being processed on the
enqueue call.
A burst in DPDK compression can be a combination of stateless and stateful operations with a condition

View File

@ -52,7 +52,7 @@ From the command line using the --vdev EAL option
Example: ``--vdev 'crypto_aesni_mb0' --vdev 'crypto_aesni_mb1'``
Our using the rte_vdev_init API within the application code.
Or using the rte_vdev_init API within the application code.
.. code-block:: c
@ -294,7 +294,7 @@ Crypto operations is usually completed during the enqueue call to the Crypto
device. The dequeue burst API will retrieve any processed operations available
from the queue pair on the Crypto device, from physical devices this is usually
directly from the devices processed queue, and for virtual device's from a
``rte_ring`` where processed operations are place after being processed on the
``rte_ring`` where processed operations are placed after being processed on the
enqueue call.

View File

@ -659,7 +659,7 @@
sodipodi:role="line"
id="tspan11522"
x="69.303398"
y="858.42419">rte_eal_remote_lauch(app)</tspan></text>
y="858.42419">rte_eal_remote_launch(app)</tspan></text>
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 176.30173,890.61234 0,15.67127"
@ -692,7 +692,7 @@
sodipodi:role="line"
id="tspan11522-1"
x="87.641663"
y="644.07324">rte_eal_remote_lauch(</tspan><tspan
y="644.07324">rte_eal_remote_launch(</tspan><tspan
sodipodi:role="line"
x="87.641663"
y="664.07324"


View File

@ -266,12 +266,16 @@ Use Case: Ingress
-----------------
On the DPDK RX side, the mbuf is allocated by the PMD in the RX thread context.
This thread will enqueue the mbuf in the rx_q FIFO.
This thread will enqueue the mbuf in the rx_q FIFO,
converting the next pointers of an mbuf chain to physical addresses.
The KNI thread will poll all KNI active devices for the rx_q.
If an mbuf is dequeued, it will be converted to a sk_buff and sent to the net stack via netif_rx().
The dequeued mbuf must be freed, so the same pointer is sent back in the free_q FIFO.
The dequeued mbuf must be freed, so the same pointer is sent back in the free_q FIFO,
with any next pointers converted back to virtual addresses before the mbuf is put in the free_q FIFO.
The RX thread, in the same main loop, polls this FIFO and frees the mbuf after dequeuing it.
The next-pointer address conversion prevents chained mbufs that span
different hugepage segments from crashing the kernel.
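
The conversion is possible because an mbuf header and its data buffer live in the same physically contiguous memory, so a single offset translates both. A simplified sketch of the idea (the KNI library uses an equivalent internal helper):

#include <rte_mbuf.h>

static void *
mbuf_va2pa(struct rte_mbuf *m)
{
    /* the mbuf's va/pa offset equals its buffer's va/iova offset */
    return (void *)((unsigned long)m -
        ((unsigned long)m->buf_addr - (unsigned long)m->buf_iova));
}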
Use Case: Egress
----------------

View File

For applications that use ``rte_pktmbuf_pool_create()``, there is a config setting
(``RTE_MBUF_DEFAULT_MEMPOOL_OPS``) that allows the application to make use of
an alternative mempool handler.
.. note::
When running a DPDK application with shared libraries, mempool handler
shared objects specified with the '-d' EAL command-line parameter are
dynamically loaded. When running a multi-process application with shared
libraries, the -d arguments for mempool handlers *must be specified in the
same order for all processes* to ensure correct operation.
Use Cases
---------

View File

@ -32,7 +32,7 @@ Key factors guiding design of the Rawdevice library:
1. Following are some generic operations which can be treated as applicable
to a large subset of device types. None of the operations are mandatory to
be implemented by a driver. Application should also be design for proper
be implemented by a driver. Application should also be designed for proper
handling for unsupported APIs.
* Device Start/Stop - In some cases, 'reset' might also be required which
@ -100,7 +100,7 @@ From the command line using the --vdev EAL option
--vdev 'rawdev_dev1'
Our using the rte_vdev_init API within the application code.
Or using the rte_vdev_init API within the application code.
.. code-block:: c

View File

@ -240,29 +240,29 @@ Example of an item specification matching an Ethernet header:
.. table:: Ethernet item
+----------+----------+--------------------+
| Field | Subfield | Value |
+==========+==========+====================+
| ``spec`` | ``src`` | ``00:01:02:03:04`` |
| +----------+--------------------+
| | ``dst`` | ``00:2a:66:00:01`` |
| +----------+--------------------+
| | ``type`` | ``0x22aa`` |
+----------+----------+--------------------+
| ``last`` | unspecified |
+----------+----------+--------------------+
| ``mask`` | ``src`` | ``00:ff:ff:ff:00`` |
| +----------+--------------------+
| | ``dst`` | ``00:00:00:00:ff`` |
| +----------+--------------------+
| | ``type`` | ``0x0000`` |
+----------+----------+--------------------+
+----------+----------+-----------------------+
| Field | Subfield | Value |
+==========+==========+=======================+
| ``spec`` | ``src`` | ``00:00:01:02:03:04`` |
| +----------+-----------------------+
| | ``dst`` | ``00:00:2a:66:00:01`` |
| +----------+-----------------------+
| | ``type`` | ``0x22aa`` |
+----------+----------+-----------------------+
| ``last`` | unspecified |
+----------+----------+-----------------------+
| ``mask`` | ``src`` | ``00:00:ff:ff:ff:00`` |
| +----------+-----------------------+
| | ``dst`` | ``00:00:00:00:00:ff`` |
| +----------+-----------------------+
| | ``type`` | ``0x0000`` |
+----------+----------+-----------------------+
Non-masked bits stand for any value (shown as ``?`` below); Ethernet headers
with the following properties are thus matched:
- ``src``: ``??:01:02:03:??``
- ``dst``: ``??:??:??:??:01``
- ``src``: ``??:??:01:02:03:??``
- ``dst``: ``??:??:??:??:??:01``
- ``type``: ``0x????``
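
For illustration, the corrected table corresponds to an item built along these lines (a sketch, not code from the guide):

#include <rte_byteorder.h>
#include <rte_flow.h>

struct rte_flow_item_eth eth_spec = {
    .dst.addr_bytes = { 0x00, 0x00, 0x2a, 0x66, 0x00, 0x01 },
    .src.addr_bytes = { 0x00, 0x00, 0x01, 0x02, 0x03, 0x04 },
    .type = RTE_BE16(0x22aa),
};
struct rte_flow_item_eth eth_mask = {
    .dst.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0xff },
    .src.addr_bytes = { 0x00, 0x00, 0xff, 0xff, 0xff, 0x00 },
    .type = RTE_BE16(0x0000), /* non-masked bits match any value */
};
struct rte_flow_item item = {
    .type = RTE_FLOW_ITEM_TYPE_ETH,
    .spec = &eth_spec,
    .mask = &eth_mask,
};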
Matching pattern

View File

@ -1688,3 +1688,578 @@ Fixes skipped and status unresolved
* 281bd1aa3 net/iavf: fix stats reset (18.02)
* fe252fb69 test/rwlock: benchmark on all available cores (1.2.3r0)
* 6fef1ae4f test/rwlock: amortize the cost of getting time (1.2.3r0)
18.11.3 Release Notes
---------------------
18.11.3 Fixes
~~~~~~~~~~~~~
* acl: fix build with some arm64 compiler
* acl: fix undefined behavior of bit shifts
* app/crypto-perf: check lcore job failure
* app/crypto-perf: fix CSV format
* app/crypto-perf: fix display once detection
* app/eventdev: fix order test port creation
* app/testpmd: fix eth packet dump for small buffers
* app/testpmd: fix latency stats deinit on signal
* app/testpmd: fix MPLS IPv4 encapsulation fields
* app/testpmd: fix offloads config
* app/testpmd: fix parsing RSS queue rule
* app/testpmd: fix queue offload configuration
* app/testpmd: fix show port info routine
* app/testpmd: rename ambiguous VF config variable
* bpf: fix check array size
* bpf: fix pseudo calls for program loaded from ELF
* bpf: fix validate for function return value
* build: add libatomic dependency for 32-bit clang
* build: enable BSD features visibility for FreeBSD
* build: enable large file support on 32-bit
* build: remove unnecessary large file support defines
* build: set RTE_ARCH_64 based on pointer size
* bus/fslmc: fix build with 0 headroom
* bus/pci: fix TOCTOU for sysfs access
* bus/pci: remove unused x86 Linux constant
* bus/vmbus: skip non-network devices
* compress/isal: fix use after free
* compress/zlib: fix error handling
* config: disable armv8 crypto extension
* cryptodev: fix typo in comment
* crypto/dpaa2_sec: fix handling of session init failure
* crypto/mvsam: fix typo in comment
* crypto/openssl: fix free of asymmetric crypto keys
* crypto/openssl: fix usage of non constant time memcmp
* crypto/openssl: remove useless check before freeing
* crypto/qat: set message field to zero in sym SGL case
* crypto/virtio: check PCI config read
* devtools: fix building kernel component tags
* distributor: fix check of workers number
* distributor: fix livelock on flush
* doc: add a note for multi-process in mempool guide
* doc: add co-existence consideration for bnx2x
* doc: add co-existence consideration for qede
* doc: clarify data plane error handling in compressdev
* doc: cleanup test removal in armv8 and openssl guides
* doc: fix a grammar mistake in rawdev guide
* doc: fix build with latest meson
* doc: fix ethernet addresses in flow API guide
* doc: fix grammar in prog guides
* doc: fix link about bifurcated model in Linux guide
* doc: fix Linux guide for arm64 cross-compilation
* doc: fix PDF build
* doc: fix PDF with greek letter
* doc: fix triplicated typo in prog guides
* doc: fix typo in EAL guide
* doc: fix typos in flow API guide
* doc: remove useless Rx configuration in l2fwd guide
* doc: robustify PDF build
* doc: update features supported by mlx
* drivers: fix typo in NXP comments
* drivers/net: fix double free on init failure
* eal: correct log for alarm error
* eal: fix control thread affinity with --lcores
* eal: fix positive error codes from probe/remove
* eal: fix typo in comments
* eal/freebsd: fix config creation
* eal/freebsd: fix init completion
* eal: hide internal function
* eal: hide internal hotplug function
* eal: increase maximum different hugepage sizes on Arm
* eal/linux: fix return after alarm registration failure
* ethdev: avoid error on PCI unplug of closed port
* ethdev: avoid getting uninitialized info for bad port
* ethdev: fix endian annotation for SPI item
* ethdev: fix Tx prepare documentation to use positive errno
* eventdev: fix doxygen comment
* eventdev: fix error sign
* event/dpaa2: fix timeout ticks
* event/opdl: fix error sign
* event/sw: fix error sign
* examples/bpf: fix build
* examples: fix make clean when using pkg-config
* examples: fix pkg-config detection with older make
* examples: fix use of ethdev internal device array
* examples/ip_frag: fix stale content of ethdev info
* examples/ip_frag: fix unknown ethernet type
* examples/ip_frag: fix use of ethdev internal device array
* examples/ip_fragmentation: fix Tx queues init
* examples/ip_frag: remove Tx fast free offload flag
* examples/ipsec-secgw: fix error sign
* examples/ipsec-secgw: fix inline modes
* examples/ipsec-secgw: fix use of ethdev internal struct
* examples/l3fwd: fix unaligned memory access on x86
* examples/l3fwd-vf: remove unused Rx/Tx configuration
* examples/multi_process: do not dereference global config struct
* examples/multi_process: fix FreeBSD build
* examples/performance-thread: init timer subsystem
* examples/power: fix FreeBSD meson lib dependency
* examples/power: fix strcpy buffer overrun
* examples/ptpclient: fix delay request message
* examples/qos_sched: do not dereference global config struct
* examples/tep_term: remove duplicate definitions
* examples/vdpa: remove trace of legacy linuxapp
* examples/vhost_crypto: remove unused function
* fix off-by-one errors in snprintf
* flow_classify: fix out-of-bounds access
* hash: use ordered loads only if signature matches
* igb_uio: fix build on Linux 5.3 for fall through
* ip_frag: fix IPv6 fragment size calculation
* kernel/freebsd: fix module build on latest head
* kernel/linux: fix modules install path
* kni: abort when IOVA is not PA
* kni: fix build on RHEL8
* kni: fix copy_from_user failure handling
* kni: fix kernel 5.4 build - merged pci_aspm.h
* kni: fix kernel 5.4 build - num_online_cpus
* kni: fix kernel 5.4 build - skb_frag_t to bio_vec
* kni: fix kernel crash with multi-segments
* kni: fix segmented mbuf data overflow
* kni: fix style
* mem: ease init in a docker container
* mem: fix typo in API description
* mem: mark unused function in 32-bit builds
* mem: remove incorrect experimental tag on static symbol
* mk: fix custom kernel directory name
* net: adjust L2 length on soft VLAN insertion
* net/af_packet: remove redundant declaration
* net/ark: fix queue packet replacement
* net/ark: remove unnecessary cast
* net/atlantic: fix Tx prepare to set positive rte_errno
* net/atlantic: remove unnecessary cast
* net/avf: fix address of first segment
* net/avf: fix driver crash when enable TSO
* net/avf: fix endless loop
* net/avf: fix Rx bytes stats
* net/axgbe: remove unnecessary cast
* net/bnx2x: fix fastpath SB allocation for SRIOV
* net/bnx2x: fix interrupt flood
* net/bnx2x: fix invalid free on unplug
* net/bnx2x: fix link events polling for SRIOV
* net/bnx2x: fix link state
* net/bnx2x: fix memory leak
* net/bnx2x: fix packet drop
* net/bnx2x: fix reading VF id
* net/bnx2x: fix supported max Rx/Tx descriptor count
* net/bnx2x: fix warnings from invalid assert
* net/bnxt: check for error conditions in Tx path
* net/bnxt: check for null completion ring doorbell
* net/bnxt: check invalid VNIC id for firmware
* net/bnxt: check invalid VNIC in cleanup path
* net/bnxt: fix adding MAC address
* net/bnxt: fix checking result of HWRM command
* net/bnxt: fix check of address mapping
* net/bnxt: fix compiler warning
* net/bnxt: fix crash on probe failure
* net/bnxt: fix device init error path
* net/bnxt: fix enabling/disabling interrupts
* net/bnxt: fix endianness in ring macros
* net/bnxt: fix error handling in port start
* net/bnxt: fix extended port counter statistics
* net/bnxt: fix getting statistics
* net/bnxt: fix icc build
* net/bnxt: fix interrupt rearm logic
* net/bnxt: fix interrupt vector initialization
* net/bnxt: fix L4 checksum error indication in Rx
* net/bnxt: fix lock release on getting NVM info
* net/bnxt: fix return values to standard error codes
* net/bnxt: fix ring type macro name
* net/bnxt: fix RSS RETA indirection table ops
* net/bnxt: fix Rx interrupt vector
* net/bnxt: fix RxQ count if ntuple filtering is disabled
* net/bnxt: fix setting primary MAC address
* net/bnxt: fix TSO
* net/bnxt: fix Tx batching
* net/bnxt: fix Tx hang after port stop/start
* net/bnxt: fix unconditional wait in link update
* net/bnxt: fix variable width in endian conversion
* net/bnxt: fix xstats
* net/bnxt: optimize Tx batching
* net/bnxt: reduce verbosity of a message
* net/bnxt: remove unnecessary cast
* net/bnxt: remove unnecessary interrupt disable
* net/bnxt: reset filters before registering interrupts
* net/bnxt: retry IRQ callback deregistration
* net/bnxt: save the number of EM flow count
* net/bonding: remove unnecessary cast
* net/cxgbe: do not dereference global config struct
* net/cxgbe: remove unnecessary cast
* net: define IPv4 IHL and VHL
* net/dpaa2: fix multi-segment Tx
* net/dpaa: check multi-segment external buffers
* net/dpaa: fix build with 0 headroom
* net/e1000: fix buffer overrun while i219 processing DMA
* net/e1000: fix Tx prepare to set positive rte_errno
* net/e1000: remove unnecessary cast
* net/ena: fix admin CQ polling for 32-bit
* net/ena: fix assigning NUMA node to IO queue
* net/ena: fix L4 checksum Tx offload
* net/ena: fix Rx checksum errors statistics
* net/ena: remove unnecessary cast
* net/enic: fix Tx prepare to set positive rte_errno
* net/enic: remove flow count action support
* net/enic: remove flow locks
* net/enic: remove unnecessary cast
* net/failsafe: fix reported device info
* net: fix definition of IPv6 traffic class mask
* net: fix encapsulation markers for inner L3 offset
* net: fix how L4 checksum choice is tested
* net/fm10k: advertise supported RSS hash function
* net/fm10k: fix address of first segment
* net/fm10k: fix descriptor filling in vector Tx
* net/fm10k: fix stats crash in multi-process
* net/fm10k: fix Tx prepare to set positive rte_errno
* net/i40e: fix address of first segment
* net/i40e: fix crash when TxQ/RxQ set to 0 in VF
* net/i40e: fix dropped packets statistics name
* net/i40e: fix ethernet flow rule
* net/i40e: fix flow director rule destroy
* net/i40e: fix MAC removal check
* net/i40e: fix RSS hash update for X722 VF
* net/i40e: fix SFP X722 with FW4.16
* net/i40e: fix Tx prepare to set positive rte_errno
* net/i40e: fix Tx threshold setup
* net/i40e: fix unexpected skip FDIR setup
* net/i40e: remove empty queue stats mapping set devops
* net/i40e: remove unnecessary cast
* net/iavf: fix Tx prepare to set positive rte_errno
* net/ixgbe/base: fix product version check
* net/ixgbe: fix address of first segment
* net/ixgbe: fix IP type for crypto session
* net/ixgbe: fix RETA size for VF
* net/ixgbe: fix Tx prepare to set positive rte_errno
* net/ixgbe: fix Tx threshold setup
* net/ixgbe: fix unexpected link handler
* net/ixgbe: remove unnecessary cast
* net/ixgbevf: add full link status check option
* net/mlx4: fix crash on info query in secondary process
* net/mlx5: check memory allocation in flow creation
* net/mlx5: fix 32-bit build
* net/mlx5: fix condition for link update fallback
* net/mlx5: fix crash for empty raw encap data
* net/mlx5: fix crash on null operation
* net/mlx5: fix description of return value
* net/mlx5: fix device arguments error detection
* net/mlx5: fix link speed info when link is down
* net/mlx5: fix memory free on queue create error
* net/mlx5: fix missing validation of null pointer
* net/mlx5: fix order of items in NEON scatter
* net/mlx5: fix typos in comments
* net/mlx5: fix validation of VLAN PCP item
* net/mlx5: fix VLAN inner type matching on DR/DV
* net/mlx5: remove redundant item from union
* net/mlx5: remove unnecessary cast
* net/mlx5: report imissed statistics
* net/mvneta: fix ierror statistics
* net/netvsc: fix definition of offload values
* net/netvsc: fix RSS offload settings
* net/netvsc: fix xstats for VF device
* net/netvsc: fix xstats id
* net/netvsc: initialize VF spinlock
* net/nfp: disable for 32-bit meson builds
* net/null: remove redundant declaration
* net/pcap: fix concurrent multiseg Tx
* net/pcap: fix possible mbuf double freeing
* net/pcap: fix Rx with small buffers
* net/pcap: fix Tx return count in error conditions
* net/pcap: remove redundant declaration
* net/qede: fix Tx prepare to set positive rte_errno
* net/qede: fix warnings from invalid assert
* net/ring: remove redundant declaration
* net/sfc/base: enable chained multicast on all EF10 cards
* net/sfc/base: fix shift by more bits than field width
* net/sfc/base: fix signed/unsigned mismatch
* net/sfc: ensure that device is closed on removal
* net/sfc: fix align to power of 2 when align has smaller type
* net/sfc: fix power of 2 round up when align has smaller type
* net/sfc: unify power of 2 alignment check macro
* net/tap: remove redundant declarations
* net/thunderx: fix crash on detach
* net/vhost: remove redundant declaration
* net/virtio: add Tx preparation
* net/virtio: fix build
* net/virtio: fix build with 0 headroom
* net/virtio: fix in-order Rx with segmented packet
* net/virtio: fix memory leak in in-order Rx
* net/virtio: fix queue memory leak on error
* net/virtio: move VLAN tag insertion to Tx prepare
* net/virtio: remove useless check on mempool
* net/virtio: unmap device on initialization error
* net/virtio: unmap port IO for legacy device
* net/virtio_user: remove redundant declaration
* net/vmxnet3: fix Tx prepare to set positive rte_errno
* net/vmxnet3: fix uninitialized variable
* raw/dpaa2_cmdif: remove redundant declaration
* raw/ifpga/base: fix physical address info
* raw/ifpga/base: fix use of untrusted scalar value
* raw/skeleton: fix test of attribute set/get
* raw/skeleton: remove redundant declaration
* security: remove duplicated symbols from map file
* table: fix crash in LPM IPv6
* telemetry: add missing header include
* telemetry: fix build
* telemetry: fix build warnings seen when using gcc 9
* telemetry: fix memory leak
* test: add rawdev autotest to meson
* test/distributor: fix flush with worker shutdown
* test/eal: fix --socket-mem option
* test: enable installing app with meson
* test/eventdev: fix producer core validity checks
* test: fix autotest crash
* test/flow_classify: fix undefined behavior
* test/hash: fix data reset on new run
* test/hash: fix off-by-one check on core count
* test/hash: rectify slave id to point to valid cores
* test/hash: use existing lcore API
* test: remove link to ixgbe/i40e with meson
* test/rwlock: amortize the cost of getting time
* test/rwlock: benchmark on all available cores
* usertools: fix input handling in telemetry script
* usertools: fix refresh binding infos
* usertools: replace unsafe input function
* version: 18.11.3-rc1
* version: 18.11.3-rc2
* vfio: remove incorrect experimental tag
* vfio: use contiguous mapping for IOVA as VA mode
* vhost/crypto: fix inferred misuse of enum
* vhost/crypto: fix logically dead code
* vhost: fix missing include
18.11.3 Validation
~~~~~~~~~~~~~~~~~~
* Intel(R) Testing
* Basic Intel(R) NIC(ixgbe and i40e) testing
* PF (i40e)
* PF (ixgbe)
* VF
* Compile Testing
* Intel NIC single core/NIC performance
* Basic cryptodev and virtio testing
* cryptodev
* vhost/virtio basic loopback, PVP and performance test
* Red Hat(R) Testing
* RHEL 8
* Functionality
* PF
* VF
* vhost single/multi queues and cross-NUMA
* vhostclient reconnect
* vhost live migration with single/multi queues and cross-NUMA
* OVS PVP
* Tested NICs
* X540-AT2 NIC(ixgbe, 10G)
* IBM(R) - DPDK on Power result
* Basic PF on Mellanox: No new errors or regressions seen
* Performance: no degradation compared to 18.11.2
* System tested
* IBM Power9 Model 8335-101 CPU: 2.3 (pvr 004e 1203)
* Tested NICs
* Mellanox Technologies MT28800 Family [ConnectX-5 Ex]
* firmware version: 16.26.292
* MLNX_OFED_LINUX-4.7-1.0.0.1
* Intel(R) Openvswitch Testing (Ian Stokes)
* OVS testing against head OVS Master, 2.12.2 and 2.11.3 with VSPERF
* Tested NICs
* i40e (X710) and i40eVF
* ixgbe (82599ES) and ixgbeVF
* igb (I350) and igbVF
* Functionality
* P2P
* PVP
* PVVP
* PV-PV in parallel
* Hotplug
* Multiqueue
* Vhostuserclient reconnect
* Vhost cross-NUMA awareness
* Jumbo frames
* Rate limiting
* QoS policer
* Mellanox(R) Testing
* Basic functionality with testpmd
* ConnectX-5
* RHEL 7.4
* Kernel 5.3.0
* Driver rdma-core v25.1
* fw 16.26.1040
* ConnectX-4 Lx
* RHEL 7.4
* Kernel 5.3.0
* Driver rdma-core v25.1
* fw 14.26.1040
* ConnectX-5
* RHEL 7.4
* Kernel 3.10.0-693.el7.x86_64
* Driver MLNX_OFED_LINUX-4.7-1.0.0.1
* fw 16.26.1040
* ConnectX-4 Lx
* RHEL 7.4
* Kernel 3.10.0-693.el7.x86_64
* Driver MLNX_OFED_LINUX-4.7-1.0.0.1
* fw 14.26.1040
* Microsoft(R) Azure Testing
* Images
* Canonical UbuntuServer 16.04-LTS latest
* Canonical UbuntuServer 18.04-DAILY-LTS latest
* RedHat RHEL 7-RAW latest
* RedHat RHEL 7.5 latest
* Openlogic CentOS 7.5 latest
* SUSE SLES 15 latest
* Drivers
* Mellanox and netvsc poll-mode drivers
* Functionality
* VM to VM traffic
* SRIOV/Failsafe
* Single core performance
* Multicore performance - see known issues below
18.11.3 Known Issues
~~~~~~~~~~~~~~~~~~~~
* DPDK 18.11.3 contains fixes up to DPDK v19.08. Issues identified/fixed in DPDK master branch after DPDK v19.08 may be present in DPDK 18.11.3
* Microsoft validation team testing saw a performance drop with some multicore tests compared to previous testing at time of DPDK 18.11.2 release. A re-run of DPDK 18.11.2 showed a similar drop, so it is not believed that there is any regression in DPDK.
18.11.3 Fixes skipped and status unresolved
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* dcfbc594f net/iavf: fix queue interrupt for ice
* 281bd1aa3 net/iavf: fix stats reset
* b149a7064 eal/freebsd: add config reattach in secondary process
* 6f43682e0 test/hash: init parameters in the correct function
* a135e050a examples/ipsec-secgw: fix packet length
* b0437f8b0 hash: load value after full key compare
* 9d10f53e4 test/metrics: fix second run
* 2967612f4 test/crypto: fix session init failure for wireless case
* 96b0931d5 net/bnxt: fix extended port counter statistics
* 72aaa312e net/bnxt: fix VF probe when MAC address is zero
* fe7848521 net/bnxt: fix doorbell register offset for Tx ring
* 1f3cea004 net/bnxt: fix check of address mapping
* 324c56551 net/bnxt: fix error checking of FW commands
* b4e190d55 net/bnxt: fix MAC/VLAN filter allocation
* ea81c1b81 net/mlx5: fix NVGRE matching
* dbda2092d net/i40e: fix request queue in VF
* 721c95301 net/mlx5: fix Rx scatter mode validation
* aa2c00702 net/bnxt: fix traffic stall on Rx queue stop/start
18.11.4 Release Notes
---------------------
18.11.4 Fixes
~~~~~~~~~~~~~
* vhost: fix possible denial of service by leaking FDs
* vhost: fix possible denial of service on SET_VRING_NUM
18.11.4 Validation
~~~~~~~~~~~~~~~~~~
* Security patches tested for CVE-2019-14818
18.11.4 Known Issues
~~~~~~~~~~~~~~~~~~~~
* DPDK 18.11.4 contains fixes up to DPDK v19.08 plus some security fixes. Issues identified/fixed in the DPDK master branch after DPDK v19.08 may be present in DPDK 18.11.4.
18.11.4 Fixes skipped and status unresolved
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* dcfbc594f net/iavf: fix queue interrupt for ice
* 281bd1aa3 net/iavf: fix stats reset
* b149a7064 eal/freebsd: add config reattach in secondary process
* 6f43682e0 test/hash: init parameters in the correct function
* a135e050a examples/ipsec-secgw: fix packet length
* b0437f8b0 hash: load value after full key compare
* 9d10f53e4 test/metrics: fix second run
* 2967612f4 test/crypto: fix session init failure for wireless case
* 96b0931d5 net/bnxt: fix extended port counter statistics
* 72aaa312e net/bnxt: fix VF probe when MAC address is zero
* fe7848521 net/bnxt: fix doorbell register offset for Tx ring
* 1f3cea004 net/bnxt: fix check of address mapping
* 324c56551 net/bnxt: fix error checking of FW commands
* b4e190d55 net/bnxt: fix MAC/VLAN filter allocation
* ea81c1b81 net/mlx5: fix NVGRE matching
* dbda2092d net/i40e: fix request queue in VF
* 721c95301 net/mlx5: fix Rx scatter mode validation
* aa2c00702 net/bnxt: fix traffic stall on Rx queue stop/start
18.11.5 Release Notes
---------------------
18.11.5 Fixes
~~~~~~~~~~~~~
* vhost: fix vring requests validation broken if no FD
18.11.5 Validation
~~~~~~~~~~~~~~~~~~
* The fix was tested by Intel via the virtio/vhost regression tests:
* http://doc.dpdk.org/dts/test_plans/virtio_pvp_regression_test_plan.html
* http://doc.dpdk.org/dts/test_plans/vhost_dequeue_zero_copy_test_plan.html
* http://doc.dpdk.org/dts/test_plans/vm2vm_virtio_pmd_test_plan.html
18.11.5 Known Issues
~~~~~~~~~~~~~~~~~~~~
* DPDK 18.11.5 contains fixes up to DPDK v19.08 plus a security fix for CVE-2019-14818. Issues identified/fixed in the DPDK master branch after DPDK v19.08 may be present in DPDK 18.11.5.
18.11.5 Fixes skipped and status unresolved
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* dcfbc594f net/iavf: fix queue interrupt for ice
* 281bd1aa3 net/iavf: fix stats reset
* b149a7064 eal/freebsd: add config reattach in secondary process
* 6f43682e0 test/hash: init parameters in the correct function
* a135e050a examples/ipsec-secgw: fix packet length
* b0437f8b0 hash: load value after full key compare
* 9d10f53e4 test/metrics: fix second run
* 2967612f4 test/crypto: fix session init failure for wireless case
* 96b0931d5 net/bnxt: fix extended port counter statistics
* 72aaa312e net/bnxt: fix VF probe when MAC address is zero
* fe7848521 net/bnxt: fix doorbell register offset for Tx ring
* 1f3cea004 net/bnxt: fix check of address mapping
* 324c56551 net/bnxt: fix error checking of FW commands
* b4e190d55 net/bnxt: fix MAC/VLAN filter allocation
* ea81c1b81 net/mlx5: fix NVGRE matching
* dbda2092d net/i40e: fix request queue in VF
* 721c95301 net/mlx5: fix Rx scatter mode validation
* aa2c00702 net/bnxt: fix traffic stall on Rx queue stop/start

View File

@ -281,18 +281,6 @@ The list of queues that must be polled for a given lcore is stored in a private
The values n_rx_port and rx_port_list[] are used in the main packet processing loop
(see :ref:`l2_fwd_app_rx_tx_packets`).
The global configuration for the RX queues is stored in a static structure:
.. code-block:: c

    static const struct rte_eth_rxconf rx_conf = {
        .rx_thresh = {
            .pthresh = RX_PTHRESH,
            .hthresh = RX_HTHRESH,
            .wthresh = RX_WTHRESH,
        },
    };
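A minimal sketch of the replacement approach (illustrative, not the l2fwd
source): passing NULL for the rte_eth_rxconf argument makes
rte_eth_rx_queue_setup() fall back to the defaults reported by the device,
which is why the hand-rolled static structure above could be dropped. The
port, queue, descriptor count, and mempool below are assumptions.

.. code-block:: c

    /* Port 0, queue 0, 128 descriptors; `pool` is an initialized
     * rte_mempool. NULL rx_conf selects the driver defaults. */
    ret = rte_eth_rx_queue_setup(0, 0, 128,
                                 rte_eth_dev_socket_id(0),
                                 NULL, pool);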
.. _l2_fwd_app_tx_init:
TX Queue Initialization

View File

@ -67,10 +67,6 @@
#define DPAA2_MBUF_HW_ANNOTATION 64
#define DPAA2_FD_PTA_SIZE 0
#if (DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) > RTE_PKTMBUF_HEADROOM
#error "Annotation requirement is more than RTE_PKTMBUF_HEADROOM"
#endif
/* we will re-use the HEADROOM for annotation in RX */
#define DPAA2_HW_BUF_RESERVE 0
#define DPAA2_PACKET_LAYOUT_ALIGN 64 /*changing from 256 */
@ -206,6 +202,7 @@ enum qbman_fd_format {
((fd)->simple.frc = (0x80000000 | (len)))
#define DPAA2_GET_FD_FRC_PARSE_SUM(fd) \
((uint16_t)(((fd)->simple.frc & 0xffff0000) >> 16))
#define DPAA2_RESET_FD_FRC(fd) ((fd)->simple.frc = 0)
#define DPAA2_SET_FD_FRC(fd, _frc) ((fd)->simple.frc = _frc)
#define DPAA2_RESET_FD_CTRL(fd) ((fd)->simple.ctrl = 0)

View File

@ -15,7 +15,7 @@
* - Enqueue, including setting the enqueue descriptor, and issuing enqueue
* command etc.
* - Dequeue, including setting the dequeue descriptor, issuing dequeue command,
* parsing the dequeue response in DQRR and memeory, parsing the state change
* parsing the dequeue response in DQRR and memory, parsing the state change
* notifications etc.
* - Release, including setting the release descriptor, and issuing the buffer
* release command.

View File

@ -583,7 +583,6 @@ pci_one_device_iommu_support_va(struct rte_pci_device *dev)
{
#define VTD_CAP_MGAW_SHIFT 16
#define VTD_CAP_MGAW_MASK (0x3fULL << VTD_CAP_MGAW_SHIFT)
#define X86_VA_WIDTH 47 /* From Documentation/x86/x86_64/mm.txt */
struct rte_pci_addr *addr = &dev->addr;
char filename[PATH_MAX];
FILE *fp;

View File

@ -314,12 +314,11 @@ pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
loc->domain, loc->bus, loc->devid,
loc->function, res_idx);
if (access(devname, R_OK|W_OK) != -1) {
fd = open(devname, O_RDWR);
if (fd < 0)
RTE_LOG(INFO, EAL, "%s cannot be mapped. "
"Fall-back to non prefetchable mode.\n",
devname);
fd = open(devname, O_RDWR);
if (fd < 0 && errno != ENOENT) {
RTE_LOG(INFO, EAL, "%s cannot be mapped. "
"Fall-back to non prefetchable mode.\n",
devname);
}
}

View File

@ -25,6 +25,18 @@
/** Pathname of VMBUS devices directory. */
#define SYSFS_VMBUS_DEVICES "/sys/bus/vmbus/devices"
/*
* GUID associated with network devices
* {f8615163-df3e-46c5-913f-f2d2f965ed0e}
*/
static const rte_uuid_t vmbus_nic_uuid = {
0xf8, 0x61, 0x51, 0x63,
0xdf, 0x3e,
0x46, 0xc5,
0x91, 0x3f,
0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e
};
extern struct rte_vmbus_bus rte_vmbus_bus;
/* Read sysfs file to get UUID */
@ -242,16 +254,22 @@ vmbus_scan_one(const char *name)
snprintf(dirname, sizeof(dirname), "%s/%s",
SYSFS_VMBUS_DEVICES, name);
/* get device id */
snprintf(filename, sizeof(filename), "%s/device_id", dirname);
if (parse_sysfs_uuid(filename, dev->device_id) < 0)
goto error;
/* get device class */
snprintf(filename, sizeof(filename), "%s/class_id", dirname);
if (parse_sysfs_uuid(filename, dev->class_id) < 0)
goto error;
/* skip non-network devices */
if (rte_uuid_compare(dev->class_id, vmbus_nic_uuid) != 0) {
free(dev);
return 0;
}
/* get device id */
snprintf(filename, sizeof(filename), "%s/device_id", dirname);
if (parse_sysfs_uuid(filename, dev->device_id) < 0)
goto error;
/* get relid */
snprintf(filename, sizeof(filename), "%s/id", dirname);
if (eal_parse_sysfs_value(filename, &tmp) < 0)
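The reshuffle reads class_id first so non-NIC VMBus devices are skipped
before any further sysfs parsing. A hedged sketch of the filter, reusing the
vmbus_nic_uuid constant defined above (not the bus code itself):

#include <stdbool.h>
#include <rte_uuid.h>

/* Illustrative only: rte_uuid_compare() returns 0 on equality, so a
 * non-zero result means "not the network device class". */
static bool
is_network_class(const rte_uuid_t class_id)
{
	return rte_uuid_compare(class_id, vmbus_nic_uuid) == 0;
}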

View File

@ -99,7 +99,7 @@ read_memory_node(unsigned int *count)
goto cleanup;
}
DPAAX_DEBUG("Size of device-tree mem node: %lu", statbuf.st_size);
DPAAX_DEBUG("Size of device-tree mem node: %" PRIu64, statbuf.st_size);
if (statbuf.st_size > MEM_NODE_FILE_LEN) {
DPAAX_DEBUG("More memory nodes available than assumed.");
DPAAX_DEBUG("System may not work properly!");
@ -118,7 +118,7 @@ read_memory_node(unsigned int *count)
*/
*count = (statbuf.st_size / 16);
if ((*count) <= 0 || (statbuf.st_size % 16 != 0)) {
DPAAX_DEBUG("Invalid memory node values or count. (size=%lu)",
DPAAX_DEBUG("Invalid memory node values or count. (size=%" PRIu64 ")",
statbuf.st_size);
goto cleanup;
}

View File

@ -169,18 +169,12 @@ isal_comp_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
if (qp == NULL)
return -EINVAL;
if (qp->stream != NULL)
rte_free(qp->stream);
if (qp->stream->level_buf != NULL)
if (qp->stream)
rte_free(qp->stream->level_buf);
if (qp->state != NULL)
rte_free(qp->state);
if (qp->processed_pkts != NULL)
rte_ring_free(qp->processed_pkts);
rte_free(qp->state);
rte_ring_free(qp->processed_pkts);
rte_free(qp->stream);
rte_free(qp);
dev->data->queue_pairs[qp_id] = NULL;
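The reordering matters because the old code freed qp->stream and then
dereferenced qp->stream->level_buf. A hedged before/after sketch (names
mirror the driver, but this is illustration):

/* Wrong (old code): container freed before its member is read. */
rte_free(qp->stream);
rte_free(qp->stream->level_buf);	/* use-after-free */

/* Right (the fix): members first, container last. rte_free(NULL) and
 * rte_ring_free(NULL) are documented no-ops, so the remaining NULL
 * checks could be dropped. */
if (qp->stream)
	rte_free(qp->stream->level_buf);
rte_free(qp->state);
rte_ring_free(qp->processed_pkts);
rte_free(qp->stream);
rte_free(qp);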

View File

@ -30,6 +30,7 @@ process_zlib_deflate(struct rte_comp_op *op, z_stream *strm)
default:
op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
ZLIB_PMD_ERR("Invalid flush value\n");
return;
}
if (unlikely(!strm)) {

View File

@ -70,7 +70,7 @@ static inline void
caam_jr_op_ending(struct caam_jr_op_ctx *ctx)
{
PMD_INIT_FUNC_TRACE();
/* report op status to sym->op and then free the ctx memeory */
/* report op status to sym->op and then free the ctx memory */
rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}

View File

@ -2193,6 +2193,7 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *sess)
{
dpaa2_sec_session *session = sess;
int ret;
PMD_INIT_FUNC_TRACE();
@ -2208,37 +2209,37 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
/* Cipher Only */
if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
session->ctxt_type = DPAA2_SEC_CIPHER;
dpaa2_sec_cipher_init(dev, xform, session);
ret = dpaa2_sec_cipher_init(dev, xform, session);
/* Authentication Only */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
xform->next == NULL) {
session->ctxt_type = DPAA2_SEC_AUTH;
dpaa2_sec_auth_init(dev, xform, session);
ret = dpaa2_sec_auth_init(dev, xform, session);
/* Cipher then Authenticate */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
session->ext_params.aead_ctxt.auth_cipher_text = true;
dpaa2_sec_aead_chain_init(dev, xform, session);
ret = dpaa2_sec_aead_chain_init(dev, xform, session);
/* Authenticate then Cipher */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
session->ext_params.aead_ctxt.auth_cipher_text = false;
dpaa2_sec_aead_chain_init(dev, xform, session);
ret = dpaa2_sec_aead_chain_init(dev, xform, session);
/* AEAD operation for AES-GCM kind of Algorithms */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
xform->next == NULL) {
dpaa2_sec_aead_init(dev, xform, session);
ret = dpaa2_sec_aead_init(dev, xform, session);
} else {
DPAA2_SEC_ERR("Invalid crypto type");
return -EINVAL;
}
return 0;
return ret;
}
static int

View File

@ -60,7 +60,7 @@ dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
}
/* report op status to sym->op and then free the ctx memeory */
/* report op status to sym->op and then free the ctx memory */
rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}

View File

@ -726,7 +726,7 @@ mrvl_crypto_pmd_sym_session_get_size(__rte_unused struct rte_cryptodev *dev)
/** Configure the session from a crypto xform chain (PMD ops callback).
*
* @param dev Pointer to the device structure.
* @param xform Pointer to the crytpo configuration structure.
* @param xform Pointer to the crypto configuration structure.
* @param sess Pointer to the empty session structure.
* @returns 0 upon success, negative value otherwise.
*/

View File

@ -1528,7 +1528,7 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op,
}
if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
if (memcmp(dst, op->sym->auth.digest.data,
if (CRYPTO_memcmp(dst, op->sym->auth.digest.data,
sess->auth.digest_length) != 0) {
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}
@ -1605,12 +1605,9 @@ process_openssl_dsa_verify_op(struct rte_crypto_op *cop,
op->y.length,
pub_key);
if (!r || !s || !pub_key) {
if (r)
BN_free(r);
if (s)
BN_free(s);
if (pub_key)
BN_free(pub_key);
BN_free(r);
BN_free(s);
BN_free(pub_key);
cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
return -1;
@ -1781,10 +1778,8 @@ process_openssl_modinv_op(struct rte_crypto_op *cop,
BIGNUM *res = BN_CTX_get(sess->u.m.ctx);
if (unlikely(base == NULL || res == NULL)) {
if (base)
BN_free(base);
if (res)
BN_free(res);
BN_free(base);
BN_free(res);
cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
return -1;
}
@ -1815,10 +1810,8 @@ process_openssl_modexp_op(struct rte_crypto_op *cop,
BIGNUM *res = BN_CTX_get(sess->u.e.ctx);
if (unlikely(base == NULL || res == NULL)) {
if (base)
BN_free(base);
if (res)
BN_free(res);
BN_free(base);
BN_free(res);
cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
return -1;
}
@ -1920,7 +1913,7 @@ process_openssl_rsa_op(struct rte_crypto_op *cop,
"Length of public_decrypt %d "
"length of message %zd\n",
ret, op->rsa.message.length);
if ((ret <= 0) || (memcmp(tmp, op->rsa.message.data,
if ((ret <= 0) || (CRYPTO_memcmp(tmp, op->rsa.message.data,
op->rsa.message.length))) {
OPENSSL_LOG(ERR, "RSA sign Verification failed");
cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
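Replacing memcmp() with CRYPTO_memcmp() closes a timing side channel:
memcmp() returns at the first mismatching byte, so response time can leak
how much of a digest matched. A hedged sketch of the constant-time check
(not driver code):

#include <stddef.h>
#include <stdint.h>
#include <openssl/crypto.h>

/* CRYPTO_memcmp() returns 0 on equality, like memcmp(), but its run
 * time is independent of where the inputs differ. */
static int
digest_ok(const uint8_t *computed, const uint8_t *expected, size_t len)
{
	return CRYPTO_memcmp(computed, expected, len) == 0;
}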

View File

@ -911,22 +911,14 @@ static int openssl_set_asym_session_parameters(
asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_RSA;
break;
err_rsa:
if (n)
BN_free(n);
if (e)
BN_free(e);
if (d)
BN_free(d);
if (p)
BN_free(p);
if (q)
BN_free(q);
if (dmp1)
BN_free(dmp1);
if (dmq1)
BN_free(dmq1);
if (iqmp)
BN_free(iqmp);
BN_clear_free(n);
BN_clear_free(e);
BN_clear_free(d);
BN_clear_free(p);
BN_clear_free(q);
BN_clear_free(dmp1);
BN_clear_free(dmq1);
BN_clear_free(iqmp);
return -1;
}
@ -1048,10 +1040,8 @@ err_rsa:
err_dh:
OPENSSL_LOG(ERR, " failed to set dh params\n");
if (p)
BN_free(p);
if (g)
BN_free(g);
BN_free(p);
BN_free(g);
return -1;
}
case RTE_CRYPTO_ASYM_XFORM_DSA:
@ -1117,16 +1107,11 @@ err_dh:
break;
err_dsa:
if (p)
BN_free(p);
if (q)
BN_free(q);
if (g)
BN_free(g);
if (priv_key)
BN_free(priv_key);
if (pub_key)
BN_free(pub_key);
BN_free(p);
BN_free(q);
BN_free(g);
BN_free(priv_key);
BN_free(pub_key);
return -1;
}
default:
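These cleanups rely on documented OpenSSL behavior: BN_free() and
BN_clear_free() are no-ops when passed NULL, so the conditional wrappers
were dead weight, and BN_clear_free() zeroes the BIGNUM before freeing,
which is why the RSA error path (private-key material) switches to it. A
hedged sketch, not driver code:

#include <openssl/bn.h>

/* Both calls are NULL-safe per the OpenSSL documentation;
 * BN_clear_free() additionally wipes the value before freeing. */
static void
release_keypair(BIGNUM *pub, BIGNUM *priv)
{
	BN_free(pub);		/* may be NULL */
	BN_clear_free(priv);	/* zeroes secret bytes, then frees */
}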

View File

@ -530,6 +530,8 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
qat_req->comn_mid.dest_data_addr =
cookie->qat_sgl_dst_phys_addr;
}
qat_req->comn_mid.src_length = 0;
qat_req->comn_mid.dst_length = 0;
} else {
qat_req->comn_mid.src_data_addr = src_buf_start;
qat_req->comn_mid.dest_data_addr = dst_buf_start;

View File

@ -397,9 +397,13 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
hw->common_cfg = get_cfg_addr(dev, &cap);
break;
case VIRTIO_PCI_CAP_NOTIFY_CFG:
rte_pci_read_config(dev, &hw->notify_off_multiplier,
ret = rte_pci_read_config(dev, &hw->notify_off_multiplier,
4, pos + sizeof(cap));
hw->notify_base = get_cfg_addr(dev, &cap);
if (ret != 4)
VIRTIO_CRYPTO_INIT_LOG_ERR(
"failed to read notify_off_multiplier: ret %d", ret);
else
hw->notify_base = get_cfg_addr(dev, &cap);
break;
case VIRTIO_PCI_CAP_DEVICE_CFG:
hw->dev_cfg = get_cfg_addr(dev, &cap);

View File

@ -640,7 +640,7 @@ dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
EVENTDEV_INIT_FUNC_TRACE();
RTE_SET_USED(dev);
*timeout_ticks = ns * scale;
*timeout_ticks = ns / scale;
return 0;
}
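The fix inverts the arithmetic: assuming `scale` holds the number of
nanoseconds represented by one tick, converting a timeout expressed in
nanoseconds to ticks requires division. A hedged sketch of that conversion:

#include <stdint.h>

/* Illustrative only: ns_per_tick plays the role of `scale`.
 * 3000 ns at 1000 ns/tick -> 3 ticks; multiplying (the old code)
 * would have produced 3000000. */
static uint64_t
ns_to_ticks(uint64_t ns, uint64_t ns_per_tick)
{
	return ns / ns_per_tick;
}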

View File

@ -12,3 +12,8 @@ sources = files('ssovf_worker.c',
allow_experimental_apis = true
deps += ['common_octeontx', 'mempool_octeontx', 'bus_vdev', 'pmd_octeontx']
# for clang 32-bit compiles we need libatomic for 64-bit atomic ops
if cc.get_id() == 'clang' and dpdk_conf.get('RTE_ARCH_64') == false
ext_deps += cc.find_library('atomic')
endif
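The same meson snippet is added for opdl below; it is needed because on
32-bit targets clang lowers 64-bit atomics to libatomic library calls. A
hedged C illustration of the kind of operation that triggers the
dependency:

#include <stdint.h>

/* On a 32-bit clang build this load compiles to a call into
 * libatomic (e.g. __atomic_load_8), hence the explicit -latomic
 * link dependency in the meson snippet above. */
static uint64_t
load_counter(const volatile uint64_t *counter)
{
	return __atomic_load_n(counter, __ATOMIC_RELAXED);
}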

View File

@ -9,3 +9,8 @@ sources = files(
'opdl_test.c',
)
deps += ['bus_vdev']
# for clang 32-bit compiles we need libatomic for 64-bit atomic ops
if cc.get_id() == 'clang' and dpdk_conf.get('RTE_ARCH_64') == false
ext_deps += cc.find_library('atomic')
endif

View File

@ -102,7 +102,7 @@ opdl_port_link(struct rte_eventdev *dev,
dev->data->dev_id,
queues[0],
p->id);
rte_errno = -EINVAL;
rte_errno = EINVAL;
return 0;
}
@ -113,7 +113,7 @@ opdl_port_link(struct rte_eventdev *dev,
dev->data->dev_id,
num,
p->id);
rte_errno = -EDQUOT;
rte_errno = EDQUOT;
return 0;
}
@ -123,7 +123,7 @@ opdl_port_link(struct rte_eventdev *dev,
dev->data->dev_id,
p->id,
queues[0]);
rte_errno = -EINVAL;
rte_errno = EINVAL;
return 0;
}
@ -134,7 +134,7 @@ opdl_port_link(struct rte_eventdev *dev,
p->id,
p->external_qid,
queues[0]);
rte_errno = -EINVAL;
rte_errno = EINVAL;
return 0;
}
@ -160,7 +160,7 @@ opdl_port_unlink(struct rte_eventdev *dev,
dev->data->dev_id,
queues[0],
p->id);
rte_errno = -EINVAL;
rte_errno = EINVAL;
return 0;
}
RTE_SET_USED(nb_unlinks);

View File

@ -35,7 +35,7 @@ enqueue_check(struct opdl_port *p,
p->id,
ev[i].queue_id,
p->next_external_qid);
rte_errno = -EINVAL;
rte_errno = EINVAL;
return 0;
}
}
@ -63,7 +63,7 @@ enqueue_check(struct opdl_port *p,
} else {
if (num > 0 &&
ev[0].queue_id != p->next_external_qid) {
rte_errno = -EINVAL;
rte_errno = EINVAL;
return 0;
}
}
@ -116,7 +116,7 @@ opdl_rx_error_enqueue(struct opdl_port *p,
RTE_SET_USED(ev);
RTE_SET_USED(num);
rte_errno = -ENOSPC;
rte_errno = ENOSPC;
return 0;
}
@ -145,7 +145,7 @@ opdl_rx_enqueue(struct opdl_port *p,
if (enqueued < num)
rte_errno = -ENOSPC;
rte_errno = ENOSPC;
return enqueued;
}
@ -164,7 +164,7 @@ opdl_tx_error_dequeue(struct opdl_port *p,
RTE_SET_USED(ev);
RTE_SET_USED(num);
rte_errno = -ENOSPC;
rte_errno = ENOSPC;
return 0;
}
@ -240,7 +240,7 @@ opdl_claim(struct opdl_port *p, struct rte_event ev[], uint16_t num)
"Attempt to dequeue num of events larger than port (%d) max",
opdl_pmd_dev_id(p->opdl),
p->id);
rte_errno = -EINVAL;
rte_errno = EINVAL;
return 0;
}

View File

@ -755,7 +755,7 @@ int
opdl_stage_disclaim(struct opdl_stage *s, uint32_t num_entries, bool block)
{
if (num_entries != s->num_event) {
rte_errno = -EINVAL;
rte_errno = EINVAL;
return 0;
}
if (s->threadsafe == false) {

View File

@ -38,12 +38,12 @@ sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
/* check for qid map overflow */
if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map)) {
rte_errno = -EDQUOT;
rte_errno = EDQUOT;
break;
}
if (p->is_directed && p->num_qids_mapped > 0) {
rte_errno = -EDQUOT;
rte_errno = EDQUOT;
break;
}
@ -59,12 +59,12 @@ sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
if (q->type == SW_SCHED_TYPE_DIRECT) {
/* check directed qids only map to one port */
if (p->num_qids_mapped > 0) {
rte_errno = -EDQUOT;
rte_errno = EDQUOT;
break;
}
/* check port only takes a directed flow */
if (num > 1) {
rte_errno = -EDQUOT;
rte_errno = EDQUOT;
break;
}

View File

@ -17,9 +17,6 @@ if cc.has_argument('-Wno-format-truncation')
default_cflags += '-Wno-format-truncation'
endif
# specify -D_GNU_SOURCE unconditionally
default_cflags += '-D_GNU_SOURCE'
foreach class:driver_classes
drivers = []
std_deps = []

View File

@ -527,8 +527,6 @@ open_packet_iface(const char *key __rte_unused,
return 0;
}
static struct rte_vdev_driver pmd_af_packet_drv;
static int
rte_pmd_init_internals(struct rte_vdev_device *dev,
const int sockfd,

View File

@ -241,8 +241,7 @@ check_for_ext(struct ark_adapter *ark)
static int
eth_ark_dev_init(struct rte_eth_dev *dev)
{
struct ark_adapter *ark =
(struct ark_adapter *)dev->data->dev_private;
struct ark_adapter *ark = dev->data->dev_private;
struct rte_pci_device *pci_dev;
int ret;
int port_count = 1;
@ -403,9 +402,9 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
return ret;
error:
if (dev->data->mac_addrs)
rte_free(dev->data->mac_addrs);
error:
rte_free(dev->data->mac_addrs);
dev->data->mac_addrs = NULL;
return -1;
}
@ -417,8 +416,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
static int
ark_config_device(struct rte_eth_dev *dev)
{
struct ark_adapter *ark =
(struct ark_adapter *)dev->data->dev_private;
struct ark_adapter *ark = dev->data->dev_private;
uint16_t num_q, i;
struct ark_mpu_t *mpu;
@ -493,8 +491,7 @@ ark_config_device(struct rte_eth_dev *dev)
static int
eth_ark_dev_uninit(struct rte_eth_dev *dev)
{
struct ark_adapter *ark =
(struct ark_adapter *)dev->data->dev_private;
struct ark_adapter *ark = dev->data->dev_private;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
@ -516,8 +513,7 @@ static int
eth_ark_dev_configure(struct rte_eth_dev *dev)
{
PMD_FUNC_LOG(DEBUG, "\n");
struct ark_adapter *ark =
(struct ark_adapter *)dev->data->dev_private;
struct ark_adapter *ark = dev->data->dev_private;
eth_ark_dev_set_link_up(dev);
if (ark->user_ext.dev_configure)
@ -543,8 +539,7 @@ delay_pg_start(void *arg)
static int
eth_ark_dev_start(struct rte_eth_dev *dev)
{
struct ark_adapter *ark =
(struct ark_adapter *)dev->data->dev_private;
struct ark_adapter *ark = dev->data->dev_private;
int i;
PMD_FUNC_LOG(DEBUG, "\n");
@ -596,8 +591,7 @@ eth_ark_dev_stop(struct rte_eth_dev *dev)
{
uint16_t i;
int status;
struct ark_adapter *ark =
(struct ark_adapter *)dev->data->dev_private;
struct ark_adapter *ark = dev->data->dev_private;
struct ark_mpu_t *mpu;
PMD_FUNC_LOG(DEBUG, "\n");
@ -687,8 +681,7 @@ eth_ark_dev_stop(struct rte_eth_dev *dev)
static void
eth_ark_dev_close(struct rte_eth_dev *dev)
{
struct ark_adapter *ark =
(struct ark_adapter *)dev->data->dev_private;
struct ark_adapter *ark = dev->data->dev_private;
uint16_t i;
if (ark->user_ext.dev_close)
@ -718,8 +711,7 @@ static void
eth_ark_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
struct ark_adapter *ark =
(struct ark_adapter *)dev->data->dev_private;
struct ark_adapter *ark = dev->data->dev_private;
struct ark_mpu_t *tx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_TX_BASE);
struct ark_mpu_t *rx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_RX_BASE);
uint16_t ports = ark->num_ports;
@ -754,8 +746,7 @@ eth_ark_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
PMD_DEBUG_LOG(DEBUG, "link status = %d\n",
dev->data->dev_link.link_status);
struct ark_adapter *ark =
(struct ark_adapter *)dev->data->dev_private;
struct ark_adapter *ark = dev->data->dev_private;
if (ark->user_ext.link_update) {
return ark->user_ext.link_update
@ -769,8 +760,7 @@ static int
eth_ark_dev_set_link_up(struct rte_eth_dev *dev)
{
dev->data->dev_link.link_status = 1;
struct ark_adapter *ark =
(struct ark_adapter *)dev->data->dev_private;
struct ark_adapter *ark = dev->data->dev_private;
if (ark->user_ext.dev_set_link_up)
return ark->user_ext.dev_set_link_up(dev,
@ -782,8 +772,7 @@ static int
eth_ark_dev_set_link_down(struct rte_eth_dev *dev)
{
dev->data->dev_link.link_status = 0;
struct ark_adapter *ark =
(struct ark_adapter *)dev->data->dev_private;
struct ark_adapter *ark = dev->data->dev_private;
if (ark->user_ext.dev_set_link_down)
return ark->user_ext.dev_set_link_down(dev,
@ -795,8 +784,7 @@ static int
eth_ark_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
uint16_t i;
struct ark_adapter *ark =
(struct ark_adapter *)dev->data->dev_private;
struct ark_adapter *ark = dev->data->dev_private;
stats->ipackets = 0;
stats->ibytes = 0;
@ -819,8 +807,7 @@ static void
eth_ark_dev_stats_reset(struct rte_eth_dev *dev)
{
uint16_t i;
struct ark_adapter *ark =
(struct ark_adapter *)dev->data->dev_private;
struct ark_adapter *ark = dev->data->dev_private;
for (i = 0; i < dev->data->nb_tx_queues; i++)
eth_tx_queue_stats_reset(dev->data->tx_queues[i]);
@ -837,8 +824,7 @@ eth_ark_macaddr_add(struct rte_eth_dev *dev,
uint32_t index,
uint32_t pool)
{
struct ark_adapter *ark =
(struct ark_adapter *)dev->data->dev_private;
struct ark_adapter *ark = dev->data->dev_private;
if (ark->user_ext.mac_addr_add) {
ark->user_ext.mac_addr_add(dev,
@ -854,8 +840,7 @@ eth_ark_macaddr_add(struct rte_eth_dev *dev,
static void
eth_ark_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
struct ark_adapter *ark =
(struct ark_adapter *)dev->data->dev_private;
struct ark_adapter *ark = dev->data->dev_private;
if (ark->user_ext.mac_addr_remove)
ark->user_ext.mac_addr_remove(dev, index,
@ -866,8 +851,7 @@ static int
eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr)
{
struct ark_adapter *ark =
(struct ark_adapter *)dev->data->dev_private;
struct ark_adapter *ark = dev->data->dev_private;
if (ark->user_ext.mac_addr_set) {
ark->user_ext.mac_addr_set(dev, mac_addr,
@ -880,8 +864,7 @@ eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
static int
eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size)
{
struct ark_adapter *ark =
(struct ark_adapter *)dev->data->dev_private;
struct ark_adapter *ark = dev->data->dev_private;
if (ark->user_ext.set_mtu)
return ark->user_ext.set_mtu(dev, size,

View File

@ -121,15 +121,13 @@ eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
struct rte_mempool *mb_pool)
{
static int warning1; /* = 0 */
struct ark_adapter *ark = (struct ark_adapter *)dev->data->dev_private;
struct ark_adapter *ark = dev->data->dev_private;
struct ark_rx_queue *queue;
uint32_t i;
int status;
/* Future works: divide the Q's evenly with multi-ports */
int port = dev->data->port_id;
int qidx = port + queue_idx;
int qidx = queue_idx;
/* We may already be setup, free memory prior to re-allocation */
if (dev->data->rx_queues[queue_idx] != NULL) {
@ -611,7 +609,7 @@ eth_rx_queue_stats_reset(void *vqueue)
void
eth_ark_udm_force_close(struct rte_eth_dev *dev)
{
struct ark_adapter *ark = (struct ark_adapter *)dev->data->dev_private;
struct ark_adapter *ark = dev->data->dev_private;
struct ark_rx_queue *queue;
uint32_t index;
uint16_t i;

View File

@ -207,13 +207,11 @@ eth_ark_tx_queue_setup(struct rte_eth_dev *dev,
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf __rte_unused)
{
struct ark_adapter *ark = (struct ark_adapter *)dev->data->dev_private;
struct ark_adapter *ark = dev->data->dev_private;
struct ark_tx_queue *queue;
int status;
/* Future: divide the Q's evenly with multi-ports */
int port = dev->data->port_id;
int qidx = port + queue_idx;
int qidx = queue_idx;
if (!rte_is_power_of_2(nb_desc)) {
PMD_DRV_LOG(ERR,

View File

@ -322,8 +322,7 @@ atl_disable_intr(struct aq_hw_s *hw)
static int
eth_atl_dev_init(struct rte_eth_dev *eth_dev)
{
struct atl_adapter *adapter =
(struct atl_adapter *)eth_dev->data->dev_private;
struct atl_adapter *adapter = eth_dev->data->dev_private;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

View File

@ -824,13 +824,13 @@ atl_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
ret = rte_validate_tx_offload(m);
if (ret != 0) {
rte_errno = ret;
rte_errno = -ret;
return i;
}
#endif
ret = rte_net_intel_cksum_prepare(m);
if (ret != 0) {
rte_errno = ret;
rte_errno = -ret;
return i;
}
}

View File

@ -993,6 +993,7 @@ avf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->imissed = pstats->rx_discards;
stats->oerrors = pstats->tx_errors + pstats->tx_discards;
stats->ibytes = pstats->rx_bytes;
stats->ibytes -= stats->ipackets * ETHER_CRC_LEN;
stats->obytes = pstats->tx_bytes;
} else {
PMD_DRV_LOG(ERR, "Get statistics failed");

View File

@ -144,7 +144,8 @@ check_rx_bulk_allow(struct avf_rx_queue *rxq)
static inline void
reset_rx_queue(struct avf_rx_queue *rxq)
{
uint16_t len, i;
uint16_t len;
uint32_t i;
if (!rxq)
return;
@ -174,7 +175,8 @@ static inline void
reset_tx_queue(struct avf_tx_queue *txq)
{
struct avf_tx_entry *txe;
uint16_t i, prev, size;
uint32_t i, size;
uint16_t prev;
if (!txq) {
PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
@ -1583,6 +1585,9 @@ avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Setup TX context descriptor if required */
uint64_t cd_type_cmd_tso_mss =
AVF_TX_DESC_DTYPE_CONTEXT;
volatile struct avf_tx_context_desc *ctx_txd =
(volatile struct avf_tx_context_desc *)
&txr[tx_id];
txn = &sw_ring[txe->next_id];
RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
@ -1596,6 +1601,9 @@ avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
cd_type_cmd_tso_mss |=
avf_set_tso_ctx(tx_pkt, tx_offload);
ctx_txd->type_cmd_tso_mss =
rte_cpu_to_le_64(cd_type_cmd_tso_mss);
AVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
txe->last_id = tx_last;
tx_id = txe->next_id;
@ -1698,31 +1706,31 @@ avf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
/* Check condition for nb_segs > AVF_TX_MAX_MTU_SEG. */
if (!(ol_flags & PKT_TX_TCP_SEG)) {
if (m->nb_segs > AVF_TX_MAX_MTU_SEG) {
rte_errno = -EINVAL;
rte_errno = EINVAL;
return i;
}
} else if ((m->tso_segsz < AVF_MIN_TSO_MSS) ||
(m->tso_segsz > AVF_MAX_TSO_MSS)) {
/* MSS outside the range are considered malicious */
rte_errno = -EINVAL;
rte_errno = EINVAL;
return i;
}
if (ol_flags & AVF_TX_OFFLOAD_NOTSUP_MASK) {
rte_errno = -ENOTSUP;
rte_errno = ENOTSUP;
return i;
}
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
ret = rte_validate_tx_offload(m);
if (ret != 0) {
rte_errno = ret;
rte_errno = -ret;
return i;
}
#endif
ret = rte_net_intel_cksum_prepare(m);
if (ret != 0) {
rte_errno = ret;
rte_errno = -ret;
return i;
}
}

View File

@ -521,6 +521,7 @@ avf_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
i++;
if (i == nb_bufs)
return nb_bufs;
rxq->pkt_first_seg = rx_pkts[i];
}
return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
&split_flags[i]);

View File

@ -157,7 +157,7 @@ axgbe_dev_configure(struct rte_eth_dev *dev)
static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
struct axgbe_port *pdata = dev->data->dev_private;
if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
pdata->rss_enable = 1;
@ -171,10 +171,11 @@ axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
PMD_INIT_FUNC_TRACE();
struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
struct axgbe_port *pdata = dev->data->dev_private;
int ret;
PMD_INIT_FUNC_TRACE();
/* Multiqueue RSS */
ret = axgbe_dev_rx_mq_config(dev);
if (ret) {
@ -209,9 +210,10 @@ axgbe_dev_start(struct rte_eth_dev *dev)
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
PMD_INIT_FUNC_TRACE();
struct axgbe_port *pdata = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
rte_intr_disable(&pdata->pci_dev->intr_handle);
if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
@ -237,27 +239,30 @@ axgbe_dev_close(struct rte_eth_dev *dev)
static void
axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
PMD_INIT_FUNC_TRACE();
struct axgbe_port *pdata = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);
}
static void
axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
PMD_INIT_FUNC_TRACE();
struct axgbe_port *pdata = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);
}
static void
axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
PMD_INIT_FUNC_TRACE();
struct axgbe_port *pdata = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
return;
AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);
@ -266,9 +271,10 @@ axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
static void
axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
PMD_INIT_FUNC_TRACE();
struct axgbe_port *pdata = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
return;
AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);
@ -578,7 +584,7 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
pdata = (struct axgbe_port *)eth_dev->data->dev_private;
pdata = eth_dev->data->dev_private;
/* initial state */
axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
@ -694,6 +700,7 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
ret = pdata->phy_if.phy_init(pdata);
if (ret) {
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
return ret;
}

View File

@ -342,7 +342,7 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
const struct rte_memzone *tz;
tx_desc = nb_desc;
pdata = (struct axgbe_port *)dev->data->dev_private;
pdata = dev->data->dev_private;
/*
* validate tx descriptors count

View File

@ -2015,6 +2015,8 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
uint8_t global = FALSE;
uint32_t val;
PMD_INIT_FUNC_TRACE(sc);
PMD_DRV_LOG(DEBUG, sc, "Starting NIC unload...");
/* mark driver as unloaded in shmem2 */
@ -2118,6 +2120,9 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
bnx2x_free_mem(sc);
}
/* free the host hardware/software hsi structures */
bnx2x_free_hsi_mem(sc);
bnx2x_free_fw_stats_mem(sc);
sc->state = BNX2X_STATE_CLOSED;
@ -4572,6 +4577,8 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp)
}
}
/* Assuming we have completed slow path completion, clear the flag */
rte_atomic32_set(&sc->scan_fp, 0);
bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
}
@ -7228,6 +7235,14 @@ int bnx2x_nic_load(struct bnx2x_softc *sc)
}
}
/* allocate the host hardware/software hsi structures */
if (bnx2x_alloc_hsi_mem(sc) != 0) {
PMD_DRV_LOG(ERR, sc, "bnx2x_alloc_hsi_mem failed");
sc->state = BNX2X_STATE_CLOSED;
rc = -ENOMEM;
goto bnx2x_nic_load_error0;
}
if (bnx2x_alloc_fw_stats_mem(sc) != 0) {
sc->state = BNX2X_STATE_CLOSED;
rc = -ENOMEM;
@ -7443,6 +7458,7 @@ bnx2x_nic_load_error1:
bnx2x_nic_load_error0:
bnx2x_free_fw_stats_mem(sc);
bnx2x_free_hsi_mem(sc);
bnx2x_free_mem(sc);
return rc;
@ -8888,9 +8904,9 @@ int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc)
uint32_t i;
if (IS_PF(sc)) {
/************************/
/* DEFAULT STATUS BLOCK */
/************************/
/************************/
/* DEFAULT STATUS BLOCK */
/************************/
if (bnx2x_dma_alloc(sc, sizeof(struct host_sp_status_block),
&sc->def_sb_dma, "def_sb",
@ -8900,9 +8916,9 @@ int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc)
sc->def_sb =
(struct host_sp_status_block *)sc->def_sb_dma.vaddr;
/***************/
/* EVENT QUEUE */
/***************/
/***************/
/* EVENT QUEUE */
/***************/
if (bnx2x_dma_alloc(sc, BNX2X_PAGE_SIZE,
&sc->eq_dma, "ev_queue",
@ -8913,9 +8929,9 @@ int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc)
sc->eq = (union event_ring_elem *)sc->eq_dma.vaddr;
/*************/
/* SLOW PATH */
/*************/
/*************/
/* SLOW PATH */
/*************/
if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_slowpath),
&sc->sp_dma, "sp",
@ -8927,9 +8943,9 @@ int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc)
sc->sp = (struct bnx2x_slowpath *)sc->sp_dma.vaddr;
/*******************/
/* SLOW PATH QUEUE */
/*******************/
/*******************/
/* SLOW PATH QUEUE */
/*******************/
if (bnx2x_dma_alloc(sc, BNX2X_PAGE_SIZE,
&sc->spq_dma, "sp_queue",
@ -8942,9 +8958,9 @@ int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc)
sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
/***************************/
/* FW DECOMPRESSION BUFFER */
/***************************/
/***************************/
/* FW DECOMPRESSION BUFFER */
/***************************/
if (bnx2x_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
"fw_buf", RTE_CACHE_LINE_SIZE) != 0) {
@ -8968,9 +8984,9 @@ int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc)
fp->sc = sc;
fp->index = i;
/*******************/
/* FP STATUS BLOCK */
/*******************/
/*******************/
/* FP STATUS BLOCK */
/*******************/
snprintf(buf, sizeof(buf), "fp_%d_sb", i);
if (bnx2x_dma_alloc(sc, sizeof(union bnx2x_host_hc_status_block),
@ -9001,43 +9017,50 @@ void bnx2x_free_hsi_mem(struct bnx2x_softc *sc)
for (i = 0; i < sc->num_queues; i++) {
fp = &sc->fp[i];
/*******************/
/* FP STATUS BLOCK */
/*******************/
/*******************/
/* FP STATUS BLOCK */
/*******************/
memset(&fp->status_block, 0, sizeof(fp->status_block));
bnx2x_dma_free(&fp->sb_dma);
}
/***************************/
/* FW DECOMPRESSION BUFFER */
/***************************/
if (IS_PF(sc)) {
/***************************/
/* FW DECOMPRESSION BUFFER */
/***************************/
sc->gz_buf = NULL;
bnx2x_dma_free(&sc->gz_buf_dma);
sc->gz_buf = NULL;
/*******************/
/* SLOW PATH QUEUE */
/*******************/
/*******************/
/* SLOW PATH QUEUE */
/*******************/
sc->spq = NULL;
bnx2x_dma_free(&sc->spq_dma);
sc->spq = NULL;
/*************/
/* SLOW PATH */
/*************/
/*************/
/* SLOW PATH */
/*************/
sc->sp = NULL;
bnx2x_dma_free(&sc->sp_dma);
sc->sp = NULL;
/***************/
/* EVENT QUEUE */
/***************/
/***************/
/* EVENT QUEUE */
/***************/
sc->eq = NULL;
bnx2x_dma_free(&sc->eq_dma);
sc->eq = NULL;
/************************/
/* DEFAULT STATUS BLOCK */
/************************/
sc->def_sb = NULL;
/************************/
/* DEFAULT STATUS BLOCK */
/************************/
bnx2x_dma_free(&sc->def_sb_dma);
sc->def_sb = NULL;
}
}
/*

View File

@ -155,13 +155,14 @@ struct bnx2x_device_type {
* Transmit Buffer Descriptor (tx_bd) definitions*
*/
/* NUM_TX_PAGES must be a power of 2. */
#define NUM_TX_PAGES 16
#define TOTAL_TX_BD_PER_PAGE (BNX2X_PAGE_SIZE / sizeof(union eth_tx_bd_types)) /* 256 */
#define USABLE_TX_BD_PER_PAGE (TOTAL_TX_BD_PER_PAGE - 1) /* 255 */
#define TOTAL_TX_BD(q) (TOTAL_TX_BD_PER_PAGE * q->nb_tx_pages) /* 512 */
#define USABLE_TX_BD(q) (USABLE_TX_BD_PER_PAGE * q->nb_tx_pages) /* 510 */
#define MAX_TX_BD(q) (TOTAL_TX_BD(q) - 1) /* 511 */
#define MAX_TX_AVAIL (USABLE_TX_BD_PER_PAGE * NUM_TX_PAGES - 2)
#define NEXT_TX_BD(x) \
((((x) & USABLE_TX_BD_PER_PAGE) == \
(USABLE_TX_BD_PER_PAGE - 1)) ? (x) + 2 : (x) + 1)
@ -182,13 +183,14 @@ struct bnx2x_device_type {
/*
* Receive Buffer Descriptor (rx_bd) definitions*
*/
//#define NUM_RX_PAGES 1
#define MAX_RX_PAGES 8
#define TOTAL_RX_BD_PER_PAGE (BNX2X_PAGE_SIZE / sizeof(struct eth_rx_bd)) /* 512 */
#define USABLE_RX_BD_PER_PAGE (TOTAL_RX_BD_PER_PAGE - 2) /* 510 */
#define RX_BD_PER_PAGE_MASK (TOTAL_RX_BD_PER_PAGE - 1) /* 511 */
#define TOTAL_RX_BD(q) (TOTAL_RX_BD_PER_PAGE * q->nb_rx_pages) /* 512 */
#define USABLE_RX_BD(q) (USABLE_RX_BD_PER_PAGE * q->nb_rx_pages) /* 510 */
#define MAX_RX_BD(q) (TOTAL_RX_BD(q) - 1) /* 511 */
#define MAX_RX_AVAIL (USABLE_RX_BD_PER_PAGE * MAX_RX_PAGES - 2)
#define RX_BD_NEXT_PAGE_DESC_CNT 2
#define NEXT_RX_BD(x) \
@ -244,6 +246,10 @@ struct bnx2x_device_type {
#define MIN_RX_AVAIL(sc) \
((sc)->dropless_fc ? BD_TH_HI(sc) + 128 : 128)
#define MIN_RX_SIZE_NONTPA_HW ETH_MIN_RX_CQES_WITHOUT_TPA
#define MIN_RX_SIZE_NONTPA (RTE_MAX((uint32_t)MIN_RX_SIZE_NONTPA_HW,\
(uint32_t)MIN_RX_AVAIL(sc)))
/*
* dropless fc calculations for RCQs
* Number of RCQs should be as number of buffers in BRB:

View File

@ -87,7 +87,6 @@ bnx2x_link_update(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE(sc);
bnx2x_link_status_update(sc);
memset(&link, 0, sizeof(link));
mb();
link.link_speed = sc->link_vars.line_speed;
@ -150,7 +149,6 @@ static void bnx2x_periodic_start(void *param)
if (ret) {
PMD_DRV_LOG(ERR, sc, "Unable to start periodic"
" timer rc %d", ret);
assert(false && "Unable to start periodic timer");
}
}
}
@ -206,13 +204,6 @@ bnx2x_dev_configure(struct rte_eth_dev *dev)
return -ENXIO;
}
/* allocate the host hardware/software hsi structures */
if (bnx2x_alloc_hsi_mem(sc) != 0) {
PMD_DRV_LOG(ERR, sc, "bnx2x_alloc_hsi_mem failed");
bnx2x_free_ilt_mem(sc);
return -ENXIO;
}
bnx2x_dev_rxtx_init_dummy(dev);
return 0;
}
@ -226,9 +217,12 @@ bnx2x_dev_start(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE(sc);
/* start the periodic callout */
if (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP) {
bnx2x_periodic_start(dev);
PMD_DRV_LOG(DEBUG, sc, "Periodic poll re-started");
if (IS_PF(sc)) {
if (atomic_load_acq_long(&sc->periodic_flags) ==
PERIODIC_STOP) {
bnx2x_periodic_start(dev);
PMD_DRV_LOG(DEBUG, sc, "Periodic poll re-started");
}
}
ret = bnx2x_init(sc);
@ -266,10 +260,10 @@ bnx2x_dev_stop(struct rte_eth_dev *dev)
rte_intr_disable(&sc->pci_dev->intr_handle);
rte_intr_callback_unregister(&sc->pci_dev->intr_handle,
bnx2x_interrupt_handler, (void *)dev);
}
/* stop the periodic callout */
bnx2x_periodic_stop(dev);
/* stop the periodic callout */
bnx2x_periodic_stop(dev);
}
ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE);
if (ret) {
@ -293,9 +287,6 @@ bnx2x_dev_close(struct rte_eth_dev *dev)
bnx2x_dev_clear_queues(dev);
memset(&(dev->data->dev_link), 0 , sizeof(struct rte_eth_link));
/* free the host hardware/software hsi structures */
bnx2x_free_hsi_mem(sc);
/* free ilt */
bnx2x_free_ilt_mem(sc);
}
@ -491,6 +482,7 @@ static void
bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct bnx2x_softc *sc = dev->data->dev_private;
dev_info->max_rx_queues = sc->max_rx_queues;
dev_info->max_tx_queues = sc->max_tx_queues;
dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
@ -498,6 +490,10 @@ bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_mac_addrs = BNX2X_MAX_MAC_ADDRS;
dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL;
dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA;
dev_info->tx_desc_lim.nb_max = MAX_TX_AVAIL;
}
static int
@ -686,7 +682,9 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
return 0;
out:
bnx2x_periodic_stop(eth_dev);
if (IS_PF(sc))
bnx2x_periodic_stop(eth_dev);
return ret;
}
@ -706,6 +704,13 @@ eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev)
return bnx2x_common_dev_init(eth_dev, 1);
}
static int eth_bnx2x_dev_uninit(struct rte_eth_dev *eth_dev)
{
/* mac_addrs must not be freed alone because part of dev_private */
eth_dev->data->mac_addrs = NULL;
return 0;
}
static struct rte_pci_driver rte_bnx2x_pmd;
static struct rte_pci_driver rte_bnx2xvf_pmd;
@ -724,7 +729,7 @@ static int eth_bnx2x_pci_probe(struct rte_pci_driver *pci_drv,
static int eth_bnx2x_pci_remove(struct rte_pci_device *pci_dev)
{
return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
return rte_eth_dev_pci_generic_remove(pci_dev, eth_bnx2x_dev_uninit);
}
static struct rte_pci_driver rte_bnx2x_pmd = {

View File

@ -162,20 +162,26 @@ static inline uint16_t bnx2x_check_me_flags(uint32_t val)
#define BNX2X_ME_ANSWER_DELAY 100
#define BNX2X_ME_ANSWER_TRIES 10
static inline int bnx2x_read_vf_id(struct bnx2x_softc *sc)
static inline int bnx2x_read_vf_id(struct bnx2x_softc *sc, uint32_t *vf_id)
{
uint32_t val;
uint8_t i = 0;
while (i <= BNX2X_ME_ANSWER_TRIES) {
val = BNX2X_DB_READ(DOORBELL_ADDR(sc, 0));
if (bnx2x_check_me_flags(val))
return VF_ID(val);
if (bnx2x_check_me_flags(val)) {
PMD_DRV_LOG(DEBUG, sc,
"valid register value: 0x%08x", val);
*vf_id = VF_ID(val);
return 0;
}
DELAY_MS(BNX2X_ME_ANSWER_DELAY);
i++;
}
PMD_DRV_LOG(ERR, sc, "Invalid register value: 0x%08x", val);
return -EINVAL;
}
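The new signature separates status from value: VF id 0 is legitimate, and
the old signed return forced callers to overload the sign (the unload path
even tested vf_id > 0, which would skip VF 0). A hedged sketch of the
out-parameter pattern; read_doorbell() and flags_valid() are hypothetical
stand-ins for the BNX2X_DB_READ() loop and bnx2x_check_me_flags():

#include <errno.h>
#include <stdint.h>

uint32_t read_doorbell(void);	/* hypothetical */
int flags_valid(uint32_t val);	/* hypothetical */

static int
read_id(uint32_t *id_out)
{
	uint32_t val = read_doorbell();

	if (!flags_valid(val))
		return -EINVAL;	/* status only; sign no longer carries data */
	*id_out = val & 0xffff;	/* value via out-parameter; 0 is valid */
	return 0;
}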
@ -240,14 +246,13 @@ int bnx2x_loop_obtain_resources(struct bnx2x_softc *sc)
int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count, uint8_t rx_count)
{
struct vf_acquire_tlv *acq = &sc->vf2pf_mbox->query[0].acquire;
int vf_id;
uint32_t vf_id;
int rc;
bnx2x_vf_close(sc);
bnx2x_vf_prep(sc, &acq->first_tlv, BNX2X_VF_TLV_ACQUIRE, sizeof(*acq));
vf_id = bnx2x_read_vf_id(sc);
if (vf_id < 0) {
if (bnx2x_read_vf_id(sc, &vf_id)) {
rc = -EAGAIN;
goto out;
}
@ -318,25 +323,30 @@ bnx2x_vf_close(struct bnx2x_softc *sc)
{
struct vf_release_tlv *query;
struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
int vf_id = bnx2x_read_vf_id(sc);
uint32_t vf_id;
int rc;
if (vf_id >= 0) {
query = &sc->vf2pf_mbox->query[0].release;
bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_RELEASE,
sizeof(*query));
query = &sc->vf2pf_mbox->query[0].release;
bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_RELEASE,
sizeof(*query));
query->vf_id = vf_id;
bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
BNX2X_VF_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS)
PMD_DRV_LOG(ERR, sc, "Failed to release VF");
bnx2x_vf_finalize(sc, &query->first_tlv);
if (bnx2x_read_vf_id(sc, &vf_id)) {
rc = -EAGAIN;
goto out;
}
query->vf_id = vf_id;
bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
BNX2X_VF_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS)
PMD_DRV_LOG(ERR, sc, "Failed to release VF");
out:
bnx2x_vf_finalize(sc, &query->first_tlv);
}
/* Let PF know the VF status blocks phys_addrs */
@ -347,6 +357,8 @@ bnx2x_vf_init(struct bnx2x_softc *sc)
struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
int i, rc;
PMD_INIT_FUNC_TRACE(sc);
query = &sc->vf2pf_mbox->query[0].init;
bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_INIT,
sizeof(*query));
@ -383,51 +395,38 @@ bnx2x_vf_unload(struct bnx2x_softc *sc)
{
struct vf_close_tlv *query;
struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
struct vf_q_op_tlv *query_op;
int i, vf_id, rc;
uint32_t vf_id;
int i, rc;
vf_id = bnx2x_read_vf_id(sc);
if (vf_id > 0) {
FOR_EACH_QUEUE(sc, i) {
query_op = &sc->vf2pf_mbox->query[0].q_op;
bnx2x_vf_prep(sc, &query_op->first_tlv,
BNX2X_VF_TLV_TEARDOWN_Q,
sizeof(*query_op));
PMD_INIT_FUNC_TRACE(sc);
query_op->vf_qid = i;
FOR_EACH_QUEUE(sc, i)
bnx2x_vf_teardown_queue(sc, i);
bnx2x_add_tlv(sc, query_op,
query_op->first_tlv.tl.length,
BNX2X_VF_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
bnx2x_vf_set_mac(sc, false);
rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS)
PMD_DRV_LOG(ERR, sc,
"Bad reply for vf_q %d teardown", i);
query = &sc->vf2pf_mbox->query[0].close;
bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_CLOSE,
sizeof(*query));
bnx2x_vf_finalize(sc, &query_op->first_tlv);
}
bnx2x_vf_set_mac(sc, false);
query = &sc->vf2pf_mbox->query[0].close;
bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_CLOSE,
sizeof(*query));
query->vf_id = vf_id;
bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
BNX2X_VF_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS)
PMD_DRV_LOG(ERR, sc,
"Bad reply from PF for close message");
bnx2x_vf_finalize(sc, &query->first_tlv);
if (bnx2x_read_vf_id(sc, &vf_id)) {
rc = -EAGAIN;
goto out;
}
query->vf_id = vf_id;
bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
BNX2X_VF_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS)
PMD_DRV_LOG(ERR, sc,
"Bad reply from PF for close message");
out:
bnx2x_vf_finalize(sc, &query->first_tlv);
}
static inline uint16_t
@ -521,6 +520,35 @@ out:
return rc;
}
int
bnx2x_vf_teardown_queue(struct bnx2x_softc *sc, int qid)
{
struct vf_q_op_tlv *query_op;
struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
int rc;
query_op = &sc->vf2pf_mbox->query[0].q_op;
bnx2x_vf_prep(sc, &query_op->first_tlv,
BNX2X_VF_TLV_TEARDOWN_Q,
sizeof(*query_op));
query_op->vf_qid = qid;
bnx2x_add_tlv(sc, query_op,
query_op->first_tlv.tl.length,
BNX2X_VF_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS)
PMD_DRV_LOG(ERR, sc,
"Bad reply for vf_q %d teardown", qid);
bnx2x_vf_finalize(sc, &query_op->first_tlv);
return rc;
}
int
bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set)
{

View File

@ -328,6 +328,7 @@ struct bnx2x_vf_mbx_msg {
union resp_tlvs resp;
};
int bnx2x_vf_teardown_queue(struct bnx2x_softc *sc, int qid);
int bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set);
int bnx2x_vf_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *params);

View File

@ -291,10 +291,6 @@ static int ecore_state_wait(struct bnx2x_softc *sc, int state,
cnt *= 20;
ECORE_MSG(sc, "waiting for state to become %d", state);
/* being over protective to remind bnx2x_intr_legacy() to
* process RAMROD
*/
rte_atomic32_set(&sc->scan_fp, 1);
ECORE_MIGHT_SLEEP();
while (cnt--) {

View File

@ -332,6 +332,7 @@ struct bnxt {
uint16_t max_tx_rings;
uint16_t max_rx_rings;
uint16_t max_l2_ctx;
uint16_t max_rx_em_flows;
uint16_t max_vnics;
uint16_t max_stat_ctx;
uint16_t vlan;

View File

@ -10,11 +10,12 @@
#include <rte_io.h>
#define CMP_VALID(cmp, raw_cons, ring) \
(!!(((struct cmpl_base *)(cmp))->info3_v & CMPL_BASE_V) == \
!((raw_cons) & ((ring)->ring_size)))
(!!(rte_le_to_cpu_32(((struct cmpl_base *)(cmp))->info3_v) & \
CMPL_BASE_V) == !((raw_cons) & ((ring)->ring_size)))
#define CMPL_VALID(cmp, v) \
(!!(((struct cmpl_base *)(cmp))->info3_v & CMPL_BASE_V) == !(v))
(!!(rte_le_to_cpu_32(((struct cmpl_base *)(cmp))->info3_v) & \
CMPL_BASE_V) == !(v))
#define CMP_TYPE(cmp) \
(((struct cmpl_base *)cmp)->type & CMPL_BASE_TYPE_MASK)
@ -31,7 +32,7 @@
#define NEXT_CMPL(cpr, idx, v, inc) do { \
(idx) += (inc); \
if (unlikely((idx) == (cpr)->cp_ring_struct->ring_size)) { \
if (unlikely((idx) >= (cpr)->cp_ring_struct->ring_size)) { \
(v) = !(v); \
(idx) = 0; \
} \
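Both changes address correctness at the ring boundary: descriptor words are
written by hardware in little-endian, and the wrap check uses >= so the
index cannot run past the ring size. A hedged sketch of the endian half,
with CMPL_BASE_V as in the driver:

#include <rte_byteorder.h>

/* Illustrative only: the valid bit must be tested after converting
 * the little-endian descriptor word to host order; a no-op on x86,
 * a byte swap on big-endian targets. */
static inline int
cmpl_valid_bit(uint32_t info3_v_le)
{
	return !!(rte_le_to_cpu_32(info3_v_le) & CMPL_BASE_V);
}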

View File

@ -211,9 +211,6 @@ static int bnxt_init_chip(struct bnxt *bp)
unsigned int i, j;
int rc;
/* disable uio/vfio intr/eventfd mapping */
rte_intr_disable(intr_handle);
if (bp->eth_dev->data->mtu > ETHER_MTU) {
bp->eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
@ -354,8 +351,9 @@ static int bnxt_init_chip(struct bnxt *bp)
bp->rx_cp_nr_rings);
return -ENOTSUP;
}
if (rte_intr_efd_enable(intr_handle, intr_vector))
return -1;
rc = rte_intr_efd_enable(intr_handle, intr_vector);
if (rc)
return rc;
}
if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
@ -366,28 +364,31 @@ static int bnxt_init_chip(struct bnxt *bp)
if (intr_handle->intr_vec == NULL) {
PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
" intr_vec", bp->eth_dev->data->nb_rx_queues);
return -ENOMEM;
rc = -ENOMEM;
goto err_disable;
}
PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
intr_handle->intr_vec, intr_handle->nb_efd,
intr_handle->max_intr);
}
for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
queue_id++) {
intr_handle->intr_vec[queue_id] = vec;
if (vec < base + intr_handle->nb_efd - 1)
vec++;
for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues;
queue_id++) {
intr_handle->intr_vec[queue_id] =
vec + BNXT_RX_VEC_START;
if (vec < base + intr_handle->nb_efd - 1)
vec++;
}
}
/* enable uio/vfio intr/eventfd mapping */
rte_intr_enable(intr_handle);
rc = rte_intr_enable(intr_handle);
if (rc)
goto err_free;
rc = bnxt_get_hwrm_link_config(bp, &new);
if (rc) {
PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
goto err_out;
goto err_free;
}
if (!bp->link_info.link_up) {
@ -395,16 +396,18 @@ static int bnxt_init_chip(struct bnxt *bp)
if (rc) {
PMD_DRV_LOG(ERR,
"HWRM link config failure rc: %x\n", rc);
goto err_out;
goto err_free;
}
}
bnxt_print_link_info(bp->eth_dev);
return 0;
err_free:
rte_free(intr_handle->intr_vec);
err_disable:
rte_intr_efd_disable(intr_handle);
err_out:
bnxt_free_all_hwrm_resources(bp);
/* Some of the error status returned by FW may not be from errno.h */
if (rc > 0)
rc = -EIO;
@ -441,7 +444,7 @@ static int bnxt_init_nic(struct bnxt *bp)
static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *dev_info)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
uint16_t max_vnics, i, j, vpool, vrxq;
unsigned int max_rx_rings;
@ -452,7 +455,7 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
/* PF/VF specifics */
if (BNXT_PF(bp))
dev_info->max_vfs = bp->pdev->max_vfs;
max_rx_rings = RTE_MIN(bp->max_vnics, bp->max_stat_ctx);
max_rx_rings = RTE_MIN(bp->max_rx_rings, bp->max_stat_ctx);
/* For the sake of symmetry, max_rx_queues = max_tx_queues */
dev_info->max_rx_queues = max_rx_rings;
dev_info->max_tx_queues = max_rx_rings;
@ -537,7 +540,7 @@ found:
/* Configure the device based on the configuration provided */
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
int rc;
@ -618,15 +621,9 @@ static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
eth_dev->data->port_id);
}
static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
{
bnxt_print_link_info(eth_dev);
return 0;
}
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
int vlan_mask = 0;
int rc;
@ -636,7 +633,6 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
"RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
}
bp->dev_stopped = 0;
rc = bnxt_init_chip(bp);
if (rc)
@ -652,7 +648,9 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
if (rc)
goto error;
bnxt_enable_int(bp);
bp->flags |= BNXT_FLAG_INIT_DONE;
bp->dev_stopped = 0;
return 0;
error:
@ -664,7 +662,7 @@ error:
static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
int rc = 0;
if (!bp->link_info.link_up)
@ -678,7 +676,7 @@ static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
eth_dev->data->dev_link.link_status = 0;
bnxt_set_hwrm_link_config(bp, false);
@ -690,7 +688,14 @@ static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
/* Unload the driver, release resources */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
bnxt_disable_int(bp);
/* disable uio/vfio intr/eventfd mapping */
rte_intr_disable(intr_handle);
bp->flags &= ~BNXT_FLAG_INIT_DONE;
if (bp->eth_dev->data->dev_started) {
@ -698,6 +703,14 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
eth_dev->data->dev_link.link_status = 0;
}
bnxt_set_hwrm_link_config(bp, false);
/* Clean queue intr-vector mapping */
rte_intr_efd_disable(intr_handle);
if (intr_handle->intr_vec != NULL) {
rte_free(intr_handle->intr_vec);
intr_handle->intr_vec = NULL;
}
bnxt_hwrm_port_clr_stats(bp);
bnxt_free_tx_mbufs(bp);
bnxt_free_rx_mbufs(bp);
@ -707,7 +720,7 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
if (bp->dev_stopped == 0)
bnxt_dev_stop_op(eth_dev);
@ -727,7 +740,7 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
uint32_t index)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
struct bnxt_vnic_info *vnic;
struct bnxt_filter_info *filter, *temp_filter;
@ -763,9 +776,10 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
struct ether_addr *mac_addr,
uint32_t index, uint32_t pool)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic = &bp->vnic_info[pool];
struct bnxt_filter_info *filter;
int rc = 0;
if (BNXT_VF(bp) & !BNXT_VF_IS_TRUSTED(bp)) {
PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
@ -789,16 +803,26 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
return -ENODEV;
}
STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
filter->mac_index = index;
memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
return bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
if (!rc) {
STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
} else {
filter->mac_index = INVALID_MAC_INDEX;
memset(&filter->l2_addr, 0, ETHER_ADDR_LEN);
bnxt_free_filter(bp, filter);
}
return rc;
}
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
int rc = 0;
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
struct rte_eth_link new;
unsigned int cnt = BNXT_LINK_WAIT_CNT;
@ -813,11 +837,12 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
"Failed to retrieve link rc = 0x%x!\n", rc);
goto out;
}
rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
if (!wait_to_complete)
if (!wait_to_complete || new.link_status)
break;
} while (!new.link_status && cnt--);
rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
} while (cnt--);
out:
/* Timed out or success */
@ -838,7 +863,7 @@ out:
static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic;
if (bp->vnic_info == NULL)
@ -852,7 +877,7 @@ static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic;
if (bp->vnic_info == NULL)
@ -866,7 +891,7 @@ static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic;
if (bp->vnic_info == NULL)
@ -880,7 +905,7 @@ static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic;
if (bp->vnic_info == NULL)
@ -892,30 +917,72 @@ static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}
/* Return the bnxt_rx_queue pointer for a given queue ID. */
static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid)
{
if (qid >= bp->rx_nr_rings)
return NULL;
return bp->eth_dev->data->rx_queues[qid];
}
/* Return the queue ID corresponding to a given RSS table ring/group ID. */
static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr)
{
unsigned int i;
for (i = 0; i < bp->rx_nr_rings; i++) {
if (bp->grp_info[i].fw_grp_id == fwr)
return i;
}
return INVALID_HW_RING_ID;
}
static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct bnxt_vnic_info *vnic;
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
uint16_t tbl_size = HW_HASH_INDEX_SIZE;
uint16_t idx, sft;
int i;
if (!vnic->rss_table)
return -EINVAL;
if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
return -EINVAL;
if (reta_size != HW_HASH_INDEX_SIZE) {
if (reta_size != tbl_size) {
PMD_DRV_LOG(ERR, "The configured hash table lookup size "
"(%d) must equal the size supported by the hardware "
"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
"(%d)\n", reta_size, tbl_size);
return -EINVAL;
}
/* Update the RSS VNIC(s) */
for (i = 0; i < bp->max_vnics; i++) {
vnic = &bp->vnic_info[i];
memcpy(vnic->rss_table, reta_conf, reta_size);
bnxt_hwrm_vnic_rss_cfg(bp, vnic);
for (i = 0; i < reta_size; i++) {
struct bnxt_rx_queue *rxq;
idx = i / RTE_RETA_GROUP_SIZE;
sft = i % RTE_RETA_GROUP_SIZE;
if (!(reta_conf[idx].mask & (1ULL << sft)))
continue;
rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]);
if (!rxq) {
PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n");
return -EINVAL;
}
vnic->rss_table[i] =
vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
}
bnxt_hwrm_vnic_rss_cfg(bp, vnic);
return 0;
}
@ -923,10 +990,10 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
struct rte_intr_handle *intr_handle
= &bp->pdev->intr_handle;
uint16_t tbl_size = HW_HASH_INDEX_SIZE;
uint16_t idx, sft, i;
/* Retrieve from the default VNIC */
if (!vnic)
@ -934,18 +1001,28 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
if (!vnic->rss_table)
return -EINVAL;
if (reta_size != HW_HASH_INDEX_SIZE) {
if (reta_size != tbl_size) {
PMD_DRV_LOG(ERR, "The configured hash table lookup size "
"(%d) must equal the size supported by the hardware "
"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
"(%d)\n", reta_size, tbl_size);
return -EINVAL;
}
/* EW - need to revisit here copying from uint64_t to uint16_t */
memcpy(reta_conf, vnic->rss_table, reta_size);
if (rte_intr_allow_others(intr_handle)) {
if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
bnxt_dev_lsc_intr_setup(eth_dev);
for (idx = 0, i = 0; i < reta_size; i++) {
idx = i / RTE_RETA_GROUP_SIZE;
sft = i % RTE_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << sft)) {
uint16_t qid;
qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
if (qid == INVALID_HW_RING_ID) {
PMD_DRV_LOG(ERR, "Inv. entry in rss table.\n");
return -EINVAL;
}
reta_conf[idx].reta[sft] = qid;
}
}
return 0;
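The update/query pair above uses the standard rte_eth RETA layout: entry i of the table lives in group i / RTE_RETA_GROUP_SIZE at slot i % RTE_RETA_GROUP_SIZE, and a group's mask bit must be set for that slot to be touched. A minimal sketch of how an application would program a round-robin table through these ops, assuming the port reports its table size (HW_HASH_INDEX_SIZE for bnxt) via dev_info.reta_size; the function name and queue count are illustrative:
#include <string.h>
#include <rte_ethdev.h>
static int set_round_robin_reta(uint16_t port_id, uint16_t nb_rx_queues)
{
	struct rte_eth_rss_reta_entry64 reta[ETH_RSS_RETA_SIZE_512 /
					     RTE_RETA_GROUP_SIZE];
	struct rte_eth_dev_info info;
	uint16_t i;
	rte_eth_dev_info_get(port_id, &info);
	memset(reta, 0, sizeof(reta));
	for (i = 0; i < info.reta_size; i++) {
		uint16_t idx = i / RTE_RETA_GROUP_SIZE;	/* 64-entry group */
		uint16_t sft = i % RTE_RETA_GROUP_SIZE;	/* slot in group */
		reta[idx].mask |= 1ULL << sft;		/* mark slot valid */
		reta[idx].reta[sft] = i % nb_rx_queues;	/* target Rx queue */
	}
	/* Dispatches to bnxt_reta_update_op on bnxt ports. */
	return rte_eth_dev_rss_reta_update(port_id, reta, info.reta_size);
}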
@ -954,7 +1031,7 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
struct rte_eth_rss_conf *rss_conf)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct bnxt_vnic_info *vnic;
uint16_t hash_type = 0;
@ -1010,7 +1087,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
struct rte_eth_rss_conf *rss_conf)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
int len;
uint32_t hash_types;
@ -1068,7 +1145,7 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
struct rte_eth_link link_info;
int rc;
@ -1100,7 +1177,7 @@ static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
@ -1156,7 +1233,7 @@ static int
bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
struct rte_eth_udp_tunnel *udp_tunnel)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
uint16_t tunnel_type = 0;
int rc = 0;
@ -1204,7 +1281,7 @@ static int
bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
struct rte_eth_udp_tunnel *udp_tunnel)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
uint16_t tunnel_type = 0;
uint16_t port = 0;
int rc = 0;
@ -1404,7 +1481,7 @@ exit:
static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
uint16_t vlan_id, int on)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
/* These operations apply to ALL existing MAC/VLAN filters */
if (on)
@ -1416,7 +1493,7 @@ static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
static int
bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
unsigned int i;
@ -1453,7 +1530,7 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
static int
bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
/* Default Filter is tied to VNIC 0 */
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
struct bnxt_filter_info *filter;
@ -1462,26 +1539,28 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
return -EPERM;
memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr));
if (is_zero_ether_addr(addr))
return -EINVAL;
STAILQ_FOREACH(filter, &vnic->filter, next) {
/* Default Filter is at Index 0 */
if (filter->mac_index != 0)
continue;
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
if (rc)
return rc;
memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
filter->enables |=
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
if (rc)
return rc;
filter->mac_index = 0;
memcpy(bp->mac_addr, addr, ETHER_ADDR_LEN);
PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
return 0;
}
return 0;
@ -1492,7 +1571,7 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
struct ether_addr *mc_addr_set,
uint32_t nb_mc_addr)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
char *mc_addr_list = (char *)mc_addr_set;
struct bnxt_vnic_info *vnic;
uint32_t off = 0, i = 0;
@ -1520,7 +1599,7 @@ allmulti:
static int
bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
uint8_t fw_major = (bp->fw_ver >> 24) & 0xff;
uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff;
uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff;
@ -1629,7 +1708,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
static int
bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
uint16_t vlan = bp->vlan;
int rc;
@ -1649,7 +1728,7 @@ bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
static int
bnxt_dev_led_on_op(struct rte_eth_dev *dev)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
return bnxt_hwrm_port_led_cfg(bp, true);
}
@ -1657,7 +1736,7 @@ bnxt_dev_led_on_op(struct rte_eth_dev *dev)
static int
bnxt_dev_led_off_op(struct rte_eth_dev *dev)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
return bnxt_hwrm_port_led_cfg(bp, false);
}
@ -1849,7 +1928,7 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
struct rte_eth_ethertype_filter *efilter =
(struct rte_eth_ethertype_filter *)arg;
struct bnxt_filter_info *bfilter, *filter1;
@ -1892,7 +1971,7 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,
filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
if (filter1 == NULL) {
ret = -1;
ret = -EINVAL;
goto cleanup;
}
bfilter->enables |=
@ -2086,7 +2165,7 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp,
vnic0 = &bp->vnic_info[0];
filter1 = STAILQ_FIRST(&vnic0->filter);
if (filter1 == NULL) {
ret = -1;
ret = -EINVAL;
goto free_filter;
}
@ -2153,7 +2232,7 @@ bnxt_ntuple_filter(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
int ret;
if (filter_op == RTE_ETH_FILTER_NOP)
@ -2469,7 +2548,7 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg;
struct bnxt_filter_info *filter, *match;
struct bnxt_vnic_info *vnic, *mvnic;
@ -2755,7 +2834,7 @@ static int
bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
{
uint64_t ns;
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
if (!ptp)
@ -2772,7 +2851,7 @@ static int
bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
{
uint64_t ns, systime_cycles;
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
if (!ptp)
@ -2787,7 +2866,7 @@ bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
static int
bnxt_timesync_enable(struct rte_eth_dev *dev)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
uint32_t shift = 0;
@ -2823,7 +2902,7 @@ bnxt_timesync_enable(struct rte_eth_dev *dev)
static int
bnxt_timesync_disable(struct rte_eth_dev *dev)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
if (!ptp)
@ -2845,7 +2924,7 @@ bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
struct timespec *timestamp,
uint32_t flags __rte_unused)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
uint64_t rx_tstamp_cycles = 0;
uint64_t ns;
@ -2863,7 +2942,7 @@ static int
bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
struct timespec *timestamp)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
uint64_t tx_tstamp_cycles = 0;
uint64_t ns;
@ -2881,7 +2960,7 @@ bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
static int
bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
if (!ptp)
@ -2895,7 +2974,7 @@ bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
static int
bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
int rc;
uint32_t dir_entries;
uint32_t entry_length;
@ -2915,7 +2994,7 @@ static int
bnxt_get_eeprom_op(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *in_eeprom)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
uint32_t index;
uint32_t offset;
@ -2986,7 +3065,7 @@ static int
bnxt_set_eeprom_op(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *in_eeprom)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
uint8_t index, dir_op;
uint16_t type, ext, ordinal, attr;
@ -3026,7 +3105,6 @@ bnxt_set_eeprom_op(struct rte_eth_dev *dev,
return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
in_eeprom->data, in_eeprom->length);
return 0;
}
/*
@ -3128,54 +3206,27 @@ bool bnxt_stratus_device(struct bnxt *bp)
static int bnxt_init_board(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
int rc;
struct bnxt *bp = eth_dev->data->dev_private;
/* enable device (incl. PCI PM wakeup), and bus-mastering */
if (!pci_dev->mem_resource[0].addr) {
PMD_DRV_LOG(ERR,
"Cannot find PCI device base address, aborting\n");
rc = -ENODEV;
goto init_err_disable;
bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
if (!bp->bar0 || !bp->doorbell_base) {
PMD_DRV_LOG(ERR, "Unable to access Hardware\n");
return -ENODEV;
}
bp->eth_dev = eth_dev;
bp->pdev = pci_dev;
bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
if (!bp->bar0) {
PMD_DRV_LOG(ERR, "Cannot map device registers, aborting\n");
rc = -ENOMEM;
goto init_err_release;
}
if (!pci_dev->mem_resource[2].addr) {
PMD_DRV_LOG(ERR,
"Cannot find PCI device BAR 2 address, aborting\n");
rc = -ENODEV;
goto init_err_release;
} else {
bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
}
return 0;
init_err_release:
if (bp->bar0)
bp->bar0 = NULL;
if (bp->doorbell_base)
bp->doorbell_base = NULL;
init_err_disable:
return rc;
}
#define ALLOW_FUNC(x) \
{ \
typeof(x) arg = (x); \
uint32_t arg = (x); \
bp->pf.vf_req_fwd[((arg) >> 5)] &= \
~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
}
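ALLOW_FUNC manipulates vf_req_fwd, a bitmap indexed by HWRM request type, clearing one bit per call: (arg) >> 5 selects the 32-bit word and (arg) & 0x1f the bit within it; clearing a request's bit lets the VF's command go through rather than be forwarded to the PF driver, hence the macro's name. As a worked example, a request type of 0x2f lands in word 1 (0x2f >> 5 == 1) at bit 15 (0x2f & 0x1f == 15). The switch from typeof(x) to a plain uint32_t pins the shift arithmetic to a fixed 32-bit type.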
@ -3200,8 +3251,16 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
bp->dev_stopped = 1;
eth_dev->dev_ops = &bnxt_dev_ops;
eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
/*
* For secondary processes, we don't initialise any further
* as primary has already done this work.
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
goto skip_init;
return 0;
if (bnxt_vf_pciid(pci_dev->id.device_id))
bp->flags |= BNXT_FLAG_VF;
@ -3212,12 +3271,6 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
"Board initialization failed rc: %x\n", rc);
goto error;
}
skip_init:
eth_dev->dev_ops = &bnxt_dev_ops;
eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
if (pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
@ -3303,7 +3356,8 @@ skip_init:
goto skip_ext_stats;
bp->hw_rx_port_stats_ext = (void *)
(bp->hw_rx_port_stats + sizeof(struct rx_port_stats));
((uint8_t *)bp->hw_rx_port_stats +
sizeof(struct rx_port_stats));
bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map +
sizeof(struct rx_port_stats);
bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS;
@ -3311,7 +3365,8 @@ skip_init:
if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2) {
bp->hw_tx_port_stats_ext = (void *)
(bp->hw_tx_port_stats + sizeof(struct tx_port_stats));
((uint8_t *)bp->hw_tx_port_stats +
sizeof(struct tx_port_stats));
bp->hw_tx_port_stats_ext_map =
bp->hw_tx_port_stats_map +
sizeof(struct tx_port_stats);
@ -3464,22 +3519,16 @@ skip_ext_stats:
rc = bnxt_alloc_mem(bp);
if (rc)
goto error_free_int;
goto error_free;
bnxt_init_nic(bp);
rc = bnxt_request_int(bp);
if (rc)
goto error_free_int;
bnxt_enable_int(bp);
bnxt_init_nic(bp);
goto error_free;
return 0;
error_free_int:
bnxt_disable_int(bp);
bnxt_hwrm_func_buf_unrgtr(bp);
bnxt_free_int(bp);
bnxt_free_mem(bp);
error_free:
bnxt_dev_uninit(eth_dev);
error:
@ -3499,6 +3548,9 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
bnxt_disable_int(bp);
bnxt_free_int(bp);
bnxt_free_mem(bp);
bnxt_hwrm_func_buf_unrgtr(bp);
if (bp->grp_info != NULL) {
rte_free(bp->grp_info);
bp->grp_info = NULL;

View File

@ -715,7 +715,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
{
const struct rte_flow_action *act =
bnxt_flow_non_void_action(actions);
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
const struct rte_flow_action_queue *act_q;
const struct rte_flow_action_vf *act_vf;
struct bnxt_vnic_info *vnic, *vnic0;
@ -900,7 +900,7 @@ bnxt_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
struct bnxt_filter_info *filter;
int ret = 0;
@ -998,7 +998,7 @@ bnxt_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
struct bnxt_filter_info *filter;
struct bnxt_vnic_info *vnic = NULL;
bool update_flow = false;
@ -1099,7 +1099,7 @@ bnxt_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
struct bnxt_filter_info *filter = flow->filter;
struct bnxt_vnic_info *vnic = flow->vnic;
int ret = 0;
@ -1128,7 +1128,7 @@ bnxt_flow_destroy(struct rte_eth_dev *dev,
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
struct bnxt_vnic_info *vnic;
struct rte_flow *flow;
unsigned int i;

View File

@ -139,14 +139,11 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
}
if (i >= HWRM_CMD_TIMEOUT) {
PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
req->req_type);
goto err_ret;
PMD_DRV_LOG(ERR, "Error(timeout) sending msg 0x%04x\n",
req->req_type);
return -ETIMEDOUT;
}
return 0;
err_ret:
return -1;
}
/*
@ -576,7 +573,9 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
bp->max_l2_ctx =
rte_le_to_cpu_16(resp->max_l2_ctxs) + bp->max_rx_em_flows;
/* TODO: For now, do not support VMDq/RFS on VFs. */
if (BNXT_PF(bp)) {
if (bp->pf.max_vfs)
@ -770,7 +769,12 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
/* func_resource_qcaps does not return max_rx_em_flows.
* So use the value provided by func_qcaps.
*/
bp->max_l2_ctx =
rte_le_to_cpu_16(resp->max_l2_ctxs) +
bp->max_rx_em_flows;
bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
}
@ -850,7 +854,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
bp->hwrm_cmd_resp_dma_addr =
rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == 0) {
if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"Unable to map response buffer to physical memory.\n");
rc = -ENOMEM;
@ -876,7 +880,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
bp->hwrm_short_cmd_req_dma_addr =
rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
if (bp->hwrm_short_cmd_req_dma_addr == 0) {
if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
rte_free(bp->hwrm_short_cmd_req_addr);
PMD_DRV_LOG(ERR,
"Unable to map buffer to physical memory.\n");
@ -1109,7 +1113,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
req.ring_type = ring_type;
req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
if (stats_ctx_id != INVALID_STATS_CTX_ID)
enables |=
HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
@ -1126,7 +1130,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
ring_type);
HWRM_UNLOCK();
return -1;
return -EINVAL;
}
req.enables = rte_cpu_to_le_32(enables);
@ -1136,17 +1140,17 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
if (rc == 0 && resp->error_code)
rc = rte_le_to_cpu_16(resp->error_code);
switch (ring_type) {
case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
PMD_DRV_LOG(ERR,
"hwrm_ring_alloc cp failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
PMD_DRV_LOG(ERR,
"hwrm_ring_alloc rx failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
PMD_DRV_LOG(ERR,
"hwrm_ring_alloc tx failed. rc:%d\n", rc);
HWRM_UNLOCK();
@ -1259,7 +1263,7 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@ -1287,7 +1291,7 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
HWRM_CHECK_RESULT();
cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
HWRM_UNLOCK();
@ -1303,7 +1307,7 @@ int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@ -1382,6 +1386,11 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
return rc;
}
HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
@ -1408,8 +1417,8 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
int rc = 0;
struct hwrm_vnic_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
struct bnxt_plcmodes_cfg pmodes = { 0 };
uint32_t ctx_enable_flag = 0;
struct bnxt_plcmodes_cfg pmodes;
if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
@ -1607,6 +1616,7 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
req.hash_key_tbl_addr =
rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@ -1896,6 +1906,7 @@ static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
sizeof(*cpr->cp_desc_ring));
cpr->cp_raw_cons = 0;
cpr->valid = 0;
}
void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
@ -2008,7 +2019,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
return -ENOMEM;
bp->hwrm_cmd_resp_dma_addr =
rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == 0) {
if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
@ -2046,7 +2057,7 @@ bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
STAILQ_FOREACH(flow, &vnic->flow_list, next) {
filter = flow->filter;
PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
if (filter->filter_type == HWRM_CFA_EM_FILTER)
rc = bnxt_hwrm_clear_em_filter(bp, filter);
else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
@ -2109,6 +2120,11 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
for (i = bp->nr_vnics - 1; i >= 0; i--) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
return;
}
bnxt_clear_hwrm_vnic_flows(bp, vnic);
bnxt_clear_hwrm_vnic_filters(bp, vnic);
@ -2641,14 +2657,7 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
if (rc) {
PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
return -1;
} else if (resp->error_code) {
rc = rte_le_to_cpu_16(resp->error_code);
PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
return -1;
}
HWRM_CHECK_RESULT();
rc = rte_le_to_cpu_16(resp->vlan);
HWRM_UNLOCK();
@ -2683,7 +2692,7 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
if (!BNXT_PF(bp)) {
PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
return -1;
return -EINVAL;
}
rc = bnxt_hwrm_func_qcaps(bp);
@ -2710,7 +2719,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
if (!BNXT_PF(bp)) {
PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
return -1;
return -EINVAL;
}
rc = bnxt_hwrm_func_qcaps(bp);
@ -2929,7 +2938,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
req.req_buf_page_addr0 =
rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
if (req.req_buf_page_addr0 == 0) {
if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map buffer address to physical memory\n");
return -ENOMEM;
@ -2949,6 +2958,9 @@ int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
return 0;
HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@ -3327,12 +3339,11 @@ int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
if (!rc) {
*entries = rte_le_to_cpu_32(resp->entries);
*length = rte_le_to_cpu_32(resp->entry_length);
}
*entries = rte_le_to_cpu_32(resp->entries);
*length = rte_le_to_cpu_32(resp->entry_length);
HWRM_UNLOCK();
return rc;
}
@ -3362,7 +3373,7 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
if (buf == NULL)
return -ENOMEM;
dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
if (dma_handle == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
@ -3397,7 +3408,7 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
return -ENOMEM;
dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
if (dma_handle == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
@ -3451,7 +3462,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
return -ENOMEM;
dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
if (dma_handle == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
@ -3515,7 +3526,7 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
if (req.vnic_id_tbl_addr == 0) {
if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
HWRM_UNLOCK();
PMD_DRV_LOG(ERR,
"unable to map VNIC ID table address to physical memory\n");
@ -3559,10 +3570,9 @@ int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
RTE_CACHE_LINE_SIZE);
if (vnic_ids == NULL) {
rc = -ENOMEM;
return rc;
}
if (vnic_ids == NULL)
return -ENOMEM;
for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
rte_mem_lock_page(((char *)vnic_ids) + sz);
@ -3629,10 +3639,8 @@ int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
RTE_CACHE_LINE_SIZE);
if (vnic_ids == NULL) {
rc = -ENOMEM;
return rc;
}
if (vnic_ids == NULL)
return -ENOMEM;
for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
rte_mem_lock_page(((char *)vnic_ids) + sz);
@ -3663,7 +3671,7 @@ int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
PMD_DRV_LOG(ERR, "No default VNIC\n");
exit:
rte_free(vnic_ids);
return -1;
return rc;
}
int bnxt_hwrm_set_em_filter(struct bnxt *bp,

View File

@ -5,6 +5,7 @@
#include <inttypes.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include "bnxt.h"
@ -20,7 +21,7 @@
static void bnxt_int_handler(void *param)
{
struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
struct cmpl_base *cmp;
uint32_t raw_cons;
@ -31,7 +32,7 @@ static void bnxt_int_handler(void *param)
raw_cons = cpr->cp_raw_cons;
while (1) {
if (!cpr || !cpr->cp_ring_struct)
if (!cpr || !cpr->cp_ring_struct || !cpr->cp_doorbell)
return;
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
@ -48,22 +49,45 @@ static void bnxt_int_handler(void *param)
B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
}
void bnxt_free_int(struct bnxt *bp)
int bnxt_free_int(struct bnxt *bp)
{
struct bnxt_irq *irq;
struct rte_intr_handle *intr_handle = &bp->pdev->intr_handle;
struct bnxt_irq *irq = bp->irq_tbl;
int rc = 0;
irq = bp->irq_tbl;
if (irq) {
if (irq->requested) {
rte_intr_disable(&bp->pdev->intr_handle);
rte_intr_callback_unregister(&bp->pdev->intr_handle,
irq->handler,
(void *)bp->eth_dev);
irq->requested = 0;
if (!irq)
return 0;
if (irq->requested) {
int count = 0;
/*
* Callback deregistration will fail with rc -EAGAIN if the
* callback is currently active. Retry every 50 ms until
* successful or 500 ms has elapsed.
*/
do {
rc = rte_intr_callback_unregister(intr_handle,
irq->handler,
bp->eth_dev);
if (rc >= 0) {
irq->requested = 0;
break;
}
rte_delay_ms(50);
} while (count++ < 10);
if (rc < 0) {
PMD_DRV_LOG(ERR, "irq cb unregister failed rc: %d\n",
rc);
return rc;
}
rte_free((void *)bp->irq_tbl);
bp->irq_tbl = NULL;
}
rte_free(bp->irq_tbl);
bp->irq_tbl = NULL;
return 0;
}
void bnxt_disable_int(struct bnxt *bp)
@ -114,14 +138,20 @@ setup_exit:
int bnxt_request_int(struct bnxt *bp)
{
struct rte_intr_handle *intr_handle = &bp->pdev->intr_handle;
struct bnxt_irq *irq = bp->irq_tbl;
int rc = 0;
struct bnxt_irq *irq = bp->irq_tbl;
if (!irq)
return 0;
rte_intr_callback_register(&bp->pdev->intr_handle, irq->handler,
(void *)bp->eth_dev);
rte_intr_enable(&bp->pdev->intr_handle);
if (!irq->requested) {
rc = rte_intr_callback_register(intr_handle,
irq->handler,
bp->eth_dev);
if (!rc)
irq->requested = 1;
}
irq->requested = 1;
return rc;
}

View File

@ -17,7 +17,7 @@ struct bnxt_irq {
};
struct bnxt;
void bnxt_free_int(struct bnxt *bp);
int bnxt_free_int(struct bnxt *bp);
void bnxt_disable_int(struct bnxt *bp);
void bnxt_enable_int(struct bnxt *bp);
int bnxt_setup_int(struct bnxt *bp);

View File

@ -163,7 +163,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
for (sz = 0; sz < total_alloc_len; sz += getpagesize())
rte_mem_lock_page(((char *)mz->addr) + sz);
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
if (mz_phys_addr == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map ring address to physical memory\n");
return -ENOMEM;

View File

@ -288,7 +288,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
struct bnxt_rx_queue *rxq;
int rc = 0;
@ -373,7 +373,7 @@ bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
return rc;
}
cpr = rxq->cp_ring;
B_CP_DB_ARM(cpr);
B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
}
return rc;
}
@ -399,7 +399,7 @@ bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
struct bnxt_vnic_info *vnic = NULL;
@ -439,7 +439,7 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt *bp = dev->data->dev_private;
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct bnxt_vnic_info *vnic = NULL;
struct bnxt_rx_queue *rxq = NULL;

View File

@ -362,6 +362,7 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
int rc = 0;
uint8_t agg_buf = 0;
uint16_t cmp_type;
uint32_t flags2_f = 0;
rxcmp = (struct rx_pkt_cmpl *)
&cpr->cp_desc_ring[cp_cons];
@ -440,19 +441,41 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
}
if (likely(RX_CMP_IP_CS_OK(rxcmp1)))
mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
else if (likely(RX_CMP_IP_CS_UNKNOWN(rxcmp1)))
mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
else
flags2_f = flags2_0xf(rxcmp1);
/* IP Checksum */
if (unlikely(((IS_IP_NONTUNNEL_PKT(flags2_f)) &&
(RX_CMP_IP_CS_ERROR(rxcmp1))) ||
(IS_IP_TUNNEL_PKT(flags2_f) &&
(RX_CMP_IP_OUTER_CS_ERROR(rxcmp1))))) {
mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
} else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1))) {
mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
} else {
mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
}
if (likely(RX_CMP_L4_CS_OK(rxcmp1)))
mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
else if (likely(RX_CMP_L4_CS_UNKNOWN(rxcmp1)))
/* L4 Checksum */
if (likely(IS_L4_NONTUNNEL_PKT(flags2_f))) {
if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1)))
mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
else
mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
} else if (IS_L4_TUNNEL_PKT(flags2_f)) {
if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1)))
mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
else
mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
if (unlikely(RX_CMP_L4_OUTER_CS_ERR2(rxcmp1))) {
mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
} else if (unlikely(IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS
(flags2_f))) {
mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN;
} else {
mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
}
} else if (unlikely(RX_CMP_L4_CS_UNKNOWN(rxcmp1))) {
mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
else
mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}
mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);

View File

@ -24,36 +24,116 @@
#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \
((hdr_info) & 0x1ff)
#define RX_CMP_L4_CS_BITS \
rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \
RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
#define flags2_0xf(rxcmp1) \
(((rxcmp1)->flags2) & 0xf)
#define RX_CMP_L4_CS_ERR_BITS \
rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR | \
RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR)
/* A non-tunnel IP packet may arrive with or without an L4 header:
* Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP, or
* Ether / (vlan) / IP|IP6 / ICMP
* '==' is used instead of '&' because tunnel packets set all four
* CS_CALC bits and must not match here.
*/
#define IS_IP_NONTUNNEL_PKT(flags2_f) \
( \
((flags2_f) == \
(rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC))) || \
((flags2_f) == \
(rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
RX_PKT_CMPL_FLAGS2_L4_CS_CALC))) \
)
#define RX_CMP_L4_CS_OK(rxcmp1) \
(((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS) && \
!((rxcmp1)->errors_v2 & RX_CMP_L4_CS_ERR_BITS))
/* A tunnel IP packet must have at least the tunnel (outer) IP-calc bit
* set; the outer L4 checksum is optional, e.g. for:
* Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
* Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
* UDP|TCP|SCTP
* Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / Ether / IP|IP6 /
* UDP|TCP|SCTP
* Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / IP|IP6 /
* UDP|TCP|SCTP
* Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP
* Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP
* Note that DPDK does not account for inner L3 checksum errors.
*/
#define IS_IP_TUNNEL_PKT(flags2_f) \
((flags2_f) & rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC))
#define RX_CMP_L4_CS_UNKNOWN(rxcmp1) \
!((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS)
/* RX_PKT_CMPL_ERRORS_IP_CS_ERROR applies only to non-tunnel packets.
* For tunnel packets this bit is not accounted for; the checksum is
* treated as good.
*/
#define RX_CMP_IP_CS_ERROR(rxcmp1) \
((rxcmp1)->errors_v2 & \
rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR))
#define RX_CMP_IP_CS_ERR_BITS \
rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR | \
RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR)
#define RX_CMP_IP_OUTER_CS_ERROR(rxcmp1) \
((rxcmp1)->errors_v2 & \
rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR))
#define RX_CMP_IP_CS_BITS \
rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)
#define RX_CMP_IP_CS_OK(rxcmp1) \
(((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS) && \
!((rxcmp1)->errors_v2 & RX_CMP_IP_CS_ERR_BITS))
#define RX_CMP_IP_CS_UNKNOWN(rxcmp1) \
#define RX_CMP_IP_CS_UNKNOWN(rxcmp1) \
!((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS)
/* L4 non-tunnel packet:
* Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP
*/
#define IS_L4_NONTUNNEL_PKT(flags2_f) \
( \
((flags2_f) == \
(rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
RX_PKT_CMPL_FLAGS2_L4_CS_CALC))))
/* L4 tunnel packet:
* an outer L4 header is not mandatory, e.g. for GRE:
* Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
* Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
* UDP|TCP|SCTP
*/
#define IS_L4_TUNNEL_PKT_INNER_OUTER_L4_CS(flags2_f) \
((flags2_f) == \
(rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \
RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC | \
RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)))
#define IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS(flags2_f) \
((flags2_f) == \
(rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \
RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)))
#define IS_L4_TUNNEL_PKT(flags2_f) \
( \
IS_L4_TUNNEL_PKT_INNER_OUTER_L4_CS(flags2_f) || \
IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS(flags2_f) \
)
#define RX_CMP_L4_CS_BITS \
rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
#define RX_CMP_L4_CS_UNKNOWN(rxcmp1) \
!((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS)
#define RX_CMP_T_L4_CS_BITS \
rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
#define RX_CMP_T_L4_CS_UNKNOWN(rxcmp1) \
!((rxcmp1)->flags2 & RX_CMP_T_L4_CS_BITS)
/* Outer L4 checksum error. */
#define RX_CMP_L4_OUTER_CS_ERR2(rxcmp1) \
((rxcmp1)->errors_v2 & \
rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR))
/* Inner L4 checksum error. */
#define RX_CMP_L4_INNER_CS_ERR2(rxcmp1) \
((rxcmp1)->errors_v2 & \
rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR))
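As a worked illustration of the classification above (assuming the usual bnxt_hsi.h bit values: IP_CS_CALC = 0x1, L4_CS_CALC = 0x2, T_IP_CS_CALC = 0x4, T_L4_CS_CALC = 0x8), the low nibble extracted by flags2_0xf() decodes as:
flags2 & 0xf   packet class                      matching macro(s)
0x1            non-tunnel IP, no L4 csum         IS_IP_NONTUNNEL_PKT
0x3            non-tunnel IP + L4 csum           IS_IP_NONTUNNEL_PKT, IS_L4_NONTUNNEL_PKT
0x7            tunnel, inner L4 csum only        IS_IP_TUNNEL_PKT, IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS
0xf            tunnel, inner and outer L4 csum   IS_IP_TUNNEL_PKT, IS_L4_TUNNEL_PKT_INNER_OUTER_L4_CS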
#define BNXT_RX_POST_THRESH 32
enum pkt_hash_types {

View File

@ -350,6 +350,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
int rc = 0;
unsigned int i;
struct bnxt *bp = eth_dev->data->dev_private;
unsigned int num_q_stats;
memset(bnxt_stats, 0, sizeof(*bnxt_stats));
if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
@ -357,7 +358,10 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
return -1;
}
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
num_q_stats = RTE_MIN(bp->rx_cp_nr_rings,
(unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS);
for (i = 0; i < num_q_stats; i++) {
struct bnxt_rx_queue *rxq = bp->rx_queues[i];
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
@ -369,7 +373,10 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
rte_atomic64_read(&rxq->rx_mbuf_alloc_fail);
}
for (i = 0; i < bp->tx_cp_nr_rings; i++) {
num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
(unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS);
for (i = 0; i < num_q_stats; i++) {
struct bnxt_tx_queue *txq = bp->tx_queues[i];
struct bnxt_cp_ring_info *cpr = txq->cp_ring;
@ -386,7 +393,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
unsigned int i;
if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
@ -405,7 +412,7 @@ void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
struct rte_eth_xstat *xstats, unsigned int n)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
unsigned int count, i;
uint64_t tx_drop_pkts;
@ -414,11 +421,17 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
unsigned int stat_size = sizeof(uint64_t);
unsigned int stat_count;
memset(xstats, 0, sizeof(*xstats));
bnxt_hwrm_port_qstats(bp);
bnxt_hwrm_func_qstats_tx_drop(bp, 0xffff, &tx_drop_pkts);
bnxt_hwrm_ext_port_qstats(bp);
rx_port_stats_ext_cnt = bp->fw_rx_port_stats_ext_size / stat_size;
tx_port_stats_ext_cnt = bp->fw_tx_port_stats_ext_size / stat_size;
rx_port_stats_ext_cnt = RTE_MIN(RTE_DIM(bnxt_rx_ext_stats_strings),
(bp->fw_rx_port_stats_ext_size /
stat_size));
tx_port_stats_ext_cnt = RTE_MIN(RTE_DIM(bnxt_tx_ext_stats_strings),
(bp->fw_tx_port_stats_ext_size /
stat_size));
count = RTE_DIM(bnxt_rx_stats_strings) +
RTE_DIM(bnxt_tx_stats_strings) + 1/* For tx_drop_pkts */ +
@ -453,16 +466,6 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
xstats[count].value = rte_le_to_cpu_64(tx_drop_pkts);
count++;
for (i = 0; i < tx_port_stats_ext_cnt; i++) {
uint64_t *tx_stats_ext = (uint64_t *)bp->hw_tx_port_stats_ext;
xstats[count].value = rte_le_to_cpu_64
(*(uint64_t *)((char *)tx_stats_ext +
bnxt_tx_ext_stats_strings[i].offset));
count++;
}
for (i = 0; i < rx_port_stats_ext_cnt; i++) {
uint64_t *rx_stats_ext = (uint64_t *)bp->hw_rx_port_stats_ext;
@ -473,6 +476,16 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
count++;
}
for (i = 0; i < tx_port_stats_ext_cnt; i++) {
uint64_t *tx_stats_ext = (uint64_t *)bp->hw_tx_port_stats_ext;
xstats[count].value = rte_le_to_cpu_64
(*(uint64_t *)((char *)tx_stats_ext +
bnxt_tx_ext_stats_strings[i].offset));
count++;
}
return stat_count;
}
@ -536,7 +549,7 @@ int bnxt_dev_xstats_get_names_op(__rte_unused struct rte_eth_dev *eth_dev,
void bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
if (bp->flags & BNXT_FLAG_PORT_STATS && BNXT_SINGLE_PF(bp))
bnxt_hwrm_port_clr_stats(bp);

View File

@ -79,7 +79,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_tx_queue *txq;
int rc = 0;

View File

@ -24,7 +24,6 @@ struct bnxt_tx_queue {
uint8_t wthresh; /* Write-back threshold reg */
uint32_t ctx_curr; /* Hardware context states */
uint8_t tx_deferred_start; /* not in global dev start */
uint8_t cmpl_next; /* Next BD to trigger a compl */
struct bnxt *bp;
int index;

Some files were not shown because too many files have changed in this diff