12 #include <x86intrin.h>
15 #include <emmintrin.h>
17 #include <tmmintrin.h>
/* Iterate a loop variable `x` over every possible lport index. */
#define PKTDEV_FOREACH(x) for (int x = 0; x < CNE_MAX_ETHPORTS; x++)

/* Fallback ring/queue sizing used when a driver does not supply its own. */
#define PKTDEV_FALLBACK_RX_RINGSIZE 512 /**< Default RX ring size when unset */
#define PKTDEV_FALLBACK_TX_RINGSIZE 512 /**< Default TX ring size when unset */
#define PKTDEV_FALLBACK_RX_NBQUEUES 1   /**< Default number of RX queues */
#define PKTDEV_FALLBACK_TX_NBQUEUES 1   /**< Default number of TX queues */

/* Sentinel burst-return value meaning the lport is administratively down. */
#define PKTDEV_ADMIN_STATE_DOWN 0xFFFF
98 } __cne_cache_aligned;
182 static inline uint16_t
185 struct cne_pktdev *dev = &pktdev_devices[lport_id];
189 if (dev->rx_pkt_burst == NULL)
195 CNE_DEBUG(
"Packet stream is disabled for '%d'\n", lport_id);
196 return PKTDEV_ADMIN_STATE_DOWN;
199 nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queue, rx_pkts, nb_pkts);
257 static inline uint16_t
260 struct cne_pktdev *dev;
263 if (lport_id >= CNE_MAX_ETHPORTS)
267 dev = &pktdev_devices[lport_id];
270 if (dev->tx_pkt_burst == NULL)
276 CNE_DEBUG(
"Packet stream is disabled for '%d'\n", lport_id);
277 return PKTDEV_ADMIN_STATE_DOWN;
280 return (*dev->tx_pkt_burst)(dev->data->tx_queue, tx_pkts, nb_pkts);
329 static inline uint16_t
332 struct cne_pktdev *dev;
334 dev = &pktdev_devices[lport_id];
336 if (!dev->tx_pkt_prepare)
339 return (*dev->tx_pkt_prepare)(dev->data->tx_queue, tx_pkts, nb_pkts);
358 __m128i shfl_msk = _mm_set_epi8(15, 14, 13, 12, 5, 4, 3, 2, 1, 0, 11, 10, 9, 8, 7, 6);
360 __m128i hdr = _mm_loadu_si128((
const __m128i_u *)data);
361 hdr = _mm_shuffle_epi8(hdr, shfl_msk);
362 _mm_storeu_si128((__m128i_u *)data, hdr);
#define __cne_cache_min_aligned
static uint16_t pktdev_tx_burst(uint16_t lport_id, pktmbuf_t **tx_pkts, uint16_t nb_pkts)
static uint16_t pktdev_tx_prepare(uint16_t lport_id, pktmbuf_t **tx_pkts, uint16_t nb_pkts)
static uint16_t pktdev_rx_burst(uint16_t lport_id, pktmbuf_t **rx_pkts, const uint16_t nb_pkts)
static void pktdev_mac_swap(void *data)
CNDP_API bool pktdev_admin_state(uint16_t lport_id)
struct pktdev_portconf default_rxportconf
struct pktdev_portconf default_txportconf