DPDK 24.11.3
rte_eventdev.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc.
3  * Copyright(c) 2016-2018 Intel Corporation.
4  * Copyright 2016 NXP
5  * All rights reserved.
6  */
7 
8 #ifndef _RTE_EVENTDEV_H_
9 #define _RTE_EVENTDEV_H_
10 
240 #include <rte_bitops.h>
241 #include <rte_common.h>
242 #include <rte_compat.h>
243 #include <rte_errno.h>
244 #include <rte_mbuf_pool_ops.h>
245 #include <rte_mempool.h>
246 
247 #include "rte_eventdev_trace_fp.h"
248 
249 struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
250 struct rte_event;
251 
252 /* Event device capability bitmap flags */
253 #define RTE_EVENT_DEV_CAP_QUEUE_QOS RTE_BIT32(0)
271 #define RTE_EVENT_DEV_CAP_EVENT_QOS RTE_BIT32(1)
285 #define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED RTE_BIT32(2)
295 #define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES RTE_BIT32(3)
319 #define RTE_EVENT_DEV_CAP_BURST_MODE RTE_BIT32(4)
330 #define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE RTE_BIT32(5)
343 #define RTE_EVENT_DEV_CAP_NONSEQ_MODE RTE_BIT32(6)
355 #define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK RTE_BIT32(7)
366 #define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT RTE_BIT32(8)
376 #define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID RTE_BIT32(9)
385 #define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE RTE_BIT32(10)
398 #define RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR RTE_BIT32(11)
408 #define RTE_EVENT_DEV_CAP_PROFILE_LINK RTE_BIT32(12)
422 #define RTE_EVENT_DEV_CAP_ATOMIC RTE_BIT32(13)
430 #define RTE_EVENT_DEV_CAP_ORDERED RTE_BIT32(14)
438 #define RTE_EVENT_DEV_CAP_PARALLEL RTE_BIT32(15)
446 #define RTE_EVENT_DEV_CAP_INDEPENDENT_ENQ RTE_BIT32(16)
465 #define RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE RTE_BIT32(17)
477 #define RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE_ADAPTIVE RTE_BIT32(18)
489 #define RTE_EVENT_DEV_CAP_PER_PORT_PRESCHEDULE RTE_BIT32(19)
499 #define RTE_EVENT_DEV_CAP_PRESCHEDULE_EXPLICIT RTE_BIT32(20)
508 /* Event device priority levels */
509 #define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
516 #define RTE_EVENT_DEV_PRIORITY_NORMAL 128
523 #define RTE_EVENT_DEV_PRIORITY_LOWEST 255
531 /* Event queue scheduling weights */
532 #define RTE_EVENT_QUEUE_WEIGHT_HIGHEST 255
538 #define RTE_EVENT_QUEUE_WEIGHT_LOWEST 0
545 /* Event queue scheduling affinity */
546 #define RTE_EVENT_QUEUE_AFFINITY_HIGHEST 255
552 #define RTE_EVENT_QUEUE_AFFINITY_LOWEST 0
565 uint8_t
566 rte_event_dev_count(void);
567 
580 int
581 rte_event_dev_get_dev_id(const char *name);
582 
594 int
595 rte_event_dev_socket_id(uint8_t dev_id);
596 
600 struct rte_event_dev_info {
601  const char *driver_name;
602  struct rte_device *dev;
666  int32_t max_num_events;
673  uint32_t event_dev_cap;
685 };
686 
703 int
704 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);
705 
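/*
 * Usage sketch (illustrative only, not part of this header): enumerate the
 * available event devices and inspect their capabilities before configuring
 * one. Error handling is trimmed for brevity.
 *
 *	uint8_t dev_id = 0;
 *	struct rte_event_dev_info info;
 *
 *	if (rte_event_dev_count() == 0)
 *		return -1;
 *	if (rte_event_dev_info_get(dev_id, &info) < 0)
 *		return -1;
 *	if (info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE)
 *		printf("%s supports burst enqueue/dequeue\n", info.driver_name);
 */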
709 #define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
713 #define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
717 #define RTE_EVENT_DEV_ATTR_STARTED 2
718 
731 int
732 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
733  uint32_t *attr_value);
734 
735 
736 /* Event device configuration bitmap flags */
737 #define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT RTE_BIT32(0)
743 enum rte_event_dev_preschedule_type {
744  RTE_EVENT_PRESCHEDULE_NONE,
749  RTE_EVENT_PRESCHEDULE,
756  RTE_EVENT_PRESCHEDULE_ADAPTIVE,
765 };
766 
768 struct rte_event_dev_config {
769  uint32_t dequeue_timeout_ns;
800  uint8_t nb_event_ports;
829  uint32_t event_dev_cfg;
844 };
845 
870 int
871 rte_event_dev_configure(uint8_t dev_id,
872  const struct rte_event_dev_config *dev_conf);
873 
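/*
 * Configuration sketch (illustrative only): derive a conservative
 * rte_event_dev_config from the advertised limits and apply it. Some of the
 * rte_event_dev_config and rte_event_dev_info members used below are elided
 * from this listing; the values chosen are examples, not recommendations.
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	cfg.nb_event_queues = 2;
 *	cfg.nb_event_ports = 2;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	if (rte_event_dev_configure(dev_id, &cfg) < 0)
 *		return -1;
 */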
874 /* Event queue specific APIs */
875 
876 /* Event queue configuration bitmap flags */
877 #define RTE_EVENT_QUEUE_CFG_ALL_TYPES RTE_BIT32(0)
891 #define RTE_EVENT_QUEUE_CFG_SINGLE_LINK RTE_BIT32(1)
911 struct rte_event_queue_conf {
912  uint32_t nb_atomic_flows;
925  uint32_t nb_atomic_order_sequences;
943  uint32_t event_queue_cfg;
945  uint8_t schedule_type;
955  uint8_t priority;
966  uint8_t weight;
977  uint8_t affinity;
988 };
989 
1011 int
1012 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
1013  struct rte_event_queue_conf *queue_conf);
1014 
1034 int
1035 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
1036  const struct rte_event_queue_conf *queue_conf);
1037 
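/*
 * Queue setup sketch (illustrative only): start from the driver defaults and
 * override only what the application cares about.
 *
 *	struct rte_event_queue_conf qconf;
 *
 *	rte_event_queue_default_conf_get(dev_id, queue_id, &qconf);
 *	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *	qconf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *	if (rte_event_queue_setup(dev_id, queue_id, &qconf) < 0)
 *		return -1;
 */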
1041 #define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
1045 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
1049 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
1053 #define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
1057 #define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
1061 #define RTE_EVENT_QUEUE_ATTR_WEIGHT 5
1065 #define RTE_EVENT_QUEUE_ATTR_AFFINITY 6
1066 
1087 int
1088 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
1089  uint32_t *attr_value);
1090 
1110 int
1111 rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
1112  uint64_t attr_value);
1113 
1114 /* Event port specific APIs */
1115 
1116 /* Event port configuration bitmap flags */
1117 #define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL RTE_BIT32(0)
1124 #define RTE_EVENT_PORT_CFG_SINGLE_LINK RTE_BIT32(1)
1132 #define RTE_EVENT_PORT_CFG_HINT_PRODUCER RTE_BIT32(2)
1142 #define RTE_EVENT_PORT_CFG_HINT_CONSUMER RTE_BIT32(3)
1153 #define RTE_EVENT_PORT_CFG_HINT_WORKER RTE_BIT32(4)
1164 #define RTE_EVENT_PORT_CFG_INDEPENDENT_ENQ RTE_BIT32(5)
1195  uint16_t dequeue_depth;
1202  uint16_t enqueue_depth;
1209  uint32_t event_port_cfg;
1210 };
1211 
1235 int
1236 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
1237  struct rte_event_port_conf *port_conf);
1238 
1265 int
1266 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
1267  const struct rte_event_port_conf *port_conf);
1268 
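/*
 * Port setup sketch (illustrative only): shrink the default burst depths for
 * a latency-sensitive worker port. The depth values are examples only.
 *
 *	struct rte_event_port_conf pconf;
 *
 *	rte_event_port_default_conf_get(dev_id, port_id, &pconf);
 *	pconf.dequeue_depth = 4;
 *	pconf.enqueue_depth = 4;
 *	pconf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_WORKER;
 *	if (rte_event_port_setup(dev_id, port_id, &pconf) < 0)
 *		return -1;
 */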
1269 typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id,
1270  struct rte_event event, void *arg);
1300 void
1301 rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
1302  rte_eventdev_port_flush_t release_cb, void *args);
1303 
1307 #define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
1311 #define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
1317 #define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
1321 #define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3
1322 
1340 int
1341 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
1342  uint32_t *attr_value);
1343 
1362 int
1363 rte_event_dev_start(uint8_t dev_id);
1364 
1383 void
1384 rte_event_dev_stop(uint8_t dev_id);
1385 
1386 typedef void (*rte_eventdev_stop_flush_t)(uint8_t dev_id,
1387  struct rte_event event, void *arg);
1419 int
1420 rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1421  rte_eventdev_stop_flush_t callback, void *userdata);
1422 
1436 int
1437 rte_event_dev_close(uint8_t dev_id);
1438 
1442 struct __rte_aligned(16) rte_event_vector {
1443  uint16_t nb_elem;
1445  uint16_t elem_offset : 12;
1447  uint16_t rsvd : 3;
1449  uint16_t attr_valid : 1;
1452  union {
1453  /* Used by Rx/Tx adapter.
1454  * Indicates that all the elements in this vector belong to the
1455  * same port and queue pair when originating from Rx adapter,
1456  * valid only when event type is ETHDEV_VECTOR or
1457  * ETH_RX_ADAPTER_VECTOR.
1458  * Can also be used to indicate the Tx adapter the destination
1459  * port and queue of the mbufs in the vector
1460  */
1461  struct {
1462  uint16_t port;
1463  uint16_t queue;
1464  };
1465  };
1467  uint64_t impl_opaque;
1468 
1469 /* empty structures do not have zero size in C++ leading to compilation errors
1470  * with clang about structure having different sizes in C and C++.
1471  * Since these are all zero-sized arrays, we can omit the "union" wrapper for
1472  * C++ builds, removing the warning.
1473  */
1474 #ifndef __cplusplus
1480  union __rte_aligned(16) {
1481 #endif
1482  struct rte_mbuf *mbufs[0];
1483  void *ptrs[0];
1484  uint64_t u64s[0];
1485 #ifndef __cplusplus
1486  };
1487 #endif
1492 };
1493 
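/*
 * Vector access sketch (illustrative only): walk the mbufs carried by a
 * dequeued event whose type has the RTE_EVENT_TYPE_VECTOR bit set.
 * process_pkt() is a hypothetical application function.
 *
 *	struct rte_event_vector *vec = ev.vec;
 *	struct rte_mbuf **mbufs = &vec->mbufs[vec->elem_offset];
 *	uint16_t i;
 *
 *	for (i = 0; i < vec->nb_elem; i++)
 *		process_pkt(mbufs[i]);
 */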
1494 /* Scheduler type definitions */
1495 #define RTE_SCHED_TYPE_ORDERED 0
1533 #define RTE_SCHED_TYPE_ATOMIC 1
1560 #define RTE_SCHED_TYPE_PARALLEL 2
1573 /* Event types to classify the event source */
1574 #define RTE_EVENT_TYPE_ETHDEV 0x0
1576 #define RTE_EVENT_TYPE_CRYPTODEV 0x1
1578 #define RTE_EVENT_TYPE_TIMER 0x2
1580 #define RTE_EVENT_TYPE_CPU 0x3
1584 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4
1586 #define RTE_EVENT_TYPE_DMADEV 0x5
1588 #define RTE_EVENT_TYPE_VECTOR 0x8
1600 #define RTE_EVENT_TYPE_ETHDEV_VECTOR \
1601  (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)
1603 #define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)
1605 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR \
1606  (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
1608 #define RTE_EVENT_TYPE_CRYPTODEV_VECTOR \
1609  (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)
1612 #define RTE_EVENT_TYPE_MAX 0x10
1615 /* Event enqueue operations */
1616 #define RTE_EVENT_OP_NEW 0
1621 #define RTE_EVENT_OP_FORWARD 1
1633 #define RTE_EVENT_OP_RELEASE 2
1672 struct rte_event {
1673  /* WORD0 */
1674  union {
1675  uint64_t event;
1677  struct {
1678  uint32_t flow_id:20;
1690  uint32_t sub_event_type:8;
1697  uint32_t event_type:4;
1702  uint8_t op:2;
1712  uint8_t rsvd:4;
1720  uint8_t sched_type:2;
1737  uint8_t queue_id;
1745  uint8_t priority;
1769  uint8_t impl_opaque;
1783  };
1784  };
1785  /* WORD1 */
1786  union {
1787  uint64_t u64;
1789  void *event_ptr;
1791  struct rte_mbuf *mbuf;
1793  struct rte_event_vector *vec;
1795  };
1796 };
1797 
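/*
 * Event composition sketch (illustrative only): wrap an mbuf in a new event
 * destined for an atomic queue. "m" and "flow_hash" are hypothetical and
 * would come from the application's packet path.
 *
 *	struct rte_event ev = {0};
 *
 *	ev.op = RTE_EVENT_OP_NEW;
 *	ev.event_type = RTE_EVENT_TYPE_CPU;
 *	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	ev.queue_id = 0;
 *	ev.flow_id = flow_hash;
 *	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *	ev.mbuf = m;
 */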
1798 /* Ethdev Rx adapter capability bitmap flags */
1799 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1
1803 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2
1807 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4
1814 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR 0x8
1835 int
1836 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1837  uint32_t *caps);
1838 
1839 #define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT RTE_BIT32(0)
1842 #define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC RTE_BIT32(1)
1858 int
1859 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
1860 
1861 /* Crypto adapter capability bitmap flag */
1862 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
1869 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
1876 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND 0x4
1881 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8
1886 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR 0x10
1910 int
1911 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
1912  uint32_t *caps);
1913 
1914 /* DMA adapter capability bitmap flag */
1915 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
1922 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
1929 #define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND 0x4
1952 __rte_experimental
1953 int
1954 rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dmadev_id, uint32_t *caps);
1955 
1956 /* Ethdev Tx adapter capability bitmap flags */
1957 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT 0x1
1960 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR 0x2
1981 int
1982 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1983  uint32_t *caps);
1984 
2009 int
2010 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
2011  uint64_t *timeout_ticks);
2012 
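/*
 * Timeout conversion sketch (illustrative only): translate a 100 us dequeue
 * wait budget into the device-specific tick value expected by
 * rte_event_dequeue_burst().
 *
 *	uint64_t timeout_ticks;
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &timeout_ticks) < 0)
 *		timeout_ticks = 0;	// fall back to non-blocking dequeue
 */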
2076 int
2077 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
2078  const uint8_t queues[], const uint8_t priorities[],
2079  uint16_t nb_links);
2080 
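/*
 * Linking sketch (illustrative only): connect a worker port to two queues at
 * normal priority (a NULL priorities array selects
 * RTE_EVENT_DEV_PRIORITY_NORMAL for every link).
 *
 *	const uint8_t queues[] = {0, 1};
 *
 *	if (rte_event_port_link(dev_id, port_id, queues, NULL, 2) != 2)
 *		return -1;
 */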
2124 int
2125 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
2126  uint8_t queues[], uint16_t nb_unlinks);
2127 
2200 __rte_experimental
2201 int
2202 rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
2203  const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id);
2204 
2253 __rte_experimental
2254 int
2255 rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
2256  uint16_t nb_unlinks, uint8_t profile_id);
2257 
2279 int
2280 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);
2281 
2308 int
2309 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
2310  uint8_t queues[], uint8_t priorities[]);
2311 
2343 __rte_experimental
2344 int
2345 rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
2346  uint8_t priorities[], uint8_t profile_id);
2347 
2363 int
2364 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);
2365 
2379 int
2380 rte_event_dev_dump(uint8_t dev_id, FILE *f);
2381 
2383 #define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
2384 
2388 enum rte_event_dev_xstats_mode {
2389  RTE_EVENT_DEV_XSTATS_DEVICE,
2390  RTE_EVENT_DEV_XSTATS_PORT,
2391  RTE_EVENT_DEV_XSTATS_QUEUE,
2392 };
2393 
2400 struct rte_event_dev_xstats_name {
2401  char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
2402 };
2403 
2436 int
2437 rte_event_dev_xstats_names_get(uint8_t dev_id,
2438  enum rte_event_dev_xstats_mode mode,
2439  uint8_t queue_port_id,
2440  struct rte_event_dev_xstats_name *xstats_names,
2441  uint64_t *ids,
2442  unsigned int size);
2443 
2470 int
2471 rte_event_dev_xstats_get(uint8_t dev_id,
2472  enum rte_event_dev_xstats_mode mode,
2473  uint8_t queue_port_id,
2474  const uint64_t ids[],
2475  uint64_t values[], unsigned int n);
2476 
2493 uint64_t
2494 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
2495  uint64_t *id);
2496 
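/*
 * Statistics sketch (illustrative only): fetch one extended statistic by
 * name. Statistic names are driver-specific; "dev_sched_calls" is only an
 * example and may not exist on a given PMD.
 *
 *	uint64_t id, value;
 *
 *	value = rte_event_dev_xstats_by_name_get(dev_id, "dev_sched_calls", &id);
 */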
2517 int
2518 rte_event_dev_xstats_reset(uint8_t dev_id,
2519  enum rte_event_dev_xstats_mode mode,
2520  int16_t queue_port_id,
2521  const uint64_t ids[],
2522  uint32_t nb_ids);
2523 
2534 int rte_event_dev_selftest(uint8_t dev_id);
2535 
2566 struct rte_mempool *
2567 rte_event_vector_pool_create(const char *name, unsigned int n,
2568  unsigned int cache_size, uint16_t nb_elem,
2569  int socket_id);
2570 
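/*
 * Vector pool sketch (illustrative only): create a mempool of event vectors,
 * each able to carry up to 64 elements, for use by an Rx adapter or the
 * application. Pool size and cache size are example values.
 *
 *	struct rte_mempool *vpool;
 *
 *	vpool = rte_event_vector_pool_create("ev_vec_pool", 16 * 1024, 64, 64,
 *					     rte_socket_id());
 *	if (vpool == NULL)
 *		return -1;
 */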
2571 #include <rte_eventdev_core.h>
2572 
2573 #ifdef __cplusplus
2574 extern "C" {
2575 #endif
2576 
2577 static __rte_always_inline uint16_t
2578 __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
2579  const struct rte_event ev[], uint16_t nb_events,
2580  const event_enqueue_burst_t fn)
2581 {
2582  const struct rte_event_fp_ops *fp_ops;
2583  void *port;
2584 
2585  fp_ops = &rte_event_fp_ops[dev_id];
2586  port = fp_ops->data[port_id];
2587 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2588  if (dev_id >= RTE_EVENT_MAX_DEVS ||
2589  port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
2590  rte_errno = EINVAL;
2591  return 0;
2592  }
2593 
2594  if (port == NULL) {
2595  rte_errno = EINVAL;
2596  return 0;
2597  }
2598 #endif
2599  rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
2600 
2601  return fn(port, ev, nb_events);
2602 }
2603 
2647 static inline uint16_t
2648 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
2649  const struct rte_event ev[], uint16_t nb_events)
2650 {
2651  const struct rte_event_fp_ops *fp_ops;
2652 
2653  fp_ops = &rte_event_fp_ops[dev_id];
2654  return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2655  fp_ops->enqueue_burst);
2656 }
2657 
2699 static inline uint16_t
2700 rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
2701  const struct rte_event ev[], uint16_t nb_events)
2702 {
2703  const struct rte_event_fp_ops *fp_ops;
2704 
2705  fp_ops = &rte_event_fp_ops[dev_id];
2706  return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2707  fp_ops->enqueue_new_burst);
2708 }
2709 
2751 static inline uint16_t
2752 rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
2753  const struct rte_event ev[], uint16_t nb_events)
2754 {
2755  const struct rte_event_fp_ops *fp_ops;
2756 
2757  fp_ops = &rte_event_fp_ops[dev_id];
2758  return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2759  fp_ops->enqueue_forward_burst);
2760 }
2761 
2828 static inline uint16_t
2829 rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
2830  uint16_t nb_events, uint64_t timeout_ticks)
2831 {
2832  const struct rte_event_fp_ops *fp_ops;
2833  void *port;
2834 
2835  fp_ops = &rte_event_fp_ops[dev_id];
2836  port = fp_ops->data[port_id];
2837 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2838  if (dev_id >= RTE_EVENT_MAX_DEVS ||
2839  port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
2840  rte_errno = EINVAL;
2841  return 0;
2842  }
2843 
2844  if (port == NULL) {
2845  rte_errno = EINVAL;
2846  return 0;
2847  }
2848 #endif
2849  rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
2850 
2851  return (fp_ops->dequeue_burst)(port, ev, nb_events, timeout_ticks);
2852 }
2853 
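/*
 * Worker loop sketch (illustrative only): dequeue a burst, process each
 * event and forward it to the next pipeline stage. "stop", "timeout_ticks",
 * "next_queue" and do_work() are hypothetical; a real worker also handles
 * partial enqueues and RTE_EVENT_OP_RELEASE.
 *
 *	struct rte_event evs[32];
 *	uint16_t nb, i;
 *
 *	while (!stop) {
 *		nb = rte_event_dequeue_burst(dev_id, port_id, evs,
 *					     RTE_DIM(evs), timeout_ticks);
 *		for (i = 0; i < nb; i++) {
 *			do_work(&evs[i]);
 *			evs[i].op = RTE_EVENT_OP_FORWARD;
 *			evs[i].queue_id = next_queue;
 *		}
 *		if (nb > 0)
 *			rte_event_enqueue_burst(dev_id, port_id, evs, nb);
 *	}
 */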
2854 #define RTE_EVENT_DEV_MAINT_OP_FLUSH (1 << 0)
2896 static inline int
2897 rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
2898 {
2899  const struct rte_event_fp_ops *fp_ops;
2900  void *port;
2901 
2902  fp_ops = &rte_event_fp_ops[dev_id];
2903  port = fp_ops->data[port_id];
2904 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2905  if (dev_id >= RTE_EVENT_MAX_DEVS ||
2906  port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
2907  return -EINVAL;
2908 
2909  if (port == NULL)
2910  return -EINVAL;
2911 
2912  if (op & (~RTE_EVENT_DEV_MAINT_OP_FLUSH))
2913  return -EINVAL;
2914 #endif
2915  rte_eventdev_trace_maintain(dev_id, port_id, op);
2916 
2917  if (fp_ops->maintain != NULL)
2918  fp_ops->maintain(port, op);
2919 
2920  return 0;
2921 }
2922 
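/*
 * Maintenance sketch (illustrative only): on devices without
 * RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, a port that temporarily stops calling
 * the enqueue/dequeue functions must still be maintained ("info" as filled
 * by rte_event_dev_info_get()).
 *
 *	if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_MAINTENANCE_FREE))
 *		rte_event_maintain(dev_id, port_id, 0);
 */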
2944 static inline uint8_t
2945 rte_event_port_profile_switch(uint8_t dev_id, uint8_t port_id, uint8_t profile_id)
2946 {
2947  const struct rte_event_fp_ops *fp_ops;
2948  void *port;
2949 
2950  fp_ops = &rte_event_fp_ops[dev_id];
2951  port = fp_ops->data[port_id];
2952 
2953 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2954  if (dev_id >= RTE_EVENT_MAX_DEVS ||
2955  port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
2956  return -EINVAL;
2957 
2958  if (port == NULL)
2959  return -EINVAL;
2960 
2961  if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT)
2962  return -EINVAL;
2963 #endif
2964  rte_eventdev_trace_port_profile_switch(dev_id, port_id, profile_id);
2965 
2966  return fp_ops->profile_switch(port, profile_id);
2967 }
2968 
2992 __rte_experimental
2993 static inline int
2994 rte_event_port_preschedule_modify(uint8_t dev_id, uint8_t port_id,
2995  enum rte_event_dev_preschedule_type type)
2996 {
2997  const struct rte_event_fp_ops *fp_ops;
2998  void *port;
2999 
3000  fp_ops = &rte_event_fp_ops[dev_id];
3001  port = fp_ops->data[port_id];
3002 
3003 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
3004  if (dev_id >= RTE_EVENT_MAX_DEVS || port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
3005  return -EINVAL;
3006 
3007  if (port == NULL)
3008  return -EINVAL;
3009 #endif
3010  rte_eventdev_trace_port_preschedule_modify(dev_id, port_id, type);
3011 
3012  return fp_ops->preschedule_modify(port, type);
3013 }
3014 
3036 __rte_experimental
3037 static inline void
3038 rte_event_port_preschedule(uint8_t dev_id, uint8_t port_id,
3039  enum rte_event_dev_preschedule_type type)
3040 {
3041  const struct rte_event_fp_ops *fp_ops;
3042  void *port;
3043 
3044  fp_ops = &rte_event_fp_ops[dev_id];
3045  port = fp_ops->data[port_id];
3046 
3047 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
3048  if (dev_id >= RTE_EVENT_MAX_DEVS || port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
3049  return;
3050  if (port == NULL)
3051  return;
3052 #endif
3053  rte_eventdev_trace_port_preschedule(dev_id, port_id, type);
3054 
3055  fp_ops->preschedule(port, type);
3056 }
3057 #ifdef __cplusplus
3058 }
3059 #endif
3060 
3061 #endif /* _RTE_EVENTDEV_H_ */