| /f-stack/dpdk/lib/librte_efd/ |
| rte_efd.c |
| 252 | struct efd_online_chunk *chunks[RTE_MAX_NUMA_NODES]; member |
| 605 | table->chunks[socket_id] = NULL; in rte_efd_create() |
| 621 | table->chunks[socket_id] = in rte_efd_create() |
| 627 | if (table->chunks[socket_id] == NULL) { in rte_efd_create() |
| 757 | rte_free(table->chunks[socket_id]); in rte_efd_free() |
| 825 | if (table->chunks[i] != NULL) { in efd_apply_update() |
| 826 | memcpy(&(table->chunks[i][chunk_id].groups[group_id]), in efd_apply_update() |
| 829 | table->chunks[i][chunk_id].bin_choice_list[bin_index] = in efd_apply_update() |
| 1308 | const struct efd_online_chunk * const chunks = table->chunks[socket_id]; in rte_efd_lookup() local |
| 1314 | group = &chunks[chunk_id].groups[group_id]; in rte_efd_lookup() |
| [all …] |
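
The pattern running through these rte_efd.c hits is a table that keeps one online-chunk array per NUMA socket, so lookups read socket-local memory. Below is a minimal sketch of that layout, assuming DPDK headers; the demo_* names and the socket_mask parameter are hypothetical stand-ins, not the actual EFD API.

    #include <stddef.h>
    #include <stdint.h>
    #include <rte_config.h>   /* RTE_MAX_NUMA_NODES */
    #include <rte_malloc.h>   /* rte_malloc_socket(), rte_free() */

    struct demo_chunk {
        uint8_t groups[4096];               /* placeholder payload */
    };

    struct demo_table {
        /* one chunk array per NUMA socket, cf. rte_efd.c line 252 */
        struct demo_chunk *chunks[RTE_MAX_NUMA_NODES];
    };

    static int
    demo_table_alloc(struct demo_table *t, size_t nchunks, uint64_t socket_mask)
    {
        unsigned int s;

        for (s = 0; s < RTE_MAX_NUMA_NODES; s++)
            t->chunks[s] = NULL;

        for (s = 0; s < RTE_MAX_NUMA_NODES && s < 64; s++) {
            if (!(socket_mask & (1ULL << s)))
                continue;                   /* socket not requested */
            t->chunks[s] = rte_malloc_socket("demo_chunks",
                    nchunks * sizeof(struct demo_chunk), 0, s);
            if (t->chunks[s] == NULL)
                goto fail;
        }
        return 0;

    fail:
        for (s = 0; s < RTE_MAX_NUMA_NODES; s++)
            rte_free(t->chunks[s]);         /* rte_free(NULL) is a no-op */
        return -1;
    }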
|
| /f-stack/freebsd/netinet/ |
| sctp_auth.c |
| 117 | if (list->chunks[chunk] == 0) { in sctp_auth_add_chunk() |
| 118 | list->chunks[chunk] = 1; in sctp_auth_add_chunk() |
| 136 | if (list->chunks[chunk] == 1) { in sctp_auth_delete_chunk() |
| 137 | list->chunks[chunk] = 0; in sctp_auth_delete_chunk() |
| 168 | if (list->chunks[i] != 0) { in sctp_serialize_auth_chunks() |
| 187 | if (list->chunks[i] != 0) { in sctp_pack_auth_chunks() |
| 197 | if (list->chunks[i] != 0) { in sctp_pack_auth_chunks() |
| 1435 | num_chunks = plen - sizeof(*chunks); in sctp_auth_get_cookie_params() |
| 1455 | if (chunks != NULL) { in sctp_auth_get_cookie_params() |
| 1468 | if (chunks != NULL) { in sctp_auth_get_cookie_params() |
| [all …] |
|
| sctp_auth.h |
| 75 | uint8_t chunks[256]; member |
| 101 | #define sctp_auth_is_required_chunk(chunk, list) ((list == NULL) ? (0) : (list->chunks[chunk] != 0)) |
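
Line 75 of sctp_auth.h is the whole data structure behind these hits: a 256-entry flag array, one byte per possible SCTP chunk type, which the macro on line 101 then tests. A standalone sketch of that idiom follows; the demo_* names are hypothetical, and the real code also tracks how many chunk types are listed and rejects reserved ones.

    #include <stdint.h>

    struct demo_chunk_list {
        uint8_t chunks[256];    /* one flag per possible chunk type */
    };

    static int
    demo_add_chunk(uint8_t chunk, struct demo_chunk_list *list)
    {
        if (list == NULL)
            return -1;
        if (list->chunks[chunk] == 0)
            list->chunks[chunk] = 1;        /* idempotent add */
        return 0;
    }

    static int
    demo_delete_chunk(uint8_t chunk, struct demo_chunk_list *list)
    {
        if (list == NULL)
            return -1;
        if (list->chunks[chunk] == 1)
            list->chunks[chunk] = 0;
        return 0;
    }

    /* mirrors the sctp_auth_is_required_chunk() macro at sctp_auth.h:101 */
    #define demo_is_required_chunk(chunk, list) \
        ((list) == NULL ? 0 : ((list)->chunks[chunk] != 0))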
|
| sctp_pcb.c |
| 6010 | struct sctp_auth_chunk_list *chunks = NULL; in sctp_load_addresses_from_init() local |
| 6510 | chunks = (struct sctp_auth_chunk_list *)phdr; in sctp_load_addresses_from_init() |
| 6511 | num_chunks = plen - sizeof(*chunks); in sctp_load_addresses_from_init() |
| 6520 | if (chunks->chunk_types[i] == SCTP_ASCONF) in sctp_load_addresses_from_init() |
| 6522 | if (chunks->chunk_types[i] == SCTP_ASCONF_ACK) in sctp_load_addresses_from_init() |
| 6622 | if (chunks != NULL) { in sctp_load_addresses_from_init() |
| 6623 | keylen += sizeof(*chunks) + num_chunks; in sctp_load_addresses_from_init() |
| 6635 | if (chunks != NULL) { in sctp_load_addresses_from_init() |
| 6636 | memcpy(new_key->key + keylen, chunks, in sctp_load_addresses_from_init() |
| 6637 | sizeof(*chunks) + num_chunks); in sctp_load_addresses_from_init() |
| [all …] |
|
| sctp_output.c |
| 4798 | struct sctp_auth_chunk_list *chunks; in sctp_send_initiate() local |
| 4805 | chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len); in sctp_send_initiate() |
| 4808 | chunks->ph.param_type = htons(SCTP_CHUNK_LIST); in sctp_send_initiate() |
| 4809 | chunks->ph.param_length = htons(parameter_len); in sctp_send_initiate() |
| 4810 | sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types); in sctp_send_initiate() |
| 5969 | struct sctp_auth_chunk_list *chunks; in sctp_send_initiate_ack() local |
| 6007 | chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len); in sctp_send_initiate_ack() |
| 6010 | chunks->chunk_types); in sctp_send_initiate_ack() |
| 6011 | chunks->ph.param_type = htons(SCTP_CHUNK_LIST); in sctp_send_initiate_ack() |
| 6012 | chunks->ph.param_length = htons(parameter_len); in sctp_send_initiate_ack() |
|
| /f-stack/dpdk/drivers/crypto/octeontx/ |
| otx_cryptodev_hw_access.c |
| 508 | int chunk_len, chunks, chunk_size; in otx_cpt_get_resource() local |
| 527 | chunks = DEFAULT_CMD_QCHUNKS; in otx_cpt_get_resource() |
| 530 | qlen = chunks * chunk_len; in otx_cpt_get_resource() |
| 535 | len = chunks * RTE_ALIGN(sizeof(struct command_chunk), 8); in otx_cpt_get_resource() |
| 544 | len += chunks * RTE_ALIGN(chunk_size, 128); in otx_cpt_get_resource() |
|
| /f-stack/dpdk/doc/guides/eventdevs/ |
| octeontx2.rst |
| 85 | By default chunks are allocated from NPA then TIM can automatically free |
| 86 | them when traversing the list of chunks. The ``tim_disable_npa`` devargs |
| 87 | parameter disables NPA and uses software mempool to manage chunks |
| 97 | store events. TIM traverses the list of chunks and enqueues the event timers |
|
| /f-stack/dpdk/drivers/common/iavf/ |
| virtchnl.h |
| 1183 | struct virtchnl_queue_chunk chunks[1]; member |
| 1208 | struct virtchnl_queue_chunks chunks; member |
| 1495 | if (qs->chunks.num_chunks == 0 || in virtchnl_vc_validate_vf_msg() |
| 1496 | qs->chunks.num_chunks > VIRTCHNL_OP_ENABLE_DISABLE_DEL_QUEUES_V2_MAX) { in virtchnl_vc_validate_vf_msg() |
| 1500 | valid_len += (qs->chunks.num_chunks - 1) * in virtchnl_vc_validate_vf_msg() |
|
| /f-stack/dpdk/drivers/net/iavf/ |
| iavf_vchnl.c |
| 627 | queue_chunk = queue_select->chunks.chunks; in iavf_enable_queues_lv() |
| 628 | queue_select->chunks.num_chunks = IAVF_RXTX_QUEUE_CHUNKS_NUM; in iavf_enable_queues_lv() |
| 671 | queue_chunk = queue_select->chunks.chunks; in iavf_disable_queues_lv() |
| 672 | queue_select->chunks.num_chunks = IAVF_RXTX_QUEUE_CHUNKS_NUM; in iavf_disable_queues_lv() |
| 714 | queue_chunk = queue_select->chunks.chunks; in iavf_switch_queue_lv() |
| 715 | queue_select->chunks.num_chunks = 1; in iavf_switch_queue_lv() |
|
| /f-stack/tools/libxo/xolint/ |
| xolint.pl |
| 663 | my(@chunks) = split(/\./, $ff); |
| 675 | if $#chunks >= 2 && $fc !~ /[sS]/; |
|
| /f-stack/app/redis-5.0.5/deps/lua/ |
| HISTORY |
| 51 | + chunks are loaded by using lua_load; new luaL_loadfile and luaL_loadbuffer. |
| 118 | - local variables in chunks. |
| 133 | + complete debug information stored in pre-compiled chunks. |
|
| /f-stack/dpdk/drivers/event/sw/ |
| sw_evdev.c |
| 529 | if (sw->chunks) in sw_dev_configure() |
| 530 | rte_free(sw->chunks); in sw_dev_configure() |
| 532 | sw->chunks = rte_malloc_socket(NULL, in sw_dev_configure() |
| 537 | if (!sw->chunks) in sw_dev_configure() |
| 542 | iq_free_chunk(sw, &sw->chunks[i]); in sw_dev_configure() |
|
| sw_evdev.h |
| 247 | struct sw_queue_chunk *chunks; member |
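
Taken together, the sw_evdev.c and sw_evdev.h hits outline a chunk recycler: one rte_malloc'd array is carved into chunks, and each chunk is threaded onto a singly linked free list that queues pop from and push back to. A minimal sketch of that free-list idiom, with hypothetical demo_* names rather than the sw PMD's own:

    #include <stddef.h>

    struct demo_chunk {
        struct demo_chunk *next;            /* free-list link */
        /* event storage would follow here */
    };

    struct demo_dev {
        struct demo_chunk *chunk_list;      /* head of the free list */
    };

    static void
    demo_free_chunk(struct demo_dev *d, struct demo_chunk *c)
    {
        c->next = d->chunk_list;            /* push onto the list head */
        d->chunk_list = c;
    }

    static struct demo_chunk *
    demo_get_chunk(struct demo_dev *d)
    {
        struct demo_chunk *c = d->chunk_list;

        if (c != NULL)
            d->chunk_list = c->next;        /* pop from the list head */
        return c;
    }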
|
| /f-stack/freebsd/contrib/openzfs/module/os/linux/zfs/ |
| abd_os.c |
| 263 | int chunks = 0, zones = 0; in abd_alloc_chunks() local |
| 295 | chunks++; in abd_alloc_chunks() |
| 301 | while (sg_alloc_table(&table, chunks, gfp)) { in abd_alloc_chunks() |
|
| /f-stack/freebsd/netpfil/ipfw/nat64/ |
| nat64lsn.c |
| 264 | struct nat64lsn_pgchunk **chunks, struct nat64lsn_pg **pgptr, in nat64lsn_get_pg() argument |
| 297 | &chunks[idx / 32]->pgptr[idx % 32]); in nat64lsn_get_pg() |
| 1110 | uint32_t *pgmask, struct nat64lsn_pgchunk **chunks, in nat64lsn_alloc_proto_pg() argument |
| 1123 | chunks[chunk_idx] = uma_zalloc(nat64lsn_pgchunk_zone, in nat64lsn_alloc_proto_pg() |
| 1125 | if (chunks[chunk_idx] == NULL) in nat64lsn_alloc_proto_pg() |
| 1171 | ck_pr_store_ptr(&chunks[chunk_idx]->pgptr[pg_idx % 32], pg); in nat64lsn_alloc_proto_pg() |
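
The nat64lsn.c hits show a two-level table: a flat index is split into a chunk number (idx / 32) and a slot within that chunk (idx % 32), and chunks are allocated only when first written. A simplified single-threaded sketch with hypothetical demo_* names; the kernel code uses uma_zalloc() and ck_pr_store_ptr() for lock-free publication instead of plain stores.

    #include <stdint.h>
    #include <stdlib.h>

    #define DEMO_SLOTS_PER_CHUNK 32

    struct demo_pgchunk {
        void *pgptr[DEMO_SLOTS_PER_CHUNK];
    };

    static void *
    demo_get(struct demo_pgchunk **chunks, uint32_t idx)
    {
        struct demo_pgchunk *c = chunks[idx / DEMO_SLOTS_PER_CHUNK];

        if (c == NULL)
            return NULL;                    /* chunk not allocated yet */
        return c->pgptr[idx % DEMO_SLOTS_PER_CHUNK];
    }

    static int
    demo_set(struct demo_pgchunk **chunks, uint32_t idx, void *pg)
    {
        uint32_t ci = idx / DEMO_SLOTS_PER_CHUNK;

        if (chunks[ci] == NULL) {
            chunks[ci] = calloc(1, sizeof(struct demo_pgchunk));
            if (chunks[ci] == NULL)
                return -1;
        }
        chunks[ci]->pgptr[idx % DEMO_SLOTS_PER_CHUNK] = pg;
        return 0;
    }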
|
| /f-stack/freebsd/contrib/device-tree/Bindings/iommu/ |
| samsung,sysmmu.yaml |
| 14 | physical memory chunks visible as a contiguous region to DMA-capable peripheral |
|
| /f-stack/app/nginx-1.16.1/src/http/modules/ |
| ngx_http_mp4_module.c |
| 63 | uint32_t chunks; member |
| 2875 | next_chunk = trak->chunks + 1; in ngx_http_mp4_crop_stsc_data() |
| 3202 | trak->chunks = entries; in ngx_http_mp4_read_stco_atom() |
| 3250 | if (trak->start_chunk > trak->chunks) { in ngx_http_mp4_update_stco_atom() |
| 3268 | if (trak->end_chunk > trak->chunks) { in ngx_http_mp4_update_stco_atom() |
| 3288 | entries = trak->chunks - trak->start_chunk; in ngx_http_mp4_update_stco_atom() |
| 3386 | trak->chunks = entries; in ngx_http_mp4_read_co64_atom() |
| 3434 | if (trak->start_chunk > trak->chunks) { in ngx_http_mp4_update_co64_atom() |
| 3452 | if (trak->end_chunk > trak->chunks) { in ngx_http_mp4_update_co64_atom() |
| 3472 | entries = trak->chunks - trak->start_chunk; in ngx_http_mp4_update_co64_atom() |
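
These mp4 hits all enforce one invariant when cropping a track for a seek: the requested start and end chunks must lie within the trak->chunks count read from the stco/co64 atom, and the cropped table keeps chunks - start_chunk entries. A condensed sketch of that bounds check, using a hypothetical demo_trak in place of nginx's trak structure:

    #include <stdint.h>

    struct demo_trak {
        uint32_t chunks;        /* entries in the chunk-offset atom */
        uint32_t start_chunk;   /* first chunk to keep after the seek */
        uint32_t end_chunk;     /* one past the last chunk to keep */
    };

    static int
    demo_crop_chunk_table(struct demo_trak *trak, uint32_t *entries_out)
    {
        if (trak->start_chunk > trak->chunks)
            return -1;          /* seek position beyond the table */
        if (trak->end_chunk > trak->chunks)
            return -1;
        *entries_out = trak->chunks - trak->start_chunk;
        return 0;
    }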
|
| /f-stack/app/redis-5.0.5/deps/jemalloc/ |
| ChangeLog |
| 162 | aligned "chunks" for virtual memory management, and instead uses page-aligned |
| 244 | naturally aligned "chunks", and store all metadata in arbitrarily distant |
| 375 | arena chunks to non-huge during purging if that is not their initial state. |
| 396 | - Mark partially purged arena chunks as non-huge-page. This improves |
| 764 | dirty-run-containing chunks. In practice this change significantly reduces |
| 766 | - Integrate whole chunks into the unused dirty page purging machinery. This |
| 768 | effectively introduces a cache of chunks. |
| 818 | - Remove the "stats.chunks.current", "stats.chunks.total", and |
| 819 | "stats.chunks.high" mallctls. |
| 1373 | - Fix a chunk leak. The leaked chunks were never touched, so this impacted |
| [all …] |
|
| /f-stack/freebsd/contrib/zlib/doc/ |
| txtvsbin.txt |
| 92 | large chunks of textual data. Furthermore, "polluted" plain text should |
|
| /f-stack/freebsd/contrib/zlib/ |
| FAQ |
| 244 | Each call of inflate() or deflate() is limited to input and output chunks |
| 246 | type, but there is no limit to the number of chunks. Note however that the |
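
The FAQ entry states the core zlib usage contract: one deflate() or inflate() call consumes at most one bounded input chunk and fills at most one bounded output chunk, so arbitrary-sized data is handled by looping. A trimmed-down compression loop in the spirit of zlib's own zpipe.c example (error handling shortened):

    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    #define CHUNK 16384

    static int
    compress_chunks(FILE *src, FILE *dst)
    {
        unsigned char in[CHUNK], out[CHUNK];
        z_stream strm;
        int flush, ret;

        memset(&strm, 0, sizeof(strm));
        if (deflateInit(&strm, Z_DEFAULT_COMPRESSION) != Z_OK)
            return -1;

        do {
            strm.avail_in = (uInt)fread(in, 1, CHUNK, src);
            strm.next_in = in;
            flush = feof(src) ? Z_FINISH : Z_NO_FLUSH;
            do {    /* drain all output this input chunk produces */
                strm.avail_out = CHUNK;
                strm.next_out = out;
                ret = deflate(&strm, flush);
                fwrite(out, 1, CHUNK - strm.avail_out, dst);
            } while (strm.avail_out == 0);
        } while (flush != Z_FINISH);

        deflateEnd(&strm);
        return ret == Z_STREAM_END ? 0 : -1;
    }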
|
| /f-stack/dpdk/doc/guides/prog_guide/ |
| lpm6_lib.rst |
| 73 | are indexed using the rest of the bytes of the IP address, in chunks of 8 bits. |
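
That line summarizes the LPM6 layout: the first 24 bits of the IPv6 address index the top-level table, and each deeper table consumes one more 8-bit chunk (one byte) of the address. A conceptual walk follows; the demo_entry layout is a hypothetical expansion, much less compact than librte_lpm6's real bitfield entries.

    #include <stdint.h>

    struct demo_entry {
        uint32_t next_hop;      /* or the index of the next-level table */
        uint8_t  valid;
        uint8_t  is_leaf;       /* 1: next_hop is final; 0: descend */
    };

    static int
    demo_lpm6_lookup(const struct demo_entry *tbl24,
                     struct demo_entry *const *tbl8s,
                     const uint8_t ip[16], uint32_t *next_hop)
    {
        /* first 24 bits = first three bytes of the address */
        uint32_t idx = ((uint32_t)ip[0] << 16) |
                       ((uint32_t)ip[1] << 8) | ip[2];
        const struct demo_entry *e = &tbl24[idx];
        int byte = 3;

        while (e->valid && !e->is_leaf && byte < 16)
            e = &tbl8s[e->next_hop][ip[byte++]];   /* next 8-bit chunk */

        if (!e->valid || !e->is_leaf)
            return -1;
        *next_hop = e->next_hop;
        return 0;
    }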
|
| env_abstraction_layer.rst |
| 111 | chunks of IOVA-contiguous are required (with "large" defined as "more than one |
| 116 | For chunks of memory which must be IOVA-contiguous, it is recommended to use |
| 165 | memory at startup, sort all memory into large IOVA-contiguous chunks, and will |
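
The guidance excerpted here has a concrete API behind it: DPDK's memzone allocator can be asked for IOVA-contiguous memory explicitly via the RTE_MEMZONE_IOVA_CONTIG flag. A short sketch, assuming EAL is already initialized; the zone name and size are arbitrary.

    #include <stdio.h>
    #include <rte_errno.h>
    #include <rte_lcore.h>      /* rte_socket_id() */
    #include <rte_memzone.h>

    static const struct rte_memzone *
    reserve_dma_ring(void)
    {
        const struct rte_memzone *mz;

        /* hardware rings and DMA buffers need physically contiguous
         * backing, which ordinary heap allocations do not guarantee */
        mz = rte_memzone_reserve("demo_dma_ring", 2 * 1024 * 1024,
                rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG);
        if (mz == NULL)
            fprintf(stderr, "memzone reserve failed: %d\n", rte_errno);
        return mz;  /* mz->addr: virtual address, mz->iova: bus address */
    }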
|
| compressdev.rst |
| 448 | file broken into multiple chunks then file is represented by a stream and each chunk of that file is |
|
| /f-stack/dpdk/doc/guides/rel_notes/ |
| release_19_02.rst |
| 69 | chunks of external memory to be registered with DPDK without adding them to |
|
| release_2_1.rst |
| 48 | into smaller memory chunks. |
|