1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2020 Dmitry Kozlyuk
3 */
4
5 #include <inttypes.h>
6 #include <io.h>
7
8 #include <rte_eal_paging.h>
9 #include <rte_errno.h>
10
11 #include "eal_internal_cfg.h"
12 #include "eal_memalloc.h"
13 #include "eal_memcfg.h"
14 #include "eal_options.h"
15 #include "eal_private.h"
16 #include "eal_windows.h"
17
18 #include <rte_virt2phys.h>
19
20 /* MinGW-w64 headers lack VirtualAlloc2() in some distributions.
21 * Note: definitions are copied verbatim from Microsoft documentation
22 * and don't follow DPDK code style.
23 */
#ifndef MEM_EXTENDED_PARAMETER_TYPE_BITS

#define MEM_EXTENDED_PARAMETER_TYPE_BITS 4

/* https://docs.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-mem_extended_parameter_type */
typedef enum MEM_EXTENDED_PARAMETER_TYPE {
	MemExtendedParameterInvalidType,
	MemExtendedParameterAddressRequirements,
	MemExtendedParameterNumaNode,
	MemExtendedParameterPartitionHandle,
	MemExtendedParameterUserPhysicalHandle,
	MemExtendedParameterAttributeFlags,
	MemExtendedParameterMax
} *PMEM_EXTENDED_PARAMETER_TYPE;

/* https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-mem_extended_parameter */
typedef struct MEM_EXTENDED_PARAMETER {
	struct {
		/* Low bits select a MEM_EXTENDED_PARAMETER_TYPE value. */
		DWORD64 Type : MEM_EXTENDED_PARAMETER_TYPE_BITS;
		DWORD64 Reserved : 64 - MEM_EXTENDED_PARAMETER_TYPE_BITS;
	} DUMMYSTRUCTNAME;
	union {
		DWORD64 ULong64;
		PVOID Pointer;
		SIZE_T Size;
		HANDLE Handle;
		DWORD ULong;
	} DUMMYUNIONNAME;
} MEM_EXTENDED_PARAMETER, *PMEM_EXTENDED_PARAMETER;

#endif /* defined(MEM_EXTENDED_PARAMETER_TYPE_BITS) */
55
/* https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualalloc2 */
typedef PVOID (*VirtualAlloc2_type)(
	HANDLE Process,
	PVOID BaseAddress,
	SIZE_T Size,
	ULONG AllocationType,
	ULONG PageProtection,
	MEM_EXTENDED_PARAMETER *ExtendedParameters,
	ULONG ParameterCount
);

/* MinGW-w64 distributions, even those that declare VirtualAlloc2(),
 * lack it in import libraries, which results in a failure at link time.
 * Link it dynamically in such case.
 * Filled by eal_mem_win32api_init(); NULL until then.
 */
static VirtualAlloc2_type VirtualAlloc2_ptr;
72
73 #ifdef RTE_TOOLCHAIN_GCC
74
75 #define MEM_COALESCE_PLACEHOLDERS 0x00000001
76 #define MEM_PRESERVE_PLACEHOLDER 0x00000002
77 #define MEM_REPLACE_PLACEHOLDER 0x00004000
78 #define MEM_RESERVE_PLACEHOLDER 0x00040000
79
80 int
eal_mem_win32api_init(void)81 eal_mem_win32api_init(void)
82 {
83 /* Contrary to the docs, VirtualAlloc2() is not in kernel32.dll,
84 * see https://github.com/MicrosoftDocs/feedback/issues/1129.
85 */
86 static const char library_name[] = "kernelbase.dll";
87 static const char function[] = "VirtualAlloc2";
88
89 HMODULE library = NULL;
90 int ret = 0;
91
92 /* Already done. */
93 if (VirtualAlloc2_ptr != NULL)
94 return 0;
95
96 library = LoadLibraryA(library_name);
97 if (library == NULL) {
98 RTE_LOG_WIN32_ERR("LoadLibraryA(\"%s\")", library_name);
99 return -1;
100 }
101
102 VirtualAlloc2_ptr = (VirtualAlloc2_type)(
103 (void *)GetProcAddress(library, function));
104 if (VirtualAlloc2_ptr == NULL) {
105 RTE_LOG_WIN32_ERR("GetProcAddress(\"%s\", \"%s\")\n",
106 library_name, function);
107
108 /* Contrary to the docs, Server 2016 is not supported. */
109 RTE_LOG(ERR, EAL, "Windows 10 or Windows Server 2019 "
110 " is required for memory management\n");
111 ret = -1;
112 }
113
114 FreeLibrary(library);
115
116 return ret;
117 }
118
119 #else
120
121 /* Stub in case VirtualAlloc2() is provided by the toolchain. */
122 int
eal_mem_win32api_init(void)123 eal_mem_win32api_init(void)
124 {
125 VirtualAlloc2_ptr = VirtualAlloc2;
126 return 0;
127 }
128
129 #endif /* defined(RTE_TOOLCHAIN_GCC) */
130
/* Handle to the virt2phys driver device; opened by eal_mem_virt2iova_init()
 * and used by rte_mem_virt2phy() to translate addresses via ioctl.
 */
static HANDLE virt2phys_device = INVALID_HANDLE_VALUE;
132
133 int
eal_mem_virt2iova_init(void)134 eal_mem_virt2iova_init(void)
135 {
136 HDEVINFO list = INVALID_HANDLE_VALUE;
137 SP_DEVICE_INTERFACE_DATA ifdata;
138 SP_DEVICE_INTERFACE_DETAIL_DATA *detail = NULL;
139 DWORD detail_size;
140 int ret = -1;
141
142 list = SetupDiGetClassDevs(
143 &GUID_DEVINTERFACE_VIRT2PHYS, NULL, NULL,
144 DIGCF_DEVICEINTERFACE | DIGCF_PRESENT);
145 if (list == INVALID_HANDLE_VALUE) {
146 RTE_LOG_WIN32_ERR("SetupDiGetClassDevs()");
147 goto exit;
148 }
149
150 ifdata.cbSize = sizeof(ifdata);
151 if (!SetupDiEnumDeviceInterfaces(
152 list, NULL, &GUID_DEVINTERFACE_VIRT2PHYS, 0, &ifdata)) {
153 RTE_LOG_WIN32_ERR("SetupDiEnumDeviceInterfaces()");
154 goto exit;
155 }
156
157 if (!SetupDiGetDeviceInterfaceDetail(
158 list, &ifdata, NULL, 0, &detail_size, NULL)) {
159 if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
160 RTE_LOG_WIN32_ERR(
161 "SetupDiGetDeviceInterfaceDetail(probe)");
162 goto exit;
163 }
164 }
165
166 detail = malloc(detail_size);
167 if (detail == NULL) {
168 RTE_LOG(ERR, EAL, "Cannot allocate virt2phys "
169 "device interface detail data\n");
170 goto exit;
171 }
172
173 detail->cbSize = sizeof(*detail);
174 if (!SetupDiGetDeviceInterfaceDetail(
175 list, &ifdata, detail, detail_size, NULL, NULL)) {
176 RTE_LOG_WIN32_ERR("SetupDiGetDeviceInterfaceDetail(read)");
177 goto exit;
178 }
179
180 RTE_LOG(DEBUG, EAL, "Found virt2phys device: %s\n", detail->DevicePath);
181
182 virt2phys_device = CreateFile(
183 detail->DevicePath, 0, 0, NULL, OPEN_EXISTING, 0, NULL);
184 if (virt2phys_device == INVALID_HANDLE_VALUE) {
185 RTE_LOG_WIN32_ERR("CreateFile()");
186 goto exit;
187 }
188
189 /* Indicate success. */
190 ret = 0;
191
192 exit:
193 free(detail);
194 if (list != INVALID_HANDLE_VALUE)
195 SetupDiDestroyDeviceInfoList(list);
196
197 return ret;
198 }
199
200 void
eal_mem_virt2iova_cleanup(void)201 eal_mem_virt2iova_cleanup(void)
202 {
203 if (virt2phys_device != INVALID_HANDLE_VALUE)
204 CloseHandle(virt2phys_device);
205 }
206
207 phys_addr_t
rte_mem_virt2phy(const void * virt)208 rte_mem_virt2phy(const void *virt)
209 {
210 LARGE_INTEGER phys;
211 DWORD bytes_returned;
212
213 if (virt2phys_device == INVALID_HANDLE_VALUE)
214 return RTE_BAD_PHYS_ADDR;
215
216 if (!DeviceIoControl(
217 virt2phys_device, IOCTL_VIRT2PHYS_TRANSLATE,
218 &virt, sizeof(virt), &phys, sizeof(phys),
219 &bytes_returned, NULL)) {
220 RTE_LOG_WIN32_ERR("DeviceIoControl(IOCTL_VIRT2PHYS_TRANSLATE)");
221 return RTE_BAD_PHYS_ADDR;
222 }
223
224 return phys.QuadPart;
225 }
226
227 rte_iova_t
rte_mem_virt2iova(const void * virt)228 rte_mem_virt2iova(const void *virt)
229 {
230 phys_addr_t phys;
231
232 if (rte_eal_iova_mode() == RTE_IOVA_VA)
233 return (rte_iova_t)virt;
234
235 phys = rte_mem_virt2phy(virt);
236 if (phys == RTE_BAD_PHYS_ADDR)
237 return RTE_BAD_IOVA;
238 return (rte_iova_t)phys;
239 }
240
241 /* Always using physical addresses under Windows if they can be obtained. */
242 int
rte_eal_using_phys_addrs(void)243 rte_eal_using_phys_addrs(void)
244 {
245 return virt2phys_device != INVALID_HANDLE_VALUE;
246 }
247
248 /* Approximate error mapping from VirtualAlloc2() to POSIX mmap(3). */
249 static void
set_errno_from_win32_alloc_error(DWORD code)250 set_errno_from_win32_alloc_error(DWORD code)
251 {
252 switch (code) {
253 case ERROR_SUCCESS:
254 rte_errno = 0;
255 break;
256
257 case ERROR_INVALID_ADDRESS:
258 /* A valid requested address is not available. */
259 case ERROR_COMMITMENT_LIMIT:
260 /* May occur when committing regular memory. */
261 case ERROR_NO_SYSTEM_RESOURCES:
262 /* Occurs when the system runs out of hugepages. */
263 rte_errno = ENOMEM;
264 break;
265
266 case ERROR_INVALID_PARAMETER:
267 default:
268 rte_errno = EINVAL;
269 break;
270 }
271 }
272
273 void *
eal_mem_reserve(void * requested_addr,size_t size,int flags)274 eal_mem_reserve(void *requested_addr, size_t size, int flags)
275 {
276 HANDLE process;
277 void *virt;
278
279 /* Windows requires hugepages to be committed. */
280 if (flags & EAL_RESERVE_HUGEPAGES) {
281 rte_errno = ENOTSUP;
282 return NULL;
283 }
284
285 process = GetCurrentProcess();
286
287 virt = VirtualAlloc2_ptr(process, requested_addr, size,
288 MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS,
289 NULL, 0);
290 if (virt == NULL) {
291 DWORD err = GetLastError();
292 RTE_LOG_WIN32_ERR("VirtualAlloc2()");
293 set_errno_from_win32_alloc_error(err);
294 return NULL;
295 }
296
297 if ((flags & EAL_RESERVE_FORCE_ADDRESS) && (virt != requested_addr)) {
298 if (!VirtualFreeEx(process, virt, 0, MEM_RELEASE))
299 RTE_LOG_WIN32_ERR("VirtualFreeEx()");
300 rte_errno = ENOMEM;
301 return NULL;
302 }
303
304 return virt;
305 }
306
307 void *
eal_mem_alloc_socket(size_t size,int socket_id)308 eal_mem_alloc_socket(size_t size, int socket_id)
309 {
310 DWORD flags = MEM_RESERVE | MEM_COMMIT;
311 void *addr;
312
313 flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
314 addr = VirtualAllocExNuma(GetCurrentProcess(), NULL, size, flags,
315 PAGE_READWRITE, eal_socket_numa_node(socket_id));
316 if (addr == NULL)
317 rte_errno = ENOMEM;
318 return addr;
319 }
320
321 void *
eal_mem_commit(void * requested_addr,size_t size,int socket_id)322 eal_mem_commit(void *requested_addr, size_t size, int socket_id)
323 {
324 HANDLE process;
325 MEM_EXTENDED_PARAMETER param;
326 DWORD param_count = 0;
327 DWORD flags;
328 void *addr;
329
330 process = GetCurrentProcess();
331
332 if (requested_addr != NULL) {
333 MEMORY_BASIC_INFORMATION info;
334
335 if (VirtualQueryEx(process, requested_addr, &info,
336 sizeof(info)) != sizeof(info)) {
337 RTE_LOG_WIN32_ERR("VirtualQuery(%p)", requested_addr);
338 return NULL;
339 }
340
341 /* Split reserved region if only a part is committed. */
342 flags = MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER;
343 if ((info.RegionSize > size) && !VirtualFreeEx(
344 process, requested_addr, size, flags)) {
345 RTE_LOG_WIN32_ERR(
346 "VirtualFreeEx(%p, %zu, preserve placeholder)",
347 requested_addr, size);
348 return NULL;
349 }
350
351 /* Temporarily release the region to be committed.
352 *
353 * There is an inherent race for this memory range
354 * if another thread allocates memory via OS API.
355 * However, VirtualAlloc2(MEM_REPLACE_PLACEHOLDER)
356 * doesn't work with MEM_LARGE_PAGES on Windows Server.
357 */
358 if (!VirtualFreeEx(process, requested_addr, 0, MEM_RELEASE)) {
359 RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, 0, release)",
360 requested_addr);
361 return NULL;
362 }
363 }
364
365 if (socket_id != SOCKET_ID_ANY) {
366 param_count = 1;
367 memset(¶m, 0, sizeof(param));
368 param.Type = MemExtendedParameterNumaNode;
369 param.ULong = eal_socket_numa_node(socket_id);
370 }
371
372 flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
373 addr = VirtualAlloc2_ptr(process, requested_addr, size,
374 flags, PAGE_READWRITE, ¶m, param_count);
375 if (addr == NULL) {
376 /* Logging may overwrite GetLastError() result. */
377 DWORD err = GetLastError();
378 RTE_LOG_WIN32_ERR("VirtualAlloc2(%p, %zu, commit large pages)",
379 requested_addr, size);
380 set_errno_from_win32_alloc_error(err);
381 return NULL;
382 }
383
384 if ((requested_addr != NULL) && (addr != requested_addr)) {
385 /* We lost the race for the requested_addr. */
386 if (!VirtualFreeEx(process, addr, 0, MEM_RELEASE))
387 RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, release)", addr);
388
389 rte_errno = EADDRNOTAVAIL;
390 return NULL;
391 }
392
393 return addr;
394 }
395
396 int
eal_mem_decommit(void * addr,size_t size)397 eal_mem_decommit(void *addr, size_t size)
398 {
399 HANDLE process;
400 void *stub;
401 DWORD flags;
402
403 process = GetCurrentProcess();
404
405 /* Hugepages cannot be decommited on Windows,
406 * so free them and replace the block with a placeholder.
407 * There is a race for VA in this block until VirtualAlloc2 call.
408 */
409 if (!VirtualFreeEx(process, addr, 0, MEM_RELEASE)) {
410 RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, 0, release)", addr);
411 return -1;
412 }
413
414 flags = MEM_RESERVE | MEM_RESERVE_PLACEHOLDER;
415 stub = VirtualAlloc2_ptr(
416 process, addr, size, flags, PAGE_NOACCESS, NULL, 0);
417 if (stub == NULL) {
418 /* We lost the race for the VA. */
419 if (!VirtualFreeEx(process, stub, 0, MEM_RELEASE))
420 RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, release)", stub);
421 rte_errno = EADDRNOTAVAIL;
422 return -1;
423 }
424
425 /* No need to join reserved regions adjacent to the freed one:
426 * eal_mem_commit() will just pick up the page-size placeholder
427 * created here.
428 */
429 return 0;
430 }
431
432 /**
433 * Free a reserved memory region in full or in part.
434 *
435 * @param addr
436 * Starting address of the area to free.
437 * @param size
438 * Number of bytes to free. Must be a multiple of page size.
439 * @param reserved
440 * Fail if the region is not in reserved state.
441 * @return
442 * * 0 on successful deallocation;
443 * * 1 if region must be in reserved state but it is not;
444 * * (-1) on system API failures.
445 */
446 static int
mem_free(void * addr,size_t size,bool reserved)447 mem_free(void *addr, size_t size, bool reserved)
448 {
449 MEMORY_BASIC_INFORMATION info;
450 HANDLE process;
451
452 process = GetCurrentProcess();
453
454 if (VirtualQueryEx(
455 process, addr, &info, sizeof(info)) != sizeof(info)) {
456 RTE_LOG_WIN32_ERR("VirtualQueryEx(%p)", addr);
457 return -1;
458 }
459
460 if (reserved && (info.State != MEM_RESERVE))
461 return 1;
462
463 /* Free complete region. */
464 if ((addr == info.AllocationBase) && (size == info.RegionSize)) {
465 if (!VirtualFreeEx(process, addr, 0, MEM_RELEASE)) {
466 RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, 0, release)",
467 addr);
468 }
469 return 0;
470 }
471
472 /* Split the part to be freed and the remaining reservation. */
473 if (!VirtualFreeEx(process, addr, size,
474 MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER)) {
475 RTE_LOG_WIN32_ERR(
476 "VirtualFreeEx(%p, %zu, preserve placeholder)",
477 addr, size);
478 return -1;
479 }
480
481 /* Actually free reservation part. */
482 if (!VirtualFreeEx(process, addr, 0, MEM_RELEASE)) {
483 RTE_LOG_WIN32_ERR("VirtualFreeEx(%p, 0, release)", addr);
484 return -1;
485 }
486
487 return 0;
488 }
489
490 void
eal_mem_free(void * virt,size_t size)491 eal_mem_free(void *virt, size_t size)
492 {
493 mem_free(virt, size, false);
494 }
495
496 int
eal_mem_set_dump(void * virt,size_t size,bool dump)497 eal_mem_set_dump(void *virt, size_t size, bool dump)
498 {
499 RTE_SET_USED(virt);
500 RTE_SET_USED(size);
501 RTE_SET_USED(dump);
502
503 /* Windows does not dump reserved memory by default.
504 *
505 * There is <werapi.h> to include or exclude regions from the dump,
506 * but this is not currently required by EAL.
507 */
508
509 rte_errno = ENOTSUP;
510 return -1;
511 }
512
513 void *
rte_mem_map(void * requested_addr,size_t size,int prot,int flags,int fd,uint64_t offset)514 rte_mem_map(void *requested_addr, size_t size, int prot, int flags,
515 int fd, uint64_t offset)
516 {
517 HANDLE file_handle = INVALID_HANDLE_VALUE;
518 HANDLE mapping_handle = INVALID_HANDLE_VALUE;
519 DWORD sys_prot = 0;
520 DWORD sys_access = 0;
521 DWORD size_high = (DWORD)(size >> 32);
522 DWORD size_low = (DWORD)size;
523 DWORD offset_high = (DWORD)(offset >> 32);
524 DWORD offset_low = (DWORD)offset;
525 LPVOID virt = NULL;
526
527 if (prot & RTE_PROT_EXECUTE) {
528 if (prot & RTE_PROT_READ) {
529 sys_prot = PAGE_EXECUTE_READ;
530 sys_access = FILE_MAP_READ | FILE_MAP_EXECUTE;
531 }
532 if (prot & RTE_PROT_WRITE) {
533 sys_prot = PAGE_EXECUTE_READWRITE;
534 sys_access = FILE_MAP_WRITE | FILE_MAP_EXECUTE;
535 }
536 } else {
537 if (prot & RTE_PROT_READ) {
538 sys_prot = PAGE_READONLY;
539 sys_access = FILE_MAP_READ;
540 }
541 if (prot & RTE_PROT_WRITE) {
542 sys_prot = PAGE_READWRITE;
543 sys_access = FILE_MAP_WRITE;
544 }
545 }
546
547 if (flags & RTE_MAP_PRIVATE)
548 sys_access |= FILE_MAP_COPY;
549
550 if ((flags & RTE_MAP_ANONYMOUS) == 0)
551 file_handle = (HANDLE)_get_osfhandle(fd);
552
553 mapping_handle = CreateFileMapping(
554 file_handle, NULL, sys_prot, size_high, size_low, NULL);
555 if (mapping_handle == INVALID_HANDLE_VALUE) {
556 RTE_LOG_WIN32_ERR("CreateFileMapping()");
557 return NULL;
558 }
559
560 /* There is a race for the requested_addr between mem_free()
561 * and MapViewOfFileEx(). MapViewOfFile3() that can replace a reserved
562 * region with a mapping in a single operation, but it does not support
563 * private mappings.
564 */
565 if (requested_addr != NULL) {
566 int ret = mem_free(requested_addr, size, true);
567 if (ret) {
568 if (ret > 0) {
569 RTE_LOG(ERR, EAL, "Cannot map memory "
570 "to a region not reserved\n");
571 rte_errno = EADDRNOTAVAIL;
572 }
573 return NULL;
574 }
575 }
576
577 virt = MapViewOfFileEx(mapping_handle, sys_access,
578 offset_high, offset_low, size, requested_addr);
579 if (!virt) {
580 RTE_LOG_WIN32_ERR("MapViewOfFileEx()");
581 return NULL;
582 }
583
584 if ((flags & RTE_MAP_FORCE_ADDRESS) && (virt != requested_addr)) {
585 if (!UnmapViewOfFile(virt))
586 RTE_LOG_WIN32_ERR("UnmapViewOfFile()");
587 virt = NULL;
588 }
589
590 if (!CloseHandle(mapping_handle))
591 RTE_LOG_WIN32_ERR("CloseHandle()");
592
593 return virt;
594 }
595
596 int
rte_mem_unmap(void * virt,size_t size)597 rte_mem_unmap(void *virt, size_t size)
598 {
599 RTE_SET_USED(size);
600
601 if (!UnmapViewOfFile(virt)) {
602 RTE_LOG_WIN32_ERR("UnmapViewOfFile()");
603 rte_errno = EINVAL;
604 return -1;
605 }
606 return 0;
607 }
608
/* Base virtual address hint for EAL mappings. */
uint64_t
eal_get_baseaddr(void)
{
	/* Windows strategy for memory allocation is undocumented.
	 * A zero base effectively disables address guessing unless
	 * the user provides an explicit address hint.
	 */
	return 0;
}
618
619 size_t
rte_mem_page_size(void)620 rte_mem_page_size(void)
621 {
622 static SYSTEM_INFO info;
623
624 if (info.dwPageSize == 0)
625 GetSystemInfo(&info);
626
627 return info.dwPageSize;
628 }
629
630 int
rte_mem_lock(const void * virt,size_t size)631 rte_mem_lock(const void *virt, size_t size)
632 {
633 /* VirtualLock() takes `void*`, work around compiler warning. */
634 void *addr = (void *)((uintptr_t)virt);
635
636 if (!VirtualLock(addr, size)) {
637 RTE_LOG_WIN32_ERR("VirtualLock(%p %#zx)", virt, size);
638 return -1;
639 }
640
641 return 0;
642 }
643
644 int
rte_eal_memseg_init(void)645 rte_eal_memseg_init(void)
646 {
647 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
648 EAL_LOG_NOT_IMPLEMENTED();
649 return -1;
650 }
651
652 return eal_dynmem_memseg_lists_init();
653 }
654
655 static int
eal_nohuge_init(void)656 eal_nohuge_init(void)
657 {
658 struct rte_mem_config *mcfg;
659 struct rte_memseg_list *msl;
660 int n_segs;
661 uint64_t mem_sz, page_sz;
662 void *addr;
663
664 mcfg = rte_eal_get_configuration()->mem_config;
665 struct internal_config *internal_conf =
666 eal_get_internal_configuration();
667
668 /* nohuge mode is legacy mode */
669 internal_conf->legacy_mem = 1;
670
671 msl = &mcfg->memsegs[0];
672
673 mem_sz = internal_conf->memory;
674 page_sz = RTE_PGSIZE_4K;
675 n_segs = mem_sz / page_sz;
676
677 if (eal_memseg_list_init_named(
678 msl, "nohugemem", page_sz, n_segs, 0, true)) {
679 return -1;
680 }
681
682 addr = VirtualAlloc(
683 NULL, mem_sz, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
684 if (addr == NULL) {
685 RTE_LOG_WIN32_ERR("VirtualAlloc(size=%#zx)", mem_sz);
686 RTE_LOG(ERR, EAL, "Cannot allocate memory\n");
687 return -1;
688 }
689
690 msl->base_va = addr;
691 msl->len = mem_sz;
692
693 eal_memseg_list_populate(msl, addr, n_segs);
694
695 if (mcfg->dma_maskbits &&
696 rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
697 RTE_LOG(ERR, EAL,
698 "%s(): couldn't allocate memory due to IOVA "
699 "exceeding limits of current DMA mask.\n", __func__);
700 return -1;
701 }
702
703 return 0;
704 }
705
706 int
rte_eal_hugepage_init(void)707 rte_eal_hugepage_init(void)
708 {
709 const struct internal_config *internal_conf =
710 eal_get_internal_configuration();
711
712 return internal_conf->no_hugetlbfs ?
713 eal_nohuge_init() : eal_dynmem_hugepage_init();
714 }
715
/* Secondary-process hugepage attach is not implemented on Windows. */
int
rte_eal_hugepage_attach(void)
{
	EAL_LOG_NOT_IMPLEMENTED();
	return -1;
}
722