/*
 *      The PCI Library -- Physical memory mapping for DJGPP
 *
 *      Copyright (c) 2023 Pali Rohár <[email protected]>
 *
 *      Can be freely distributed and used under the terms of the GNU GPL v2+
 *
 *      SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "internal.h"
#include "physmem.h"

#include <errno.h>
#include <stdlib.h>
#include <stdio.h> /* for __DJGPP__ and __DJGPP_MINOR__, available since DJGPP v2.02 and defined indirectly via sys/version.h */
#include <string.h> /* for ffs() */
#include <malloc.h> /* for memalign() */

#include <dpmi.h>
#include <crt0.h> /* for _crt0_startup_flags, __djgpp_memory_handle_list, __djgpp_memory_handle_size and __djgpp_memory_handle() */
#include <sys/nearptr.h> /* for __djgpp_conventional_base, __djgpp_nearptr_enable() and __djgpp_nearptr_disable() */

#ifndef EOVERFLOW
#define EOVERFLOW 40 /* defined since DJGPP v2.04 */
#endif

/*
 * To use __djgpp_conventional_base we have to ensure that the Unix-like sbrk
 * algorithm is not active (by setting the _CRT0_FLAG_NONMOVE_SBRK startup
 * flag) and avoid calling functions like system(), spawn*() or exec*().
 */
int _crt0_startup_flags = _CRT0_FLAG_NONMOVE_SBRK;

static void *
aligned_alloc(size_t alignment, size_t size)
{
  /*
   * Unfortunately DJGPP prior to 2.6 has a broken memalign() function,
   * so for older DJGPP versions use malloc() with manual alignment.
   */
#if !defined(__DJGPP__) || __DJGPP__ < 2 || (__DJGPP__ == 2 && __DJGPP_MINOR__ < 6)
  void *ptr_alloc, *ptr_aligned;

  if (alignment < 8)
    alignment = 8;

  ptr_alloc = malloc(size + alignment);
  if (!ptr_alloc)
    return NULL;

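  /*
   * Round ptr_alloc up to the next multiple of alignment. Because alignment
   * extra bytes were allocated, ptr_aligned + size still fits in the block.
   */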
  ptr_aligned = (void *)(((unsigned long)ptr_alloc & ~(alignment-1)) + alignment);

  /*
   * Store the original pointer returned by malloc() just before our aligned
   * pointer. DJGPP's malloc() returns ptr_alloc aligned to 8 bytes and
   * ptr_aligned is aligned to at least 8 bytes, so there are always at least
   * 4 bytes of free space before the memory pointed to by ptr_aligned.
   */
  *((unsigned long *)ptr_aligned-1) = (unsigned long)ptr_alloc;

  return ptr_aligned;
#else
  return memalign(alignment, size);
#endif
}

static void
aligned_free(void *ptr)
{
#if !defined(__DJGPP__) || __DJGPP__ < 2 || (__DJGPP__ == 2 && __DJGPP_MINOR__ < 6)
  /* Take the original pointer returned by malloc() to release the memory. */
  ptr = (void *)*((unsigned long *)ptr-1);
#endif
  free(ptr);
}

static int
find_sbrk_memory_handle(void *ptr, unsigned long max_length UNUSED /*pre-v2.04*/, unsigned long pagesize UNUSED /*pre-v2.04*/, const __djgpp_sbrk_handle **sh, unsigned long *sh_size)
{
  /*
   * Find the DJGPP sbrk memory handle to which the address ptr belongs and
   * detect the size of this memory handle. DJGPP since v2.04 has the arrays
   * __djgpp_memory_handle_list[] and __djgpp_memory_handle_size[] with sbrk
   * ranges which can be traversed directly. Older DJGPP versions have only the
   * __djgpp_memory_handle() function which returns the handle to which the
   * passed pointer belongs. So finding the size of the memory handle on DJGPP
   * pre-v2.04 versions is slower; its time complexity is O(N^2).
   */
#if !defined(__DJGPP__) || __DJGPP__ < 2 || (__DJGPP__ == 2 && __DJGPP_MINOR__ < 4)

  const __djgpp_sbrk_handle *sh2;
  unsigned long end_offset;

  *sh = __djgpp_memory_handle((unsigned long)ptr);

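  /*
   * Scan backwards from the end of the requested range, page by page, until
   * we reach a page that belongs to the same handle as ptr itself.
   */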
  for (end_offset = max_length-1; end_offset != 0; end_offset = end_offset > pagesize ? end_offset - pagesize : 0)
    {
      sh2 = __djgpp_memory_handle((unsigned long)ptr + end_offset);
      if (!*sh || !sh2)
        {
          /*
           * If sh or sh2 is NULL then it is probably a memory corruption in
           * DJGPP's __djgpp_memory_handle_list[] structure.
           */
          return 0;
        }
      if ((*sh)->handle == sh2->handle)
        break;
    }

  if (end_offset == 0)
    {
      /*
       * If the end page of the sh handle was not found then it is probably a
       * memory corruption in DJGPP's __djgpp_memory_handle_list[] structure.
       */
      return 0;
    }

  *sh_size = (unsigned long)ptr + end_offset+1 - (*sh)->address;
  return 1;

#else

  size_t i;

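  /*
   * Walk __djgpp_memory_handle_list[]; a zero address in any entry after the
   * first one terminates the list.
   */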
  for (i = 0; i < sizeof(__djgpp_memory_handle_list)/sizeof(__djgpp_memory_handle_list[0]) && (i == 0 || __djgpp_memory_handle_list[i].address != 0); i++)
    {
      if ((unsigned long)ptr >= __djgpp_memory_handle_list[i].address &&
          (unsigned long)ptr < __djgpp_memory_handle_list[i].address + __djgpp_memory_handle_size[i])
        break;
    }

  if ((i != 0 && __djgpp_memory_handle_list[i].address == 0) || __djgpp_memory_handle_size[i] == 0)
    {
      /*
       * If the address range was not found in __djgpp_memory_handle_list[]
       * then it is probably a memory corruption in this list.
       */
      return 0;
    }

  *sh = &__djgpp_memory_handle_list[i];
  *sh_size = __djgpp_memory_handle_size[i];
  return 1;

#endif
}

static int
set_and_get_page_attributes(__dpmi_meminfo *mi, short *attributes)
{
  unsigned long size;
  int error;
  size_t i;

  /* __dpmi_set_page_attributes() modifies mi->size, so save it. */
  size = mi->size;
  if (__dpmi_set_page_attributes(mi, attributes) != 0)
    {
      error = __dpmi_error;
      free(attributes);
      switch (error)
        {
        case 0x0000: /* Unsupported function (returned by Windows NTVDM, error number is cleared) */
        case 0x0507: /* Unsupported function (returned by DPMI 0.9 host, error number is same as DPMI function number) */
        case 0x8001: /* Unsupported function (returned by DPMI 1.0 host) */
          errno = ENOSYS;
          break;
        case 0x8010: /* Resource unavailable (DPMI host cannot allocate internal resources to complete an operation) */
        case 0x8013: /* Physical memory unavailable */
        case 0x8014: /* Backing store unavailable */
          errno = ENOMEM;
          break;
        case 0x8002: /* Invalid state (page in wrong state for request) */
        case 0x8021: /* Invalid value (illegal request in bits 0-2 of one or more page attribute words) */
        case 0x8023: /* Invalid handle (in ESI) */
        case 0x8025: /* Invalid linear address (specified range is not within specified block) */
          errno = EINVAL;
          break;
        default: /* Other unspecified error */
          errno = EACCES;
          break;
        }
      return -1;
    }
  mi->size = size;

  /* Clear the output buffer before reading the attributes back. */
  for (i = 0; i < mi->size; i++)
    attributes[i] = 0;

  if (__dpmi_get_page_attributes(mi, attributes) != 0)
    {
      error = __dpmi_error;
      free(attributes);
      switch (error)
        {
        case 0x0000: /* Unsupported function (returned by Windows NTVDM, error number is cleared) */
        case 0x0506: /* Unsupported function (returned by DPMI 0.9 host, error number is same as DPMI function number) */
        case 0x8001: /* Unsupported function (returned by DPMI 1.0 host) */
          errno = ENOSYS;
          break;
        case 0x8010: /* Resource unavailable (DPMI host cannot allocate internal resources to complete an operation) */
          errno = ENOMEM;
          break;
        case 0x8023: /* Invalid handle (in ESI) */
        case 0x8025: /* Invalid linear address (specified range is not within specified block) */
          errno = EINVAL;
          break;
        default: /* Other unspecified error */
          errno = EACCES;
          break;
        }
      return -1;
    }

  return 0;
}

void
physmem_init_config(struct pci_access *a)
{
  pci_define_param(a, "devmem.path", "auto", "DJGPP physical memory access method: auto, devmap, physmap");
}

int
physmem_access(struct pci_access *a UNUSED, int w UNUSED)
{
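  /* This backend always reports physical memory as accessible; the real availability is determined later in physmem_open(). */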
  return 0;
}

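/*
 * Sentinel handles: this backend keeps no per-mapping state, so physmem_open()
 * just returns a tag identifying which mapping method is in use.
 */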
#define PHYSMEM_DEVICE_MAPPING ((struct physmem *)1)
#define PHYSMEM_PHYSADDR_MAPPING ((struct physmem *)2)

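/* Number of PHYSMEM_PHYSADDR_MAPPING users currently holding the fat DS descriptor. */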
static int fat_ds_count;

struct physmem *
physmem_open(struct pci_access *a, int w UNUSED)
{
  const char *devmem = pci_get_param(a, "devmem.path");
  __dpmi_version_ret version;
  char vendor[128];
  int capabilities;
  int try_devmap;
  int try_physmap;
  int ret;

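  /* Select which mapping methods to try according to the devmem.path parameter; unknown values disable both and lead to the EACCES failure below. */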
  if (strcmp(devmem, "auto") == 0)
    {
      try_devmap = 1;
      try_physmap = 1;
    }
  else if (strcmp(devmem, "devmap") == 0)
    {
      try_devmap = 1;
      try_physmap = 0;
    }
  else if (strcmp(devmem, "physmap") == 0)
    {
      try_devmap = 0;
      try_physmap = 1;
    }
  else
    {
      try_devmap = 0;
      try_physmap = 0;
    }

  ret = __dpmi_get_version(&version);
  if (ret != 0)
    a->debug("detected unknown DPMI host...");
  else
    {
      /*
       * Call the DPMI 1.0 function __dpmi_get_capabilities() to detect whether
       * the DPMI host supports device mapping. Some DPMI 0.9 hosts like
       * Windows' NTVDM do not support this function, so they do not fill the
       * capabilities and vendor buffers but still return success. Detect this
       * kind of failure by checking whether the AX register (the low 16 bits
       * of the capabilities variable) was left unmodified and still contains
       * the number of the called DPMI function (0x0401).
       */
      vendor[0] = vendor[1] = vendor[2] = 0;
      ret = __dpmi_get_capabilities(&capabilities, vendor);
      if (ret == 0 && (capabilities & 0xffff) == 0x0401)
        ret = -1;

      if (ret == 0)
        a->debug("detected DPMI %u.%02u host %.126s %u.%u with flags 0x%x and capabilities 0x%x...",
                  (unsigned)version.major, (unsigned)version.minor, vendor+2,
                  (unsigned)(unsigned char)vendor[0], (unsigned)(unsigned char)vendor[1],
                  (unsigned)version.flags, capabilities);
      else
        a->debug("detected DPMI %u.%02u host with flags 0x%x...",
                  (unsigned)version.major, (unsigned)version.minor, (unsigned)version.flags);
    }

  /*
   * If device mapping was selected then use __dpmi_map_device_in_memory_block()
   * for physical memory mapping. It may not be supported by DPMI 0.9 hosts.
   * Device mapping is supported when capability bit 2 is set.
   */
  if (try_devmap)
    {
      if (ret == 0 && (capabilities & (1<<2)))
        {
          a->debug("using physical memory access via Device Mapping...");
          return PHYSMEM_DEVICE_MAPPING;
        }
      a->debug("DPMI Device Mapping not supported...");
    }

  /*
   * If device mapping was not tried or is not supported by the DPMI host then
   * fall back to __dpmi_physical_address_mapping(). But this requires a fat DS
   * descriptor, i.e. increasing the DS descriptor limit to 4 GB, which some
   * DPMI hosts may not support.
   */
  if (try_physmap)
    {
      if (fat_ds_count != 0 || __djgpp_nearptr_enable())
        {
          fat_ds_count++;
          a->debug("using physical memory access via Physical Address Mapping...");
          return PHYSMEM_PHYSADDR_MAPPING;
        }

      /*
       * DJGPP prior to 2.6 has a semi-broken __djgpp_nearptr_enable() function.
       * On failure it may leave the DS descriptor limit in a semi-broken state.
       * So for older DJGPP versions call __djgpp_nearptr_disable() which fixes it.
       */
#if !defined(__DJGPP__) || __DJGPP__ < 2 || (__DJGPP__ == 2 && __DJGPP_MINOR__ < 6)
      __djgpp_nearptr_disable();
#endif
      a->debug("DPMI Physical Address Mapping not usable because Fat DS descriptor not supported...");
    }

  /*
   * Otherwise we do not have access to physical memory mapping. Theoretically
   * it would be possible to use __dpmi_physical_address_mapping() and then
   * create a new segment in which the mapped linear address would be
   * accessible, but this would require accessing memory in the newly created
   * segment via far pointers, which is not only a mess in a native 32-bit
   * application but also unsupported by gcc. If the DPMI host does not allow
   * us to change the DS descriptor limit to 4 GB then it is mostly for
   * security reasons and it probably does not allow access to physical memory
   * mapping at all. This applies to non-DOS systems with integrated DPMI
   * hosts, like Windows NT's NTVDM or older versions of Linux dosemu.
   */
  a->debug("physical memory access not allowed...");
  errno = EACCES;
  return NULL;
}

void
physmem_close(struct physmem *physmem)
{
  /* Disable the 4 GB limit on the DS descriptor when the last user is gone. */
  if (physmem == PHYSMEM_PHYSADDR_MAPPING)
    {
      fat_ds_count--;
      if (fat_ds_count == 0)
        __djgpp_nearptr_disable();
    }
}

long
physmem_get_pagesize(struct physmem *physmem UNUSED)
{
  static unsigned long pagesize;
  if (!pagesize)
    {
      if (__dpmi_get_page_size(&pagesize) != 0)
        pagesize = 0;
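      /* Reject a page size that is not a power of two; the mapping code relies on pagesize-1 masks and an ffs()-based shift. */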
      if (pagesize & (pagesize-1))
        pagesize = 0;
      if (!pagesize)
        pagesize = 4096; /* Fallback value, the most commonly used on x86. */
    }
  return pagesize;
}

void *
physmem_map(struct physmem *physmem, u64 addr, size_t length, int w)
{
  long pagesize = physmem_get_pagesize(physmem);
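  /* pagesize is a power of two, so ffs() returns log2(pagesize)+1 and pagesize_shift converts between bytes and pages. */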
  unsigned pagesize_shift = ffs(pagesize)-1;
  const __djgpp_sbrk_handle *sh;
  unsigned long sh_size;
  unsigned long size;
  __dpmi_meminfo mi;
  short *attributes;
  short one_pg_attr;
  size_t offset;
  int error;
  void *ptr;
  size_t i;

  /* Align length to page size. */
  if (length & (pagesize-1))
    length = (length & ~(pagesize-1)) + pagesize;

  /* Mapping of physical memory above 4 GB is not possible. */
  if (addr >= 0xffffffffUL || addr + length > 0xffffffffUL)
    {
      errno = EOVERFLOW;
      return (void *)-1;
    }

  if (physmem == PHYSMEM_DEVICE_MAPPING)
    {
      /*
       * __dpmi_map_device_in_memory_block() maps physical memory to any
       * page-aligned linear address for which we have a DPMI memory handle.
       * But the DPMI host does not have to support mapping of memory below
       * 1 MB which lies in RAM and is not device memory.
       *
       * __djgpp_map_physical_memory() is a DJGPP wrapper around
       * __dpmi_map_device_in_memory_block() which properly handles memory
       * ranges that span multiple DPMI memory handles. It is common that the
       * DJGPP sbrk() or malloc() allocator returns a contiguous memory range
       * which is backed by two or more DPMI memory handles representing
       * consecutive memory ranges without any gap.
       *
       * __dpmi_map_conventional_memory_in_memory_block() aliases a memory
       * range specified by a page-aligned linear address to another
       * page-aligned linear address. This can be used for mapping memory
       * below 1 MB which lies in RAM and for which
       * __dpmi_map_device_in_memory_block() cannot be used. This function
       * takes a (virtual) linear address, as opposed to
       * __dpmi_map_device_in_memory_block() which takes a physical address.
       *
       * Unfortunately __djgpp_map_physical_memory() internally calls only
       * __dpmi_map_device_in_memory_block() and does not return information
       * about which memory range the call failed for. So it cannot be used
       * for generic memory mapping requests.
       *
       * Also it does not set a useful errno. And even in the latest released
       * DJGPP version v2.5 this function has a suboptimal implementation; its
       * time complexity is O(N^2) (where N is the number of pages).
       *
       * So do not use the __djgpp_map_physical_memory() function and instead
       * write our own logic handling virtual memory ranges which span
       * multiple DPMI memory handles, manually calling
       * __dpmi_map_device_in_memory_block() or
       * __dpmi_map_conventional_memory_in_memory_block() for every handle.
       *
       * We can easily access only linear addresses in our DS segment which
       * is managed by the DJGPP sbrk allocator. So allocate a page-aligned
       * range by aligned_alloc() (our wrapper around malloc()/memalign()) and
       * then for every subrange which is backed by a different DPMI memory
       * handle call the appropriate mapping function with correctly calculated
       * offset and length, to get a contiguous representation of the physical
       * memory range.
       *
       * This approach has the disadvantage that every mapping requires
       * reserving and allocating committed memory in RAM of the size of the
       * mapping itself, which hurts for large mappings. Unfortunately this is
       * the only way because the DJGPP sbrk allocator does not have any
       * (public) function for directly allocating uncommitted memory which is
       * not backed by RAM. Even if the DJGPP sbrk code were extended with
       * this functionality, the corresponding DPMI function
       * __dpmi_allocate_linear_memory() is a DPMI 1.0 function and is not
       * widely supported by DPMI hosts; even the default DJGPP CWSDPMI does
       * not support it.
       */

      ptr = aligned_alloc(pagesize, length);
      if (!ptr)
        {
          errno = ENOMEM;
          return (void *)-1;
        }

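      /*
       * Map the request chunk by chunk; every iteration handles the largest
       * page-aligned piece backed by a single DPMI memory handle.
       */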
      for (offset = 0; offset < length; offset += (mi.size << pagesize_shift))
        {
          /*
           * Find the memory handle, and its size, to which the address
           * ptr+offset belongs. The base address and size of the memory
           * handle must be page-aligned for memory mapping support.
           */
          if (!find_sbrk_memory_handle(ptr + offset, length - offset, pagesize, &sh, &sh_size) ||
              (sh->address & (pagesize-1)) || (sh_size & (pagesize-1)))
            {
              /*
               * Failure detected. If we have some partial mapping, try to undo
               * it via physmem_unmap() which also releases ptr. If we do not
               * have a partial mapping, just release ptr.
               */
              if (offset != 0)
                physmem_unmap(physmem, ptr, offset);
              else
                aligned_free(ptr);
              errno = EINVAL;
              return (void *)-1;
            }

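          /*
           * Describe the subrange to map: the page offset of ptr+offset within
           * this handle and the page count, clamped to the end of the handle.
           */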
          mi.handle = sh->handle;
          mi.address = (unsigned long)ptr + offset - sh->address;
          mi.size = (length - offset) >> pagesize_shift;
          if (mi.size > ((sh_size - mi.address) >> pagesize_shift))
            mi.size = (sh_size - mi.address) >> pagesize_shift;
          if (__dpmi_map_device_in_memory_block(&mi, addr + offset) != 0)
            {
              /*
               * __dpmi_map_device_in_memory_block() may fail for a memory
               * range which belongs to non-device memory below 1 MB. The DPMI
               * host in this case returns DPMI error code 0x8003 (System
               * integrity - invalid device address). For example this is the
               * behavior of the DPMI host HX HDPMI32, which strictly
               * distinguishes between non-device and device memory. If the
               * physical memory range belongs to non-device conventional
               * memory and the DPMI host uses 1:1 mappings for memory below
               * 1 MB then we can try to alias a range of linear addresses
               * below 1 MB into DJGPP's accessible linear address range. For
               * this aliasing of linear (not physical) memory address ranges
               * below the 1 MB boundary there is an additional DPMI 1.0
               * function __dpmi_map_conventional_memory_in_memory_block().
               * But the DPMI host does not have to support it; HDPMI32 does.
               * If the memory range crosses the 1 MB boundary then call it
               * only for the subrange below the 1 MB boundary and leave the
               * remaining subrange for the next iteration of the outer loop.
               * Because the remaining memory range is above the 1 MB limit,
               * only __dpmi_map_device_in_memory_block() would be used for
               * it. This approach produces a contiguous linear range of
               * mapped memory.
               */
              if (__dpmi_error == 0x8003 && addr + offset < 1*1024*1024UL)
                {
                  /* reuse mi */
                  if (addr + offset + (mi.size << pagesize_shift) > 1*1024*1024UL)
                    mi.size = (1*1024*1024UL - addr - offset) >> pagesize_shift;
                  if (__dpmi_map_conventional_memory_in_memory_block(&mi, addr + offset) != 0)
                    {
                      /*
                       * Save __dpmi_error because any DJGPP function may change
                       * it. If we have some partial mapping, try to undo it via
                       * physmem_unmap() which also releases ptr. If we do not
                       * have a partial mapping, just release ptr.
                       */
                      error = __dpmi_error;
                      if (offset != 0)
                        physmem_unmap(physmem, ptr, offset);
                      else
                        aligned_free(ptr);
                      switch (error)
                        {
                        case 0x0000: /* Unsupported function (returned by Windows NTVDM, error number is cleared) */
                        case 0x0509: /* Unsupported function (returned by DPMI 0.9 host, error number is same as DPMI function number) */
                        case 0x8001: /* Unsupported function (returned by DPMI 1.0 host) */
                          /*
                           * Conventional Memory Mapping is not supported.
                           * Device Mapping is supported, but the DPMI host
                           * rejected the Device Mapping request. So report the
                           * same errno value as the failed Device Mapping
                           * switch case, which is ENXIO (because
                           * __dpmi_error == 0x8003).
                           */
                          errno = ENXIO;
                          break;
                        case 0x8003: /* System integrity (invalid conventional memory address) */
                          errno = ENXIO;
                          break;
                        case 0x8010: /* Resource unavailable (DPMI host cannot allocate internal resources to complete an operation) */
                          errno = ENOMEM;
                          break;
                        case 0x8023: /* Invalid handle (in ESI) */
                        case 0x8025: /* Invalid linear address (specified range is not within specified block, or EBX/EDX is not page-aligned) */
                          errno = EINVAL;
                          break;
                        default: /* Other unspecified error */
                          errno = EACCES;
                          break;
                        }
                      return (void *)-1;
                    }
                }
              else
                {
                  /*
                   * Save __dpmi_error because any DJGPP function may change
                   * it. If we have some partial mapping, try to undo it via
                   * physmem_unmap() which also releases ptr. If we do not
                   * have a partial mapping, just release ptr.
                   */
                  error = __dpmi_error;
                  if (offset != 0)
                    physmem_unmap(physmem, ptr, offset);
                  else
                    aligned_free(ptr);
                  switch (error)
                    {
                    case 0x0000: /* Unsupported function (returned by Windows NTVDM, error number is cleared) */
                    case 0x0508: /* Unsupported function (returned by DPMI 0.9 host, error number is same as DPMI function number) */
                    case 0x8001: /* Unsupported function (returned by DPMI 1.0 host) */
                      errno = ENOSYS;
                      break;
                    case 0x8003: /* System integrity (invalid device address) */
                      errno = ENXIO;
                      break;
                    case 0x8010: /* Resource unavailable (DPMI host cannot allocate internal resources to complete an operation) */
                      errno = ENOMEM;
                      break;
                    case 0x8023: /* Invalid handle (in ESI) */
                    case 0x8025: /* Invalid linear address (specified range is not within specified block or EBX/EDX is not page-aligned) */
                      errno = EINVAL;
                      break;
                    default: /* Other unspecified error */
                      errno = EACCES;
                      break;
                    }
                  return (void *)-1;
                }
            }

          /*
           * For a read-only mapping try to change the page attributes: keep
           * the page type unchanged (type 3) and clear the read/write bit
           * (bit 3 unset) for read-only access. Ignore any failure as this
           * function requires a DPMI 1.0 host and so does not have to be
           * supported by DPMI 0.9 hosts. Note that by default a newly created
           * mapping has read/write access, so it also works for mappings
           * which were requested as read-only.
           */
          if (!w)
            {
              attributes = malloc(mi.size * sizeof(*attributes));
              if (attributes)
                {
                  /* reuse mi */
                  for (i = 0; i < mi.size; i++)
                    attributes[i] = (0<<3) | 3;

                  /* __dpmi_set_page_attributes() modifies mi.size */
                  size = mi.size;
                  __dpmi_set_page_attributes(&mi, attributes);
                  mi.size = size;

                  free(attributes);
                }
            }
        }

      return ptr;
    }
  else if (physmem == PHYSMEM_PHYSADDR_MAPPING)
    {
      /*
       * __dpmi_physical_address_mapping() is a DPMI 0.9 function and so does
       * not require device mapping support. But DPMI hosts often allow using
       * it only for memory above 1 MB, and we also have no control over where
       * the DPMI host maps the physical memory. Because this is a DPMI 0.9
       * function, an error code on failure does not have to be provided. If
       * the DPMI host does not provide an error code then the __dpmi_error
       * variable holds the called DPMI function number (0x0800 for Physical
       * Address Mapping). Error codes are provided only by DPMI 1.0 hosts.
       */

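      /* Note that for this DPMI call mi.size is in bytes, unlike the page counts used with the page attribute functions above. */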
      mi.address = addr;
      mi.size = length;
      if (__dpmi_physical_address_mapping(&mi) != 0)
        {
          /*
           * __dpmi_physical_address_mapping() may fail for a memory range
           * which starts below 1 MB. A DPMI 1.0 host in this case returns
           * DPMI error code 0x8021 (Invalid value - address is below 1 MB
           * boundary). A DPMI 0.9 host does not provide an error code, so
           * __dpmi_error contains the value 0x0800. For example this is the
           * behavior of the default DJGPP DPMI host CWSDPMI and also of the
           * Windows 3.x DPMI host. On the other hand, the DPMI host HX
           * HDPMI32 or the Windows 9x DPMI host allow requests for memory
           * ranges below 1 MB and do not fail.
           */
          if ((__dpmi_error == 0x0800 || __dpmi_error == 0x8021) && addr < 1*1024*1024UL)
            {
              /*
               * Expect that conventional memory below 1 MB is always 1:1
               * mapped. On non-paging DPMI hosts this is always true, and
               * paging DPMI hosts should do it too, or at least provide a
               * mapping with compatible or emulated content for compatibility
               * with existing DOS applications. So check that the requested
               * range is below 1 MB.
               */
              if (addr + length > 1*1024*1024UL)
                {
                  errno = ENXIO;
                  return (void *)-1;
                }

              /*
               * Simulate a successful __dpmi_physical_address_mapping() call
               * by setting the 1:1 mapped address.
               */
              mi.address = addr;
            }
          else
            {
              switch (__dpmi_error)
                {
                case 0x0800: /* Error code was not provided (returned by DPMI 0.9 host, error number is same as DPMI function number) */
                  errno = EACCES;
                  break;
                case 0x8003: /* System integrity (DPMI host memory region) */
                case 0x8021: /* Invalid value (address is below 1 MB boundary) */
                  errno = ENXIO;
                  break;
                case 0x8010: /* Resource unavailable (DPMI host cannot allocate internal resources to complete an operation) */
                  errno = ENOMEM;
                  break;
                default: /* Other unspecified error */
                  errno = EACCES;
                  break;
                }
              return (void *)-1;
            }
        }

      /*
       * The DPMI function returns the linear address of the mapping. On
       * non-paging DPMI hosts it does nothing and just returns the passed
       * physical address. With the DS descriptor limit set to 4 GB (set by
       * __djgpp_nearptr_enable()) we have direct access to any linear
       * address. Direct access to a specified linear address is from the
       * __djgpp_conventional_base offset. Note that this is always read/write
       * access and there is no way to make it read-only.
       */
      ptr = (void *)(mi.address + __djgpp_conventional_base);

      /*
       * DJGPP CRT code on paging DPMI hosts enables NULL pointer protection
       * by disabling access to the zero page. If we are running on a DPMI
       * host which does 1:1 mapping and we were asked for a physical address
       * range which includes our zero page, then we have to disable the NULL
       * pointer protection to allow access to that mapped page. Detect this
       * by checking whether our zero page [0, pagesize-1] conflicts with the
       * returned address range [ptr, ptr+length) (note that length is
       * already a multiple of pagesize) and change the page attributes to
       * committed page type (1) with read/write access (bit 3 set). Ignore
       * any failure as this function requires a DPMI 1.0 host and so does not
       * have to be supported by DPMI 0.9 hosts; in that case the DJGPP CRT
       * code did not enable NULL pointer protection and the zero page can be
       * accessed normally.
       */
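      /* The range [ptr, ptr+length) wraps past 0 (and so covers the zero page) exactly when the unsigned sum ptr-1+length overflows. */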
      if ((unsigned long)ptr - 1 > (unsigned long)ptr - 1 + length)
        {
          mi.handle = __djgpp_memory_handle_list[0].handle;
          mi.address = 0;
          mi.size = 1; /* number of pages */
          one_pg_attr = (1<<3) | 1;
          /* __dpmi_set_page_attributes() modifies mi.size */
          __dpmi_set_page_attributes(&mi, &one_pg_attr);
        }

      return ptr;
    }

  /* invalid physmem parameter */
  errno = EBADF;
  return (void *)-1;
}

int
physmem_unmap(struct physmem *physmem, void *ptr, size_t length)
{
  long pagesize = physmem_get_pagesize(physmem);
  unsigned pagesize_shift = ffs(pagesize)-1;
  const __djgpp_sbrk_handle *sh;
  unsigned long sh_size;
  __dpmi_meminfo mi;
  short *attributes;
  size_t offset;
  size_t i;

  /* Align length to page size. */
  if (length & (pagesize-1))
    length = (length & ~(pagesize-1)) + pagesize;

  if (physmem == PHYSMEM_DEVICE_MAPPING)
    {
      /*
       * Memory mapped by __dpmi_map_conventional_memory_in_memory_block() or
       * by __dpmi_map_device_in_memory_block() can be unmapped by changing
       * the page attributes back to what the allocator uses: page type
       * committed (1), read/write access (bit 3 set) and without setting the
       * initial page access and dirty bits (bit 4 unset).
       *
       * There is a DJGPP function __djgpp_set_page_attributes() which sets
       * page attributes for the memory range specified by a pointer, but it
       * has the same disadvantages as the __djgpp_map_physical_memory()
       * function (see the comment in physmem_map()). So use
       * __dpmi_set_page_attributes() instead.
       *
       * If changing the page attributes fails then do not return the memory
       * back to the malloc pool, because it is still mapped to physical
       * memory and cannot be used by the allocator for general purposes
       * anymore.
       *
       * Some DPMI hosts like HDPMI pre-v3.22 (part of HX pre-v2.22) or
       * DPMIONE do not support changing the page type directly from mapped to
       * committed. But they support changing it indirectly: first from mapped
       * to uncommitted and then from uncommitted to committed. So if the
       * direct change from mapped to committed fails, try the workaround via
       * the indirect change.
       */

      static int do_indirect_change = 0;

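      /*
       * Restore attributes chunk by chunk, one DPMI memory handle at a time,
       * mirroring the mapping loop in physmem_map().
       */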
      for (offset = 0; offset < length; offset += (mi.size << pagesize_shift))
        {
          /*
           * Find the memory handle, and its size, to which the address
           * ptr+offset belongs. The base address and size of the memory
           * handle must be page-aligned for changing page attributes.
           */
          if (!find_sbrk_memory_handle(ptr + offset, length - offset, pagesize, &sh, &sh_size) ||
              (sh->address & (pagesize-1)) || (sh_size & (pagesize-1)))
            {
              errno = EINVAL;
              return -1;
            }

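          /* Select the per-handle subrange, computed the same way as in physmem_map(). */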
          mi.handle = sh->handle;
          mi.address = (unsigned long)ptr + offset - sh->address;
          mi.size = (length - offset) >> pagesize_shift;
          if (mi.size > ((sh_size - mi.address) >> pagesize_shift))
            mi.size = (sh_size - mi.address) >> pagesize_shift;

          attributes = malloc(mi.size * sizeof(*attributes));
          if (!attributes)
            {
              errno = ENOMEM;
              return -1;
            }

retry_via_indirect_change:
          if (do_indirect_change)
            {
              for (i = 0; i < mi.size; i++)
                attributes[i] = (0<<4) | (0<<3) | 0; /* 0 = page type uncommitted */

              if (set_and_get_page_attributes(&mi, attributes) != 0)
                return -1;

              for (i = 0; i < mi.size; i++)
                {
                  /* Check that every page type is uncommitted (0). */
                  if ((attributes[i] & 0x7) != 0)
                    {
                      free(attributes);
                      errno = EACCES;
                      return -1;
                    }
                }
            }

          for (i = 0; i < mi.size; i++)
            attributes[i] = (0<<4) | (1<<3) | 1; /* 1 = page type committed */

          if (set_and_get_page_attributes(&mi, attributes) != 0)
            return -1;

          for (i = 0; i < mi.size; i++)
            {
              /* Check that every page type is committed (1) and has read/write access (bit 3 set). */
              if (((attributes[i] & 0x7) != 1) || !(attributes[i] & (1<<3)))
                {
                  if (!do_indirect_change)
                    {
                      /*
                       * Some DPMI hosts do not support changing the page type
                       * from mapped to committed, but do not report any error
                       * for such a change request. Try the following
                       * workaround: change the page type indirectly, first
                       * from mapped to uncommitted and then to committed.
                       */
                      do_indirect_change = 1;
                      goto retry_via_indirect_change;
                    }
                  free(attributes);
                  errno = EACCES;
                  return -1;
                }
            }

          free(attributes);
        }

      /*
       * Now we are sure that ptr is backed by committed memory which can be
       * returned back to the DJGPP sbrk pool.
       */
      aligned_free(ptr);
      return 0;
    }
  else if (physmem == PHYSMEM_PHYSADDR_MAPPING)
    {
      /*
       * A physical address mapping done by __dpmi_physical_address_mapping()
       * can be unmapped only by the __dpmi_free_physical_address_mapping()
       * function. This function takes the linear address of the mapped
       * region; the direct access pointer refers to the linear address offset
       * by __djgpp_conventional_base. On non-paging DPMI hosts, physical
       * memory cannot be unmapped at all because the whole physical memory is
       * always available, and so this function either fails or does nothing.
       * Moreover this unmapping function requires a DPMI 1.0 host, as opposed
       * to the mapping function which is available also in DPMI 0.9. It means
       * that DPMI 0.9 hosts do not provide the ability to unmap already
       * mapped physical addresses. This DPMI unmapping function is not
       * commonly supported by DPMI hosts; even the default DJGPP CWSDPMI does
       * not support it. But a few alternative DPMI hosts like PMODE/DJ,
       * WDOSX, HDPMI32 or DPMIONE support it. So expect failure from this
       * function call; in most cases it is not possible to unmap physical
       * memory which was previously mapped by
       * __dpmi_physical_address_mapping().
       */
      mi.address = (unsigned long)ptr - __djgpp_conventional_base;
      if (__dpmi_free_physical_address_mapping(&mi) != 0)
        {
          /*
           * Do not report an error when the DPMI function failed with error
           * code 0x8025 (invalid linear address) and the linear address is
           * below 1 MB. The first 1 MB of the memory space should always stay
           * mapped.
           */
          if (__dpmi_error != 0x8025 || mi.address >= 1*1024*1024UL)
            {
              switch (__dpmi_error)
                {
                case 0x0000: /* Unsupported function (returned by Windows NTVDM, error number is cleared) */
                case 0x0801: /* Unsupported function (returned by DPMI 0.9 host, error number is same as DPMI function number) */
                case 0x8001: /* Unsupported function (returned by DPMI 1.0 host) */
                  errno = ENOSYS;
                  break;
                case 0x8010: /* Resource unavailable (DPMI host cannot allocate internal resources to complete an operation) */
                  errno = ENOMEM;
                  break;
                case 0x8025: /* Invalid linear address */
                  errno = EINVAL;
                  break;
                default: /* Other unspecified error */
                  errno = EACCES;
                  break;
                }
              return -1;
            }
        }

      return 0;
    }

  /* invalid physmem parameter */
  errno = EBADF;
  return -1;
}