1 /*
2 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach_assert.h>
30 #include <mach/vm_types.h>
31 #include <mach/mach_time.h>
32 #include <kern/timer.h>
33 #include <kern/clock.h>
34 #include <kern/machine.h>
35 #include <kern/iotrace.h>
36 #include <mach/machine.h>
37 #include <mach/machine/vm_param.h>
38 #include <mach_kdp.h>
39 #include <kdp/kdp_udp.h>
40 #if !MACH_KDP
41 #include <kdp/kdp_callout.h>
42 #endif /* !MACH_KDP */
43 #include <arm/cpu_data.h>
44 #include <arm/cpu_data_internal.h>
45 #include <arm/caches_internal.h>
46
47 #include <vm/vm_kern.h>
48 #include <vm/vm_map.h>
49 #include <vm/pmap.h>
50
51 #include <arm/misc_protos.h>
52
53 #include <sys/errno.h>
54
55 #include <libkern/section_keywords.h>
56 #include <libkern/OSDebug.h>
57
58 #define INT_SIZE (BYTE_SIZE * sizeof (int))
59
60 #define BCOPY_PHYS_SRC_IS_PHYS(flags) (((flags) & cppvPsrc) != 0)
61 #define BCOPY_PHYS_DST_IS_PHYS(flags) (((flags) & cppvPsnk) != 0)
62 #define BCOPY_PHYS_SRC_IS_USER(flags) (((flags) & (cppvPsrc | cppvKmap)) == 0)
63 #define BCOPY_PHYS_DST_IS_USER(flags) (((flags) & (cppvPsnk | cppvKmap)) == 0)
64
/*
 * Copy 'bytes' bytes between combinations of physical, kernel-virtual and
 * user-virtual addresses, as encoded in 'flags' (cppvPsrc/cppvPsnk mark the
 * corresponding operand as physical; neither cppvP* nor cppvKmap means a
 * user-virtual address -- see the BCOPY_PHYS_*_IS_* macros above).
 *
 * Physical operands are accessed either directly (when pmap_valid_address()
 * says a kernel mapping is usable) or through a temporary per-CPU copy
 * window mapped with the page's cache attributes.  Whenever a copy window
 * or a page-bounded range is involved, the loop advances at most one page
 * per iteration.
 *
 * Returns KERN_SUCCESS, or the error from copyin()/copyout() when one side
 * is a user address.
 */
static kern_return_t
bcopy_phys_internal(addr64_t src, addr64_t dst, vm_size_t bytes, int flags)
{
	unsigned int src_index;
	unsigned int dst_index;
	vm_offset_t src_offset;
	vm_offset_t dst_offset;
	unsigned int wimg_bits_src, wimg_bits_dst;
	unsigned int cpu_num = 0;
	ppnum_t pn_src;
	ppnum_t pn_dst;
	addr64_t end __assert_only;
	kern_return_t res = KERN_SUCCESS;

	/* Non-user ranges must not wrap the 64-bit address space. */
	if (!BCOPY_PHYS_SRC_IS_USER(flags)) {
		assert(!__improbable(os_add_overflow(src, bytes, &end)));
	}
	if (!BCOPY_PHYS_DST_IS_USER(flags)) {
		assert(!__improbable(os_add_overflow(dst, bytes, &end)));
	}

	while ((bytes > 0) && (res == KERN_SUCCESS)) {
		src_offset = src & PAGE_MASK;
		dst_offset = dst & PAGE_MASK;
		boolean_t use_copy_window_src = FALSE;
		boolean_t use_copy_window_dst = FALSE;
		/* count/count2: bytes copyable this iteration, limited by the
		 * source/destination side respectively. */
		vm_size_t count = bytes;
		vm_size_t count2 = bytes;
		if (BCOPY_PHYS_SRC_IS_PHYS(flags)) {
			use_copy_window_src = !pmap_valid_address(src);
			pn_src = (ppnum_t)(src >> PAGE_SHIFT);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
			count = PAGE_SIZE - src_offset;
			wimg_bits_src = pmap_cache_attributes(pn_src);
			/* Pages with non-default cacheability must go through a copy window. */
			if ((wimg_bits_src & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
				use_copy_window_src = TRUE;
			}
#else
			if (use_copy_window_src) {
				wimg_bits_src = pmap_cache_attributes(pn_src);
				count = PAGE_SIZE - src_offset;
			}
#endif
		}
		if (BCOPY_PHYS_DST_IS_PHYS(flags)) {
			// write preflighting needed for things like dtrace which may write static read-only mappings
			use_copy_window_dst = (!pmap_valid_address(dst) || !mmu_kvtop_wpreflight(phystokv((pmap_paddr_t)dst)));
			pn_dst = (ppnum_t)(dst >> PAGE_SHIFT);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
			count2 = PAGE_SIZE - dst_offset;
			wimg_bits_dst = pmap_cache_attributes(pn_dst);
			if ((wimg_bits_dst & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
				use_copy_window_dst = TRUE;
			}
#else
			if (use_copy_window_dst) {
				wimg_bits_dst = pmap_cache_attributes(pn_dst);
				count2 = PAGE_SIZE - dst_offset;
			}
#endif
		}

		char *tmp_src;
		char *tmp_dst;

		/* Copy windows are per-CPU, so preemption must stay disabled
		 * while any window mapping is live. */
		if (use_copy_window_src || use_copy_window_dst) {
			mp_disable_preemption();
			cpu_num = cpu_number();
		}

		if (use_copy_window_src) {
			src_index = pmap_map_cpu_windows_copy(pn_src, VM_PROT_READ, wimg_bits_src);
			tmp_src = (char*)(pmap_cpu_windows_copy_addr(cpu_num, src_index) + src_offset);
		} else if (BCOPY_PHYS_SRC_IS_PHYS(flags)) {
			/* phystokv_range() may shrink 'count' to the contiguous mapping. */
			tmp_src = (char*)phystokv_range((pmap_paddr_t)src, &count);
		} else {
			tmp_src = (char*)src;
		}
		if (use_copy_window_dst) {
			dst_index = pmap_map_cpu_windows_copy(pn_dst, VM_PROT_READ | VM_PROT_WRITE, wimg_bits_dst);
			tmp_dst = (char*)(pmap_cpu_windows_copy_addr(cpu_num, dst_index) + dst_offset);
		} else if (BCOPY_PHYS_DST_IS_PHYS(flags)) {
			tmp_dst = (char*)phystokv_range((pmap_paddr_t)dst, &count2);
		} else {
			tmp_dst = (char*)dst;
		}

		/* This iteration copies the minimum of the source limit, the
		 * destination limit, and the bytes remaining. */
		if (count > count2) {
			count = count2;
		}
		if (count > bytes) {
			count = bytes;
		}

		/* User addresses must be accessed with copyin()/copyout(). */
		if (BCOPY_PHYS_SRC_IS_USER(flags)) {
			res = copyin((user_addr_t)src, tmp_dst, count);
		} else if (BCOPY_PHYS_DST_IS_USER(flags)) {
			res = copyout(tmp_src, (user_addr_t)dst, count);
		} else {
			bcopy(tmp_src, tmp_dst, count);
		}

		if (use_copy_window_src) {
			pmap_unmap_cpu_windows_copy(src_index);
		}
		if (use_copy_window_dst) {
			pmap_unmap_cpu_windows_copy(dst_index);
		}
		if (use_copy_window_src || use_copy_window_dst) {
			mp_enable_preemption();
		}

		src += count;
		dst += count;
		bytes -= count;
	}
	return res;
}
185
186 void
bcopy_phys(addr64_t src,addr64_t dst,vm_size_t bytes)187 bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes)
188 {
189 bcopy_phys_internal(src, dst, bytes, cppvPsrc | cppvPsnk);
190 }
191
/*
 * Zero 'bytes' bytes of physical memory at 'src64'.
 * On this platform there is no separate non-cached zeroing path, so
 * this simply forwards to bzero_phys(), which already handles pages
 * with non-default cache attributes.
 */
void
bzero_phys_nc(addr64_t src64, vm_size_t bytes)
{
	bzero_phys(src64, bytes);
}
197
198 extern void *secure_memset(void *, int, size_t);
199
/*
 * Zero 'bytes' bytes starting at physical address 'src'.
 *
 * Pages with a usable kernel mapping are zeroed in place; other pages (or
 * pages with non-default cache attributes, on non-coherent configurations)
 * are zeroed through a temporary per-CPU copy window.  Normal cacheable
 * memory is zeroed with 'dc zva'; anything else falls back to
 * secure_memset(), which is safe for device memory.
 */
void
bzero_phys(addr64_t src, vm_size_t bytes)
{
	unsigned int wimg_bits;
	unsigned int cpu_num = cpu_number();
	ppnum_t pn;
	addr64_t end __assert_only;

	/* The range must not wrap the 64-bit address space. */
	assert(!__improbable(os_add_overflow(src, bytes, &end)));

	vm_offset_t offset = src & PAGE_MASK;
	while (bytes > 0) {
		vm_size_t count = bytes;

		boolean_t use_copy_window = !pmap_valid_address(src);
		pn = (ppnum_t)(src >> PAGE_SHIFT);
		wimg_bits = pmap_cache_attributes(pn);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
		count = PAGE_SIZE - offset;
		/* Pages with non-default cacheability must go through a copy window. */
		if ((wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
			use_copy_window = TRUE;
		}
#else
		if (use_copy_window) {
			count = PAGE_SIZE - offset;
		}
#endif
		char *buf;
		unsigned int index;
		if (use_copy_window) {
			/* Copy windows are per-CPU; keep preemption disabled
			 * until the window is unmapped below. */
			mp_disable_preemption();
			cpu_num = cpu_number();
			index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
			buf = (char *)(pmap_cpu_windows_copy_addr(cpu_num, index) + offset);
		} else {
			/* phystokv_range() may shrink 'count' to the contiguous mapping. */
			buf = (char *)phystokv_range((pmap_paddr_t)src, &count);
		}

		if (count > bytes) {
			count = bytes;
		}

		switch (wimg_bits & VM_WIMG_MASK) {
		case VM_WIMG_DEFAULT:
		case VM_WIMG_WCOMB:
		case VM_WIMG_INNERWBACK:
		case VM_WIMG_WTHRU:
#if HAS_UCNORMAL_MEM
		case VM_WIMG_RT:
#endif
			/**
			 * When we are zerofilling a normal page, there are a couple of assumptions that can
			 * be made.
			 *
			 * 1. The destination to be zeroed is page-sized and page-aligned, making the unconditional
			 *    4 stp instructions in bzero redundant.
			 * 2. The dczva loop for zerofilling can be fully unrolled at compile-time thanks to
			 *    known size of the destination, reducing instruction fetch overhead caused by
			 *    the branch backward in a tight loop.
			 */
			if (count == PAGE_SIZE) {
				/**
				 * Thanks to how count is computed above, buf should always be page-size aligned
				 * when count == PAGE_SIZE.
				 */
				assert((((addr64_t) buf) & PAGE_MASK) == 0);

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpass-failed"
#pragma unroll
				for (addr64_t dczva_offset = 0; dczva_offset < PAGE_SIZE; dczva_offset += (1ULL << MMU_CLINE)) {
					asm volatile ("dc zva, %0" : : "r"(buf + dczva_offset) : "memory");
				}
#pragma clang diagnostic pop
			} else {
				bzero(buf, count);
			}
			break;
		default:
			/* 'dc zva' performed by bzero is not safe for device memory */
			secure_memset((void*)buf, 0, count);
		}

		if (use_copy_window) {
			pmap_unmap_cpu_windows_copy(index);
			mp_enable_preemption();
		}

		src += count;
		bytes -= count;
		/* Only the first iteration can start mid-page. */
		offset = 0;
	}
}
294
295 /*
296 * Read data from a physical address.
297 */
298
#if BUILD_QUAD_WORD_FUNCS
/*
 * Read a 128-bit value from 'addr' using a single ldp (load pair of
 * 64-bit registers).  The "memory" clobber keeps the compiler from
 * reordering the access around other memory operations.
 */
static inline uint128_t
__read128(vm_address_t addr)
{
	uint64_t hi, lo;

	asm volatile (
                "ldp %[lo], %[hi], [%[addr]]" "\n"
                : [lo] "=r"(lo), [hi] "=r"(hi)
                : [addr] "r"(addr)
                : "memory"
        );

	/* Little-endian: the low 64 bits come from the lower address. */
	return (((uint128_t)hi) << 64) + lo;
}
#endif /* BUILD_QUAD_WORD_FUNCS */
315
/*
 * Read a value of 'size' bytes (1, 2, 4, 8, or 16 when
 * BUILD_QUAD_WORD_FUNCS) from physical address 'paddr'.  The access must
 * not span a page boundary; otherwise this panics.
 *
 * The page is accessed directly when a kernel mapping is usable, otherwise
 * through a temporary per-CPU copy window.  When ML_IO_TIMEOUTS_ENABLED,
 * the access is timed with interrupts disabled, and slow reads are traced,
 * reported via DTrace/kdebug, and can panic when phy_read_panic is set.
 */
static uint128_t
ml_phys_read_data(pmap_paddr_t paddr, int size)
{
	vm_address_t addr;
	ppnum_t pn = atop_kernel(paddr);
	ppnum_t pn_end = atop_kernel(paddr + size - 1);
	uint128_t result = 0;
	uint8_t s1;
	uint16_t s2;
	uint32_t s4;
	uint64_t s8;
	unsigned int index;
	bool use_copy_window = true;

	if (__improbable(pn_end != pn)) {
		panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr);
	}

#ifdef ML_IO_TIMEOUTS_ENABLED
	bool istate, timeread = false;
	uint64_t sabs, eabs;

	uint32_t report_phy_read_delay = os_atomic_load(&report_phy_read_delay_to, relaxed);
	uint32_t const trace_phy_read_delay = os_atomic_load(&trace_phy_read_delay_to, relaxed);

	if (__improbable(report_phy_read_delay != 0)) {
		/* Disable interrupts so the latency measurement isn't inflated
		 * by interrupt handling. */
		istate = ml_set_interrupts_enabled_with_debug(false, false);
		sabs = ml_get_timebase();
		timeread = true;
	}
#ifdef ML_IO_SIMULATE_STRETCHED_ENABLED
	/* Test hook: artificially age the start timestamp to simulate a slow IO. */
	if (__improbable(timeread && simulate_stretched_io)) {
		sabs -= simulate_stretched_io;
	}
#endif /* ML_IO_SIMULATE_STRETCHED_ENABLED */
#endif /* ML_IO_TIMEOUTS_ENABLED */

#if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
	if (pmap_valid_address(paddr)) {
		addr = phystokv(paddr);
		use_copy_window = false;
	}
#endif /* defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__ */

	if (use_copy_window) {
		/* Copy windows are per-CPU; preemption stays off until unmapped. */
		mp_disable_preemption();
		unsigned int wimg_bits = pmap_cache_attributes(pn);
		index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
		addr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);
	}

	switch (size) {
	case 1:
		s1 = *(volatile uint8_t *)addr;
		result = s1;
		break;
	case 2:
		s2 = *(volatile uint16_t *)addr;
		result = s2;
		break;
	case 4:
		s4 = *(volatile uint32_t *)addr;
		result = s4;
		break;
	case 8:
		s8 = *(volatile uint64_t *)addr;
		result = s8;
		break;
#if BUILD_QUAD_WORD_FUNCS
	case 16:
		result = __read128(addr);
		break;
#endif /* BUILD_QUAD_WORD_FUNCS */
	default:
		panic("Invalid size %d for ml_phys_read_data", size);
		break;
	}

	if (use_copy_window) {
		pmap_unmap_cpu_windows_copy(index);
		mp_enable_preemption();
	}

#ifdef ML_IO_TIMEOUTS_ENABLED
	if (__improbable(timeread)) {
		eabs = ml_get_timebase();

		iotrace(IOTRACE_PHYS_READ, 0, addr, size, result, sabs, eabs - sabs);

		if (__improbable((eabs - sabs) > report_phy_read_delay)) {
			DTRACE_PHYSLAT4(physread, uint64_t, (eabs - sabs),
			    uint64_t, addr, uint32_t, size, uint64_t, result);

			/* Give platform code a chance to raise the reporting
			 * threshold for this particular address. */
			uint64_t override = 0;
			override_io_timeouts(0, paddr, &override, NULL);

			if (override != 0) {
#if SCHED_HYGIENE_DEBUG
				/*
				 * The IO timeout was overridden. If we were called in an
				 * interrupt handler context, that can lead to a timeout
				 * panic, so we need to abandon the measurement.
				 */
				if (interrupt_masked_debug_mode == SCHED_HYGIENE_MODE_PANIC) {
					ml_irq_debug_abandon();
				}
#endif
				report_phy_read_delay = override;
			}
		}

		/* Re-check against the (possibly overridden) ceiling. */
		if (__improbable((eabs - sabs) > report_phy_read_delay)) {
			if (phy_read_panic && (machine_timeout_suspended() == FALSE)) {
				const uint64_t hi = (uint64_t)(result >> 64);
				const uint64_t lo = (uint64_t)(result);
				uint64_t nsec = 0;
				absolutetime_to_nanoseconds(eabs - sabs, &nsec);
				panic("Read from physical addr 0x%llx took %llu ns, "
				    "result: 0x%016llx%016llx (start: %llu, end: %llu), ceiling: %llu",
				    (unsigned long long)addr, nsec, hi, lo, sabs, eabs,
				    (uint64_t)report_phy_read_delay);
			}
		}

		if (__improbable(trace_phy_read_delay > 0 && (eabs - sabs) > trace_phy_read_delay)) {
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_READ),
			    (eabs - sabs), sabs, addr, result);
		}

		ml_set_interrupts_enabled_with_debug(istate, false);
	}
#endif /* ML_IO_TIMEOUTS_ENABLED */

	return result;
}
451
452 unsigned int
ml_phys_read(vm_offset_t paddr)453 ml_phys_read(vm_offset_t paddr)
454 {
455 return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
456 }
457
458 unsigned int
ml_phys_read_word(vm_offset_t paddr)459 ml_phys_read_word(vm_offset_t paddr)
460 {
461 return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
462 }
463
464 unsigned int
ml_phys_read_64(addr64_t paddr64)465 ml_phys_read_64(addr64_t paddr64)
466 {
467 return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
468 }
469
470 unsigned int
ml_phys_read_word_64(addr64_t paddr64)471 ml_phys_read_word_64(addr64_t paddr64)
472 {
473 return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
474 }
475
476 unsigned int
ml_phys_read_half(vm_offset_t paddr)477 ml_phys_read_half(vm_offset_t paddr)
478 {
479 return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 2);
480 }
481
482 unsigned int
ml_phys_read_half_64(addr64_t paddr64)483 ml_phys_read_half_64(addr64_t paddr64)
484 {
485 return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 2);
486 }
487
488 unsigned int
ml_phys_read_byte(vm_offset_t paddr)489 ml_phys_read_byte(vm_offset_t paddr)
490 {
491 return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 1);
492 }
493
494 unsigned int
ml_phys_read_byte_64(addr64_t paddr64)495 ml_phys_read_byte_64(addr64_t paddr64)
496 {
497 return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 1);
498 }
499
500 unsigned long long
ml_phys_read_double(vm_offset_t paddr)501 ml_phys_read_double(vm_offset_t paddr)
502 {
503 return ml_phys_read_data((pmap_paddr_t)paddr, 8);
504 }
505
506 unsigned long long
ml_phys_read_double_64(addr64_t paddr64)507 ml_phys_read_double_64(addr64_t paddr64)
508 {
509 return ml_phys_read_data((pmap_paddr_t)paddr64, 8);
510 }
511
#if BUILD_QUAD_WORD_FUNCS
/* Read a 128-bit value from physical address 'paddr'. */
uint128_t
ml_phys_read_quad(vm_offset_t paddr)
{
	uint128_t value;

	value = ml_phys_read_data((pmap_paddr_t)paddr, 16);
	return value;
}

/* Read a 128-bit value from 64-bit physical address 'paddr64'. */
uint128_t
ml_phys_read_quad_64(addr64_t paddr64)
{
	uint128_t value;

	value = ml_phys_read_data((pmap_paddr_t)paddr64, 16);
	return value;
}
#endif /* BUILD_QUAD_WORD_FUNCS */
525
526 /*
527 * Write data to a physical address.
528 */
529
#if BUILD_QUAD_WORD_FUNCS
/*
 * Write the 128-bit value 'data' to 'addr' using a single stp (store pair
 * of 64-bit registers).  The "memory" clobber keeps the compiler from
 * reordering the access around other memory operations.
 */
static inline void
__write128(vm_address_t addr, uint128_t data)
{
	/* Little-endian: the low 64 bits go to the lower address. */
	const uint64_t hi = (uint64_t)(data >> 64);
	const uint64_t lo = (uint64_t)(data);

	asm volatile (
                "stp %[lo], %[hi], [%[addr]]" "\n"
                : /**/
                : [lo] "r"(lo), [hi] "r"(hi), [addr] "r"(addr)
                : "memory"
        );
}
#endif /* BUILD_QUAD_WORD_FUNCS */
545
/*
 * Write the low 'size' bytes (1, 2, 4, 8, or 16 when
 * BUILD_QUAD_WORD_FUNCS) of 'data' to physical address 'paddr'.  The
 * access must not span a page boundary; otherwise this panics.
 *
 * The page is accessed directly when a kernel mapping is usable, otherwise
 * through a temporary per-CPU copy window.  When ML_IO_TIMEOUTS_ENABLED,
 * the access is timed with interrupts disabled, and slow writes are
 * traced, reported via DTrace/kdebug, and can panic when phy_write_panic
 * is set.
 */
static void
ml_phys_write_data(pmap_paddr_t paddr, uint128_t data, int size)
{
	vm_address_t addr;
	ppnum_t pn = atop_kernel(paddr);
	ppnum_t pn_end = atop_kernel(paddr + size - 1);
	unsigned int index;
	bool use_copy_window = true;

	if (__improbable(pn_end != pn)) {
		panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr);
	}

#ifdef ML_IO_TIMEOUTS_ENABLED
	bool istate, timewrite = false;
	uint64_t sabs, eabs;

	uint32_t report_phy_write_delay = os_atomic_load(&report_phy_write_delay_to, relaxed);
	uint32_t const trace_phy_write_delay = os_atomic_load(&trace_phy_write_delay_to, relaxed);

	if (__improbable(report_phy_write_delay != 0)) {
		/* Disable interrupts so the latency measurement isn't inflated
		 * by interrupt handling. */
		istate = ml_set_interrupts_enabled_with_debug(false, false);
		sabs = ml_get_timebase();
		timewrite = true;
	}
#ifdef ML_IO_SIMULATE_STRETCHED_ENABLED
	/* Test hook: artificially age the start timestamp to simulate a slow IO. */
	if (__improbable(timewrite && simulate_stretched_io)) {
		sabs -= simulate_stretched_io;
	}
#endif /* ML_IO_SIMULATE_STRETCHED_ENABLED */
#endif /* ML_IO_TIMEOUTS_ENABLED */

#if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
	if (pmap_valid_address(paddr)) {
		addr = phystokv(paddr);
		use_copy_window = false;
	}
#endif /* defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__ */

	if (use_copy_window) {
		/* Copy windows are per-CPU; preemption stays off until unmapped. */
		mp_disable_preemption();
		unsigned int wimg_bits = pmap_cache_attributes(pn);
		index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
		addr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);
	}

	switch (size) {
	case 1:
		*(volatile uint8_t *)addr = (uint8_t)data;
		break;
	case 2:
		*(volatile uint16_t *)addr = (uint16_t)data;
		break;
	case 4:
		*(volatile uint32_t *)addr = (uint32_t)data;
		break;
	case 8:
		*(volatile uint64_t *)addr = (uint64_t)data;
		break;
#if BUILD_QUAD_WORD_FUNCS
	case 16:
		__write128(addr, data);
		break;
#endif /* BUILD_QUAD_WORD_FUNCS */
	default:
		panic("Invalid size %d for ml_phys_write_data", size);
	}

	if (use_copy_window) {
		pmap_unmap_cpu_windows_copy(index);
		mp_enable_preemption();
	}

#ifdef ML_IO_TIMEOUTS_ENABLED
	if (__improbable(timewrite)) {
		eabs = ml_get_timebase();

		iotrace(IOTRACE_PHYS_WRITE, 0, paddr, size, data, sabs, eabs - sabs);

		if (__improbable((eabs - sabs) > report_phy_write_delay)) {
			DTRACE_PHYSLAT4(physwrite, uint64_t, (eabs - sabs),
			    uint64_t, paddr, uint32_t, size, uint64_t, data);

			/* Give platform code a chance to raise the reporting
			 * threshold for this particular address. */
			uint64_t override = 0;
			override_io_timeouts(0, paddr, NULL, &override);
			if (override != 0) {
#if SCHED_HYGIENE_DEBUG
				/*
				 * The IO timeout was overridden. If we were called in an
				 * interrupt handler context, that can lead to a timeout
				 * panic, so we need to abandon the measurement.
				 */
				if (interrupt_masked_debug_mode == SCHED_HYGIENE_MODE_PANIC) {
					ml_irq_debug_abandon();
				}
#endif
				report_phy_write_delay = override;
			}
		}

		/* Re-check against the (possibly overridden) ceiling. */
		if (__improbable((eabs - sabs) > report_phy_write_delay)) {
			if (phy_write_panic && (machine_timeout_suspended() == FALSE)) {
				const uint64_t hi = (uint64_t)(data >> 64);
				const uint64_t lo = (uint64_t)(data);
				uint64_t nsec = 0;
				absolutetime_to_nanoseconds(eabs - sabs, &nsec);
				panic("Write from physical addr 0x%llx took %llu ns, "
				    "data: 0x%016llx%016llx (start: %llu, end: %llu), ceiling: %llu",
				    (unsigned long long)paddr, nsec, hi, lo, sabs, eabs,
				    (uint64_t)report_phy_write_delay);
			}
		}

		if (__improbable(trace_phy_write_delay > 0 && (eabs - sabs) > trace_phy_write_delay)) {
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_WRITE),
			    (eabs - sabs), sabs, paddr, data);
		}

		ml_set_interrupts_enabled_with_debug(istate, false);
	}
#endif /* ML_IO_TIMEOUTS_ENABLED */
}
668
669 void
ml_phys_write_byte(vm_offset_t paddr,unsigned int data)670 ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
671 {
672 ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
673 }
674
675 void
ml_phys_write_byte_64(addr64_t paddr64,unsigned int data)676 ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
677 {
678 ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
679 }
680
681 void
ml_phys_write_half(vm_offset_t paddr,unsigned int data)682 ml_phys_write_half(vm_offset_t paddr, unsigned int data)
683 {
684 ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
685 }
686
687 void
ml_phys_write_half_64(addr64_t paddr64,unsigned int data)688 ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
689 {
690 ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
691 }
692
693 void
ml_phys_write(vm_offset_t paddr,unsigned int data)694 ml_phys_write(vm_offset_t paddr, unsigned int data)
695 {
696 ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
697 }
698
699 void
ml_phys_write_64(addr64_t paddr64,unsigned int data)700 ml_phys_write_64(addr64_t paddr64, unsigned int data)
701 {
702 ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
703 }
704
705 void
ml_phys_write_word(vm_offset_t paddr,unsigned int data)706 ml_phys_write_word(vm_offset_t paddr, unsigned int data)
707 {
708 ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
709 }
710
711 void
ml_phys_write_word_64(addr64_t paddr64,unsigned int data)712 ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
713 {
714 ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
715 }
716
717 void
ml_phys_write_double(vm_offset_t paddr,unsigned long long data)718 ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
719 {
720 ml_phys_write_data((pmap_paddr_t)paddr, data, 8);
721 }
722
723 void
ml_phys_write_double_64(addr64_t paddr64,unsigned long long data)724 ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
725 {
726 ml_phys_write_data((pmap_paddr_t)paddr64, data, 8);
727 }
728
#if BUILD_QUAD_WORD_FUNCS
/* Write the 128-bit value 'data' to physical address 'paddr'. */
void
ml_phys_write_quad(vm_offset_t paddr, uint128_t data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 16);
}

/* Write the 128-bit value 'data' to 64-bit physical address 'paddr64'. */
void
ml_phys_write_quad_64(addr64_t paddr64, uint128_t data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 16);
}
#endif /* BUILD_QUAD_WORD_FUNCS */
742
743 /*
744 * Set indicated bit in bit string.
745 */
746 void
setbit(int bitno,int * s)747 setbit(int bitno, int *s)
748 {
749 s[bitno / INT_SIZE] |= 1U << (bitno % INT_SIZE);
750 }
751
752 /*
753 * Clear indicated bit in bit string.
754 */
755 void
clrbit(int bitno,int * s)756 clrbit(int bitno, int *s)
757 {
758 s[bitno / INT_SIZE] &= ~(1U << (bitno % INT_SIZE));
759 }
760
761 /*
762 * Test if indicated bit is set in bit string.
763 */
764 int
testbit(int bitno,int * s)765 testbit(int bitno, int *s)
766 {
767 return s[bitno / INT_SIZE] & (1U << (bitno % INT_SIZE));
768 }
769
770 /*
771 * Find first bit set in bit string.
772 */
773 int
ffsbit(int * s)774 ffsbit(int *s)
775 {
776 int offset;
777
778 for (offset = 0; !*s; offset += INT_SIZE, ++s) {
779 ;
780 }
781 return offset + __builtin_ctz(*s);
782 }
783
/*
 * Find the first (least significant) set bit, numbered from 1.
 * Returns 0 when no bit is set.
 */
int
ffs(unsigned int mask)
{
	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'
	 */
	return (mask == 0) ? 0 : (1 + __builtin_ctz(mask));
}
797
/*
 * Find the first (least significant) set bit in a 64-bit mask,
 * numbered from 1.  Returns 0 when no bit is set.
 */
int
ffsll(unsigned long long mask)
{
	/*
	 * NOTE: cannot use __builtin_ffsll because it generates a call to
	 * 'ffsll'
	 */
	return (mask == 0) ? 0 : (1 + __builtin_ctzll(mask));
}
811
812 /*
813 * Find last bit set in bit string.
814 */
/*
 * Find the last (most significant) set bit, numbered from 1.
 * Returns 0 when no bit is set.
 */
int
fls(unsigned int mask)
{
	const int nbits = (int)(sizeof(mask) << 3);

	return (mask == 0) ? 0 : (nbits - __builtin_clz(mask));
}
824
/*
 * Find the last (most significant) set bit in a 64-bit mask,
 * numbered from 1.  Returns 0 when no bit is set.
 */
int
flsll(unsigned long long mask)
{
	const int nbits = (int)(sizeof(mask) << 3);

	return (mask == 0) ? 0 : (nbits - __builtin_clzll(mask));
}
834
/*
 * Copy 'size' bytes between 'source' and 'sink', where 'which' marks each
 * side as physical (cppvPsrc/cppvPsnk) and may request a data-cache flush
 * of either side afterwards (cppvFsrc/cppvFsnk).  At least one side must
 * be physical.  Returns the result of the underlying copy.
 */
kern_return_t
copypv(addr64_t source, addr64_t sink, unsigned int size, int which)
{
	if ((which & (cppvPsrc | cppvPsnk)) == 0) {     /* Make sure that only one is virtual */
		panic("%s: no more than 1 parameter may be virtual", __func__);
	}

	kern_return_t res = bcopy_phys_internal(source, sink, size, which);

#ifndef __ARM_COHERENT_IO__
	/* Without coherent IO, honor the caller's cache-flush requests;
	 * the third argument tells flush_dcache64() whether the address
	 * is physical. */
	if (which & cppvFsrc) {
		flush_dcache64(source, size, ((which & cppvPsrc) == cppvPsrc));
	}

	if (which & cppvFsnk) {
		flush_dcache64(sink, size, ((which & cppvPsnk) == cppvPsnk));
	}
#endif

	return res;
}
856
/*
 * Not supported on this platform; always panics.  The return exists only
 * to satisfy the declared interface.
 */
int
clr_be_bit(void)
{
	panic("clr_be_bit");
	return 0;
}
863
/*
 * Probe-read a physical address.  Unimplemented on this platform;
 * always panics.
 */
boolean_t
ml_probe_read(
	__unused vm_offset_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read() unimplemented");
	return 1;
}
872
/*
 * Probe-read a 64-bit physical address.  Unimplemented on this platform;
 * always panics.
 */
boolean_t
ml_probe_read_64(
	__unused addr64_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read_64() unimplemented");
	return 1;
}
881
882
/*
 * Apply a machine-level thread policy.  Intentionally a no-op on this
 * platform; all arguments are ignored.
 */
void
ml_thread_policy(
	__unused thread_t thread,
	__unused unsigned policy_id,
	__unused unsigned policy_info)
{
	// <rdar://problem/7141284>: Reduce print noise
	// kprintf("ml_thread_policy() unimplemented\n");
}
892
/* Common trap for paths that are not yet implemented; never returns. */
__dead2
void
panic_unimplemented(void)
{
	panic("Not yet implemented.");
}
899
900 /* ARM64_TODO <rdar://problem/9198953> */
901 void abort(void) __dead2;
902
/* Kernel abort(): panics instead of terminating a process; never returns. */
void
abort(void)
{
	panic("Abort.");
}
908
909
#if !MACH_KDP
/*
 * Stub used when the kernel is built without the MACH_KDP debugger;
 * the callout registration is silently discarded.
 */
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn,arg)
}
#endif
917
918 /*
919 * Get a quick virtual mapping of a physical page and run a callback on that
920 * page's virtual address.
921 *
922 * @param dst64 Physical address to access (doesn't need to be page-aligned).
923 * @param bytes Number of bytes to be accessed. This cannot cross page boundaries.
924 * @param func Callback function to call with the page's virtual address.
925 * @param arg Argument passed directly to `func`.
926 *
927 * @return The return value from `func`.
928 */
929 int
apply_func_phys(addr64_t dst64,vm_size_t bytes,int (* func)(void * buffer,vm_size_t bytes,void * arg),void * arg)930 apply_func_phys(
931 addr64_t dst64,
932 vm_size_t bytes,
933 int (*func)(void * buffer, vm_size_t bytes, void * arg),
934 void * arg)
935 {
936 /* The physical aperture is only guaranteed to work with kernel-managed addresses. */
937 if (!pmap_valid_address(dst64)) {
938 panic("%s address error: passed in address (%#llx) not a kernel managed address",
939 __FUNCTION__, dst64);
940 }
941
942 /* Ensure we stay within a single page */
943 if (((((uint32_t)dst64 & (ARM_PGBYTES - 1)) + bytes) > ARM_PGBYTES)) {
944 panic("%s alignment error: tried accessing addresses spanning more than one page %#llx %#lx",
945 __FUNCTION__, dst64, bytes);
946 }
947
948 return func((void*)phystokv(dst64), bytes, arg);
949 }
950