/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <[email protected]> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf ([email protected])
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
#include <asm/mmu_context.h>
#include <asm/cachectl.h>

#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE)

/*
 * When nonzero, use the _PAGE_ACCESSED bit to try to reduce the number
 * of page flushes done in flush_cache_page_if_present. The trade-off:
 * skipping flushes of pages that were never accessed saves time, but
 * it may increase the risk of random segmentation faults from stale
 * cache lines.
 */
#define CONFIG_FLUSH_PAGE_ACCESSED	0

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);

/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_data_cache_local(void *);  /* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */

static void flush_kernel_dcache_page_addr(const void *addr);

/* On some machines (e.g., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We need a spinlock around all TLB flushes to ensure
 * this.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
struct pdc_btlb_info btlb_info;
#endif

DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
DEFINE_STATIC_KEY_TRUE(parisc_has_dcache);
DEFINE_STATIC_KEY_TRUE(parisc_has_icache);

static void cache_flush_local_cpu(void *dummy)
{
	if (static_branch_likely(&parisc_has_icache))
		flush_instruction_cache_local();
	if (static_branch_likely(&parisc_has_dcache))
		flush_data_cache_local(NULL);
}

void flush_cache_all_local(void)
{
	cache_flush_local_cpu(NULL);
}

void flush_cache_all(void)
{
	if (static_branch_likely(&parisc_has_cache))
		on_each_cpu(cache_flush_local_cpu, NULL, 1);
}

static inline void flush_data_cache(void)
{
	if (static_branch_likely(&parisc_has_dcache))
		on_each_cpu(flush_data_cache_local, NULL, 1);
}


/* Kernel virtual address of pfn. */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

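/*
 * Deferred flush: flush_dcache_folio() sets PG_dcache_dirty on folios
 * that have no user mappings yet instead of flushing them immediately;
 * the deferred flush is then performed here, once the PTE is actually
 * being installed.
 */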
void __update_cache(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct folio *folio;
	unsigned int nr;

	/* We don't have pte special.  As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with FireGL card in C8000. */
	if (!pfn_valid(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));
	pfn = folio_pfn(folio);
	nr = folio_nr_pages(folio);
	if (folio_flush_mapping(folio) &&
	    test_bit(PG_dcache_dirty, &folio->flags)) {
		while (nr--)
			flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
		clear_bit(PG_dcache_dirty, &folio->flags);
	} else if (parisc_requires_coherency())
		while (nr--)
			flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf),
		cache_info.dc_conf.cc_alias
	);
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("IT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.it_sp_base,
		cache_info.it_sp_stride,
		cache_info.it_sp_count,
		cache_info.it_loop,
		cache_info.it_off_base,
		cache_info.it_off_stride,
		cache_info.it_off_count);

	printk("DT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.dt_sp_base,
		cache_info.dt_sp_stride,
		cache_info.dt_sp_count,
		cache_info.dt_loop,
		cache_info.dt_off_base,
		cache_info.dt_off_stride,
		cache_info.dt_off_count);

	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_sr);

	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_sr);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
			"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
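	/*
	 * Worked example with hypothetical field values: cc_line = 2,
	 * cc_block = 1, cc_shift = 2 gives a stride of 2 << (3 + 1 + 2)
	 * = 128 bytes, matching Jim Hull's form above:
	 * (1 << (1 - 1)) * (2 << (4 + 2)) = 1 * 128 = 128.
	 */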
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

	/* stride needs to be non-zero, otherwise cache flushes will not work */
	WARN_ON(cache_info.dc_size && dcache_stride == 0);
	WARN_ON(cache_info.ic_size && icache_stride == 0);

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx:	/* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2:	/* pcxl2 doesn't support space register hashing */
		return;

	default:	/* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	if (!static_branch_likely(&parisc_has_cache))
		return;

	/*
	 * The TLB is the engine of coherence on parisc.  The CPU is
	 * entitled to speculate any page with a TLB mapping, so here
	 * we kill the mapping then flush the page along a special flush
	 * only alias mapping. This guarantees that the page is no-longer
	 * in the cache for any process and nor may it be speculatively
	 * read in (until the user or kernel specifically accesses it,
	 * of course).
	 */
	flush_tlb_page(vma, vmaddr);

	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

static void flush_kernel_dcache_page_addr(const void *addr)
{
	unsigned long vaddr = (unsigned long)addr;
	unsigned long flags;

	/* Purge TLB entry to remove translation on all CPUs */
	purge_tlb_start(flags);
	pdtlb(SR_KERNEL, addr);
	purge_tlb_end(flags);

	/* Use tmpalias flush to prevent data cache move-in */
	preempt_disable();
	flush_dcache_page_asm(__pa(vaddr), vaddr);
	preempt_enable();
}

static void flush_kernel_icache_page_addr(const void *addr)
{
	unsigned long vaddr = (unsigned long)addr;
	unsigned long flags;

	/* Purge TLB entry to remove translation on all CPUs */
	purge_tlb_start(flags);
	pdtlb(SR_KERNEL, addr);
	purge_tlb_end(flags);

	/* Use tmpalias flush to prevent instruction cache move-in */
	preempt_disable();
	flush_icache_page_asm(__pa(vaddr), vaddr);
	preempt_enable();
}

void kunmap_flush_on_unmap(const void *addr)
{
	flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_flush_on_unmap);

void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
		unsigned int nr)
{
	void *kaddr = page_address(page);

	for (;;) {
		flush_kernel_dcache_page_addr(kaddr);
		flush_kernel_icache_page_addr(kaddr);
		if (--nr == 0)
			break;
		kaddr += PAGE_SIZE;
	}
}

/*
 * Walk page directory for MM to find PTEP pointer for address ADDR.
 */
static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
	pte_t *ptep = NULL;
	pgd_t *pgd = mm->pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (!pgd_none(*pgd)) {
		p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					ptep = pte_offset_map(pmd, addr);
			}
		}
	}
	return ptep;
}

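/*
 * A user page only needs a cache flush when its PTE is present and
 * accessed (i.e., the CPU may have pulled it into the cache) and it
 * is not mapped uncacheable (_PAGE_NO_CACHE).
 */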
static inline bool pte_needs_flush(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
		== (_PAGE_PRESENT | _PAGE_ACCESSED);
}

/*
 * Return user physical address. Returns 0 if page is not present.
 */
static inline unsigned long get_upa(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags, space, pgd, prot, pa;
#ifdef CONFIG_TLB_PTLOCK
	unsigned long pgd_lock;
#endif

	/* Save context */
	local_irq_save(flags);
	prot = mfctl(8);
	space = mfsp(SR_USER);
	pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
	pgd_lock = mfctl(28);
#endif

	/* Set context for lpa_user */
	switch_mm_irqs_off(NULL, mm, NULL);
	pa = lpa_user(addr);

	/* Restore previous context */
#ifdef CONFIG_TLB_PTLOCK
	mtctl(pgd_lock, 28);
#endif
	mtctl(pgd, 25);
	mtsp(space, SR_USER);
	mtctl(prot, 8);
	local_irq_restore(flags);

	return pa;
}

void flush_dcache_folio(struct folio *folio)
{
	struct address_space *mapping = folio_flush_mapping(folio);
	struct vm_area_struct *vma;
	unsigned long addr, old_addr = 0;
	void *kaddr;
	unsigned long count = 0;
	unsigned long i, nr, flags;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &folio->flags);
		return;
	}

	nr = folio_nr_pages(folio);
	kaddr = folio_address(folio);
	for (i = 0; i < nr; i++)
		flush_kernel_dcache_page_addr(kaddr + i * PAGE_SIZE);

	if (!mapping)
		return;

	pgoff = folio->index;

	/*
	 * We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent
	 * on machines that support equivalent aliasing
	 */
	flush_dcache_mmap_lock_irqsave(mapping, flags);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
		unsigned long offset = pgoff - vma->vm_pgoff;
		unsigned long pfn = folio_pfn(folio);

		addr = vma->vm_start;
		nr = folio_nr_pages(folio);
		if (offset > -nr) {
			pfn -= offset;
			nr += offset;
		} else {
			addr += offset * PAGE_SIZE;
		}
		if (addr + nr * PAGE_SIZE > vma->vm_end)
			nr = (vma->vm_end - addr) / PAGE_SIZE;

		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
					!= (addr & (SHM_COLOUR - 1))) {
			for (i = 0; i < nr; i++)
				__flush_cache_page(vma,
					addr + i * PAGE_SIZE,
					(pfn + i) * PAGE_SIZE);
			/*
			 * Software is allowed to have any number
			 * of private mappings to a page.
			 */
			if (!(vma->vm_flags & VM_SHARED))
				continue;
			if (old_addr)
				pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
					old_addr, addr, vma->vm_file);
			if (nr == folio_nr_pages(folio))
				old_addr = addr;
		}
		WARN_ON(++count == 4096);
	}
	flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}
EXPORT_SYMBOL(flush_dcache_folio);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
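/*
 * Start effectively infinite so that __flush_tlb_range() never takes
 * the flush-all shortcut before parisc_setup_cache_timing() has
 * calibrated a real threshold.
 */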
static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;

void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;
	unsigned long threshold, threshold2;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime));
	pr_info("Calculated flush threshold is %lu KiB\n",
		threshold/1024);
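	/*
	 * Worked example with made-up numbers: if a whole-cache flush
	 * takes 100000 cycles and flushing a 4 MiB range takes 200000
	 * cycles, the break-even size is 4 MiB * 100000 / 200000 = 2 MiB;
	 * larger ranges are cheaper to handle with a full flush.
	 */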

	/*
	 * The threshold computed above isn't very reliable. The following
	 * heuristic works reasonably well on c8000/rp3440.
	 */
	threshold2 = cache_info.dc_size * num_online_cpus();
	parisc_cache_flush_threshold = threshold2;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);

	/* calculate TLB flush threshold */

	/* On SMP machines, skip the TLB measure of kernel text which
	 * has been mapped as huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	size = (unsigned long)_end - (unsigned long)_text;
	rangetime = mfctl(16);
	flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end);
	rangetime = mfctl(16) - rangetime;

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
		threshold/1024);

set_tlb_threshold:
	parisc_tlb_flush_threshold = max(threshold, FLUSH_TLB_THRESHOLD);
	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

static void flush_cache_page_if_present(struct vm_area_struct *vma,
	unsigned long vmaddr)
{
#if CONFIG_FLUSH_PAGE_ACCESSED
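	/*
	 * Walk the page table and flush only when the PTE shows the
	 * page is present, accessed, and cacheable.
	 */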
	bool needs_flush = false;
	pte_t *ptep, pte;

	ptep = get_ptep(vma->vm_mm, vmaddr);
	if (ptep) {
		pte = ptep_get(ptep);
		needs_flush = pte_needs_flush(pte);
		pte_unmap(ptep);
	}
	if (needs_flush)
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte)));
#else
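	/*
	 * Probe the physical address with the lpa instruction (via
	 * get_upa()) and flush whenever a translation exists.
	 */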
	struct mm_struct *mm = vma->vm_mm;
	unsigned long physaddr = get_upa(mm, vmaddr);

	if (physaddr)
		__flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr));
#endif
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kfrom = kmap_local_page(from);
	kto = kmap_local_page(to);
	__flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from)));
	copy_page_asm(kto, kfrom);
	kunmap_local(kto);
	kunmap_local(kfrom);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
	unsigned long user_vaddr, void *dst, void *src, int len)
{
	__flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
	memcpy(dst, src, len);
	flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(dst));
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
	unsigned long user_vaddr, void *dst, void *src, int len)
{
	__flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
	memcpy(dst, src, len);
	flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(src));
}

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions.  These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs.  */
	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, SR_TEMP1);
		pdtlb(SR_TEMP1, start);
		pitlb(SR_TEMP1, start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}

static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE)
		flush_cache_page_if_present(vma, addr);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;
	VMA_ITERATOR(vmi, mm, 0);

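	/*
	 * Stop summing once the threshold is reached; the caller only
	 * needs to know whether the total is above or below it.
	 */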
	for_each_vma(vmi, vma) {
		if (usize >= parisc_cache_flush_threshold)
			break;
		usize += vma->vm_end - vma->vm_start;
	}
	return usize;
}

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	/*
	 * Flushing the whole cache on each cpu takes forever on
	 * rp3440, etc. So, avoid it if the mm isn't too big.
	 *
	 * Note that we must flush the entire cache on machines
	 * with aliasing caches to prevent random segmentation
	 * faults.
	 */
	if (!parisc_requires_coherency()
	    || mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
			return;
		flush_tlb_all();
		flush_cache_all();
		return;
	}

	/* Flush mm */
	for_each_vma(vmi, vma)
		flush_cache_pages(vma, vma->vm_start, vma->vm_end);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (!parisc_requires_coherency()
	    || end - start >= parisc_cache_flush_threshold) {
		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
			return;
		flush_tlb_range(vma, start, end);
		if (vma->vm_flags & VM_EXEC)
			flush_cache_all();
		else
			flush_data_cache();
		return;
	}

	flush_cache_pages(vma, start & PAGE_MASK, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (!PageAnon(page))
		return;

	__flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page)));
}

int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr,
			   pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);

	if (!pte_young(pte))
		return 0;
	set_pte(ptep, pte_mkold(pte));
#if CONFIG_FLUSH_PAGE_ACCESSED
	__flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte)));
#endif
	return 1;
}

/*
 * After a PTE is cleared, we have no way to flush the cache for
 * the physical page. On PA8800 and PA8900 processors, these lines
 * can cause random cache corruption. Thus, we must flush the cache
 * as well as the TLB when clearing a PTE that's valid.
 */
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr,
		       pte_t *ptep)
{
	struct mm_struct *mm = (vma)->vm_mm;
	pte_t pte = ptep_get_and_clear(mm, addr, ptep);
	unsigned long pfn = pte_pfn(pte);

	if (pfn_valid(pfn))
		__flush_cache_page(vma, addr, PFN_PHYS(pfn));
	else if (pte_accessible(mm, pte))
		flush_tlb_page(vma, addr);

	return pte;
}

/*
 * The physical address for pages in the ioremap case can be obtained
 * from the vm_struct struct. I wasn't able to successfully handle the
 * vmalloc and vmap cases. We have an array of struct page pointers in
 * the uninitialized vmalloc case but the flush failed using page_to_pfn.
 */
void flush_cache_vmap(unsigned long start, unsigned long end)
{
	unsigned long addr, physaddr;
	struct vm_struct *vm;

	/* Prevent cache move-in */
	flush_tlb_kernel_range(start, end);

	if (end - start >= parisc_cache_flush_threshold) {
		flush_cache_all();
		return;
	}

	if (WARN_ON_ONCE(!is_vmalloc_addr((void *)start))) {
		flush_cache_all();
		return;
	}

	vm = find_vm_area((void *)start);
	if (WARN_ON_ONCE(!vm)) {
		flush_cache_all();
		return;
	}

	/* The physical addresses of IOREMAP regions are contiguous */
	if (vm->flags & VM_IOREMAP) {
		physaddr = vm->phys_addr;
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			preempt_disable();
			flush_dcache_page_asm(physaddr, start);
			flush_icache_page_asm(physaddr, start);
			preempt_enable();
			physaddr += PAGE_SIZE;
		}
		return;
	}

	flush_cache_all();
}
EXPORT_SYMBOL(flush_cache_vmap);

/*
 * The vm_struct has been retired and the page table is set up. The
 * last page in the range is a guard page. Its physical address can't
 * be determined using lpa, so there is no way to flush the range
 * using flush_dcache_page_asm.
 */
void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	/* Prevent cache move-in */
	flush_tlb_kernel_range(start, end);
	flush_data_cache();
}
EXPORT_SYMBOL(flush_cache_vunmap);

/*
 * On systems with PA8800/PA8900 processors, there is no way to flush
 * a vmap range other than using the architected loop to flush the
 * entire cache. The page directory is not set up, so we can't use
 * fdc, etc. FDCE/FICE don't work to flush a portion of the cache.
 * L2 is physically indexed but FDCE/FICE instructions in virtual
 * mode output their virtual address on the core bus, not their
 * real address. As a result, the L2 cache index formed from the
 * virtual address will most likely not be the same as the L2 index
 * formed from the real address.
 */
void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	flush_tlb_kernel_range(start, end);

	if (!static_branch_likely(&parisc_has_dcache))
		return;

	/* If interrupts are disabled, we can only do local flush */
	if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
		flush_data_cache_local(NULL);
		return;
	}

	flush_data_cache();
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	/* Ensure DMA is complete */
	asm_syncdma();

	flush_tlb_kernel_range(start, end);

	if (!static_branch_likely(&parisc_has_dcache))
		return;

	/* If interrupts are disabled, we can only do local flush */
	if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
		flush_data_cache_local(NULL);
		return;
	}

	flush_data_cache();
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);

SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	unsigned long start, end;
	ASM_EXCEPTIONTABLE_VAR(error);

	if (bytes == 0)
		return 0;
	if (!access_ok((void __user *) addr, bytes))
		return -EFAULT;

	end = addr + bytes;

	if (cache & DCACHE) {
		start = addr;
		__asm__ __volatile__ (
#ifdef CONFIG_64BIT
			"1: cmpb,*<<,n %0,%2,1b\n"
#else
			"1: cmpb,<<,n %0,%2,1b\n"
#endif
			"   fdc,m %3(%4,%0)\n"
			"2: sync\n"
			ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
			: "+r" (start), "+r" (error)
			: "r" (end), "r" (dcache_stride), "i" (SR_USER));
	}

	if (cache & ICACHE && error == 0) {
		start = addr;
		__asm__ __volatile__ (
#ifdef CONFIG_64BIT
			"1: cmpb,*<<,n %0,%2,1b\n"
#else
			"1: cmpb,<<,n %0,%2,1b\n"
#endif
			"   fic,m %3(%4,%0)\n"
			"2: sync\n"
			ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
			: "+r" (start), "+r" (error)
			: "r" (end), "r" (icache_stride), "i" (SR_USER));
	}

	return error;
}
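
/*
 * A sketch of a userspace caller (not part of this file; assumes the
 * parisc __NR_cacheflush number is wired up and that the DCACHE/ICACHE
 * flag values match those in asm/cachectl.h):
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	// Make freshly written code at 'buf' visible to instruction fetch.
 *	syscall(__NR_cacheflush, (unsigned long)buf, len, DCACHE | ICACHE);
 *
 * Flushing the data cache first (fdc) and the instruction cache second
 * (fic), as the implementation above does, is the order self-modifying
 * code needs.
 */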