diff --exclude=CVS -rNu sys.copy/arch/evbmips/cavium/machdep.c sys/arch/evbmips/cavium/machdep.c --- sys.copy/arch/evbmips/cavium/machdep.c 2016-07-09 07:17:17.000000000 +0100 +++ sys/arch/evbmips/cavium/machdep.c 2016-07-09 06:36:42.000000000 +0100 @@ -194,7 +194,7 @@ mach_init_bss(); KASSERT(MIPS_XKPHYS_P(arg3)); - btinfo_paddr = mips64_ld_a64(arg3 + OCTEON_BTINFO_PADDR_OFFSET); + btinfo_paddr = mips3_ld(arg3 + OCTEON_BTINFO_PADDR_OFFSET); /* Should be in first 256MB segment */ KASSERT(btinfo_paddr < 256 * 1024 * 1024); @@ -241,7 +241,7 @@ curcpu()->ci_nmi_stack = octeon_nmi_stack + sizeof(octeon_nmi_stack) - sizeof(struct kernframe); *(uint64_t *)MIPS_PHYS_TO_KSEG0(0x800) = (intptr_t)octeon_reset_vector; const uint64_t wdog_reg = MIPS_PHYS_TO_XKPHYS_UNCACHED(CIU_WDOG0); - uint64_t wdog = mips64_ld_a64(wdog_reg); + uint64_t wdog = mips3_ld(wdog_reg); wdog &= ~(CIU_WDOGX_MODE|CIU_WDOGX_LEN); wdog |= __SHIFTIN(3, CIU_WDOGX_MODE); wdog |= CIU_WDOGX_LEN; // max period diff --exclude=CVS -rNu sys.copy/arch/hpcmips/tx/tx3912video.c sys/arch/hpcmips/tx/tx3912video.c --- sys.copy/arch/hpcmips/tx/tx3912video.c 2014-06-07 19:59:16.000000000 +0100 +++ sys/arch/hpcmips/tx/tx3912video.c 2016-07-08 10:32:21.000000000 +0100 @@ -39,11 +39,10 @@ #include #include +#include #include #include - #include -#include #include @@ -53,6 +52,8 @@ #include #include +#include + #include #include #include diff --exclude=CVS -rNu sys.copy/arch/mips/cavium/dev/octeon_dwctwo.c sys/arch/mips/cavium/dev/octeon_dwctwo.c --- sys.copy/arch/mips/cavium/dev/octeon_dwctwo.c 2016-06-30 21:19:24.000000000 +0100 +++ sys/arch/mips/cavium/dev/octeon_dwctwo.c 2016-07-09 06:45:12.000000000 +0100 @@ -321,7 +321,7 @@ { /* dwc2 uses little-endian addressing */ - return mips3_lw_a64((h + off) ^ 4); + return mips3_ld((h + off) ^ 4); } static void @@ -330,7 +330,7 @@ { /* dwc2 uses little-endian addressing */ - mips3_sw_a64((h + off) ^ 4, val); + mips3_sd((h + off) ^ 4, val); } int diff --exclude=CVS -rNu sys.copy/arch/mips/conf/files.mips sys/arch/mips/conf/files.mips --- sys.copy/arch/mips/conf/files.mips 2016-07-09 07:17:17.000000000 +0100 +++ sys/arch/mips/conf/files.mips 2016-07-08 10:25:47.000000000 +0100 @@ -44,9 +44,8 @@ file arch/mips/mips/db_trace.c ddb file arch/mips/mips/ipifuncs.c multiprocessor file arch/mips/mips/kgdb_machdep.c kgdb -#file arch/mips/mips/pmap.c -file uvm/pmap/pmap.c file arch/mips/mips/pmap_machdep.c +file uvm/pmap/pmap.c file uvm/pmap/pmap_segtab.c file uvm/pmap/pmap_synci.c file uvm/pmap/pmap_tlb.c diff --exclude=CVS -rNu sys.copy/arch/mips/include/cache_r4k.h sys/arch/mips/include/cache_r4k.h --- sys.copy/arch/mips/include/cache_r4k.h 2016-07-09 07:17:17.000000000 +0100 +++ sys/arch/mips/include/cache_r4k.h 2016-07-07 11:59:29.000000000 +0100 @@ -1,4 +1,4 @@ -/* cache_r4k.h,v 1.11.96.3 2012/01/19 08:28:48 matt Exp */ +/* $NetBSD: cache_r4k.h,v 1.11 2005/12/24 20:07:19 perry Exp $ */ /* * Copyright 2001 Wasabi Systems, Inc. @@ -62,250 +62,278 @@ * * Perform the specified cache operation on a single line. */ -static inline void -cache_op_r4k_line(register_t va, u_int op) -{ - __CTASSERT(__builtin_constant_p(op)); - __asm volatile( - ".set push" "\n\t" - ".set noreorder" "\n\t" - "cache %[op], 0(%[va])" "\n\t" - ".set pop" - : - : [op] "n" (op), [va] "r" (va) - : "memory"); -} - -/* - * cache_r4k_op_8lines_NN: - * - * Perform the specified cache operation on 8 n-byte cache lines. 
- */ -static inline void -cache_r4k_op_8lines_NN(size_t n, register_t va, u_int op) -{ - __asm volatile( - ".set push" "\n\t" - ".set noreorder" "\n\t" - "cache %[op], (0*%[n])(%[va])" "\n\t" - "cache %[op], (1*%[n])(%[va])" "\n\t" - "cache %[op], (2*%[n])(%[va])" "\n\t" - "cache %[op], (3*%[n])(%[va])" "\n\t" - "cache %[op], (4*%[n])(%[va])" "\n\t" - "cache %[op], (5*%[n])(%[va])" "\n\t" - "cache %[op], (6*%[n])(%[va])" "\n\t" - "cache %[op], (7*%[n])(%[va])" "\n\t" - ".set pop" - : - : [va] "r" (va), [op] "i" (op), [n] "n" (n) - : "memory"); -} +#define cache_op_r4k_line(va, op) \ +do { \ + __asm volatile( \ + ".set noreorder \n\t" \ + "cache %1, 0(%0) \n\t" \ + ".set reorder" \ + : \ + : "r" (va), "i" (op) \ + : "memory"); \ +} while (/*CONSTCOND*/0) /* * cache_r4k_op_8lines_16: + * * Perform the specified cache operation on 8 16-byte cache lines. + */ +#define cache_r4k_op_8lines_16(va, op) \ +do { \ + __asm volatile( \ + ".set noreorder \n\t" \ + "cache %1, 0x00(%0); cache %1, 0x10(%0) \n\t" \ + "cache %1, 0x20(%0); cache %1, 0x30(%0) \n\t" \ + "cache %1, 0x40(%0); cache %1, 0x50(%0) \n\t" \ + "cache %1, 0x60(%0); cache %1, 0x70(%0) \n\t" \ + ".set reorder" \ + : \ + : "r" (va), "i" (op) \ + : "memory"); \ +} while (/*CONSTCOND*/0) + +/* * cache_r4k_op_8lines_32: + * * Perform the specified cache operation on 8 32-byte cache lines. */ -#define cache_r4k_op_8lines_16(va, op) \ - cache_r4k_op_8lines_NN(16, (va), (op)) -#define cache_r4k_op_8lines_32(va, op) \ - cache_r4k_op_8lines_NN(32, (va), (op)) -#define cache_r4k_op_8lines_64(va, op) \ - cache_r4k_op_8lines_NN(64, (va), (op)) -#define cache_r4k_op_8lines_128(va, op) \ - cache_r4k_op_8lines_NN(128, (va), (op)) - -/* - * cache_r4k_op_32lines_NN: - * - * Perform the specified cache operation on 32 n-byte cache lines. 
- */ -static inline void -cache_r4k_op_32lines_NN(size_t n, register_t va, u_int op) -{ - __CTASSERT(__builtin_constant_p(n)); - __CTASSERT(__builtin_constant_p(op)); - __asm volatile( - ".set push" "\n\t" - ".set noreorder" "\n\t" - "cache %[op], (0*%[n])(%[va])" "\n\t" - "cache %[op], (1*%[n])(%[va])" "\n\t" - "cache %[op], (2*%[n])(%[va])" "\n\t" - "cache %[op], (3*%[n])(%[va])" "\n\t" - "cache %[op], (4*%[n])(%[va])" "\n\t" - "cache %[op], (5*%[n])(%[va])" "\n\t" - "cache %[op], (6*%[n])(%[va])" "\n\t" - "cache %[op], (7*%[n])(%[va])" "\n\t" - "cache %[op], (8*%[n])(%[va])" "\n\t" - "cache %[op], (9*%[n])(%[va])" "\n\t" - "cache %[op], (10*%[n])(%[va])" "\n\t" - "cache %[op], (11*%[n])(%[va])" "\n\t" - "cache %[op], (12*%[n])(%[va])" "\n\t" - "cache %[op], (13*%[n])(%[va])" "\n\t" - "cache %[op], (14*%[n])(%[va])" "\n\t" - "cache %[op], (15*%[n])(%[va])" "\n\t" - "cache %[op], (16*%[n])(%[va])" "\n\t" - "cache %[op], (17*%[n])(%[va])" "\n\t" - "cache %[op], (18*%[n])(%[va])" "\n\t" - "cache %[op], (19*%[n])(%[va])" "\n\t" - "cache %[op], (20*%[n])(%[va])" "\n\t" - "cache %[op], (21*%[n])(%[va])" "\n\t" - "cache %[op], (22*%[n])(%[va])" "\n\t" - "cache %[op], (23*%[n])(%[va])" "\n\t" - "cache %[op], (24*%[n])(%[va])" "\n\t" - "cache %[op], (25*%[n])(%[va])" "\n\t" - "cache %[op], (26*%[n])(%[va])" "\n\t" - "cache %[op], (27*%[n])(%[va])" "\n\t" - "cache %[op], (28*%[n])(%[va])" "\n\t" - "cache %[op], (29*%[n])(%[va])" "\n\t" - "cache %[op], (30*%[n])(%[va])" "\n\t" - "cache %[op], (31*%[n])(%[va])" "\n\t" - ".set pop" - : - : [n] "n" ((uint8_t)n), [va] "r" (va), [op] "i" ((uint8_t)op) - : "memory"); -} +#define cache_r4k_op_8lines_32(va, op) \ +do { \ + __asm volatile( \ + ".set noreorder \n\t" \ + "cache %1, 0x00(%0); cache %1, 0x20(%0) \n\t" \ + "cache %1, 0x40(%0); cache %1, 0x60(%0) \n\t" \ + "cache %1, 0x80(%0); cache %1, 0xa0(%0) \n\t" \ + "cache %1, 0xc0(%0); cache %1, 0xe0(%0) \n\t" \ + ".set reorder" \ + : \ + : "r" (va), "i" (op) \ + : "memory"); \ +} while (/*CONSTCOND*/0) /* * cache_r4k_op_32lines_16: * - * Perform the specified cache operation on 32 16-byte cache lines. + * Perform the specified cache operation on 32 16-byte + * cache lines. 
*/ -#define cache_r4k_op_32lines_16(va, op) \ - cache_r4k_op_32lines_NN(16, (va), (op)) -#define cache_r4k_op_32lines_32(va, op) \ - cache_r4k_op_32lines_NN(32, (va), (op)) -#define cache_r4k_op_32lines_64(va, op) \ - cache_r4k_op_32lines_NN(64, (va), (op)) -#define cache_r4k_op_32lines_128(va, op) \ - cache_r4k_op_32lines_NN(128, (va), (op)) +#define cache_r4k_op_32lines_16(va, op) \ +do { \ + __asm volatile( \ + ".set noreorder \n\t" \ + "cache %1, 0x000(%0); cache %1, 0x010(%0); \n\t" \ + "cache %1, 0x020(%0); cache %1, 0x030(%0); \n\t" \ + "cache %1, 0x040(%0); cache %1, 0x050(%0); \n\t" \ + "cache %1, 0x060(%0); cache %1, 0x070(%0); \n\t" \ + "cache %1, 0x080(%0); cache %1, 0x090(%0); \n\t" \ + "cache %1, 0x0a0(%0); cache %1, 0x0b0(%0); \n\t" \ + "cache %1, 0x0c0(%0); cache %1, 0x0d0(%0); \n\t" \ + "cache %1, 0x0e0(%0); cache %1, 0x0f0(%0); \n\t" \ + "cache %1, 0x100(%0); cache %1, 0x110(%0); \n\t" \ + "cache %1, 0x120(%0); cache %1, 0x130(%0); \n\t" \ + "cache %1, 0x140(%0); cache %1, 0x150(%0); \n\t" \ + "cache %1, 0x160(%0); cache %1, 0x170(%0); \n\t" \ + "cache %1, 0x180(%0); cache %1, 0x190(%0); \n\t" \ + "cache %1, 0x1a0(%0); cache %1, 0x1b0(%0); \n\t" \ + "cache %1, 0x1c0(%0); cache %1, 0x1d0(%0); \n\t" \ + "cache %1, 0x1e0(%0); cache %1, 0x1f0(%0); \n\t" \ + ".set reorder" \ + : \ + : "r" (va), "i" (op) \ + : "memory"); \ +} while (/*CONSTCOND*/0) /* - * cache_r4k_op_16lines_16_2way: - * Perform the specified cache operation on 16 n-byte cache lines, 2-ways. + * cache_r4k_op_32lines_32: + * + * Perform the specified cache operation on 32 32-byte + * cache lines. */ -static inline void -cache_r4k_op_16lines_NN_2way(size_t n, register_t va1, register_t va2, u_int op) -{ - __asm volatile( - ".set push" "\n\t" - ".set noreorder" "\n\t" - "cache %[op], (0*%[n])(%[va1])" "\n\t" - "cache %[op], (0*%[n])(%[va2])" "\n\t" - "cache %[op], (1*%[n])(%[va1])" "\n\t" - "cache %[op], (1*%[n])(%[va2])" "\n\t" - "cache %[op], (2*%[n])(%[va1])" "\n\t" - "cache %[op], (2*%[n])(%[va2])" "\n\t" - "cache %[op], (3*%[n])(%[va1])" "\n\t" - "cache %[op], (3*%[n])(%[va2])" "\n\t" - "cache %[op], (4*%[n])(%[va1])" "\n\t" - "cache %[op], (4*%[n])(%[va2])" "\n\t" - "cache %[op], (5*%[n])(%[va1])" "\n\t" - "cache %[op], (5*%[n])(%[va2])" "\n\t" - "cache %[op], (6*%[n])(%[va1])" "\n\t" - "cache %[op], (6*%[n])(%[va2])" "\n\t" - "cache %[op], (7*%[n])(%[va1])" "\n\t" - "cache %[op], (7*%[n])(%[va2])" "\n\t" - "cache %[op], (8*%[n])(%[va1])" "\n\t" - "cache %[op], (8*%[n])(%[va2])" "\n\t" - "cache %[op], (9*%[n])(%[va1])" "\n\t" - "cache %[op], (9*%[n])(%[va2])" "\n\t" - "cache %[op], (10*%[n])(%[va1])" "\n\t" - "cache %[op], (10*%[n])(%[va2])" "\n\t" - "cache %[op], (11*%[n])(%[va1])" "\n\t" - "cache %[op], (11*%[n])(%[va2])" "\n\t" - "cache %[op], (12*%[n])(%[va1])" "\n\t" - "cache %[op], (12*%[n])(%[va2])" "\n\t" - "cache %[op], (13*%[n])(%[va1])" "\n\t" - "cache %[op], (13*%[n])(%[va2])" "\n\t" - "cache %[op], (14*%[n])(%[va1])" "\n\t" - "cache %[op], (14*%[n])(%[va2])" "\n\t" - "cache %[op], (15*%[n])(%[va1])" "\n\t" - "cache %[op], (15*%[n])(%[va2])" "\n\t" - ".set pop" - : - : [va1] "r" (va1), [va2] "r" (va2), [op] "i" (op), [n] "n" (n) - : "memory"); -} +#define cache_r4k_op_32lines_32(va, op) \ +do { \ + __asm volatile( \ + ".set noreorder \n\t" \ + "cache %1, 0x000(%0); cache %1, 0x020(%0); \n\t" \ + "cache %1, 0x040(%0); cache %1, 0x060(%0); \n\t" \ + "cache %1, 0x080(%0); cache %1, 0x0a0(%0); \n\t" \ + "cache %1, 0x0c0(%0); cache %1, 0x0e0(%0); \n\t" \ + "cache %1, 0x100(%0); cache %1, 0x120(%0); 
\n\t" \ + "cache %1, 0x140(%0); cache %1, 0x160(%0); \n\t" \ + "cache %1, 0x180(%0); cache %1, 0x1a0(%0); \n\t" \ + "cache %1, 0x1c0(%0); cache %1, 0x1e0(%0); \n\t" \ + "cache %1, 0x200(%0); cache %1, 0x220(%0); \n\t" \ + "cache %1, 0x240(%0); cache %1, 0x260(%0); \n\t" \ + "cache %1, 0x280(%0); cache %1, 0x2a0(%0); \n\t" \ + "cache %1, 0x2c0(%0); cache %1, 0x2e0(%0); \n\t" \ + "cache %1, 0x300(%0); cache %1, 0x320(%0); \n\t" \ + "cache %1, 0x340(%0); cache %1, 0x360(%0); \n\t" \ + "cache %1, 0x380(%0); cache %1, 0x3a0(%0); \n\t" \ + "cache %1, 0x3c0(%0); cache %1, 0x3e0(%0); \n\t" \ + ".set reorder" \ + : \ + : "r" (va), "i" (op) \ + : "memory"); \ +} while (/*CONSTCOND*/0) + +/* + * cache_r4k_op_32lines_128: + * + * Perform the specified cache operation on 32 128-byte + * cache lines. + */ +#define cache_r4k_op_32lines_128(va, op) \ +do { \ + __asm volatile( \ + ".set noreorder \n\t" \ + "cache %1, 0x0000(%0); cache %1, 0x0080(%0); \n\t" \ + "cache %1, 0x0100(%0); cache %1, 0x0180(%0); \n\t" \ + "cache %1, 0x0200(%0); cache %1, 0x0280(%0); \n\t" \ + "cache %1, 0x0300(%0); cache %1, 0x0380(%0); \n\t" \ + "cache %1, 0x0400(%0); cache %1, 0x0480(%0); \n\t" \ + "cache %1, 0x0500(%0); cache %1, 0x0580(%0); \n\t" \ + "cache %1, 0x0600(%0); cache %1, 0x0680(%0); \n\t" \ + "cache %1, 0x0700(%0); cache %1, 0x0780(%0); \n\t" \ + "cache %1, 0x0800(%0); cache %1, 0x0880(%0); \n\t" \ + "cache %1, 0x0900(%0); cache %1, 0x0980(%0); \n\t" \ + "cache %1, 0x0a00(%0); cache %1, 0x0a80(%0); \n\t" \ + "cache %1, 0x0b00(%0); cache %1, 0x0b80(%0); \n\t" \ + "cache %1, 0x0c00(%0); cache %1, 0x0c80(%0); \n\t" \ + "cache %1, 0x0d00(%0); cache %1, 0x0d80(%0); \n\t" \ + "cache %1, 0x0e00(%0); cache %1, 0x0e80(%0); \n\t" \ + "cache %1, 0x0f00(%0); cache %1, 0x0f80(%0); \n\t" \ + ".set reorder" \ + : \ + : "r" (va), "i" (op) \ + : "memory"); \ +} while (/*CONSTCOND*/0) /* * cache_r4k_op_16lines_16_2way: - * Perform the specified cache operation on 16 16-byte cache lines, 2-ways. - * cache_r4k_op_16lines_32_2way: - * Perform the specified cache operation on 16 32-byte cache lines, 2-ways. + * + * Perform the specified cache operation on 16 16-byte + * cache lines, 2-ways. */ #define cache_r4k_op_16lines_16_2way(va1, va2, op) \ - cache_r4k_op_16lines_NN_2way(16, (va1), (va2), (op)) +do { \ + __asm volatile( \ + ".set noreorder \n\t" \ + "cache %2, 0x000(%0); cache %2, 0x000(%1); \n\t" \ + "cache %2, 0x010(%0); cache %2, 0x010(%1); \n\t" \ + "cache %2, 0x020(%0); cache %2, 0x020(%1); \n\t" \ + "cache %2, 0x030(%0); cache %2, 0x030(%1); \n\t" \ + "cache %2, 0x040(%0); cache %2, 0x040(%1); \n\t" \ + "cache %2, 0x050(%0); cache %2, 0x050(%1); \n\t" \ + "cache %2, 0x060(%0); cache %2, 0x060(%1); \n\t" \ + "cache %2, 0x070(%0); cache %2, 0x070(%1); \n\t" \ + "cache %2, 0x080(%0); cache %2, 0x080(%1); \n\t" \ + "cache %2, 0x090(%0); cache %2, 0x090(%1); \n\t" \ + "cache %2, 0x0a0(%0); cache %2, 0x0a0(%1); \n\t" \ + "cache %2, 0x0b0(%0); cache %2, 0x0b0(%1); \n\t" \ + "cache %2, 0x0c0(%0); cache %2, 0x0c0(%1); \n\t" \ + "cache %2, 0x0d0(%0); cache %2, 0x0d0(%1); \n\t" \ + "cache %2, 0x0e0(%0); cache %2, 0x0e0(%1); \n\t" \ + "cache %2, 0x0f0(%0); cache %2, 0x0f0(%1); \n\t" \ + ".set reorder" \ + : \ + : "r" (va1), "r" (va2), "i" (op) \ + : "memory"); \ +} while (/*CONSTCOND*/0) + +/* + * cache_r4k_op_16lines_32_2way: + * + * Perform the specified cache operation on 16 32-byte + * cache lines, 2-ways. 
+ */ #define cache_r4k_op_16lines_32_2way(va1, va2, op) \ - cache_r4k_op_16lines_NN_2way(32, (va1), (va2), (op)) -#define cache_r4k_op_16lines_64_2way(va1, va2, op) \ - cache_r4k_op_16lines_NN_2way(64, (va1), (va2), (op)) - -/* - * cache_r4k_op_8lines_NN_4way: - * Perform the specified cache operation on 8 n-byte cache lines, 4-ways. - */ -static inline void -cache_r4k_op_8lines_NN_4way(size_t n, register_t va1, register_t va2, - register_t va3, register_t va4, u_int op) -{ - __asm volatile( - ".set push" "\n\t" - ".set noreorder" "\n\t" - "cache %[op], (0*%[n])(%[va1])" "\n\t" - "cache %[op], (0*%[n])(%[va2])" "\n\t" - "cache %[op], (0*%[n])(%[va3])" "\n\t" - "cache %[op], (0*%[n])(%[va4])" "\n\t" - "cache %[op], (1*%[n])(%[va1])" "\n\t" - "cache %[op], (1*%[n])(%[va2])" "\n\t" - "cache %[op], (1*%[n])(%[va3])" "\n\t" - "cache %[op], (1*%[n])(%[va4])" "\n\t" - "cache %[op], (2*%[n])(%[va1])" "\n\t" - "cache %[op], (2*%[n])(%[va2])" "\n\t" - "cache %[op], (2*%[n])(%[va3])" "\n\t" - "cache %[op], (2*%[n])(%[va4])" "\n\t" - "cache %[op], (3*%[n])(%[va1])" "\n\t" - "cache %[op], (3*%[n])(%[va2])" "\n\t" - "cache %[op], (3*%[n])(%[va3])" "\n\t" - "cache %[op], (3*%[n])(%[va4])" "\n\t" - "cache %[op], (4*%[n])(%[va1])" "\n\t" - "cache %[op], (4*%[n])(%[va2])" "\n\t" - "cache %[op], (4*%[n])(%[va3])" "\n\t" - "cache %[op], (4*%[n])(%[va4])" "\n\t" - "cache %[op], (5*%[n])(%[va1])" "\n\t" - "cache %[op], (5*%[n])(%[va2])" "\n\t" - "cache %[op], (5*%[n])(%[va3])" "\n\t" - "cache %[op], (5*%[n])(%[va4])" "\n\t" - "cache %[op], (6*%[n])(%[va1])" "\n\t" - "cache %[op], (6*%[n])(%[va2])" "\n\t" - "cache %[op], (6*%[n])(%[va3])" "\n\t" - "cache %[op], (6*%[n])(%[va4])" "\n\t" - "cache %[op], (7*%[n])(%[va1])" "\n\t" - "cache %[op], (7*%[n])(%[va2])" "\n\t" - "cache %[op], (7*%[n])(%[va3])" "\n\t" - "cache %[op], (7*%[n])(%[va4])" "\n\t" - ".set pop" - : - : [va1] "r" (va1), [va2] "r" (va2), - [va3] "r" (va3), [va4] "r" (va4), - [op] "i" (op), [n] "n" (n) - : "memory"); -} +do { \ + __asm volatile( \ + ".set noreorder \n\t" \ + "cache %2, 0x000(%0); cache %2, 0x000(%1); \n\t" \ + "cache %2, 0x020(%0); cache %2, 0x020(%1); \n\t" \ + "cache %2, 0x040(%0); cache %2, 0x040(%1); \n\t" \ + "cache %2, 0x060(%0); cache %2, 0x060(%1); \n\t" \ + "cache %2, 0x080(%0); cache %2, 0x080(%1); \n\t" \ + "cache %2, 0x0a0(%0); cache %2, 0x0a0(%1); \n\t" \ + "cache %2, 0x0c0(%0); cache %2, 0x0c0(%1); \n\t" \ + "cache %2, 0x0e0(%0); cache %2, 0x0e0(%1); \n\t" \ + "cache %2, 0x100(%0); cache %2, 0x100(%1); \n\t" \ + "cache %2, 0x120(%0); cache %2, 0x120(%1); \n\t" \ + "cache %2, 0x140(%0); cache %2, 0x140(%1); \n\t" \ + "cache %2, 0x160(%0); cache %2, 0x160(%1); \n\t" \ + "cache %2, 0x180(%0); cache %2, 0x180(%1); \n\t" \ + "cache %2, 0x1a0(%0); cache %2, 0x1a0(%1); \n\t" \ + "cache %2, 0x1c0(%0); cache %2, 0x1c0(%1); \n\t" \ + "cache %2, 0x1e0(%0); cache %2, 0x1e0(%1); \n\t" \ + ".set reorder" \ + : \ + : "r" (va1), "r" (va2), "i" (op) \ + : "memory"); \ +} while (/*CONSTCOND*/0) + /* * cache_r4k_op_8lines_16_4way: - * Perform the specified cache operation on 8 16-byte cache lines, 4-ways. + * + * Perform the specified cache operation on 8 16-byte + * cache lines, 4-ways. 
+ */ +#define cache_r4k_op_8lines_16_4way(va1, va2, va3, va4, op) \ +do { \ + __asm volatile( \ + ".set noreorder \n\t" \ + "cache %4, 0x000(%0); cache %4, 0x000(%1); \n\t" \ + "cache %4, 0x000(%2); cache %4, 0x000(%3); \n\t" \ + "cache %4, 0x010(%0); cache %4, 0x010(%1); \n\t" \ + "cache %4, 0x010(%2); cache %4, 0x010(%3); \n\t" \ + "cache %4, 0x020(%0); cache %4, 0x020(%1); \n\t" \ + "cache %4, 0x020(%2); cache %4, 0x020(%3); \n\t" \ + "cache %4, 0x030(%0); cache %4, 0x030(%1); \n\t" \ + "cache %4, 0x030(%2); cache %4, 0x030(%3); \n\t" \ + "cache %4, 0x040(%0); cache %4, 0x040(%1); \n\t" \ + "cache %4, 0x040(%2); cache %4, 0x040(%3); \n\t" \ + "cache %4, 0x050(%0); cache %4, 0x050(%1); \n\t" \ + "cache %4, 0x050(%2); cache %4, 0x050(%3); \n\t" \ + "cache %4, 0x060(%0); cache %4, 0x060(%1); \n\t" \ + "cache %4, 0x060(%2); cache %4, 0x060(%3); \n\t" \ + "cache %4, 0x070(%0); cache %4, 0x070(%1); \n\t" \ + "cache %4, 0x070(%2); cache %4, 0x070(%3); \n\t" \ + ".set reorder" \ + : \ + : "r" (va1), "r" (va2), "r" (va3), "r" (va4), "i" (op) \ + : "memory"); \ +} while (/*CONSTCOND*/0) + +/* * cache_r4k_op_8lines_32_4way: - * Perform the specified cache operation on 8 32-byte cache lines, 4-ways. + * + * Perform the specified cache operation on 8 32-byte + * cache lines, 4-ways. */ -#define cache_r4k_op_8lines_16_4way(va1, va2, va3, va4, op) \ - cache_r4k_op_8lines_NN_4way(16, (va1), (va2), (va3), (va4), (op)) -#define cache_r4k_op_8lines_32_4way(va1, va2, va3, va4, op) \ - cache_r4k_op_8lines_NN_4way(32, (va1), (va2), (va3), (va4), (op)) -#define cache_r4k_op_8lines_64_4way(va1, va2, va3, va4, op) \ - cache_r4k_op_8lines_NN_4way(64, (va1), (va2), (va3), (va4), (op)) -#define cache_r4k_op_8lines_128_4way(va1, va2, va3, va4, op) \ - cache_r4k_op_8lines_NN_4way(128, (va1), (va2), (va3), (va4), (op)) +#define cache_r4k_op_8lines_32_4way(va1, va2, va3, va4, op) \ +do { \ + __asm volatile( \ + ".set noreorder \n\t" \ + "cache %4, 0x000(%0); cache %4, 0x000(%1); \n\t" \ + "cache %4, 0x000(%2); cache %4, 0x000(%3); \n\t" \ + "cache %4, 0x020(%0); cache %4, 0x020(%1); \n\t" \ + "cache %4, 0x020(%2); cache %4, 0x020(%3); \n\t" \ + "cache %4, 0x040(%0); cache %4, 0x040(%1); \n\t" \ + "cache %4, 0x040(%2); cache %4, 0x040(%3); \n\t" \ + "cache %4, 0x060(%0); cache %4, 0x060(%1); \n\t" \ + "cache %4, 0x060(%2); cache %4, 0x060(%3); \n\t" \ + "cache %4, 0x080(%0); cache %4, 0x080(%1); \n\t" \ + "cache %4, 0x080(%2); cache %4, 0x080(%3); \n\t" \ + "cache %4, 0x0a0(%0); cache %4, 0x0a0(%1); \n\t" \ + "cache %4, 0x0a0(%2); cache %4, 0x0a0(%3); \n\t" \ + "cache %4, 0x0c0(%0); cache %4, 0x0c0(%1); \n\t" \ + "cache %4, 0x0c0(%2); cache %4, 0x0c0(%3); \n\t" \ + "cache %4, 0x0e0(%0); cache %4, 0x0e0(%1); \n\t" \ + "cache %4, 0x0e0(%2); cache %4, 0x0e0(%3); \n\t" \ + ".set reorder" \ + : \ + : "r" (va1), "r" (va2), "r" (va3), "r" (va4), "i" (op) \ + : "memory"); \ +} while (/*CONSTCOND*/0) void r4k_icache_sync_all_16(void); void r4k_icache_sync_range_16(register_t, vsize_t); diff --exclude=CVS -rNu sys.copy/arch/mips/include/cpu.h sys/arch/mips/include/cpu.h --- sys.copy/arch/mips/include/cpu.h 2016-07-09 07:17:17.000000000 +0100 +++ sys/arch/mips/include/cpu.h 2016-07-08 10:31:32.000000000 +0100 @@ -37,8 +37,6 @@ #ifndef _CPU_H_ #define _CPU_H_ -//#include - /* * Exported definitions unique to NetBSD/mips cpu support. 
  */
diff --exclude=CVS -rNu sys.copy/arch/mips/mips/bus_space_alignstride_chipdep.c sys/arch/mips/mips/bus_space_alignstride_chipdep.c
--- sys.copy/arch/mips/mips/bus_space_alignstride_chipdep.c 2016-07-09 07:17:17.000000000 +0100
+++ sys/arch/mips/mips/bus_space_alignstride_chipdep.c 2016-07-09 06:46:47.000000000 +0100
@@ -727,7 +727,7 @@
 	const int shift = (h & (CHIP_ACCESS_SIZE - 1)) * 8;
 	h &= ~((bus_space_handle_t)(CHIP_ACCESS_SIZE - 1));
 #if CHIP_ACCESS_SIZE == 8
-	const CHIP_TYPE val = mips_ld(h);
+	const CHIP_TYPE val = mips3_ld(h);
 #elif CHIP_ACCESS_SIZE == 4
 	const CHIP_TYPE val = mips_lwu(h);
 #else
@@ -1015,8 +1015,6 @@
 __BS(read_stream_8)(void *v, bus_space_handle_t h, bus_size_t off)
 {
 #ifdef MIPS3_64BIT
-	volatile uint64_t *ptr;
-
 	h += CHIP_OFF64(off);
 	return mips3_ld(h);
 #else
diff --exclude=CVS -rNu sys.copy/arch/mips/mips/cache_mipsNN.c sys/arch/mips/mips/cache_mipsNN.c
--- sys.copy/arch/mips/mips/cache_mipsNN.c 2016-07-09 07:17:17.000000000 +0100
+++ sys/arch/mips/mips/cache_mipsNN.c 2016-07-02 07:46:44.000000000 +0100
@@ -1,4 +1,4 @@
-/* cache_mipsNN.c,v 1.11.78.8 2012/01/19 08:28:49 matt Exp */
+/* $NetBSD: cache_mipsNN.c,v 1.15 2015/05/20 07:04:49 matt Exp $ */
 /*
  * Copyright 2001 Wasabi Systems, Inc.
@@ -36,7 +36,7 @@
  */
 #include
-__KERNEL_RCSID(0, "cache_mipsNN.c,v 1.11.78.8 2012/01/19 08:28:49 matt Exp");
+__KERNEL_RCSID(0, "$NetBSD: cache_mipsNN.c,v 1.15 2015/05/20 07:04:49 matt Exp $");
 #include
diff --exclude=CVS -rNu sys.copy/arch/mips/mips/mipsX_subr.S sys/arch/mips/mips/mipsX_subr.S
--- sys.copy/arch/mips/mips/mipsX_subr.S 2016-07-09 07:17:17.000000000 +0100
+++ sys/arch/mips/mips/mipsX_subr.S 2016-07-05 14:23:43.000000000 +0100
@@ -1,4 +1,4 @@
-/* $NetBSD: mipsX_subr.S,v 1.62 2015/06/11 07:30:10 matt Exp $ */
+/* $NetBSD: mipsX_subr.S,v 1.67 2016/07/04 15:52:31 dholland Exp $ */
 /*
  * Copyright 2002 Wasabi Systems, Inc.
@@ -19,7 +19,7 @@
  * This product includes software developed for the NetBSD Project by
  * Wasabi Systems, Inc.
  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
-	l or promote products derived from this software without specific prior
+ * or promote products derived from this software without specific prior
  * written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
diff --exclude=CVS -rNu sys.copy/arch/mips/mips/pmap_machdep.c sys/arch/mips/mips/pmap_machdep.c
--- sys.copy/arch/mips/mips/pmap_machdep.c 2016-07-09 07:17:17.000000000 +0100
+++ sys/arch/mips/mips/pmap_machdep.c 2016-07-09 07:29:11.000000000 +0100
@@ -119,8 +119,6 @@
 #define __MUTEX_PRIVATE
 #define __PMAP_PRIVATE
-#define PMAP_FAULTINFO
-
 #include
 #include
 #include
@@ -900,8 +898,9 @@
 	 */
 	if (mips_cache_badalias(nva, va)) {
 		for (pv_entry_t npv = pv; npv; npv = npv->pv_next) {
+			if (npv->pv_va & PV_KENTER)
+				continue;
 			vaddr_t nva = trunc_page(npv->pv_va);
-			/* XXXNH PV_KENTER skip */
 			pmap_t npm = npv->pv_pmap;
 			VM_PAGEMD_PVLIST_UNLOCK(mdpg);
 			pmap_remove(npm, nva, nva + PAGE_SIZE);
diff --exclude=CVS -rNu sys.copy/arch/mips/mips/trap.c sys/arch/mips/mips/trap.c
--- sys.copy/arch/mips/mips/trap.c 2016-07-09 07:17:17.000000000 +0100
+++ sys/arch/mips/mips/trap.c 2016-07-09 07:22:34.000000000 +0100
@@ -46,8 +46,6 @@
 #include "opt_kgdb.h"
 #include "opt_multiprocessor.h"
-#define PMAP_FAULTINFO
-
 #include
 #include
 #include
@@ -372,7 +370,7 @@
 #ifdef PMAP_FAULTINFO
 	if (p->p_pid == pfi->pfi_lastpid && va == pfi->pfi_faultaddr) {
-		if (++pfi->pfi_repeats > 500) {
+		if (++pfi->pfi_repeats > 4) {
 			tlb_asid_t asid = tlb_get_asid();
 			pt_entry_t *ptep = pfi->pfi_faultpte;
 			printf("trap: fault #%u (%s/%s) for %#"PRIxVADDR" (%#"PRIxVADDR") at pc %#"PRIxVADDR" curpid=%u/%u ptep@%p=%#"PRIxPTE")\n", pfi->pfi_repeats, trap_names[TRAPTYPE(cause)], trap_names[pfi->pfi_faulttype], va, vaddr, pc, map->pmap->pm_pai[0].pai_asid, asid, ptep, ptep ? pte_value(*ptep) : 0);
diff --exclude=CVS -rNu sys.copy/arch/mips/mips/vm_machdep.c sys/arch/mips/mips/vm_machdep.c
diff --exclude=CVS -rNu sys.copy/uvm/pmap/pmap.c sys/uvm/pmap/pmap.c
--- sys.copy/uvm/pmap/pmap.c 2016-07-09 07:17:24.000000000 +0100
+++ sys/uvm/pmap/pmap.c 2016-07-09 06:53:44.000000000 +0100
@@ -110,7 +110,6 @@
 #include
 #include
 #include
-#include /* XXX: for sock_loan_thresh */
 #include
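
Note (illustrative, not part of the diff): the Octeon machdep.c, octeon_dwctwo.c and bus_space_alignstride_chipdep.c hunks all converge on mips3_ld()/mips3_sd() for 64-bit loads and stores through XKPHYS, replacing the mips64_ld_a64(), mips_ld() and mips3_lw_a64()/mips3_sw_a64() spellings used before. The sketch below only shows the resulting read-modify-write shape, the same one the CIU watchdog hunk in machdep.c ends up with. The helper name csr64_rmw() is invented for the example, and the header names are my assumption of where mips3_ld()/mips3_sd() and MIPS_PHYS_TO_XKPHYS_UNCACHED() are declared in this tree.

/* Illustrative sketch only; not part of the patch. */
#include <sys/param.h>
#include <mips/cpuregs.h>	/* MIPS_PHYS_TO_XKPHYS_UNCACHED() (assumed location) */
#include <mips/locore.h>	/* mips3_ld(), mips3_sd() (assumed location) */

/*
 * Read-modify-write a 64-bit CSR through an uncached XKPHYS mapping:
 * clear the bits in 'clear', then set the bits in 'set'.
 */
static inline void
csr64_rmw(paddr_t csr_pa, uint64_t clear, uint64_t set)
{
	const uint64_t va = MIPS_PHYS_TO_XKPHYS_UNCACHED(csr_pa);
	uint64_t v;

	v = mips3_ld(va);	/* 64-bit uncached load */
	v &= ~clear;
	v |= set;
	mips3_sd(va, v);	/* 64-bit uncached store */
}

In these terms the watchdog hunk amounts to roughly csr64_rmw(CIU_WDOG0, CIU_WDOGX_MODE|CIU_WDOGX_LEN, __SHIFTIN(3, CIU_WDOGX_MODE)|CIU_WDOGX_LEN).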
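
Note (illustrative, not part of the diff): the large cache_r4k.h hunk reverts the parameterized cache_r4k_op_*lines_NN() inline helpers back to the per-line-size macros, so callers again pick a macro by line size. The usual consumer pattern is to walk a range with the batched macro and finish the tail one line at a time; the sketch below only illustrates that pattern. The function name is invented, the 32-byte line size and 32-line batch are simply the values matching cache_r4k_op_32lines_32(), and CACHE_R4K_D/CACHEOP_R4K_HIT_WB_INV are the usual cache_r4k.h op encodings (not shown in the hunk above).

/* Illustrative sketch only; not part of the patch. */
#include <sys/param.h>
#include <mips/cache_r4k.h>

/*
 * Write back and invalidate a virtual address range from a primary
 * data cache with 32-byte lines.
 */
static void
example_pdcache_wbinv_range_32(register_t va, vsize_t size)
{
	register_t eva = (va + size + 31) & ~(register_t)31;	/* round end up to a line */

	va &= ~(register_t)31;					/* truncate start to a line */

	/* Batched: 32 lines (1 KB) per iteration. */
	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 32);
	}

	/* Tail: one line at a time. */
	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}
}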