Skip to content

Commit

Permalink
tile: use __ro_after_init instead of tile-specific __write_once
Browse files Browse the repository at this point in the history
The semantics of the old tile __write_once are the same as the
newer generic __ro_after_init, so rename them all and get rid
of the tile-specific version.

This does not enable actual support for __ro_after_init; that
support had been dropped from the tile architecture before the
initial upstreaming was done, since by that time we had switched
to using 16MB huge pages to map the kernel.

Signed-off-by: Chris Metcalf <cmetcalf@mellanox.com>
  • Loading branch information
Chris Metcalf committed Dec 16, 2016
1 parent 18bfd3e commit 14e73e7
Show file tree
Hide file tree
Showing 8 changed files with 21 additions and 27 deletions.
7 changes: 2 additions & 5 deletions arch/tile/include/asm/cache.h
Original file line number Diff line number Diff line change
Expand Up @@ -50,18 +50,15 @@

/*
* Originally we used small TLB pages for kernel data and grouped some
* things together as "write once", enforcing the property at the end
* things together as ro-after-init, enforcing the property at the end
* of initialization by making those pages read-only and non-coherent.
* This allowed better cache utilization since cache inclusion did not
* need to be maintained. However, to do this requires an extra TLB
* entry, which on balance is more of a performance hit than the
* non-coherence is a performance gain, so we now just make "read
* mostly" and "write once" be synonyms. We keep the attribute
* mostly" and "ro-after-init" be synonyms. We keep the attribute
* separate in case we change our minds at a future date.
*/
#define __write_once __read_mostly

/* __ro_after_init is the generic name for the tile arch __write_once. */
#define __ro_after_init __read_mostly

#endif /* _ASM_TILE_CACHE_H */
3 changes: 0 additions & 3 deletions arch/tile/include/asm/sections.h
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,6 @@

#include <asm-generic/sections.h>

/* Write-once data is writable only till the end of initialization. */
extern char __w1data_begin[], __w1data_end[];

extern char vdso_start[], vdso_end[];
#ifdef CONFIG_COMPAT
extern char vdso32_start[], vdso32_end[];
Expand Down
2 changes: 1 addition & 1 deletion arch/tile/kernel/pci.c
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ static int pci_probe = 1;
* This flag tells if the platform is TILEmpower that needs
* special configuration for the PLX switch chip.
*/
int __write_once tile_plx_gen1;
int __ro_after_init tile_plx_gen1;

static struct pci_controller controllers[TILE_NUM_PCIE];
static int num_controllers;
Expand Down
18 changes: 9 additions & 9 deletions arch/tile/kernel/setup.c
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@
static inline int ABS(int x) { return x >= 0 ? x : -x; }

/* Chip information */
char chip_model[64] __write_once;
char chip_model[64] __ro_after_init;

#ifdef CONFIG_VT
struct screen_info screen_info;
Expand Down Expand Up @@ -97,17 +97,17 @@ int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 };
#ifdef CONFIG_HIGHMEM
/* Map information from VAs to PAs */
unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)]
__write_once __attribute__((aligned(L2_CACHE_BYTES)));
__ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(pbase_map);

/* Map information from PAs to VAs */
void *vbase_map[NR_PA_HIGHBIT_VALUES]
__write_once __attribute__((aligned(L2_CACHE_BYTES)));
__ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(vbase_map);
#endif

/* Node number as a function of the high PA bits */
int highbits_to_node[NR_PA_HIGHBIT_VALUES] __write_once;
int highbits_to_node[NR_PA_HIGHBIT_VALUES] __ro_after_init;
EXPORT_SYMBOL(highbits_to_node);

static unsigned int __initdata maxmem_pfn = -1U;
Expand Down Expand Up @@ -844,11 +844,11 @@ static void __init zone_sizes_init(void)
#ifdef CONFIG_NUMA

/* which logical CPUs are on which nodes */
struct cpumask node_2_cpu_mask[MAX_NUMNODES] __write_once;
struct cpumask node_2_cpu_mask[MAX_NUMNODES] __ro_after_init;
EXPORT_SYMBOL(node_2_cpu_mask);

/* which node each logical CPU is on */
char cpu_2_node[NR_CPUS] __write_once __attribute__((aligned(L2_CACHE_BYTES)));
char cpu_2_node[NR_CPUS] __ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(cpu_2_node);

/* Return cpu_to_node() except for cpus not yet assigned, which return -1 */
Expand Down Expand Up @@ -1269,7 +1269,7 @@ static void __init validate_va(void)
* cpus plus any other cpus that are willing to share their cache.
* It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR).
*/
struct cpumask __write_once cpu_lotar_map;
struct cpumask __ro_after_init cpu_lotar_map;
EXPORT_SYMBOL(cpu_lotar_map);

/*
Expand All @@ -1291,7 +1291,7 @@ EXPORT_SYMBOL(hash_for_home_map);
* cache, those tiles will only appear in cpu_lotar_map, NOT in
* cpu_cacheable_map, as they are a special case.
*/
struct cpumask __write_once cpu_cacheable_map;
struct cpumask __ro_after_init cpu_cacheable_map;
EXPORT_SYMBOL(cpu_cacheable_map);

static __initdata struct cpumask disabled_map;
Expand Down Expand Up @@ -1506,7 +1506,7 @@ void __init setup_arch(char **cmdline_p)
* Set up per-cpu memory.
*/

unsigned long __per_cpu_offset[NR_CPUS] __write_once;
unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init;
EXPORT_SYMBOL(__per_cpu_offset);

static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 };
Expand Down
2 changes: 1 addition & 1 deletion arch/tile/kernel/smp.c
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@
* We write to width and height with a single store in head_NN.S,
* so make the variable aligned to "long".
*/
HV_Topology smp_topology __write_once __aligned(sizeof(long));
HV_Topology smp_topology __ro_after_init __aligned(sizeof(long));
EXPORT_SYMBOL(smp_topology);

#if CHIP_HAS_IPI()
Expand Down
4 changes: 2 additions & 2 deletions arch/tile/kernel/time.c
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@
*/

/* How many cycles per second we are running at. */
static cycles_t cycles_per_sec __write_once;
static cycles_t cycles_per_sec __ro_after_init;

cycles_t get_clock_rate(void)
{
Expand Down Expand Up @@ -68,7 +68,7 @@ EXPORT_SYMBOL(get_cycles);
*/
#define SCHED_CLOCK_SHIFT 10

static unsigned long sched_clock_mult __write_once;
static unsigned long sched_clock_mult __ro_after_init;

static cycles_t clocksource_get_cycles(struct clocksource *cs)
{
Expand Down
2 changes: 1 addition & 1 deletion arch/tile/mm/homecache.c
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@
* The noallocl2 option suppresses all use of the L2 cache to cache
* locally from a remote home.
*/
static int __write_once noallocl2;
static int __ro_after_init noallocl2;
static int __init set_noallocl2(char *str)
{
noallocl2 = 1;
Expand Down
10 changes: 5 additions & 5 deletions arch/tile/mm/init.c
Original file line number Diff line number Diff line change
Expand Up @@ -190,9 +190,9 @@ static void __init page_table_range_init(unsigned long start,

static int __initdata ktext_hash = 1; /* .text pages */
static int __initdata kdata_hash = 1; /* .data and .bss pages */
int __write_once hash_default = 1; /* kernel allocator pages */
int __ro_after_init hash_default = 1; /* kernel allocator pages */
EXPORT_SYMBOL(hash_default);
int __write_once kstack_hash = 1; /* if no homecaching, use h4h */
int __ro_after_init kstack_hash = 1; /* if no homecaching, use h4h */

/*
* CPUs to use to for striping the pages of kernel data. If hash-for-home
Expand All @@ -203,7 +203,7 @@ int __write_once kstack_hash = 1; /* if no homecaching, use h4h */
static __initdata struct cpumask kdata_mask;
static __initdata int kdata_arg_seen;

int __write_once kdata_huge; /* if no homecaching, small pages */
int __ro_after_init kdata_huge; /* if no homecaching, small pages */


/* Combine a generic pgprot_t with cache home to get a cache-aware pgprot. */
Expand Down Expand Up @@ -896,8 +896,8 @@ void __init pgtable_cache_init(void)
panic("pgtable_cache_init(): Cannot create pgd cache");
}

static long __write_once initfree = 1;
static bool __write_once set_initfree_done;
static long __ro_after_init initfree = 1;
static bool __ro_after_init set_initfree_done;

/* Select whether to free (1) or mark unusable (0) the __init pages. */
static int __init set_initfree(char *str)
Expand Down

0 comments on commit 14e73e7

Please sign in to comment.