[PATCH 21/48] scheduler: Replace __get_cpu_var with this_cpu_ptr

From: Christoph Lameter
Date: Fri Feb 14 2014 - 15:29:05 EST


[Patch depends on another patch in this series that introduces raw_cpu_ops]

Convert all uses of __get_cpu_var for address calculation to use
this_cpu_ptr instead. Scalar per-cpu reads and writes are converted to
__this_cpu_read()/__this_cpu_write(), and one use of __this_cpu_ptr is
replaced by raw_cpu_ptr.

Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Acked-by: Ingo Molnar <mingo@xxxxxxxxxx>
Signed-off-by: Christoph Lameter <cl@xxxxxxxxx>

Index: linux/include/linux/kernel_stat.h
===================================================================
--- linux.orig/include/linux/kernel_stat.h 2014-01-30 14:41:01.816304114 -0600
+++ linux/include/linux/kernel_stat.h 2014-01-30 14:41:01.816304114 -0600
@@ -44,8 +44,8 @@
DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

/* Must have preemption disabled for this to be meaningful. */
-#define kstat_this_cpu (&__get_cpu_var(kstat))
-#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat))
+#define kstat_this_cpu this_cpu_ptr(&kstat)
+#define kcpustat_this_cpu this_cpu_ptr(&kernel_cpustat)
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)

Index: linux/kernel/events/callchain.c
===================================================================
--- linux.orig/kernel/events/callchain.c 2014-01-30 14:41:01.816304114 -0600
+++ linux/kernel/events/callchain.c 2014-01-30 14:41:01.816304114 -0600
@@ -137,7 +137,7 @@
int cpu;
struct callchain_cpus_entries *entries;

- *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
+ *rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
if (*rctx == -1)
return NULL;

@@ -153,7 +153,7 @@
static void
put_callchain_entry(int rctx)
{
- put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
+ put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}

struct perf_callchain_entry *
Index: linux/kernel/events/core.c
===================================================================
--- linux.orig/kernel/events/core.c 2014-01-30 14:41:01.816304114 -0600
+++ linux/kernel/events/core.c 2014-01-30 14:41:01.816304114 -0600
@@ -241,10 +241,10 @@
return;

/* decay the counter by 1 average sample */
- local_samples_len = __get_cpu_var(running_sample_length);
+ local_samples_len = __this_cpu_read(running_sample_length);
local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
local_samples_len += sample_len_ns;
- __get_cpu_var(running_sample_length) = local_samples_len;
+ __this_cpu_write(running_sample_length, local_samples_len);

/*
* note: this will be biased artifically low until we have
@@ -870,7 +870,7 @@
static void perf_pmu_rotate_start(struct pmu *pmu)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
- struct list_head *head = &__get_cpu_var(rotation_list);
+ struct list_head *head = this_cpu_ptr(&rotation_list);

WARN_ON(!irqs_disabled());

@@ -2366,7 +2366,7 @@
* to check if we have to switch out PMU state.
* cgroup event are system-wide mode only
*/
- if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
+ if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
perf_cgroup_sched_out(task, next);
}

@@ -2611,11 +2611,11 @@
* to check if we have to switch in PMU state.
* cgroup event are system-wide mode only
*/
- if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
+ if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
perf_cgroup_sched_in(prev, task);

/* check for system-wide branch_stack events */
- if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
+ if (atomic_read(this_cpu_ptr(&perf_branch_stack_events)))
perf_branch_stack_sched_in(prev, task);
}

@@ -2870,7 +2870,7 @@

void perf_event_task_tick(void)
{
- struct list_head *head = &__get_cpu_var(rotation_list);
+ struct list_head *head = this_cpu_ptr(&rotation_list);
struct perf_cpu_context *cpuctx, *tmp;
struct perf_event_context *ctx;
int throttled;
@@ -5584,7 +5584,7 @@
struct perf_sample_data *data,
struct pt_regs *regs)
{
- struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+ struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
struct perf_event *event;
struct hlist_head *head;

@@ -5603,7 +5603,7 @@

int perf_swevent_get_recursion_context(void)
{
- struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+ struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);

return get_recursion_context(swhash->recursion);
}
@@ -5611,7 +5611,7 @@

inline void perf_swevent_put_recursion_context(int rctx)
{
- struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+ struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);

put_recursion_context(swhash->recursion, rctx);
}
@@ -5640,7 +5640,7 @@

static int perf_swevent_add(struct perf_event *event, int flags)
{
- struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+ struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
struct hw_perf_event *hwc = &event->hw;
struct hlist_head *head;

Index: linux/kernel/sched/fair.c
===================================================================
--- linux.orig/kernel/sched/fair.c 2014-01-30 14:41:01.816304114 -0600
+++ linux/kernel/sched/fair.c 2014-01-30 14:41:01.816304114 -0600
@@ -6123,7 +6123,7 @@
struct sched_group *group;
struct rq *busiest;
unsigned long flags;
- struct cpumask *cpus = __get_cpu_var(load_balance_mask);
+ struct cpumask *cpus = this_cpu_ptr(load_balance_mask);

struct lb_env env = {
.sd = sd,
Index: linux/kernel/sched/rt.c
===================================================================
--- linux.orig/kernel/sched/rt.c 2014-01-30 14:41:01.816304114 -0600
+++ linux/kernel/sched/rt.c 2014-01-30 14:41:01.816304114 -0600
@@ -1401,7 +1401,7 @@
static int find_lowest_rq(struct task_struct *task)
{
struct sched_domain *sd;
- struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
+ struct cpumask *lowest_mask = this_cpu_ptr(local_cpu_mask);
int this_cpu = smp_processor_id();
int cpu = task_cpu(task);

Index: linux/kernel/sched/sched.h
===================================================================
--- linux.orig/kernel/sched/sched.h 2014-01-30 14:41:01.816304114 -0600
+++ linux/kernel/sched/sched.h 2014-01-30 14:41:01.816304114 -0600
@@ -668,10 +668,10 @@
DECLARE_PER_CPU(struct rq, runqueues);

#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
-#define this_rq() (&__get_cpu_var(runqueues))
+#define this_rq() this_cpu_ptr(&runqueues)
#define task_rq(p) cpu_rq(task_cpu(p))
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
-#define raw_rq() (&__raw_get_cpu_var(runqueues))
+#define raw_rq() raw_cpu_ptr(&runqueues)

static inline u64 rq_clock(struct rq *rq)
{
Index: linux/kernel/user-return-notifier.c
===================================================================
--- linux.orig/kernel/user-return-notifier.c 2014-01-30 14:41:01.816304114 -0600
+++ linux/kernel/user-return-notifier.c 2014-01-30 14:41:01.816304114 -0600
@@ -14,7 +14,7 @@
void user_return_notifier_register(struct user_return_notifier *urn)
{
set_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
- hlist_add_head(&urn->link, &__get_cpu_var(return_notifier_list));
+ hlist_add_head(&urn->link, this_cpu_ptr(&return_notifier_list));
}
EXPORT_SYMBOL_GPL(user_return_notifier_register);

@@ -25,7 +25,7 @@
void user_return_notifier_unregister(struct user_return_notifier *urn)
{
hlist_del(&urn->link);
- if (hlist_empty(&__get_cpu_var(return_notifier_list)))
+ if (hlist_empty(this_cpu_ptr(&return_notifier_list)))
clear_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
}
EXPORT_SYMBOL_GPL(user_return_notifier_unregister);
Index: linux/kernel/taskstats.c
===================================================================
--- linux.orig/kernel/taskstats.c 2014-01-30 14:41:01.816304114 -0600
+++ linux/kernel/taskstats.c 2014-01-30 14:41:01.816304114 -0600
@@ -638,7 +638,7 @@
fill_tgid_exit(tsk);
}

- listeners = __this_cpu_ptr(&listener_array);
+ listeners = raw_cpu_ptr(&listener_array);
if (list_empty(&listeners->list))
return;


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/