Re: [peterz-queue:sched/core 6/8] kernel/sched/fair.c:9199:30: error: implicit declaration of function 'adjust_numa_imbalance'

From: Mel Gorman
Date: Thu May 19 2022 - 05:07:57 EST


On Thu, May 19, 2022 at 09:55:17AM +0800, kernel test robot wrote:
> tree: https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git sched/core
> head: 45ff65aa1bfd4826331c9c4daafdca21ef8f79f8
> commit: c81394419b54c2df2644a34892a6d6434fd922b3 [6/8] sched/numa: Apply imbalance limitations consistently
> config: arc-allyesconfig (https://download.01.org/0day-ci/archive/20220519/202205190911.n5iX1ftB-lkp@xxxxxxxxx/config)
> compiler: arceb-elf-gcc (GCC) 11.3.0
> reproduce (this is a W=1 build):
>         wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
>         chmod +x ~/bin/make.cross
>         # https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git/commit/?id=c81394419b54c2df2644a34892a6d6434fd922b3
>         git remote add peterz-queue https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git
>         git fetch --no-tags peterz-queue sched/core
>         git checkout c81394419b54c2df2644a34892a6d6434fd922b3
>         # save the config file
>         mkdir build_dir && cp config build_dir/.config
>         COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.3.0 make.cross W=1 O=build_dir ARCH=arc SHELL=/bin/bash
>
> If you fix the issue, kindly add following tag as appropriate
> Reported-by: kernel test robot <lkp@xxxxxxxxx>
>

Oops, the converged function just needs to move outside of the
CONFIG_NUMA_BALANCING block. It is also called from the regular load
balancing paths, which are built regardless of CONFIG_NUMA_BALANCING,
so it is not specific to automatic NUMA balancing.
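
To make the failure mode concrete, this is roughly the shape of the
problem as a stand-alone userspace sketch (illustrative names only, not
the real fair.c symbols): a helper defined only under
CONFIG_NUMA_BALANCING cannot be called from load-balancing code that is
built in every configuration, which is the implicit declaration the
robot reported, and the fix is simply to define it outside the guard:

/*
 * Minimal sketch; compile with and without -DCONFIG_NUMA_BALANCING to
 * mimic the two configs. Names are illustrative, not kernel symbols.
 */
#include <stdio.h>

/*
 * Defined outside any config guard, as in the patch below, because the
 * plain load-balancing path uses it in every configuration.
 */
static long imbalance_helper(int imbalance, int dst_running, int imb_numa_nr)
{
	if (dst_running > imb_numa_nr)
		return imbalance;
	return imbalance <= 2 ? 0 : imbalance;
}

#ifdef CONFIG_NUMA_BALANCING
/* NUMA-balancing-only caller, compiled out on configs without it. */
static long numa_path(void)
{
	return imbalance_helper(2, 1, 4);
}
#endif

/* Regular load-balancing caller, present in every configuration. */
static long lb_path(void)
{
#ifdef CONFIG_NUMA_BALANCING
	(void)numa_path();
#endif
	return imbalance_helper(6, 8, 4);
}

int main(void)
{
	printf("lb path: %ld\n", lb_path());
	return 0;
}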

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f619d9f70c8da..9b26afefc769b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1043,6 +1043,31 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
  * Scheduling class queueing methods:
  */
 
+#define NUMA_IMBALANCE_MIN 2
+
+static inline long
+adjust_numa_imbalance(int imbalance, int dst_running, int imb_numa_nr)
+{
+	/*
+	 * Allow a NUMA imbalance if busy CPUs is less than the maximum
+	 * threshold. Above this threshold, individual tasks may be contending
+	 * for both memory bandwidth and any shared HT resources. This is an
+	 * approximation as the number of running tasks may not be related to
+	 * the number of busy CPUs due to sched_setaffinity.
+	 */
+	if (dst_running > imb_numa_nr)
+		return imbalance;
+
+	/*
+	 * Allow a small imbalance based on a simple pair of communicating
+	 * tasks that remain local when the destination is lightly loaded.
+	 */
+	if (imbalance <= NUMA_IMBALANCE_MIN)
+		return 0;
+
+	return imbalance;
+}
+
 #ifdef CONFIG_NUMA_BALANCING
 /*
  * Approximate time to scan a full NUMA task in ms. The task scan period is
@@ -1537,31 +1562,6 @@ struct task_numa_env {
 static unsigned long cpu_load(struct rq *rq);
 static unsigned long cpu_runnable(struct rq *rq);
 
-#define NUMA_IMBALANCE_MIN 2
-
-static inline long
-adjust_numa_imbalance(int imbalance, int dst_running, int imb_numa_nr)
-{
-	/*
-	 * Allow a NUMA imbalance if busy CPUs is less than the maximum
-	 * threshold. Above this threshold, individual tasks may be contending
-	 * for both memory bandwidth and any shared HT resources. This is an
-	 * approximation as the number of running tasks may not be related to
-	 * the number of busy CPUs due to sched_setaffinity.
-	 */
-	if (dst_running > imb_numa_nr)
-		return imbalance;
-
-	/*
-	 * Allow a small imbalance based on a simple pair of communicating
-	 * tasks that remain local when the destination is lightly loaded.
-	 */
-	if (imbalance <= NUMA_IMBALANCE_MIN)
-		return 0;
-
-	return imbalance;
-}
-
 static inline enum
 numa_type numa_classify(unsigned int imbalance_pct,
 			struct numa_stats *ns)
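
For reference, the behaviour of the helper at its two thresholds, shown
as a userspace mirror for illustration only (imb_numa_nr = 4 is an
arbitrary example value; in the kernel it is set up with the scheduling
domains):

#include <stdio.h>

#define NUMA_IMBALANCE_MIN 2

static long adjust_numa_imbalance(int imbalance, int dst_running, int imb_numa_nr)
{
	/* Destination node already busy enough: report imbalance as-is. */
	if (dst_running > imb_numa_nr)
		return imbalance;

	/* Lightly loaded destination: tolerate a small imbalance. */
	if (imbalance <= NUMA_IMBALANCE_MIN)
		return 0;

	return imbalance;
}

int main(void)
{
	/* (imbalance, dst_running) with imb_numa_nr = 4 */
	printf("%ld\n", adjust_numa_imbalance(2, 1, 4)); /* 0: a communicating pair may stay local */
	printf("%ld\n", adjust_numa_imbalance(6, 1, 4)); /* 6: too large to ignore */
	printf("%ld\n", adjust_numa_imbalance(2, 8, 4)); /* 2: destination busier than imb_numa_nr */
	return 0;
}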