[RFC PATCH 19/19] selftests/vm: Add pins-cgroup selftest for mlock/mmap

From: Alistair Popple
Date: Tue Jan 24 2023 - 00:50:58 EST


Add some basic tests that pages locked with mlock() and mmap(MAP_LOCKED)
are correctly charged and uncharged by the pins cgroup controller.

Signed-off-by: Alistair Popple <apopple@xxxxxxxxxx>
Cc: Shuah Khan <shuah@xxxxxxxxxx>
Cc: linux-kernel@xxxxxxxxxxxxxxx
Cc: linux-mm@xxxxxxxxx
Cc: linux-kselftest@xxxxxxxxxxxxxxx
Cc: cgroups@xxxxxxxxxxxxxxx
---
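[Editor's note, not part of the patch: a rough sketch of running the test
by hand, assuming a cgroup v2 hierarchy is mounted at /sys/fs/cgroup
(the test hardcodes that path) and the binary is linked against libcap
for the cap_*() calls in set_rlim_memlock():

  $ make -C tools/testing/selftests/vm
  $ cd tools/testing/selftests/vm
  $ sudo ./pins-cgroup

The test needs to run as root so it can create cgroups and write
cgroup.subtree_control; it drops CAP_IPC_LOCK itself where the
RLIMIT_MEMLOCK checks are exercised.]
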
MAINTAINERS | 1 +-
tools/testing/selftests/vm/Makefile | 1 +-
tools/testing/selftests/vm/pins-cgroup.c | 271 ++++++++++++++++++++++++-
3 files changed, 273 insertions(+)
create mode 100644 tools/testing/selftests/vm/pins-cgroup.c

diff --git a/MAINTAINERS b/MAINTAINERS
index f8526e2..4c4eed9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5387,6 +5387,7 @@ L: cgroups@xxxxxxxxxxxxxxx
L: linux-mm@xxxxxxxxx
S: Maintained
F: mm/pins_cgroup.c
+F: tools/testing/selftests/vm/pins-cgroup.c

CORETEMP HARDWARE MONITORING DRIVER
M: Fenghua Yu <fenghua.yu@xxxxxxxxx>
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 89c14e4..0653720 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -56,6 +56,7 @@ TEST_GEN_PROGS += soft-dirty
TEST_GEN_PROGS += split_huge_page_test
TEST_GEN_FILES += ksm_tests
TEST_GEN_PROGS += ksm_functional_tests
+TEST_GEN_FILES += pins-cgroup

ifeq ($(MACHINE),x86_64)
CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_32bit_program.c -m32)
diff --git a/tools/testing/selftests/vm/pins-cgroup.c b/tools/testing/selftests/vm/pins-cgroup.c
new file mode 100644
index 0000000..c2eabc2
--- /dev/null
+++ b/tools/testing/selftests/vm/pins-cgroup.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0
+/* _GNU_SOURCE must be defined before any includes so asprintf() is declared */
+#define _GNU_SOURCE
+#include "../kselftest_harness.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/prctl.h>
+#include <sys/resource.h>
+#include <sys/wait.h>
+#include <sys/capability.h>
+#include <unistd.h>
+
+#define CGROUP_TEMP "/sys/fs/cgroup/pins_XXXXXX"
+#define PINS_MAX (-1UL)
+
+FIXTURE(pins_cg)
+{
+	char *cg_path;
+	long page_size;
+};
+
+/* Create a new, uniquely named child cgroup under the v2 root */
+static char *cgroup_new(void)
+{
+	char *cg;
+
+	cg = malloc(sizeof(CGROUP_TEMP));
+	strcpy(cg, CGROUP_TEMP);
+	if (!mkdtemp(cg)) {
+		perror("Failed to create cgroup");
+		return NULL;
+	}
+
+	return cg;
+}
+
+static int cgroup_add_proc(char *cg, pid_t pid)
+{
+	char *cg_proc;
+	FILE *f;
+	int ret = 0;
+
+	if (asprintf(&cg_proc, "%s/cgroup.procs", cg) < 0)
+		return -1;
+
+	f = fopen(cg_proc, "w");
+	free(cg_proc);
+	if (!f)
+		return -1;
+
+	if (fprintf(f, "%ld\n", (long) pid) < 0)
+		ret = -1;
+
+	fclose(f);
+	return ret;
+}
+
+static int cgroup_set_limit(char *cg, unsigned long limit)
+{
+	char *cg_pins_max;
+	FILE *f;
+	int ret = 0;
+
+	if (asprintf(&cg_pins_max, "%s/pins.max", cg) < 0)
+		return -1;
+
+	f = fopen(cg_pins_max, "w");
+	free(cg_pins_max);
+	if (!f)
+		return -1;
+
+	if (limit != PINS_MAX) {
+		if (fprintf(f, "%lu\n", limit) < 0)
+			ret = -1;
+	} else {
+		if (fprintf(f, "max\n") < 0)
+			ret = -1;
+	}
+
+	fclose(f);
+	return ret;
+}
+
+FIXTURE_SETUP(pins_cg)
+{
+	FILE *f;
+
+	/* Enable the pins controller for children of the cgroup v2 root */
+	f = fopen("/sys/fs/cgroup/cgroup.subtree_control", "w");
+	if (!f)
+		return;
+
+	fprintf(f, "+pins\n");
+	fclose(f);
+
+	self->cg_path = cgroup_new();
+	self->page_size = sysconf(_SC_PAGE_SIZE);
+}
+
+FIXTURE_TEARDOWN(pins_cg)
+{
+	/* Move back to the root cgroup so the test cgroup can be removed */
+	cgroup_add_proc("/sys/fs/cgroup", getpid());
+
+	rmdir(self->cg_path);
+	free(self->cg_path);
+}
+
+/* Read pins.current for the given cgroup; returns a negative value on error */
+static long cgroup_pins(char *cg)
+{
+	long pin_count;
+	char *cg_pins_current;
+	FILE *f;
+	int ret;
+
+	if (asprintf(&cg_pins_current, "%s/pins.current", cg) < 0)
+		return -1;
+
+	f = fopen(cg_pins_current, "r");
+	if (!f) {
+		printf("Can't open %s\n", cg_pins_current);
+		free(cg_pins_current);
+		return -2;
+	}
+
+	free(cg_pins_current);
+
+	if (fscanf(f, "%ld", &pin_count) == EOF)
+		ret = -3;
+	else
+		ret = pin_count;
+
+	fclose(f);
+	return ret;
+}
+
+static int set_rlim_memlock(unsigned long size)
+{
+	struct rlimit rlim_memlock = {
+		.rlim_cur = size,
+		.rlim_max = size,
+	};
+	cap_t cap;
+	cap_value_t capability[1] = { CAP_IPC_LOCK };
+
+	/*
+	 * Many of the rlimit checks are skipped if a process has
+	 * CAP_IPC_LOCK. As this test should be run as root we need to
+	 * explicitly drop it.
+	 */
+	cap = cap_get_proc();
+	if (!cap)
+		return -1;
+	if (cap_set_flag(cap, CAP_EFFECTIVE, 1, capability, CAP_CLEAR))
+		return -1;
+	if (cap_set_proc(cap))
+		return -1;
+	return setrlimit(RLIMIT_MEMLOCK, &rlim_memlock);
+}
+
+TEST_F(pins_cg, basic)
+{
+	pid_t child_pid;
+	long page_size = self->page_size;
+	char *p;
+
+	ASSERT_EQ(cgroup_add_proc(self->cg_path, getpid()), 0);
+	p = mmap(NULL, 32*page_size, PROT_READ | PROT_WRITE,
+		 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+	ASSERT_NE(p, MAP_FAILED);
+
+	ASSERT_EQ(cgroup_pins(self->cg_path), 0);
+	memset(p, 0, 16*page_size);
+	ASSERT_EQ(mlock(p, page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 1);
+	ASSERT_EQ(mlock(p + page_size, page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 2);
+	ASSERT_EQ(mlock(p, page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 2);
+	ASSERT_EQ(mlock(p, 4*page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 4);
+	ASSERT_EQ(munlock(p + 2*page_size, 2*page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 2);
+	ASSERT_EQ(cgroup_set_limit(self->cg_path, 8), 0);
+	ASSERT_EQ(mlock(p, 16*page_size), -1);
+	ASSERT_EQ(errno, ENOMEM);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 2);
+	ASSERT_EQ(cgroup_set_limit(self->cg_path, PINS_MAX), 0);
+
+	/* check mremap() of a locked region correctly accounts locked pages */
+	ASSERT_EQ(mlock(p, 32*page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 32);
+	p = mremap(p, 32*page_size, 64*page_size, MREMAP_MAYMOVE);
+	ASSERT_NE(p, MAP_FAILED);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 64);
+	ASSERT_EQ(munmap(p + 32*page_size, 32*page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 32);
+	p = mremap(p, 32*page_size, 32*page_size, MREMAP_MAYMOVE | MREMAP_DONTUNMAP);
+	ASSERT_NE(p, MAP_FAILED);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 32);
+	ASSERT_EQ(munlock(p, 32*page_size), 0);
+
+	/* mremap() of a locked region should fail if the limit is exceeded */
+	ASSERT_EQ(set_rlim_memlock(32*page_size), 0);
+	ASSERT_EQ(mlock(p, 32*page_size), 0);
+	ASSERT_EQ(mremap(p, 32*page_size, 64*page_size, 0), MAP_FAILED);
+	ASSERT_EQ(munlock(p, 32*page_size), 0);
+
+	/* Exceeds rlimit, expected to fail */
+	ASSERT_EQ(set_rlim_memlock(16*page_size), 0);
+	ASSERT_EQ(mlock(p, 32*page_size), -1);
+	ASSERT_EQ(errno, ENOMEM);
+
+	/* memory in the child isn't locked so shouldn't increase the pins count */
+	ASSERT_EQ(mlock(p, 16*page_size), 0);
+	child_pid = fork();
+	if (!child_pid) {
+		ASSERT_EQ(cgroup_pins(self->cg_path), 16);
+		ASSERT_EQ(mlock(p, 16*page_size), 0);
+		ASSERT_EQ(cgroup_pins(self->cg_path), 32);
+		exit(0);
+	}
+	waitpid(child_pid, NULL, 0);
+
+	/* check that child exit uncharged the pins */
+	ASSERT_EQ(cgroup_pins(self->cg_path), 16);
+}
+
+TEST_F(pins_cg, mmap)
+{
+	char *p;
+
+	ASSERT_EQ(cgroup_add_proc(self->cg_path, getpid()), 0);
+	p = mmap(NULL, 4*self->page_size, PROT_READ | PROT_WRITE,
+		 MAP_ANONYMOUS | MAP_PRIVATE | MAP_LOCKED, -1, 0);
+	ASSERT_NE(p, MAP_FAILED);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 4);
+}
+
+/*
+ * Test moving to a different cgroup.
+ */
+TEST_F(pins_cg, move_cg)
+{
+	char *p, *new_cg;
+
+	ASSERT_EQ(cgroup_add_proc(self->cg_path, getpid()), 0);
+	p = mmap(NULL, 16*self->page_size, PROT_READ | PROT_WRITE,
+		 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+	ASSERT_NE(p, MAP_FAILED);
+	memset(p, 0, 16*self->page_size);
+	ASSERT_EQ(mlock(p, 16*self->page_size), 0);
+	ASSERT_EQ(cgroup_pins(self->cg_path), 16);
+	ASSERT_NE(new_cg = cgroup_new(), NULL);
+	ASSERT_EQ(cgroup_add_proc(new_cg, getpid()), 0);
+	ASSERT_EQ(cgroup_pins(new_cg), 16);
+	ASSERT_EQ(cgroup_add_proc(self->cg_path, getpid()), 0);
+	rmdir(new_cg);
+	free(new_cg);
+}
+
+TEST_HARNESS_MAIN
--
git-series 0.9.1