// SPDX-License-Identifier: LGPL-2.1
#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>

#include "../kselftest.h"
#include "rseq.h"

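/*
 * Build-time selection of the per-cpu index used by the tests: either
 * the memory-map concurrency ID (mm_cid) or the raw CPU number. mm_cid
 * values are not CPU numbers, so the affinity-based filtering below
 * only applies when the CPU number is used as index.
 */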
#ifdef BUILDOPT_RSEQ_PERCPU_MM_CID
# define RSEQ_PERCPU	RSEQ_PERCPU_MM_CID
static
int get_current_cpu_id(void)
{
	return rseq_current_mm_cid();
}
static
bool rseq_validate_cpu_id(void)
{
	return rseq_mm_cid_available();
}
static
bool rseq_use_cpu_index(void)
{
	return false;	/* Use mm_cid */
}
#else
# define RSEQ_PERCPU	RSEQ_PERCPU_CPU_ID
static
int get_current_cpu_id(void)
{
	return rseq_cpu_start();
}
static
bool rseq_validate_cpu_id(void)
{
	return rseq_current_cpu_raw() >= 0;
}
static
bool rseq_use_cpu_index(void)
{
	return true;	/* Use cpu_id as index. */
}
#endif

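/*
 * Per-cpu entries are aligned to 128 bytes so that entries belonging to
 * different CPUs live on distinct cache lines, avoiding false sharing.
 */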
struct percpu_lock_entry {
	intptr_t v;
} __attribute__((aligned(128)));

struct percpu_lock {
	struct percpu_lock_entry c[CPU_SETSIZE];
};

struct test_data_entry {
	intptr_t count;
} __attribute__((aligned(128)));

struct spinlock_test_data {
	struct percpu_lock lock;
	struct test_data_entry c[CPU_SETSIZE];
	int reps;
};

struct percpu_list_node {
	intptr_t data;
	struct percpu_list_node *next;
};

struct percpu_list_entry {
	struct percpu_list_node *head;
} __attribute__((aligned(128)));

struct percpu_list {
	struct percpu_list_entry c[CPU_SETSIZE];
};

/* A simple per-cpu spinlock. Returns the cpu the lock was acquired on. */
int rseq_this_cpu_lock(struct percpu_lock *lock)
{
	int cpu;

	for (;;) {
		int ret;

		cpu = get_current_cpu_id();
		ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
					 &lock->c[cpu].v, 0, 1, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	/*
	 * Acquire semantic when taking lock after control dependency.
	 * Matches rseq_smp_store_release().
	 */
	rseq_smp_acquire__after_ctrl_dep();
	return cpu;
}

void rseq_percpu_unlock(struct percpu_lock *lock, int cpu)
{
	assert(lock->c[cpu].v == 1);
	/*
	 * Release lock, with release semantic. Matches
	 * rseq_smp_acquire__after_ctrl_dep().
	 */
	rseq_smp_store_release(&lock->c[cpu].v, 0);
}
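
/*
 * Illustrative use of the lock pair (a sketch, not part of the test):
 *
 *	int cpu = rseq_this_cpu_lock(&data->lock);
 *	...update per-cpu state owned by "cpu"...
 *	rseq_percpu_unlock(&data->lock, cpu);
 */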

void *test_percpu_spinlock_thread(void *arg)
{
	struct spinlock_test_data *data = arg;
	int i, cpu;

	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}
	for (i = 0; i < data->reps; i++) {
		cpu = rseq_this_cpu_lock(&data->lock);
		data->c[cpu].count++;
		rseq_percpu_unlock(&data->lock, cpu);
	}
	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	return NULL;
}

/*
 * A simple test which implements a sharded counter using a per-cpu
 * lock.  Obviously real applications might prefer to simply use a
 * per-cpu increment; however, this is reasonable for a test and the
 * lock can be extended to synchronize more complicated operations.
 */
void test_percpu_spinlock(void)
{
	const int num_threads = 200;
	int i;
	uint64_t sum;
	pthread_t test_threads[num_threads];
	struct spinlock_test_data data;

	memset(&data, 0, sizeof(data));
	data.reps = 5000;

	for (i = 0; i < num_threads; i++)
		pthread_create(&test_threads[i], NULL,
			       test_percpu_spinlock_thread, &data);

	for (i = 0; i < num_threads; i++)
		pthread_join(test_threads[i], NULL);

	sum = 0;
	for (i = 0; i < CPU_SETSIZE; i++)
		sum += data.c[i].count;

	assert(sum == (uint64_t)data.reps * num_threads);
}

void this_cpu_list_push(struct percpu_list *list,
			struct percpu_list_node *node,
			int *_cpu)
{
	int cpu;

	for (;;) {
		intptr_t *targetptr, newval, expect;
		int ret;

		cpu = get_current_cpu_id();
		/* Load list->c[cpu].head with single-copy atomicity. */
		expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
		newval = (intptr_t)node;
		targetptr = (intptr_t *)&list->c[cpu].head;
		node->next = (struct percpu_list_node *)expect;
		ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
					 targetptr, expect, newval, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	if (_cpu)
		*_cpu = cpu;
}
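
/*
 * Illustrative producer side (a sketch; the node->data value is
 * arbitrary):
 *
 *	struct percpu_list_node *node = malloc(sizeof(*node));
 *	assert(node);
 *	node->data = 42;
 *	this_cpu_list_push(&list, node, NULL);
 */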

/*
 * Unlike a traditional lock-less linked list, the availability of a
 * rseq primitive allows us to implement pop without concerns over
 * ABA-type races.
 */
struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list,
					   int *_cpu)
{
	for (;;) {
		struct percpu_list_node *head;
		intptr_t *targetptr, expectnot, *load;
		long offset;
		int ret, cpu;

		cpu = get_current_cpu_id();
		targetptr = (intptr_t *)&list->c[cpu].head;
		expectnot = (intptr_t)NULL;
		offset = offsetof(struct percpu_list_node, next);
		load = (intptr_t *)&head;
		ret = rseq_cmpnev_storeoffp_load(RSEQ_MO_RELAXED, RSEQ_PERCPU,
						 targetptr, expectnot,
						 offset, load, cpu);
		if (rseq_likely(!ret)) {
			if (_cpu)
				*_cpu = cpu;
			return head;
		}
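		/*
		 * A positive return value means the compare failed: the
		 * head was already NULL, i.e. this per-cpu list is empty.
		 */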
		if (ret > 0)
			return NULL;
		/* Retry if rseq aborts. */
	}
}

/*
 * __percpu_list_pop is not safe against concurrent accesses. Should
 * only be used on lists that are not concurrently modified.
 */
struct percpu_list_node *__percpu_list_pop(struct percpu_list *list, int cpu)
{
	struct percpu_list_node *node;

	node = list->c[cpu].head;
	if (!node)
		return NULL;
	list->c[cpu].head = node->next;
	return node;
}

void *test_percpu_list_thread(void *arg)
{
	int i;
	struct percpu_list *list = (struct percpu_list *)arg;

	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	for (i = 0; i < 100000; i++) {
		struct percpu_list_node *node;

		node = this_cpu_list_pop(list, NULL);
		sched_yield();  /* encourage shuffling */
		if (node)
			this_cpu_list_push(list, node, NULL);
	}

	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	return NULL;
}

/* Simultaneous modification to a per-cpu linked list from many threads.  */
void test_percpu_list(void)
{
	int i, j;
	uint64_t sum = 0, expected_sum = 0;
	struct percpu_list list;
	pthread_t test_threads[200];
	cpu_set_t allowed_cpus;

	memset(&list, 0, sizeof(list));

	/* Generate list entries for every usable cpu. */
	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
	for (i = 0; i < CPU_SETSIZE; i++) {
		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
			continue;
		for (j = 1; j <= 100; j++) {
			struct percpu_list_node *node;

			expected_sum += j;

			node = malloc(sizeof(*node));
			assert(node);
			node->data = j;
			node->next = list.c[i].head;
			list.c[i].head = node;
		}
	}

	for (i = 0; i < 200; i++)
		pthread_create(&test_threads[i], NULL,
		       test_percpu_list_thread, &list);

	for (i = 0; i < 200; i++)
		pthread_join(test_threads[i], NULL);

	for (i = 0; i < CPU_SETSIZE; i++) {
		struct percpu_list_node *node;

		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
			continue;

		while ((node = __percpu_list_pop(&list, i))) {
			sum += node->data;
			free(node);
		}
	}

	/*
	 * All entries should now be accounted for (unless some external
	 * actor is interfering with our allowed affinity while this
	 * test is running).
	 */
	assert(sum == expected_sum);
}

int main(int argc, char **argv)
{
	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		goto error;
	}
	if (!rseq_validate_cpu_id()) {
		fprintf(stderr, "Error: cpu id getter unavailable\n");
		goto error;
	}
	printf("spinlock\n");
	test_percpu_spinlock();
	printf("percpu_list\n");
	test_percpu_list();
	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		goto error;
	}
	return 0;

error:
	return -1;
}