11a59d1b8SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
297e1c18eSMathieu Desnoyers /*
3de7b2973SMathieu Desnoyers * Copyright (C) 2008-2014 Mathieu Desnoyers
497e1c18eSMathieu Desnoyers */
597e1c18eSMathieu Desnoyers #include <linux/module.h>
697e1c18eSMathieu Desnoyers #include <linux/mutex.h>
797e1c18eSMathieu Desnoyers #include <linux/types.h>
897e1c18eSMathieu Desnoyers #include <linux/jhash.h>
997e1c18eSMathieu Desnoyers #include <linux/list.h>
1097e1c18eSMathieu Desnoyers #include <linux/rcupdate.h>
1197e1c18eSMathieu Desnoyers #include <linux/tracepoint.h>
1297e1c18eSMathieu Desnoyers #include <linux/err.h>
1397e1c18eSMathieu Desnoyers #include <linux/slab.h>
143f07c014SIngo Molnar #include <linux/sched/signal.h>
1529930025SIngo Molnar #include <linux/sched/task.h>
16c5905afbSIngo Molnar #include <linux/static_key.h>
1797e1c18eSMathieu Desnoyers
/*
 * Number of probe callbacks installed on a tracepoint, as relevant to
 * the 0/1/2/N state transitions performed when probes are added or
 * removed (three or more callbacks are all treated as TP_FUNC_N).
 */
enum tp_func_state {
	TP_FUNC_0,
	TP_FUNC_1,
	TP_FUNC_2,
	TP_FUNC_N,
};
24231264d6SMathieu Desnoyers
259c0be3f6SMathieu Desnoyers extern tracepoint_ptr_t __start___tracepoints_ptrs[];
269c0be3f6SMathieu Desnoyers extern tracepoint_ptr_t __stop___tracepoints_ptrs[];
2797e1c18eSMathieu Desnoyers
/*
 * Identifies which probe-count transition an RCU grace-period snapshot
 * guards against:
 * - TP_TRANSITION_SYNC_1_0_1: a 1->0->1 sequence (last probe removed,
 *   then a probe registered again),
 * - TP_TRANSITION_SYNC_N_2_1: an N->...->2->1 sequence shrinking the
 *   probe array down to a single callback.
 */
enum tp_transition_sync {
	TP_TRANSITION_SYNC_1_0_1,
	TP_TRANSITION_SYNC_N_2_1,

	_NR_TP_TRANSITION_SYNC,	/* Number of transition types above. */
};
347b40066cSMathieu Desnoyers
/*
 * Records an RCU grace-period cookie for a pending probe-count
 * transition, so a later transition can conditionally wait on it
 * (see tp_rcu_get_state() / tp_rcu_cond_sync()).
 */
struct tp_transition_snapshot {
	unsigned long rcu;	/* Cookie from get_state_synchronize_rcu(). */
	bool ongoing;		/* True while a snapshot is pending. */
};
397b40066cSMathieu Desnoyers
407b40066cSMathieu Desnoyers /* Protected by tracepoints_mutex */
417b40066cSMathieu Desnoyers static struct tp_transition_snapshot tp_transition_snapshot[_NR_TP_TRANSITION_SYNC];
427b40066cSMathieu Desnoyers
/*
 * Record the current RCU grace-period state for transition @sync so a
 * later tp_rcu_cond_sync() only needs to wait if a grace period has not
 * already elapsed in between. Caller must hold tracepoints_mutex (the
 * snapshot array is protected by it).
 */
static void tp_rcu_get_state(enum tp_transition_sync sync)
{
	struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];

	/* Keep the latest get_state snapshot. */
	snapshot->rcu = get_state_synchronize_rcu();
	snapshot->ongoing = true;
}
517b40066cSMathieu Desnoyers
/*
 * If a snapshot is pending for transition @sync, wait until the RCU
 * grace period recorded by tp_rcu_get_state() has elapsed (a no-op if
 * it already has), then clear the pending state. Caller must hold
 * tracepoints_mutex.
 */
static void tp_rcu_cond_sync(enum tp_transition_sync sync)
{
	struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];

	if (!snapshot->ongoing)
		return;
	cond_synchronize_rcu(snapshot->rcu);
	snapshot->ongoing = false;
}
617b40066cSMathieu Desnoyers
6297e1c18eSMathieu Desnoyers /* Set to 1 to enable tracepoint debug output */
6397e1c18eSMathieu Desnoyers static const int tracepoint_debug;
6497e1c18eSMathieu Desnoyers
65b75ef8b4SMathieu Desnoyers #ifdef CONFIG_MODULES
66de7b2973SMathieu Desnoyers /*
67de7b2973SMathieu Desnoyers * Tracepoint module list mutex protects the local module list.
68de7b2973SMathieu Desnoyers */
69de7b2973SMathieu Desnoyers static DEFINE_MUTEX(tracepoint_module_list_mutex);
70de7b2973SMathieu Desnoyers
71de7b2973SMathieu Desnoyers /* Local list of struct tp_module */
72b75ef8b4SMathieu Desnoyers static LIST_HEAD(tracepoint_module_list);
73b75ef8b4SMathieu Desnoyers #endif /* CONFIG_MODULES */
74b75ef8b4SMathieu Desnoyers
7597e1c18eSMathieu Desnoyers /*
76de7b2973SMathieu Desnoyers * tracepoints_mutex protects the builtin and module tracepoints.
77de7b2973SMathieu Desnoyers * tracepoints_mutex nests inside tracepoint_module_list_mutex.
7897e1c18eSMathieu Desnoyers */
79de7b2973SMathieu Desnoyers static DEFINE_MUTEX(tracepoints_mutex);
8097e1c18eSMathieu Desnoyers
8197e1c18eSMathieu Desnoyers /*
8297e1c18eSMathieu Desnoyers * Note about RCU :
83fd589a8fSAnand Gadiyar * It is used to delay the free of multiple probes array until a quiescent
8497e1c18eSMathieu Desnoyers * state is reached.
8597e1c18eSMathieu Desnoyers */
struct tp_probes {
	struct rcu_head rcu;			/* For deferred kfree() via call_rcu*(). */
	struct tracepoint_func probes[];	/* Array terminated by a NULL .func entry. */
};
9097e1c18eSMathieu Desnoyers
91befe6d94SSteven Rostedt (VMware) /* Called in removal of a func but failed to allocate a new tp_funcs */
static void tp_stub_func(void)
{
	/* Intentionally empty: stands in for a probe that could not be removed. */
}
96befe6d94SSteven Rostedt (VMware)
allocate_probes(int count)9719dba33cSLai Jiangshan static inline void *allocate_probes(int count)
9897e1c18eSMathieu Desnoyers {
99f0553dcbSGustavo A. R. Silva struct tp_probes *p = kmalloc(struct_size(p, probes, count),
100f0553dcbSGustavo A. R. Silva GFP_KERNEL);
10119dba33cSLai Jiangshan return p == NULL ? NULL : p->probes;
10297e1c18eSMathieu Desnoyers }
10397e1c18eSMathieu Desnoyers
rcu_free_old_probes(struct rcu_head * head)104e53244e2SSteven Rostedt static void rcu_free_old_probes(struct rcu_head *head)
10597e1c18eSMathieu Desnoyers {
1060dea6d52SMathieu Desnoyers kfree(container_of(head, struct tp_probes, rcu));
10719dba33cSLai Jiangshan }
10819dba33cSLai Jiangshan
release_probes(struct tracepoint * tp,struct tracepoint_func * old)1092e8a12b8SMathieu Desnoyers static inline void release_probes(struct tracepoint *tp, struct tracepoint_func *old)
11019dba33cSLai Jiangshan {
11119dba33cSLai Jiangshan if (old) {
11219dba33cSLai Jiangshan struct tp_probes *tp_probes = container_of(old,
11319dba33cSLai Jiangshan struct tp_probes, probes[0]);
114f8a79d5cSSteven Rostedt (VMware)
1152e8a12b8SMathieu Desnoyers if (tracepoint_is_faultable(tp))
1162e8a12b8SMathieu Desnoyers call_rcu_tasks_trace(&tp_probes->rcu, rcu_free_old_probes);
1172e8a12b8SMathieu Desnoyers else
11874401729SPaul E. McKenney call_rcu(&tp_probes->rcu, rcu_free_old_probes);
11919dba33cSLai Jiangshan }
12097e1c18eSMathieu Desnoyers }
12197e1c18eSMathieu Desnoyers
debug_print_probes(struct tracepoint_func * funcs)122de7b2973SMathieu Desnoyers static void debug_print_probes(struct tracepoint_func *funcs)
12397e1c18eSMathieu Desnoyers {
12497e1c18eSMathieu Desnoyers int i;
12597e1c18eSMathieu Desnoyers
126de7b2973SMathieu Desnoyers if (!tracepoint_debug || !funcs)
12797e1c18eSMathieu Desnoyers return;
12897e1c18eSMathieu Desnoyers
129de7b2973SMathieu Desnoyers for (i = 0; funcs[i].func; i++)
130*30c94bbcSHuang Shijie printk(KERN_DEBUG "Probe %d : %pSb\n", i, funcs[i].func);
13197e1c18eSMathieu Desnoyers }
13297e1c18eSMathieu Desnoyers
/*
 * Build a new probes array with @tp_func inserted in priority order
 * (higher prio first, stable among equal priorities) and store it in
 * *funcs. Stub entries left behind by failed removals are dropped
 * while copying.
 *
 * Returns the old array (for release_probes()), or
 * ERR_PTR(-EINVAL/-EEXIST/-ENOMEM) on error.
 */
static struct tracepoint_func *
func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
	 int prio)
{
	struct tracepoint_func *old, *new;
	int iter_probes;	/* Iterate over old probe array. */
	int nr_probes = 0;	/* Counter for probes */
	int pos = -1;		/* Insertion position into new array */

	if (WARN_ON(!tp_func->func))
		return ERR_PTR(-EINVAL);

	debug_print_probes(*funcs);
	old = *funcs;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		/* First pass: count live entries and reject duplicates. */
		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
			if (old[iter_probes].func == tp_stub_func)
				continue;	/* Skip stub functions. */
			if (old[iter_probes].func == tp_func->func &&
			    old[iter_probes].data == tp_func->data)
				return ERR_PTR(-EEXIST);
			nr_probes++;
		}
	}
	/* + 2 : one for new probe, one for NULL func */
	new = allocate_probes(nr_probes + 2);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old) {
		/* Second pass: copy live entries, remembering where to insert. */
		nr_probes = 0;
		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
			if (old[iter_probes].func == tp_stub_func)
				continue;
			/* Insert before probes of lower priority */
			if (pos < 0 && old[iter_probes].prio < prio)
				pos = nr_probes++;
			new[nr_probes++] = old[iter_probes];
		}
		if (pos < 0)
			pos = nr_probes++;
		/* nr_probes now points to the end of the new array */
	} else {
		pos = 0;
		nr_probes = 1;	/* must point at end of array */
	}
	new[pos] = *tp_func;
	new[nr_probes].func = NULL;	/* NULL func terminates the array. */
	*funcs = new;
	debug_print_probes(*funcs);
	return old;
}
18597e1c18eSMathieu Desnoyers
/*
 * Build a new probes array with @tp_func removed and store it in *funcs
 * (NULL when the last probe goes away). A NULL tp_func->func removes
 * the entire entry. If allocating the shrunken array fails, the
 * matching entries are overwritten in place with tp_stub_func and the
 * old array is kept; callers detect this case by *funcs == return value.
 *
 * Returns the old array, or ERR_PTR(-ENOENT) if there is nothing to
 * remove.
 */
static void *func_remove(struct tracepoint_func **funcs,
		struct tracepoint_func *tp_func)
{
	int nr_probes = 0, nr_del = 0, i;
	struct tracepoint_func *old, *new;

	old = *funcs;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(*funcs);
	/* (N -> M), (N > 1, M >= 0) probes */
	if (tp_func->func) {
		/* Count total entries and how many will be deleted (stubs included). */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			if ((old[nr_probes].func == tp_func->func &&
			     old[nr_probes].data == tp_func->data) ||
			    old[nr_probes].func == tp_stub_func)
				nr_del++;
		}
	}

	/*
	 * If probe is NULL, then nr_probes = nr_del = 0, and then the
	 * entire entry will be removed.
	 */
	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		*funcs = NULL;
		debug_print_probes(*funcs);
		return old;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 0) */
		/* + 1 for NULL */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new) {
			/* Copy everything except the removed entries and stubs. */
			for (i = 0; old[i].func; i++) {
				if ((old[i].func != tp_func->func ||
				     old[i].data != tp_func->data) &&
				    old[i].func != tp_stub_func)
					new[j++] = old[i];
			}
			new[nr_probes - nr_del].func = NULL;
			*funcs = new;
		} else {
			/*
			 * Failed to allocate, replace the old function
			 * with calls to tp_stub_func.
			 */
			for (i = 0; old[i].func; i++) {
				if (old[i].func == tp_func->func &&
				    old[i].data == tp_func->data)
					WRITE_ONCE(old[i].func, tp_stub_func);
			}
			*funcs = old;
		}
	}
	debug_print_probes(*funcs);
	return old;
}
24797e1c18eSMathieu Desnoyers
248231264d6SMathieu Desnoyers /*
249231264d6SMathieu Desnoyers * Count the number of functions (enum tp_func_state) in a tp_funcs array.
250231264d6SMathieu Desnoyers */
nr_func_state(const struct tracepoint_func * tp_funcs)251231264d6SMathieu Desnoyers static enum tp_func_state nr_func_state(const struct tracepoint_func *tp_funcs)
252231264d6SMathieu Desnoyers {
253231264d6SMathieu Desnoyers if (!tp_funcs)
254231264d6SMathieu Desnoyers return TP_FUNC_0;
255231264d6SMathieu Desnoyers if (!tp_funcs[1].func)
256231264d6SMathieu Desnoyers return TP_FUNC_1;
257231264d6SMathieu Desnoyers if (!tp_funcs[2].func)
258231264d6SMathieu Desnoyers return TP_FUNC_2;
259231264d6SMathieu Desnoyers return TP_FUNC_N; /* 3 or more */
260231264d6SMathieu Desnoyers }
261231264d6SMathieu Desnoyers
/*
 * Point the tracepoint's static call at the probe to invoke: directly
 * at the single probe function when exactly one is registered,
 * otherwise at the iterator which walks tp->funcs.
 */
static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs)
{
	void *func = tp->iterator;

	/* Synthetic events do not have static call sites */
	if (!tp->static_call_key)
		return;
	if (nr_func_state(tp_funcs) == TP_FUNC_1)
		func = tp_funcs[0].func;
	__static_call_update(tp->static_call_key, tp->static_call_tramp, func);
}
273d25e37d8SSteven Rostedt (VMware)
27497e1c18eSMathieu Desnoyers /*
275de7b2973SMathieu Desnoyers * Add the probe function to a tracepoint.
27697e1c18eSMathieu Desnoyers */
static int tracepoint_add_func(struct tracepoint *tp,
			       struct tracepoint_func *func, int prio,
			       bool warn)
{
	struct tracepoint_func *old, *tp_funcs;
	int ret;

	/* Run the registration callback before the first probe is enabled. */
	if (tp->ext && tp->ext->regfunc && !static_key_enabled(&tp->key)) {
		ret = tp->ext->regfunc();
		if (ret < 0)
			return ret;
	}

	/* Caller holds tracepoints_mutex. */
	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_add(&tp_funcs, func, prio);
	if (IS_ERR(old)) {
		/* @warn selects whether to warn on errors; -ENOMEM never warns. */
		WARN_ON_ONCE(warn && PTR_ERR(old) != -ENOMEM);
		return PTR_ERR(old);
	}

	/*
	 * rcu_assign_pointer has a smp_store_release() which makes sure
	 * that the new probe callbacks array is consistent before setting
	 * a pointer to it. This array is referenced by __DO_TRACE from
	 * include/linux/tracepoint.h using rcu_dereference_sched().
	 */
	switch (nr_func_state(tp_funcs)) {
	case TP_FUNC_1:		/* 0->1 */
		/*
		 * Make sure new static func never uses old data after a
		 * 1->0->1 transition sequence.
		 */
		tp_rcu_cond_sync(TP_TRANSITION_SYNC_1_0_1);
		/* Set static call to first function */
		tracepoint_update_call(tp, tp_funcs);
		/* Both iterator and static call handle NULL tp->funcs */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		static_branch_enable(&tp->key);
		break;
	case TP_FUNC_2:		/* 1->2 */
		/* Set iterator static call */
		tracepoint_update_call(tp, tp_funcs);
		/*
		 * Iterator callback installed before updating tp->funcs.
		 * Requires ordering between RCU assign/dereference and
		 * static call update/call.
		 */
		fallthrough;
	case TP_FUNC_N:		/* N->N+1 (N>1) */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>1) transition sequence.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	release_probes(tp, old);
	return 0;
}
34397e1c18eSMathieu Desnoyers
34497e1c18eSMathieu Desnoyers /*
345de7b2973SMathieu Desnoyers * Remove a probe function from a tracepoint.
 * Note: only waiting an RCU period after setting elem->call to the empty
 * function ensures that the original callback is not used anymore. This is
 * ensured by preempt_disable around the call site.
34997e1c18eSMathieu Desnoyers */
static int tracepoint_remove_func(struct tracepoint *tp,
		struct tracepoint_func *func)
{
	struct tracepoint_func *old, *tp_funcs;

	/* Caller holds tracepoints_mutex. */
	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_remove(&tp_funcs, func);
	if (WARN_ON_ONCE(IS_ERR(old)))
		return PTR_ERR(old);

	if (tp_funcs == old)
		/* Failed allocating new tp_funcs, replaced func with stub */
		return 0;

	switch (nr_func_state(tp_funcs)) {
	case TP_FUNC_0:		/* 1->0 */
		/* Removed last function */
		if (tp->ext && tp->ext->unregfunc && static_key_enabled(&tp->key))
			tp->ext->unregfunc();
		static_branch_disable(&tp->key);
		/* Set iterator static call */
		tracepoint_update_call(tp, tp_funcs);
		/* Both iterator and static call handle NULL tp->funcs */
		rcu_assign_pointer(tp->funcs, NULL);
		/*
		 * Make sure new static func never uses old data after a
		 * 1->0->1 transition sequence.
		 */
		tp_rcu_get_state(TP_TRANSITION_SYNC_1_0_1);
		break;
	case TP_FUNC_1:		/* 2->1 */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>2) transition sequence. If the first
		 * element's data has changed, then force the synchronization
		 * to prevent current readers that have loaded the old data
		 * from calling the new function.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		tp_rcu_cond_sync(TP_TRANSITION_SYNC_N_2_1);
		/* Set static call to first function */
		tracepoint_update_call(tp, tp_funcs);
		break;
	case TP_FUNC_2:		/* N->N-1 (N>2) */
		fallthrough;
	case TP_FUNC_N:
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>2) transition sequence.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	release_probes(tp, old);
	return 0;
}
414127cafbbSLai Jiangshan
41597e1c18eSMathieu Desnoyers /**
4169913d574SSteven Rostedt (VMware) * tracepoint_probe_register_prio_may_exist - Connect a probe to a tracepoint with priority
4179913d574SSteven Rostedt (VMware) * @tp: tracepoint
4189913d574SSteven Rostedt (VMware) * @probe: probe handler
4199913d574SSteven Rostedt (VMware) * @data: tracepoint data
4209913d574SSteven Rostedt (VMware) * @prio: priority of this function over other registered functions
4219913d574SSteven Rostedt (VMware) *
4229913d574SSteven Rostedt (VMware) * Same as tracepoint_probe_register_prio() except that it will not warn
4239913d574SSteven Rostedt (VMware) * if the tracepoint is already registered.
4249913d574SSteven Rostedt (VMware) */
tracepoint_probe_register_prio_may_exist(struct tracepoint * tp,void * probe,void * data,int prio)4259913d574SSteven Rostedt (VMware) int tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe,
4269913d574SSteven Rostedt (VMware) void *data, int prio)
4279913d574SSteven Rostedt (VMware) {
4289913d574SSteven Rostedt (VMware) struct tracepoint_func tp_func;
4299913d574SSteven Rostedt (VMware) int ret;
4309913d574SSteven Rostedt (VMware)
4319913d574SSteven Rostedt (VMware) mutex_lock(&tracepoints_mutex);
4329913d574SSteven Rostedt (VMware) tp_func.func = probe;
4339913d574SSteven Rostedt (VMware) tp_func.data = data;
4349913d574SSteven Rostedt (VMware) tp_func.prio = prio;
4359913d574SSteven Rostedt (VMware) ret = tracepoint_add_func(tp, &tp_func, prio, false);
4369913d574SSteven Rostedt (VMware) mutex_unlock(&tracepoints_mutex);
4379913d574SSteven Rostedt (VMware) return ret;
4389913d574SSteven Rostedt (VMware) }
4399913d574SSteven Rostedt (VMware) EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio_may_exist);
4409913d574SSteven Rostedt (VMware)
4419913d574SSteven Rostedt (VMware) /**
442f39e2391SLee, Chun-Yi * tracepoint_probe_register_prio - Connect a probe to a tracepoint with priority
443de7b2973SMathieu Desnoyers * @tp: tracepoint
44497e1c18eSMathieu Desnoyers * @probe: probe handler
445cac92ba7SFabian Frederick * @data: tracepoint data
4467904b5c4SSteven Rostedt (Red Hat) * @prio: priority of this function over other registered functions
4477904b5c4SSteven Rostedt (Red Hat) *
4487904b5c4SSteven Rostedt (Red Hat) * Returns 0 if ok, error value on error.
4497904b5c4SSteven Rostedt (Red Hat) * Note: if @tp is within a module, the caller is responsible for
4507904b5c4SSteven Rostedt (Red Hat) * unregistering the probe before the module is gone. This can be
4517904b5c4SSteven Rostedt (Red Hat) * performed either with a tracepoint module going notifier, or from
4527904b5c4SSteven Rostedt (Red Hat) * within module exit functions.
4537904b5c4SSteven Rostedt (Red Hat) */
tracepoint_probe_register_prio(struct tracepoint * tp,void * probe,void * data,int prio)4547904b5c4SSteven Rostedt (Red Hat) int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
4557904b5c4SSteven Rostedt (Red Hat) void *data, int prio)
4567904b5c4SSteven Rostedt (Red Hat) {
4577904b5c4SSteven Rostedt (Red Hat) struct tracepoint_func tp_func;
4587904b5c4SSteven Rostedt (Red Hat) int ret;
4597904b5c4SSteven Rostedt (Red Hat)
4607904b5c4SSteven Rostedt (Red Hat) mutex_lock(&tracepoints_mutex);
4617904b5c4SSteven Rostedt (Red Hat) tp_func.func = probe;
4627904b5c4SSteven Rostedt (Red Hat) tp_func.data = data;
4637904b5c4SSteven Rostedt (Red Hat) tp_func.prio = prio;
4649913d574SSteven Rostedt (VMware) ret = tracepoint_add_func(tp, &tp_func, prio, true);
4657904b5c4SSteven Rostedt (Red Hat) mutex_unlock(&tracepoints_mutex);
4667904b5c4SSteven Rostedt (Red Hat) return ret;
4677904b5c4SSteven Rostedt (Red Hat) }
4687904b5c4SSteven Rostedt (Red Hat) EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio);
4697904b5c4SSteven Rostedt (Red Hat)
4707904b5c4SSteven Rostedt (Red Hat) /**
4717904b5c4SSteven Rostedt (Red Hat) * tracepoint_probe_register - Connect a probe to a tracepoint
4727904b5c4SSteven Rostedt (Red Hat) * @tp: tracepoint
4737904b5c4SSteven Rostedt (Red Hat) * @probe: probe handler
4747904b5c4SSteven Rostedt (Red Hat) * @data: tracepoint data
47597e1c18eSMathieu Desnoyers *
476de7b2973SMathieu Desnoyers * Returns 0 if ok, error value on error.
477de7b2973SMathieu Desnoyers * Note: if @tp is within a module, the caller is responsible for
478de7b2973SMathieu Desnoyers * unregistering the probe before the module is gone. This can be
479de7b2973SMathieu Desnoyers * performed either with a tracepoint module going notifier, or from
480de7b2973SMathieu Desnoyers * within module exit functions.
48197e1c18eSMathieu Desnoyers */
int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
{
	/* Register with the default priority. */
	return tracepoint_probe_register_prio(tp, probe, data, TRACEPOINT_DEFAULT_PRIO);
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);
48797e1c18eSMathieu Desnoyers
48897e1c18eSMathieu Desnoyers /**
48997e1c18eSMathieu Desnoyers * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
490de7b2973SMathieu Desnoyers * @tp: tracepoint
49197e1c18eSMathieu Desnoyers * @probe: probe function pointer
492cac92ba7SFabian Frederick * @data: tracepoint data
49397e1c18eSMathieu Desnoyers *
494de7b2973SMathieu Desnoyers * Returns 0 if ok, error value on error.
49597e1c18eSMathieu Desnoyers */
tracepoint_probe_unregister(struct tracepoint * tp,void * probe,void * data)496de7b2973SMathieu Desnoyers int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
49797e1c18eSMathieu Desnoyers {
498de7b2973SMathieu Desnoyers struct tracepoint_func tp_func;
499de7b2973SMathieu Desnoyers int ret;
50097e1c18eSMathieu Desnoyers
50197e1c18eSMathieu Desnoyers mutex_lock(&tracepoints_mutex);
502de7b2973SMathieu Desnoyers tp_func.func = probe;
503de7b2973SMathieu Desnoyers tp_func.data = data;
504de7b2973SMathieu Desnoyers ret = tracepoint_remove_func(tp, &tp_func);
50597e1c18eSMathieu Desnoyers mutex_unlock(&tracepoints_mutex);
506de7b2973SMathieu Desnoyers return ret;
50797e1c18eSMathieu Desnoyers }
50897e1c18eSMathieu Desnoyers EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
50997e1c18eSMathieu Desnoyers
/* Invoke @fct on every tracepoint in the [begin, end) pointer section. */
static void for_each_tracepoint_range(
		tracepoint_ptr_t *begin, tracepoint_ptr_t *end,
		void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	tracepoint_ptr_t *pos = begin;

	if (!pos)
		return;
	while (pos < end) {
		fct(tracepoint_ptr_deref(pos), priv);
		pos++;
	}
}
52246e0c9beSArd Biesheuvel
523227a8375SIngo Molnar #ifdef CONFIG_MODULES
trace_module_has_bad_taint(struct module * mod)52445ab2813SSteven Rostedt (Red Hat) bool trace_module_has_bad_taint(struct module *mod)
52545ab2813SSteven Rostedt (Red Hat) {
52666cc69e3SMathieu Desnoyers return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) |
527e7bb66f7SJianlin Lv (1 << TAINT_UNSIGNED_MODULE) | (1 << TAINT_TEST) |
528e7bb66f7SJianlin Lv (1 << TAINT_LIVEPATCH));
52945ab2813SSteven Rostedt (Red Hat) }
53045ab2813SSteven Rostedt (Red Hat)
531de7b2973SMathieu Desnoyers static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);
532de7b2973SMathieu Desnoyers
533de7b2973SMathieu Desnoyers /**
534bd740953Szhaoxiao * register_tracepoint_module_notifier - register tracepoint coming/going notifier
535de7b2973SMathieu Desnoyers * @nb: notifier block
536de7b2973SMathieu Desnoyers *
537de7b2973SMathieu Desnoyers * Notifiers registered with this function are called on module
538de7b2973SMathieu Desnoyers * coming/going with the tracepoint_module_list_mutex held.
539de7b2973SMathieu Desnoyers * The notifier block callback should expect a "struct tp_module" data
540de7b2973SMathieu Desnoyers * pointer.
541de7b2973SMathieu Desnoyers */
register_tracepoint_module_notifier(struct notifier_block * nb)542de7b2973SMathieu Desnoyers int register_tracepoint_module_notifier(struct notifier_block *nb)
543de7b2973SMathieu Desnoyers {
544de7b2973SMathieu Desnoyers struct tp_module *tp_mod;
545de7b2973SMathieu Desnoyers int ret;
546de7b2973SMathieu Desnoyers
547de7b2973SMathieu Desnoyers mutex_lock(&tracepoint_module_list_mutex);
548de7b2973SMathieu Desnoyers ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
549de7b2973SMathieu Desnoyers if (ret)
550de7b2973SMathieu Desnoyers goto end;
551de7b2973SMathieu Desnoyers list_for_each_entry(tp_mod, &tracepoint_module_list, list)
552de7b2973SMathieu Desnoyers (void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
553de7b2973SMathieu Desnoyers end:
554de7b2973SMathieu Desnoyers mutex_unlock(&tracepoint_module_list_mutex);
555de7b2973SMathieu Desnoyers return ret;
556de7b2973SMathieu Desnoyers }
557de7b2973SMathieu Desnoyers EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);
558de7b2973SMathieu Desnoyers
559de7b2973SMathieu Desnoyers /**
560bd740953Szhaoxiao * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier
561de7b2973SMathieu Desnoyers * @nb: notifier block
562de7b2973SMathieu Desnoyers *
563de7b2973SMathieu Desnoyers * The notifier block callback should expect a "struct tp_module" data
564de7b2973SMathieu Desnoyers * pointer.
565de7b2973SMathieu Desnoyers */
unregister_tracepoint_module_notifier(struct notifier_block * nb)566de7b2973SMathieu Desnoyers int unregister_tracepoint_module_notifier(struct notifier_block *nb)
567de7b2973SMathieu Desnoyers {
568de7b2973SMathieu Desnoyers struct tp_module *tp_mod;
569de7b2973SMathieu Desnoyers int ret;
570de7b2973SMathieu Desnoyers
571de7b2973SMathieu Desnoyers mutex_lock(&tracepoint_module_list_mutex);
572de7b2973SMathieu Desnoyers ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
573de7b2973SMathieu Desnoyers if (ret)
574de7b2973SMathieu Desnoyers goto end;
575de7b2973SMathieu Desnoyers list_for_each_entry(tp_mod, &tracepoint_module_list, list)
576de7b2973SMathieu Desnoyers (void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
577de7b2973SMathieu Desnoyers end:
578de7b2973SMathieu Desnoyers mutex_unlock(&tracepoint_module_list_mutex);
579de7b2973SMathieu Desnoyers return ret;
580de7b2973SMathieu Desnoyers
581de7b2973SMathieu Desnoyers }
582de7b2973SMathieu Desnoyers EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);
583de7b2973SMathieu Desnoyers
/*
 * Ensure the tracer unregistered the module's probes before the module
 * teardown is performed. Prevents leaks of probe and data pointers.
 */
static void tp_module_going_check_quiescent(struct tracepoint *tp, void *priv)
{
	/* A non-NULL funcs array means a probe is still registered. */
	WARN_ON_ONCE(tp->funcs);
}
592de7b2973SMathieu Desnoyers
tracepoint_module_coming(struct module * mod)593b75ef8b4SMathieu Desnoyers static int tracepoint_module_coming(struct module *mod)
594b75ef8b4SMathieu Desnoyers {
5950dea6d52SMathieu Desnoyers struct tp_module *tp_mod;
596b75ef8b4SMathieu Desnoyers
5977dec935aSSteven Rostedt (Red Hat) if (!mod->num_tracepoints)
5987dec935aSSteven Rostedt (Red Hat) return 0;
5997dec935aSSteven Rostedt (Red Hat)
600b75ef8b4SMathieu Desnoyers /*
601c10076c4SSteven Rostedt * We skip modules that taint the kernel, especially those with different
602c10076c4SSteven Rostedt * module headers (for forced load), to make sure we don't cause a crash.
60354be5509SAlison Schofield * Staging, out-of-tree, unsigned GPL, and test modules are fine.
604b75ef8b4SMathieu Desnoyers */
60545ab2813SSteven Rostedt (Red Hat) if (trace_module_has_bad_taint(mod))
606b75ef8b4SMathieu Desnoyers return 0;
60751714678SZhen Lei
608b75ef8b4SMathieu Desnoyers tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
60951714678SZhen Lei if (!tp_mod)
61051714678SZhen Lei return -ENOMEM;
611eb7d035cSSteven Rostedt (Red Hat) tp_mod->mod = mod;
61251714678SZhen Lei
61351714678SZhen Lei mutex_lock(&tracepoint_module_list_mutex);
6140dea6d52SMathieu Desnoyers list_add_tail(&tp_mod->list, &tracepoint_module_list);
615de7b2973SMathieu Desnoyers blocking_notifier_call_chain(&tracepoint_notify_list,
616de7b2973SMathieu Desnoyers MODULE_STATE_COMING, tp_mod);
617de7b2973SMathieu Desnoyers mutex_unlock(&tracepoint_module_list_mutex);
61851714678SZhen Lei return 0;
619b75ef8b4SMathieu Desnoyers }
620b75ef8b4SMathieu Desnoyers
/*
 * Stop tracking @mod on unload: notify GOING listeners, drop it from
 * tracepoint_module_list and verify all of its probes were unregistered.
 */
static void tracepoint_module_going(struct module *mod)
{
	struct tp_module *tp_mod;

	if (!mod->num_tracepoints)
		return;

	mutex_lock(&tracepoint_module_list_mutex);
	list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
		if (tp_mod->mod == mod) {
			blocking_notifier_call_chain(&tracepoint_notify_list,
					MODULE_STATE_GOING, tp_mod);
			list_del(&tp_mod->list);
			kfree(tp_mod);
			/*
			 * Called the going notifier before checking for
			 * quiescence.
			 */
			for_each_tracepoint_range(mod->tracepoints_ptrs,
				mod->tracepoints_ptrs + mod->num_tracepoints,
				tp_module_going_check_quiescent, NULL);
			break;
		}
	}
	/*
	 * In the case of modules that were tainted at "coming", we'll simply
	 * walk through the list without finding it. We cannot use the "tainted"
	 * flag on "going", in case a module taints the kernel only after being
	 * loaded.
	 */
	mutex_unlock(&tracepoint_module_list_mutex);
}
653227a8375SIngo Molnar
tracepoint_module_notify(struct notifier_block * self,unsigned long val,void * data)654de7b2973SMathieu Desnoyers static int tracepoint_module_notify(struct notifier_block *self,
65532f85742SMathieu Desnoyers unsigned long val, void *data)
65632f85742SMathieu Desnoyers {
65732f85742SMathieu Desnoyers struct module *mod = data;
658b75ef8b4SMathieu Desnoyers int ret = 0;
65932f85742SMathieu Desnoyers
66032f85742SMathieu Desnoyers switch (val) {
66132f85742SMathieu Desnoyers case MODULE_STATE_COMING:
662b75ef8b4SMathieu Desnoyers ret = tracepoint_module_coming(mod);
663b75ef8b4SMathieu Desnoyers break;
664b75ef8b4SMathieu Desnoyers case MODULE_STATE_LIVE:
665b75ef8b4SMathieu Desnoyers break;
66632f85742SMathieu Desnoyers case MODULE_STATE_GOING:
667de7b2973SMathieu Desnoyers tracepoint_module_going(mod);
668de7b2973SMathieu Desnoyers break;
669de7b2973SMathieu Desnoyers case MODULE_STATE_UNFORMED:
67032f85742SMathieu Desnoyers break;
67132f85742SMathieu Desnoyers }
6720340a6b7SPeter Zijlstra return notifier_from_errno(ret);
67332f85742SMathieu Desnoyers }
67432f85742SMathieu Desnoyers
/* Hooks tracepoint bookkeeping into module load/unload notifications. */
static struct notifier_block tracepoint_module_nb = {
	.notifier_call = tracepoint_module_notify,
	.priority = 0,
};
67932f85742SMathieu Desnoyers
init_tracepoints(void)680de7b2973SMathieu Desnoyers static __init int init_tracepoints(void)
68132f85742SMathieu Desnoyers {
682de7b2973SMathieu Desnoyers int ret;
683de7b2973SMathieu Desnoyers
684de7b2973SMathieu Desnoyers ret = register_module_notifier(&tracepoint_module_nb);
685eb7d035cSSteven Rostedt (Red Hat) if (ret)
686a395d6a7SJoe Perches pr_warn("Failed to register tracepoint module enter notifier\n");
687eb7d035cSSteven Rostedt (Red Hat)
688de7b2973SMathieu Desnoyers return ret;
68932f85742SMathieu Desnoyers }
69032f85742SMathieu Desnoyers __initcall(init_tracepoints);
691d5dbf8b4SMasami Hiramatsu (Google)
692d5dbf8b4SMasami Hiramatsu (Google) /**
693d4df54f3SMasami Hiramatsu (Google) * for_each_tracepoint_in_module - iteration on all tracepoints in a module
694d4df54f3SMasami Hiramatsu (Google) * @mod: module
695d4df54f3SMasami Hiramatsu (Google) * @fct: callback
696d4df54f3SMasami Hiramatsu (Google) * @priv: private data
697d4df54f3SMasami Hiramatsu (Google) */
for_each_tracepoint_in_module(struct module * mod,void (* fct)(struct tracepoint * tp,struct module * mod,void * priv),void * priv)698d4df54f3SMasami Hiramatsu (Google) void for_each_tracepoint_in_module(struct module *mod,
699d4df54f3SMasami Hiramatsu (Google) void (*fct)(struct tracepoint *tp,
700d4df54f3SMasami Hiramatsu (Google) struct module *mod, void *priv),
701d4df54f3SMasami Hiramatsu (Google) void *priv)
702d4df54f3SMasami Hiramatsu (Google) {
703d4df54f3SMasami Hiramatsu (Google) tracepoint_ptr_t *begin, *end, *iter;
704d4df54f3SMasami Hiramatsu (Google)
705d4df54f3SMasami Hiramatsu (Google) lockdep_assert_held(&tracepoint_module_list_mutex);
706d4df54f3SMasami Hiramatsu (Google)
707d4df54f3SMasami Hiramatsu (Google) if (!mod)
708d4df54f3SMasami Hiramatsu (Google) return;
709d4df54f3SMasami Hiramatsu (Google)
710d4df54f3SMasami Hiramatsu (Google) begin = mod->tracepoints_ptrs;
711d4df54f3SMasami Hiramatsu (Google) end = mod->tracepoints_ptrs + mod->num_tracepoints;
712d4df54f3SMasami Hiramatsu (Google)
713d4df54f3SMasami Hiramatsu (Google) for (iter = begin; iter < end; iter++)
714d4df54f3SMasami Hiramatsu (Google) fct(tracepoint_ptr_deref(iter), mod, priv);
715d4df54f3SMasami Hiramatsu (Google) }
716d4df54f3SMasami Hiramatsu (Google)
717d4df54f3SMasami Hiramatsu (Google) /**
718d5dbf8b4SMasami Hiramatsu (Google) * for_each_module_tracepoint - iteration on all tracepoints in all modules
719d5dbf8b4SMasami Hiramatsu (Google) * @fct: callback
720d5dbf8b4SMasami Hiramatsu (Google) * @priv: private data
721d5dbf8b4SMasami Hiramatsu (Google) */
for_each_module_tracepoint(void (* fct)(struct tracepoint * tp,struct module * mod,void * priv),void * priv)722d4df54f3SMasami Hiramatsu (Google) void for_each_module_tracepoint(void (*fct)(struct tracepoint *tp,
723d4df54f3SMasami Hiramatsu (Google) struct module *mod, void *priv),
724d5dbf8b4SMasami Hiramatsu (Google) void *priv)
725d5dbf8b4SMasami Hiramatsu (Google) {
726d5dbf8b4SMasami Hiramatsu (Google) struct tp_module *tp_mod;
727d5dbf8b4SMasami Hiramatsu (Google)
728d5dbf8b4SMasami Hiramatsu (Google) mutex_lock(&tracepoint_module_list_mutex);
729d4df54f3SMasami Hiramatsu (Google) list_for_each_entry(tp_mod, &tracepoint_module_list, list)
730d4df54f3SMasami Hiramatsu (Google) for_each_tracepoint_in_module(tp_mod->mod, fct, priv);
731d5dbf8b4SMasami Hiramatsu (Google) mutex_unlock(&tracepoint_module_list_mutex);
732d5dbf8b4SMasami Hiramatsu (Google) }
733227a8375SIngo Molnar #endif /* CONFIG_MODULES */
734a871bd33SJason Baron
/**
 * for_each_kernel_tracepoint - iteration on all kernel tracepoints
 * @fct: callback
 * @priv: private data
 */
void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	/* Walks only the built-in tracepoint section, not module sections. */
	for_each_tracepoint_range(__start___tracepoints_ptrs,
		__stop___tracepoints_ptrs, fct, priv);
}
EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);
747de7b2973SMathieu Desnoyers
7483d27d8cbSJosh Stone #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
74960d970c2SIngo Molnar
/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
/* Users of the syscall tracepoints; TIF work is toggled on 0<->1 transitions. */
static int sys_tracepoint_refcount;

syscall_regfunc(void)7538cf868afSSteven Rostedt (Red Hat) int syscall_regfunc(void)
754a871bd33SJason Baron {
7558063e41dSOleg Nesterov struct task_struct *p, *t;
756a871bd33SJason Baron
757a871bd33SJason Baron if (!sys_tracepoint_refcount) {
7588063e41dSOleg Nesterov read_lock(&tasklist_lock);
7598063e41dSOleg Nesterov for_each_process_thread(p, t) {
760524666cbSGabriel Krisman Bertazi set_task_syscall_work(t, SYSCALL_TRACEPOINT);
7618063e41dSOleg Nesterov }
7628063e41dSOleg Nesterov read_unlock(&tasklist_lock);
763a871bd33SJason Baron }
764a871bd33SJason Baron sys_tracepoint_refcount++;
7658cf868afSSteven Rostedt (Red Hat)
7668cf868afSSteven Rostedt (Red Hat) return 0;
767a871bd33SJason Baron }
768a871bd33SJason Baron
syscall_unregfunc(void)769a871bd33SJason Baron void syscall_unregfunc(void)
770a871bd33SJason Baron {
7718063e41dSOleg Nesterov struct task_struct *p, *t;
772a871bd33SJason Baron
773a871bd33SJason Baron sys_tracepoint_refcount--;
774a871bd33SJason Baron if (!sys_tracepoint_refcount) {
7758063e41dSOleg Nesterov read_lock(&tasklist_lock);
7768063e41dSOleg Nesterov for_each_process_thread(p, t) {
777524666cbSGabriel Krisman Bertazi clear_task_syscall_work(t, SYSCALL_TRACEPOINT);
7788063e41dSOleg Nesterov }
7798063e41dSOleg Nesterov read_unlock(&tasklist_lock);
780a871bd33SJason Baron }
781a871bd33SJason Baron }
78260d970c2SIngo Molnar #endif
783