xref: /linux-6.15/kernel/taskstats.c (revision 3b64b188)
/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pid_namespace.h>
#include <net/genetlink.h>
#include <linux/atomic.h>

/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)

static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;

static struct genl_family family = {
	.id		= GENL_ID_GENERATE,
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.maxattr	= TASKSTATS_CMD_ATTR_MAX,
};

static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};

static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};

struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};

struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};

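/*
 * Allocate a reply skb of @size and write its generic netlink header.
 * With a NULL @info the message is an unsolicited notification (e.g. the
 * exit-time push) and gets a per-cpu sequence number; otherwise the header
 * is built as a reply to the request described by @info.
 */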
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
				size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = genlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		int seq = this_cpu_inc_return(taskstats_seqnum) - 1;

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else
		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	return 0;
}

/*
 * Send taskstats data in @skb to listener with nl_pid @pid
 */
static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	void *reply = genlmsg_data(genlhdr);
	int rc;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return rc;
	}

	return genlmsg_reply(skb, info);
}

/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb,
					struct listener_list *listeners)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return;
	}

	rc = 0;
	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}

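/*
 * Fill @stats for a single task: delay accounting, basic accounting
 * (including context-switch counts) and extended accounting fields.
 */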
static void fill_stats(struct user_namespace *user_ns,
		       struct pid_namespace *pid_ns,
		       struct task_struct *tsk, struct taskstats *stats)
{
	memset(stats, 0, sizeof(*stats));
	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */

	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	stats->nvcsw = tsk->nvcsw;
	stats->nivcsw = tsk->nivcsw;
	bacct_add_tsk(user_ns, pid_ns, stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);
}

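/*
 * Look up the task with (virtual) pid @pid, take a reference on it and
 * fill @stats from it.  Returns -ESRCH if no such task exists.
 */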
static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return -ESRCH;
	fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats);
	put_task_struct(tsk);
	return 0;
}

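/*
 * Fill @stats for a whole thread group: start from the stats already
 * accumulated for dead threads in signal->stats (if any) and add the
 * per-task numbers of every live thread under siglock.
 */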
static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
{
	struct task_struct *tsk, *first;
	unsigned long flags;
	int rc = -ESRCH;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	rcu_read_lock();
	first = find_task_by_vpid(tgid);

	if (!first || !lock_task_sighand(first, &flags))
		goto out;

	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));
	else
		memset(stats, 0, sizeof(*stats));

	tsk = first;
	do {
		if (tsk->exit_state)
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

		stats->nvcsw += tsk->nvcsw;
		stats->nivcsw += tsk->nivcsw;
	} while_each_thread(first, tsk);

	unlock_task_sighand(first, &flags);
	rc = 0;
out:
	rcu_read_unlock();

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return rc;
}

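/*
 * Fold the exiting task's delay accounting into the per-thread-group
 * taskstats (tsk->signal->stats), if one has been allocated.
 */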
static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk, into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
	return;
}

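/*
 * Register or deregister @pid as an exit-data listener on every cpu in
 * @mask.  Only tasks in the initial user and pid namespaces may do this.
 * A failed allocation part-way through a registration falls through to
 * the deregister path to undo the partial work.
 */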
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp, *s2;
	unsigned int cpu;

	if (!cpumask_subset(mask, cpu_possible_mask))
		return -EINVAL;

	if (current_user_ns() != &init_user_ns)
		return -EINVAL;

	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener),
					GFP_KERNEL, cpu_to_node(cpu));
			if (!s)
				goto cleanup;

			s->pid = pid;
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_for_each_entry(s2, &listeners->list, list) {
				if (s2->pid == pid && s2->valid)
					goto exists;
			}
			list_add(&s->list, &listeners->list);
			s = NULL;
exists:
			up_write(&listeners->sem);
			kfree(s); /* nop if NULL */
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return 0;
}

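/*
 * Parse a cpulist string from @na into @mask.  Returns 1 if the attribute
 * is absent, a negative errno on error, and 0 on success.
 */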
static int parse(struct nlattr *na, struct cpumask *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, mask);
	kfree(data);
	return ret;
}

#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TASKSTATS_NEEDS_PADDING 1
#endif

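/*
 * Append an aggregate (pid/tgid + stats) attribute to @skb: write the
 * pid/tgid, reserve space for the taskstats payload and return a pointer
 * to that payload, or NULL if the skb has run out of room.
 */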
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
	struct nlattr *na, *ret;
	int aggr;

	aggr = (type == TASKSTATS_TYPE_PID)
			? TASKSTATS_TYPE_AGGR_PID
			: TASKSTATS_TYPE_AGGR_TGID;

	/*
	 * The taskstats structure is internally aligned on 8 byte
	 * boundaries but the layout of the aggregate reply, with
	 * two NLA headers and the pid (each 4 bytes), actually
	 * forces the entire structure to be unaligned. This causes
	 * the kernel to issue unaligned access warnings on some
	 * architectures like ia64. Unfortunately, some software out there
	 * doesn't properly unroll the NLA packet and assumes that the start
	 * of the taskstats structure will always be 20 bytes from the start
	 * of the netlink payload. Aligning the start of the taskstats
	 * structure breaks this software, which we don't want. So, for now
	 * the alignment only happens on architectures that require it
	 * and those users will have to update to fixed versions of those
	 * packages. Space is reserved in the packet only when needed.
	 * This ifdef should be removed in several years e.g. 2012 once
	 * we can be confident that fixed versions are installed on most
	 * systems. We add the padding before the aggregate since the
	 * aggregate is already a defined type.
	 */
#ifdef TASKSTATS_NEEDS_PADDING
	if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0)
		goto err;
#endif
	na = nla_nest_start(skb, aggr);
	if (!na)
		goto err;

	if (nla_put(skb, type, sizeof(pid), &pid) < 0)
		goto err;
	ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
	if (!ret)
		goto err;
	nla_nest_end(skb, na);

	return nla_data(ret);
err:
	return NULL;
}

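/*
 * CGROUPSTATS_CMD_GET handler: look up the file given by the
 * CGROUPSTATS_CMD_ATTR_FD attribute (expected to refer to a cgroup
 * directory), build cgroupstats from its dentry and send them back
 * to the requester.
 */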
static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct cgroupstats *stats;
	struct nlattr *na;
	size_t size;
	u32 fd;
	struct fd f;

	na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
	if (!na)
		return -EINVAL;

	fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
	f = fdget(fd);
	if (!f.file)
		return 0;

	size = nla_total_size(sizeof(struct cgroupstats));

	rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
				size);
	if (rc < 0)
		goto err;

	na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
				sizeof(struct cgroupstats));
	if (na == NULL) {
		rc = -EMSGSIZE;
		goto err;
	}

	stats = nla_data(na);
	memset(stats, 0, sizeof(*stats));

	rc = cgroupstats_build(stats, f.file->f_dentry);
	if (rc < 0) {
		nlmsg_free(rep_skb);
		goto err;
	}

	rc = send_reply(rep_skb, info);

err:
	fdput(f);
	return rc;
}

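/*
 * Handle TASKSTATS_CMD_ATTR_REGISTER_CPUMASK: parse the cpumask and
 * register the sender as a listener for exit data on those cpus.
 * cmd_attr_deregister_cpumask() below is the inverse operation.
 */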
static int cmd_attr_register_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, REGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

static int cmd_attr_deregister_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, DEREGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

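/*
 * Attribute space needed for one pid/tgid + taskstats reply: the u32
 * pid/tgid, the taskstats payload, the aggregate nest header and, where
 * required, the alignment padding attribute.
 */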
static size_t taskstats_packet_size(void)
{
	size_t size;

	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
#ifdef TASKSTATS_NEEDS_PADDING
	size += nla_total_size(0); /* Padding for alignment */
#endif
	return size;
}

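/*
 * Handle TASKSTATS_CMD_ATTR_PID: build a TASKSTATS_CMD_NEW reply carrying
 * the per-task stats for the requested pid.  cmd_attr_tgid() below is the
 * analogous per-thread-group variant.
 */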
static int cmd_attr_pid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 pid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
	if (!stats)
		goto err;

	rc = fill_stats_for_pid(pid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

static int cmd_attr_tgid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 tgid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
	if (!stats)
		goto err;

	rc = fill_stats_for_tgid(tgid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

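/*
 * TASKSTATS_CMD_GET handler: dispatch on whichever command attribute the
 * request carries (cpumask register/deregister, pid or tgid query).
 */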
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
		return cmd_attr_register_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
		return cmd_attr_deregister_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
		return cmd_attr_pid(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
		return cmd_attr_tgid(info);
	else
		return -EINVAL;
}

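/*
 * Lazily allocate the per-thread-group taskstats structure.  A racing
 * allocation is resolved under siglock and the loser is freed.  Returns
 * sig->stats, which may still be NULL if the allocation failed or the
 * thread group is empty.
 */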
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct taskstats *stats;

	if (sig->stats || thread_group_empty(tsk))
		goto ret;

	/* No problem if kmem_cache_zalloc() fails */
	stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

	spin_lock_irq(&tsk->sighand->siglock);
	if (!sig->stats) {
		sig->stats = stats;
		stats = NULL;
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (stats)
		kmem_cache_free(taskstats_cache, stats);
ret:
	return sig->stats;
}

/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
	int rc;
	struct listener_list *listeners;
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	int is_thread_group;

	if (!family_registered)
		return;

	/*
	 * Size includes space for nested attributes
	 */
	size = taskstats_packet_size();

	is_thread_group = !!taskstats_tgid_alloc(tsk);
	if (is_thread_group) {
		/* PID + STATS + TGID + STATS */
		size = 2 * size;
		/* fill the tsk->signal->stats structure */
		fill_tgid_exit(tsk);
	}

	listeners = __this_cpu_ptr(&listener_array);
	if (list_empty(&listeners->list))
		return;

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID,
			 task_pid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	fill_stats(&init_user_ns, &init_pid_ns, tsk, stats);

	/*
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	if (!is_thread_group || !group_dead)
		goto send;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID,
			 task_tgid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
	send_cpu_listeners(rep_skb, listeners);
	return;
err:
	nlmsg_free(rep_skb);
}

static struct genl_ops taskstats_ops = {
	.cmd		= TASKSTATS_CMD_GET,
	.doit		= taskstats_user_cmd,
	.policy		= taskstats_cmd_get_policy,
	.flags		= GENL_ADMIN_PERM,
};

static struct genl_ops cgroupstats_ops = {
	.cmd		= CGROUPSTATS_CMD_GET,
	.doit		= cgroupstats_user_cmd,
	.policy		= cgroupstats_cmd_get_policy,
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}

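/*
 * Register the taskstats generic netlink family and its operations.
 * Exit-time notifications are only generated once registration has
 * succeeded (see family_registered).
 */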
static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family(&family);
	if (rc)
		return rc;

	rc = genl_register_ops(&family, &taskstats_ops);
	if (rc < 0)
		goto err;

	rc = genl_register_ops(&family, &cgroupstats_ops);
	if (rc < 0)
		goto err_cgroup_ops;

	family_registered = 1;
	pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
	return 0;
err_cgroup_ops:
	genl_unregister_ops(&family, &taskstats_ops);
err:
	genl_unregister_family(&family);
	return rc;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);