From: Quentin Monnet <quentin.monnet@netronome.com>
Date: Mon, 30 Apr 2018 11:39:05 +0100
Subject: bpf: update bpf.h uapi header for tools
Patch-mainline: v4.18-rc1
Git-commit: a56497d3d5cf081e00fde944a1de129442e7b065
References: bsc#1109837

Bring fixes for eBPF helper documentation formatting to bpf.h under
tools/ as well.

Signed-off-by: Quentin Monnet <quentin.monnet@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 tools/include/uapi/linux/bpf.h |   58 ++++++++++++++++++++---------------------
 1 file changed, 29 insertions(+), 29 deletions(-)
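
Not part of this patch, but for context: a minimal sketch of how a tracing
program might call **bpf_get_stack**\ () as documented in the second hunk
below. The attach point, map layout, and libbpf-style headers here are
illustrative assumptions, not taken from this patch:

  #include <linux/bpf.h>
  #include <linux/ptrace.h>
  #include <bpf/bpf_helpers.h>

  #define MAX_DEPTH 127 /* must not exceed PERF_MAX_STACK_DEPTH */

  /* Per-CPU scratch buffer: the stack trace is too large for the
   * 512-byte BPF stack, so store it in a map value instead.
   */
  struct {
          __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
          __uint(max_entries, 1);
          __type(key, __u32);
          __type(value, __u64[MAX_DEPTH]);
  } stack_scratch SEC(".maps");

  /* Hypothetical attach point, for illustration only. */
  SEC("kprobe/do_sys_open")
  int trace_open(struct pt_regs *ctx)
  {
          __u32 zero = 0;
          __u64 *buf = bpf_map_lookup_elem(&stack_scratch, &zero);
          long len;

          if (!buf)
                  return 0;

          /* Collect a user-space stack; the return value is the number
           * of bytes written on success, or a negative error code.
           */
          len = bpf_get_stack(ctx, buf, MAX_DEPTH * sizeof(__u64),
                              BPF_F_USER_STACK);
          if (len < 0)
                  return 0;

          /* ... hand the trace to user space, e.g. via a perf buffer ... */
          return 0;
  }

  char _license[] SEC("license") = "GPL";

If user stacks come back truncated for deep runtimes such as Java, raise
kernel.perf_event_max_stack via sysctl, as the updated helper documentation
below notes.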

--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -828,12 +828,12 @@ union bpf_attr {
  *
  * 		Also, be aware that the newer helper
  * 		**bpf_perf_event_read_value**\ () is recommended over
- * 		**bpf_perf_event_read*\ () in general. The latter has some ABI
+ * 		**bpf_perf_event_read**\ () in general. The latter has some ABI
  * 		quirks where error and counter value are used as a return code
  * 		(which is wrong to do since ranges may overlap). This issue is
- * 		fixed with bpf_perf_event_read_value(), which at the same time
- * 		provides more features over the **bpf_perf_event_read**\ ()
- * 		interface. Please refer to the description of
+ * 		fixed with **bpf_perf_event_read_value**\ (), which at the same
+ * 		time provides more features over the **bpf_perf_event_read**\
+ * 		() interface. Please refer to the description of
  * 		**bpf_perf_event_read_value**\ () for details.
  * 	Return
  * 		The value of the perf event counter read from the map, or a
@@ -1770,33 +1770,33 @@ union bpf_attr {
  *
  * int bpf_get_stack(struct pt_regs *regs, void *buf, u32 size, u64 flags)
  * 	Description
- *		Return a user or a kernel stack in bpf program provided buffer.
- *		To achieve this, the helper needs *ctx*, which is a pointer
- *		to the context on which the tracing program is executed.
- *		To store the stacktrace, the bpf program provides *buf* with
- *		a nonnegative *size*.
- *
- *		The last argument, *flags*, holds the number of stack frames to
- *		skip (from 0 to 255), masked with
- *		**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
- *		the following flags:
- *
- *		**BPF_F_USER_STACK**
- *			Collect a user space stack instead of a kernel stack.
- *		**BPF_F_USER_BUILD_ID**
- *			Collect buildid+offset instead of ips for user stack,
- *			only valid if **BPF_F_USER_STACK** is also specified.
- *
- *		**bpf_get_stack**\ () can collect up to
- *		**PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
- *		to sufficient large buffer size. Note that
- *		this limit can be controlled with the **sysctl** program, and
- *		that it should be manually increased in order to profile long
- *		user stacks (such as stacks for Java programs). To do so, use:
+ * 		Return a user or a kernel stack in bpf program provided buffer.
+ * 		To achieve this, the helper needs *ctx*, which is a pointer
+ * 		to the context on which the tracing program is executed.
+ * 		To store the stacktrace, the bpf program provides *buf* with
+ * 		a nonnegative *size*.
  *
- *	::
+ * 		The last argument, *flags*, holds the number of stack frames to
+ * 		skip (from 0 to 255), masked with
+ * 		**BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
+ * 		the following flags:
+ *
+ * 		**BPF_F_USER_STACK**
+ * 			Collect a user space stack instead of a kernel stack.
+ * 		**BPF_F_USER_BUILD_ID**
+ * 			Collect buildid+offset instead of ips for user stack,
+ * 			only valid if **BPF_F_USER_STACK** is also specified.
+ *
+ * 		**bpf_get_stack**\ () can collect up to
+ * 		**PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
+ * 		to sufficient large buffer size. Note that
+ * 		this limit can be controlled with the **sysctl** program, and
+ * 		that it should be manually increased in order to profile long
+ * 		user stacks (such as stacks for Java programs). To do so, use:
  *
- *		# sysctl kernel.perf_event_max_stack=<new value>
+ * 		::
+ *
+ * 			# sysctl kernel.perf_event_max_stack=<new value>
  *
  * 	Return
  * 		a non-negative value equal to or less than size on success, or