From: Jason Gunthorpe <jgg@mellanox.com>
Date: Tue, 20 Mar 2018 14:19:51 -0600
Subject: RDMA: Change all uapi headers to use __aligned_u64 instead of __u64
Patch-mainline: v4.17-rc1
Git-commit: 26b9906612c3553189d7d1673ee116ffac474d53
References: bsc#1103992 FATE#326009

The new auditing standard for the subsystem will be to only use
__aligned_u64 in uapi headers, to try to prevent 32/64 compat bugs
from existing in the future.

Changing all existing usage will help ensure new developers copy the
right idea.

The before/after of this patch was checked with pahole on 32-bit and
64-bit compiles to confirm that it causes no change in structure
layout, so this patch is a NOP.
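
A minimal sketch of the bug class this guards against (the struct
names below are hypothetical, not taken from any of the headers
touched here):

	struct example_req {
		__u32 handle;
		__u64 addr;	/* offset 4 on i386, offset 8 on x86_64 */
	};

On i386 a __u64 only requires 4-byte alignment, so the struct above is
12 bytes with addr at offset 4; on x86_64 the compiler inserts 4 bytes
of padding and addr lands at offset 8, so 32-bit userspace and a
64-bit kernel disagree about the layout.  __aligned_u64 (a __u64
carrying __attribute__((aligned(8)))) forces the 64-bit layout on both
ABIs:

	struct example_req {
		__u32 handle;
		__u32 reserved;		/* explicit padding */
		__aligned_u64 addr;	/* offset 8 on both ABIs */
	};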

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 include/uapi/rdma/bnxt_re-abi.h    |   14 +--
 include/uapi/rdma/cxgb3-abi.h      |   12 +-
 include/uapi/rdma/cxgb4-abi.h      |   24 ++---
 include/uapi/rdma/hfi/hfi1_ioctl.h |   32 +++----
 include/uapi/rdma/hfi/hfi1_user.h  |    4 
 include/uapi/rdma/hns-abi.h        |   14 +--
 include/uapi/rdma/i40iw-abi.h      |   12 +-
 include/uapi/rdma/ib_user_cm.h     |   48 +++++------
 include/uapi/rdma/ib_user_mad.h    |    4 
 include/uapi/rdma/ib_user_verbs.h  |  158 ++++++++++++++++++-------------------
 include/uapi/rdma/mlx4-abi.h       |   24 ++---
 include/uapi/rdma/mlx5-abi.h       |   40 ++++-----
 include/uapi/rdma/mthca-abi.h      |   10 +-
 include/uapi/rdma/nes-abi.h        |    6 -
 include/uapi/rdma/ocrdma-abi.h     |   30 +++----
 include/uapi/rdma/qedr-abi.h       |   16 +--
 include/uapi/rdma/rdma_user_cm.h   |   34 +++----
 include/uapi/rdma/rdma_user_rxe.h  |   22 ++---
 include/uapi/rdma/vmw_pvrdma-abi.h |   48 +++++------
 19 files changed, 276 insertions(+), 276 deletions(-)

--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -64,8 +64,8 @@ struct bnxt_re_pd_resp {
 } __attribute__((packed, aligned(4)));
 
 struct bnxt_re_cq_req {
-	__u64 cq_va;
-	__u64 cq_handle;
+	__aligned_u64 cq_va;
+	__aligned_u64 cq_handle;
 };
 
 struct bnxt_re_cq_resp {
@@ -76,9 +76,9 @@ struct bnxt_re_cq_resp {
 };
 
 struct bnxt_re_qp_req {
-	__u64 qpsva;
-	__u64 qprva;
-	__u64 qp_handle;
+	__aligned_u64 qpsva;
+	__aligned_u64 qprva;
+	__aligned_u64 qp_handle;
 };
 
 struct bnxt_re_qp_resp {
@@ -87,8 +87,8 @@ struct bnxt_re_qp_resp {
 };
 
 struct bnxt_re_srq_req {
-	__u64 srqva;
-	__u64 srq_handle;
+	__aligned_u64 srqva;
+	__aligned_u64 srq_handle;
 };
 
 struct bnxt_re_srq_resp {
--- a/include/uapi/rdma/cxgb3-abi.h
+++ b/include/uapi/rdma/cxgb3-abi.h
@@ -40,21 +40,21 @@
  * Make sure that all structs defined in this file remain laid out so
  * that they pack the same way on 32-bit and 64-bit architectures (to
  * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
+ * In particular do not use pointer types -- pass pointers in __aligned_u64
  * instead.
  */
 struct iwch_create_cq_req {
-	__u64 user_rptr_addr;
+	__aligned_u64 user_rptr_addr;
 };
 
 struct iwch_create_cq_resp_v0 {
-	__u64 key;
+	__aligned_u64 key;
 	__u32 cqid;
 	__u32 size_log2;
 };
 
 struct iwch_create_cq_resp {
-	__u64 key;
+	__aligned_u64 key;
 	__u32 cqid;
 	__u32 size_log2;
 	__u32 memsize;
@@ -62,8 +62,8 @@ struct iwch_create_cq_resp {
 };
 
 struct iwch_create_qp_resp {
-	__u64 key;
-	__u64 db_key;
+	__aligned_u64 key;
+	__aligned_u64 db_key;
 	__u32 qpid;
 	__u32 size_log2;
 	__u32 sq_size_log2;
--- a/include/uapi/rdma/cxgb4-abi.h
+++ b/include/uapi/rdma/cxgb4-abi.h
@@ -40,13 +40,13 @@
  * Make sure that all structs defined in this file remain laid out so
  * that they pack the same way on 32-bit and 64-bit architectures (to
  * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
+ * In particular do not use pointer types -- pass pointers in __aligned_u64
  * instead.
  */
 struct c4iw_create_cq_resp {
-	__u64 key;
-	__u64 gts_key;
-	__u64 memsize;
+	__aligned_u64 key;
+	__aligned_u64 gts_key;
+	__aligned_u64 memsize;
 	__u32 cqid;
 	__u32 size;
 	__u32 qid_mask;
@@ -58,13 +58,13 @@ enum {
 };
 
 struct c4iw_create_qp_resp {
-	__u64 ma_sync_key;
-	__u64 sq_key;
-	__u64 rq_key;
-	__u64 sq_db_gts_key;
-	__u64 rq_db_gts_key;
-	__u64 sq_memsize;
-	__u64 rq_memsize;
+	__aligned_u64 ma_sync_key;
+	__aligned_u64 sq_key;
+	__aligned_u64 rq_key;
+	__aligned_u64 sq_db_gts_key;
+	__aligned_u64 rq_db_gts_key;
+	__aligned_u64 sq_memsize;
+	__aligned_u64 rq_memsize;
 	__u32 sqid;
 	__u32 rqid;
 	__u32 sq_size;
@@ -74,7 +74,7 @@ struct c4iw_create_qp_resp {
 };
 
 struct c4iw_alloc_ucontext_resp {
-	__u64 status_page_key;
+	__aligned_u64 status_page_key;
 	__u32 status_page_size;
 	__u32 reserved; /* explicit padding (optional for i386) */
 };
--- a/include/uapi/rdma/hfi/hfi1_ioctl.h
+++ b/include/uapi/rdma/hfi/hfi1_ioctl.h
@@ -78,7 +78,7 @@ struct hfi1_user_info {
 };
 
 struct hfi1_ctxt_info {
-	__u64 runtime_flags;    /* chip/drv runtime flags (HFI1_CAP_*) */
+	__aligned_u64 runtime_flags;    /* chip/drv runtime flags (HFI1_CAP_*) */
 	__u32 rcvegr_size;      /* size of each eager buffer */
 	__u16 num_active;       /* number of active units */
 	__u16 unit;             /* unit (chip) assigned to caller */
@@ -97,9 +97,9 @@ struct hfi1_ctxt_info {
 
 struct hfi1_tid_info {
 	/* virtual address of first page in transfer */
-	__u64 vaddr;
+	__aligned_u64 vaddr;
 	/* pointer to tid array. this array is big enough */
-	__u64 tidlist;
+	__aligned_u64 tidlist;
 	/* number of tids programmed by this request */
 	__u32 tidcnt;
 	/* length of transfer buffer programmed by this request */
@@ -130,23 +130,23 @@ struct hfi1_base_info {
 	 */
 	__u32 bthqp;
 	/* PIO credit return address, */
-	__u64 sc_credits_addr;
+	__aligned_u64 sc_credits_addr;
 	/*
 	 * Base address of write-only pio buffers for this process.
 	 * Each buffer has sendpio_credits*64 bytes.
 	 */
-	__u64 pio_bufbase_sop;
+	__aligned_u64 pio_bufbase_sop;
 	/*
 	 * Base address of write-only pio buffers for this process.
 	 * Each buffer has sendpio_credits*64 bytes.
 	 */
-	__u64 pio_bufbase;
+	__aligned_u64 pio_bufbase;
 	/* address where receive buffer queue is mapped into */
-	__u64 rcvhdr_bufbase;
+	__aligned_u64 rcvhdr_bufbase;
 	/* base address of Eager receive buffers. */
-	__u64 rcvegr_bufbase;
+	__aligned_u64 rcvegr_bufbase;
 	/* base address of SDMA completion ring */
-	__u64 sdma_comp_bufbase;
+	__aligned_u64 sdma_comp_bufbase;
 	/*
 	 * User register base for init code, not to be used directly by
 	 * protocol or applications.  Always maps real chip register space.
@@ -154,20 +154,20 @@ struct hfi1_base_info {
 	 * ur_rcvhdrhead, ur_rcvhdrtail, ur_rcvegrhead, ur_rcvegrtail,
 	 * ur_rcvtidflow
 	 */
-	__u64 user_regbase;
+	__aligned_u64 user_regbase;
 	/* notification events */
-	__u64 events_bufbase;
+	__aligned_u64 events_bufbase;
 	/* status page */
-	__u64 status_bufbase;
+	__aligned_u64 status_bufbase;
 	/* rcvhdrtail update */
-	__u64 rcvhdrtail_base;
+	__aligned_u64 rcvhdrtail_base;
 	/*
 	 * shared memory pages for subctxts if ctxt is shared; these cover
 	 * all the processes in the group sharing a single context.
 	 * all have enough space for the num_subcontexts value on this job.
 	 */
-	__u64 subctxt_uregbase;
-	__u64 subctxt_rcvegrbuf;
-	__u64 subctxt_rcvhdrbuf;
+	__aligned_u64 subctxt_uregbase;
+	__aligned_u64 subctxt_rcvegrbuf;
+	__aligned_u64 subctxt_rcvhdrbuf;
 };
 #endif /* _LINIUX__HFI1_IOCTL_H */
--- a/include/uapi/rdma/hfi/hfi1_user.h
+++ b/include/uapi/rdma/hfi/hfi1_user.h
@@ -176,8 +176,8 @@ struct hfi1_sdma_comp_entry {
  * Device status and notifications from driver to user-space.
  */
 struct hfi1_status {
-	__u64 dev;      /* device/hw status bits */
-	__u64 port;     /* port state and status bits */
+	__aligned_u64 dev;      /* device/hw status bits */
+	__aligned_u64 port;     /* port state and status bits */
 	char freezemsg[0];
 };
 
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -36,18 +36,18 @@
 #include <linux/types.h>
 
 struct hns_roce_ib_create_cq {
-	__u64   buf_addr;
-	__u64	db_addr;
+	__aligned_u64 buf_addr;
+	__aligned_u64 db_addr;
 };
 
 struct hns_roce_ib_create_cq_resp {
-	__u64	cqn; /* Only 32 bits used, 64 for compat */
-	__u64	cap_flags;
+	__aligned_u64 cqn; /* Only 32 bits used, 64 for compat */
+	__aligned_u64 cap_flags;
 };
 
 struct hns_roce_ib_create_qp {
-	__u64	buf_addr;
-	__u64   db_addr;
+	__aligned_u64 buf_addr;
+	__aligned_u64 db_addr;
 	__u8    log_sq_bb_count;
 	__u8    log_sq_stride;
 	__u8    sq_no_prefetch;
@@ -55,7 +55,7 @@ struct hns_roce_ib_create_qp {
 };
 
 struct hns_roce_ib_create_qp_resp {
-	__u64	cap_flags;
+	__aligned_u64 cap_flags;
 };
 
 struct hns_roce_ib_alloc_ucontext_resp {
--- a/include/uapi/rdma/i40iw-abi.h
+++ b/include/uapi/rdma/i40iw-abi.h
@@ -61,17 +61,17 @@ struct i40iw_alloc_pd_resp {
 };
 
 struct i40iw_create_cq_req {
-	__u64 user_cq_buffer;
-	__u64 user_shadow_area;
+	__aligned_u64 user_cq_buffer;
+	__aligned_u64 user_shadow_area;
 };
 
 struct i40iw_create_qp_req {
-	__u64 user_wqe_buffers;
-	__u64 user_compl_ctx;
+	__aligned_u64 user_wqe_buffers;
+	__aligned_u64 user_compl_ctx;
 
 	/* UDA QP PHB */
-	__u64 user_sq_phb;	/* place for VA of the sq phb buff */
-	__u64 user_rq_phb;	/* place for VA of the rq phb buff */
+	__aligned_u64 user_sq_phb;	/* place for VA of the sq phb buff */
+	__aligned_u64 user_rq_phb;	/* place for VA of the rq phb buff */
 };
 
 enum i40iw_memreg_type {
--- a/include/uapi/rdma/ib_user_cm.h
+++ b/include/uapi/rdma/ib_user_cm.h
@@ -72,8 +72,8 @@ struct ib_ucm_cmd_hdr {
 };
 
 struct ib_ucm_create_id {
-	__u64 uid;
-	__u64 response;
+	__aligned_u64 uid;
+	__aligned_u64 response;
 };
 
 struct ib_ucm_create_id_resp {
@@ -81,7 +81,7 @@ struct ib_ucm_create_id_resp {
 };
 
 struct ib_ucm_destroy_id {
-	__u64 response;
+	__aligned_u64 response;
 	__u32 id;
 	__u32 reserved;
 };
@@ -91,7 +91,7 @@ struct ib_ucm_destroy_id_resp {
 };
 
 struct ib_ucm_attr_id {
-	__u64 response;
+	__aligned_u64 response;
 	__u32 id;
 	__u32 reserved;
 };
@@ -104,7 +104,7 @@ struct ib_ucm_attr_id_resp {
 };
 
 struct ib_ucm_init_qp_attr {
-	__u64 response;
+	__aligned_u64 response;
 	__u32 id;
 	__u32 qp_state;
 };
@@ -122,7 +122,7 @@ struct ib_ucm_notify {
 };
 
 struct ib_ucm_private_data {
-	__u64 data;
+	__aligned_u64 data;
 	__u32 id;
 	__u8  len;
 	__u8  reserved[3];
@@ -134,9 +134,9 @@ struct ib_ucm_req {
 	__u32 qp_type;
 	__u32 psn;
 	__be64 sid;
-	__u64 data;
-	__u64 primary_path;
-	__u64 alternate_path;
+	__aligned_u64 data;
+	__aligned_u64 primary_path;
+	__aligned_u64 alternate_path;
 	__u8  len;
 	__u8  peer_to_peer;
 	__u8  responder_resources;
@@ -152,8 +152,8 @@ struct ib_ucm_req {
 };
 
 struct ib_ucm_rep {
-	__u64 uid;
-	__u64 data;
+	__aligned_u64 uid;
+	__aligned_u64 data;
 	__u32 id;
 	__u32 qpn;
 	__u32 psn;
@@ -171,15 +171,15 @@ struct ib_ucm_rep {
 struct ib_ucm_info {
 	__u32 id;
 	__u32 status;
-	__u64 info;
-	__u64 data;
+	__aligned_u64 info;
+	__aligned_u64 data;
 	__u8  info_len;
 	__u8  data_len;
 	__u8  reserved[6];
 };
 
 struct ib_ucm_mra {
-	__u64 data;
+	__aligned_u64 data;
 	__u32 id;
 	__u8  len;
 	__u8  timeout;
@@ -187,8 +187,8 @@ struct ib_ucm_mra {
 };
 
 struct ib_ucm_lap {
-	__u64 path;
-	__u64 data;
+	__aligned_u64 path;
+	__aligned_u64 data;
 	__u32 id;
 	__u8  len;
 	__u8  reserved[3];
@@ -198,8 +198,8 @@ struct ib_ucm_sidr_req {
 	__u32 id;
 	__u32 timeout;
 	__be64 sid;
-	__u64 data;
-	__u64 path;
+	__aligned_u64 data;
+	__aligned_u64 path;
 	__u16 reserved_pkey;
 	__u8  len;
 	__u8  max_cm_retries;
@@ -211,8 +211,8 @@ struct ib_ucm_sidr_rep {
 	__u32 qpn;
 	__u32 qkey;
 	__u32 status;
-	__u64 info;
-	__u64 data;
+	__aligned_u64 info;
+	__aligned_u64 data;
 	__u8  info_len;
 	__u8  data_len;
 	__u8  reserved[6];
@@ -221,9 +221,9 @@ struct ib_ucm_sidr_rep {
  * event notification ABI structures.
  */
 struct ib_ucm_event_get {
-	__u64 response;
-	__u64 data;
-	__u64 info;
+	__aligned_u64 response;
+	__aligned_u64 data;
+	__aligned_u64 info;
 	__u8  data_len;
 	__u8  info_len;
 	__u8  reserved[6];
@@ -302,7 +302,7 @@ struct ib_ucm_sidr_rep_event_resp {
 #define IB_UCM_PRES_ALTERNATE 0x08
 
 struct ib_ucm_event_resp {
-	__u64 uid;
+	__aligned_u64 uid;
 	__u32 id;
 	__u32 event;
 	__u32 present;
--- a/include/uapi/rdma/ib_user_mad.h
+++ b/include/uapi/rdma/ib_user_mad.h
@@ -142,7 +142,7 @@ struct ib_user_mad_hdr {
  */
 struct ib_user_mad {
 	struct ib_user_mad_hdr hdr;
-	__u64	data[0];
+	__aligned_u64	data[0];
 };
 
 /*
@@ -224,7 +224,7 @@ struct ib_user_mad_reg_req2 {
 	__u8	mgmt_class_version;
 	__u16   res;
 	__u32   flags;
-	__u64   method_mask[2];
+	__aligned_u64 method_mask[2];
 	__u32   oui;
 	__u8	rmpp_version;
 	__u8	reserved[3];
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -116,13 +116,13 @@ enum {
  */
 
 struct ib_uverbs_async_event_desc {
-	__u64 element;
+	__aligned_u64 element;
 	__u32 event_type;	/* enum ib_event_type */
 	__u32 reserved;
 };
 
 struct ib_uverbs_comp_event_desc {
-	__u64 cq_handle;
+	__aligned_u64 cq_handle;
 };
 
 struct ib_uverbs_cq_moderation_caps {
@@ -149,15 +149,15 @@ struct ib_uverbs_cmd_hdr {
 };
 
 struct ib_uverbs_ex_cmd_hdr {
-	__u64 response;
+	__aligned_u64 response;
 	__u16 provider_in_words;
 	__u16 provider_out_words;
 	__u32 cmd_hdr_reserved;
 };
 
 struct ib_uverbs_get_context {
-	__u64 response;
-	__u64 driver_data[0];
+	__aligned_u64 response;
+	__aligned_u64 driver_data[0];
 };
 
 struct ib_uverbs_get_context_resp {
@@ -166,16 +166,16 @@ struct ib_uverbs_get_context_resp {
 };
 
 struct ib_uverbs_query_device {
-	__u64 response;
-	__u64 driver_data[0];
+	__aligned_u64 response;
+	__aligned_u64 driver_data[0];
 };
 
 struct ib_uverbs_query_device_resp {
-	__u64 fw_ver;
+	__aligned_u64 fw_ver;
 	__be64 node_guid;
 	__be64 sys_image_guid;
-	__u64 max_mr_size;
-	__u64 page_size_cap;
+	__aligned_u64 max_mr_size;
+	__aligned_u64 page_size_cap;
 	__u32 vendor_id;
 	__u32 vendor_part_id;
 	__u32 hw_ver;
@@ -220,7 +220,7 @@ struct ib_uverbs_ex_query_device {
 };
 
 struct ib_uverbs_odp_caps {
-	__u64 general_caps;
+	__aligned_u64 general_caps;
 	struct {
 		__u32 rc_odp_caps;
 		__u32 uc_odp_caps;
@@ -259,9 +259,9 @@ struct ib_uverbs_ex_query_device_resp {
 	__u32 comp_mask;
 	__u32 response_length;
 	struct ib_uverbs_odp_caps odp_caps;
-	__u64 timestamp_mask;
-	__u64 hca_core_clock; /* in KHZ */
-	__u64 device_cap_flags_ex;
+	__aligned_u64 timestamp_mask;
+	__aligned_u64 hca_core_clock; /* in KHZ */
+	__aligned_u64 device_cap_flags_ex;
 	struct ib_uverbs_rss_caps rss_caps;
 	__u32  max_wq_type_rq;
 	__u32 raw_packet_caps;
@@ -270,10 +270,10 @@ struct ib_uverbs_ex_query_device_resp {
 };
 
 struct ib_uverbs_query_port {
-	__u64 response;
+	__aligned_u64 response;
 	__u8  port_num;
 	__u8  reserved[7];
-	__u64 driver_data[0];
+	__aligned_u64 driver_data[0];
 };
 
 struct ib_uverbs_query_port_resp {
@@ -301,8 +301,8 @@ struct ib_uverbs_query_port_resp {
 };
 
 struct ib_uverbs_alloc_pd {
-	__u64 response;
-	__u64 driver_data[0];
+	__aligned_u64 response;
+	__aligned_u64 driver_data[0];
 };
 
 struct ib_uverbs_alloc_pd_resp {
@@ -314,10 +314,10 @@ struct ib_uverbs_dealloc_pd {
 };
 
 struct ib_uverbs_open_xrcd {
-	__u64 response;
+	__aligned_u64 response;
 	__u32 fd;
 	__u32 oflags;
-	__u64 driver_data[0];
+	__aligned_u64 driver_data[0];
 };
 
 struct ib_uverbs_open_xrcd_resp {
@@ -329,13 +329,13 @@ struct ib_uverbs_close_xrcd {
 };
 
 struct ib_uverbs_reg_mr {
-	__u64 response;
-	__u64 start;
-	__u64 length;
-	__u64 hca_va;
+	__aligned_u64 response;
+	__aligned_u64 start;
+	__aligned_u64 length;
+	__aligned_u64 hca_va;
 	__u32 pd_handle;
 	__u32 access_flags;
-	__u64 driver_data[0];
+	__aligned_u64 driver_data[0];
 };
 
 struct ib_uverbs_reg_mr_resp {
@@ -345,12 +345,12 @@ struct ib_uverbs_reg_mr_resp {
 };
 
 struct ib_uverbs_rereg_mr {
-	__u64 response;
+	__aligned_u64 response;
 	__u32 mr_handle;
 	__u32 flags;
-	__u64 start;
-	__u64 length;
-	__u64 hca_va;
+	__aligned_u64 start;
+	__aligned_u64 length;
+	__aligned_u64 hca_va;
 	__u32 pd_handle;
 	__u32 access_flags;
 };
@@ -365,7 +365,7 @@ struct ib_uverbs_dereg_mr {
 };
 
 struct ib_uverbs_alloc_mw {
-	__u64 response;
+	__aligned_u64 response;
 	__u32 pd_handle;
 	__u8  mw_type;
 	__u8  reserved[3];
@@ -381,7 +381,7 @@ struct ib_uverbs_dealloc_mw {
 };
 
 struct ib_uverbs_create_comp_channel {
-	__u64 response;
+	__aligned_u64 response;
 };
 
 struct ib_uverbs_create_comp_channel_resp {
@@ -389,13 +389,13 @@ struct ib_uverbs_create_comp_channel_res
 };
 
 struct ib_uverbs_create_cq {
-	__u64 response;
-	__u64 user_handle;
+	__aligned_u64 response;
+	__aligned_u64 user_handle;
 	__u32 cqe;
 	__u32 comp_vector;
 	__s32 comp_channel;
 	__u32 reserved;
-	__u64 driver_data[0];
+	__aligned_u64 driver_data[0];
 };
 
 enum ib_uverbs_ex_create_cq_flags {
@@ -404,7 +404,7 @@ enum ib_uverbs_ex_create_cq_flags {
 };
 
 struct ib_uverbs_ex_create_cq {
-	__u64 user_handle;
+	__aligned_u64 user_handle;
 	__u32 cqe;
 	__u32 comp_vector;
 	__s32 comp_channel;
@@ -425,26 +425,26 @@ struct ib_uverbs_ex_create_cq_resp {
 };
 
 struct ib_uverbs_resize_cq {
-	__u64 response;
+	__aligned_u64 response;
 	__u32 cq_handle;
 	__u32 cqe;
-	__u64 driver_data[0];
+	__aligned_u64 driver_data[0];
 };
 
 struct ib_uverbs_resize_cq_resp {
 	__u32 cqe;
 	__u32 reserved;
-	__u64 driver_data[0];
+	__aligned_u64 driver_data[0];
 };
 
 struct ib_uverbs_poll_cq {
-	__u64 response;
+	__aligned_u64 response;
 	__u32 cq_handle;
 	__u32 ne;
 };
 
 struct ib_uverbs_wc {
-	__u64 wr_id;
+	__aligned_u64 wr_id;
 	__u32 status;
 	__u32 opcode;
 	__u32 vendor_err;
@@ -476,7 +476,7 @@ struct ib_uverbs_req_notify_cq {
 };
 
 struct ib_uverbs_destroy_cq {
-	__u64 response;
+	__aligned_u64 response;
 	__u32 cq_handle;
 	__u32 reserved;
 };
@@ -545,8 +545,8 @@ struct ib_uverbs_qp_attr {
 };
 
 struct ib_uverbs_create_qp {
-	__u64 response;
-	__u64 user_handle;
+	__aligned_u64 response;
+	__aligned_u64 user_handle;
 	__u32 pd_handle;
 	__u32 send_cq_handle;
 	__u32 recv_cq_handle;
@@ -560,7 +560,7 @@ struct ib_uverbs_create_qp {
 	__u8  qp_type;
 	__u8  is_srq;
 	__u8  reserved;
-	__u64 driver_data[0];
+	__aligned_u64 driver_data[0];
 };
 
 enum ib_uverbs_create_qp_mask {
@@ -586,7 +586,7 @@ enum {
 };
 
 struct ib_uverbs_ex_create_qp {
-	__u64 user_handle;
+	__aligned_u64 user_handle;
 	__u32 pd_handle;
 	__u32 send_cq_handle;
 	__u32 recv_cq_handle;
@@ -607,13 +607,13 @@ struct ib_uverbs_ex_create_qp {
 };
 
 struct ib_uverbs_open_qp {
-	__u64 response;
-	__u64 user_handle;
+	__aligned_u64 response;
+	__aligned_u64 user_handle;
 	__u32 pd_handle;
 	__u32 qpn;
 	__u8  qp_type;
 	__u8  reserved[7];
-	__u64 driver_data[0];
+	__aligned_u64 driver_data[0];
 };
 
 /* also used for open response */
@@ -654,10 +654,10 @@ struct ib_uverbs_qp_dest {
 };
 
 struct ib_uverbs_query_qp {
-	__u64 response;
+	__aligned_u64 response;
 	__u32 qp_handle;
 	__u32 attr_mask;
-	__u64 driver_data[0];
+	__aligned_u64 driver_data[0];
 };
 
 struct ib_uverbs_query_qp_resp {
@@ -691,7 +691,7 @@ struct ib_uverbs_query_qp_resp {
 	__u8  alt_timeout;
 	__u8  sq_sig_all;
 	__u8  reserved[5];
-	__u64 driver_data[0];
+	__aligned_u64 driver_data[0];
 };
 
 struct ib_uverbs_modify_qp {
@@ -721,7 +721,7 @@ struct ib_uverbs_modify_qp {
 	__u8  alt_port_num;
 	__u8  alt_timeout;
 	__u8  reserved[2];
-	__u64 driver_data[0];
+	__aligned_u64 driver_data[0];
 };
 
 struct ib_uverbs_ex_modify_qp {
@@ -739,7 +739,7 @@ struct ib_uverbs_ex_modify_qp_resp {
 };
 
 struct ib_uverbs_destroy_qp {
-	__u64 response;
+	__aligned_u64 response;
 	__u32 qp_handle;
 	__u32 reserved;
 };
@@ -755,13 +755,13 @@ struct ib_uverbs_destroy_qp_resp {
  * document the ABI.
  */
 struct ib_uverbs_sge {
-	__u64 addr;
+	__aligned_u64 addr;
 	__u32 length;
 	__u32 lkey;
 };
 
 struct ib_uverbs_send_wr {
-	__u64 wr_id;
+	__aligned_u64 wr_id;
 	__u32 num_sge;
 	__u32 opcode;
 	__u32 send_flags;
@@ -771,14 +771,14 @@ struct ib_uverbs_send_wr {
 	} ex;
 	union {
 		struct {
-			__u64 remote_addr;
+			__aligned_u64 remote_addr;
 			__u32 rkey;
 			__u32 reserved;
 		} rdma;
 		struct {
-			__u64 remote_addr;
-			__u64 compare_add;
-			__u64 swap;
+			__aligned_u64 remote_addr;
+			__aligned_u64 compare_add;
+			__aligned_u64 swap;
 			__u32 rkey;
 			__u32 reserved;
 		} atomic;
@@ -792,7 +792,7 @@ struct ib_uverbs_send_wr {
 };
 
 struct ib_uverbs_post_send {
-	__u64 response;
+	__aligned_u64 response;
 	__u32 qp_handle;
 	__u32 wr_count;
 	__u32 sge_count;
@@ -805,13 +805,13 @@ struct ib_uverbs_post_send_resp {
 };
 
 struct ib_uverbs_recv_wr {
-	__u64 wr_id;
+	__aligned_u64 wr_id;
 	__u32 num_sge;
 	__u32 reserved;
 };
 
 struct ib_uverbs_post_recv {
-	__u64 response;
+	__aligned_u64 response;
 	__u32 qp_handle;
 	__u32 wr_count;
 	__u32 sge_count;
@@ -824,7 +824,7 @@ struct ib_uverbs_post_recv_resp {
 };
 
 struct ib_uverbs_post_srq_recv {
-	__u64 response;
+	__aligned_u64 response;
 	__u32 srq_handle;
 	__u32 wr_count;
 	__u32 sge_count;
@@ -837,8 +837,8 @@ struct ib_uverbs_post_srq_recv_resp {
 };
 
 struct ib_uverbs_create_ah {
-	__u64 response;
-	__u64 user_handle;
+	__aligned_u64 response;
+	__aligned_u64 user_handle;
 	__u32 pd_handle;
 	__u32 reserved;
 	struct ib_uverbs_ah_attr attr;
@@ -857,7 +857,7 @@ struct ib_uverbs_attach_mcast {
 	__u32 qp_handle;
 	__u16 mlid;
 	__u16 reserved;
-	__u64 driver_data[0];
+	__aligned_u64 driver_data[0];
 };
 
 struct ib_uverbs_detach_mcast {
@@ -865,7 +865,7 @@ struct ib_uverbs_detach_mcast {
 	__u32 qp_handle;
 	__u16 mlid;
 	__u16 reserved;
-	__u64 driver_data[0];
+	__aligned_u64 driver_data[0];
 };
 
 struct ib_uverbs_flow_spec_hdr {
@@ -873,7 +873,7 @@ struct ib_uverbs_flow_spec_hdr {
 	__u16 size;
 	__u16 reserved;
 	/* followed by flow_spec */
-	__u64 flow_spec_data[0];
+	__aligned_u64 flow_spec_data[0];
 };
 
 struct ib_uverbs_flow_eth_filter {
@@ -1032,18 +1032,18 @@ struct ib_uverbs_destroy_flow  {
 };
 
 struct ib_uverbs_create_srq {
-	__u64 response;
-	__u64 user_handle;
+	__aligned_u64 response;
+	__aligned_u64 user_handle;
 	__u32 pd_handle;
 	__u32 max_wr;
 	__u32 max_sge;
 	__u32 srq_limit;
-	__u64 driver_data[0];
+	__aligned_u64 driver_data[0];
 };
 
 struct ib_uverbs_create_xsrq {
-	__u64 response;
-	__u64 user_handle;
+	__aligned_u64 response;
+	__aligned_u64 user_handle;
 	__u32 srq_type;
 	__u32 pd_handle;
 	__u32 max_wr;
@@ -1052,7 +1052,7 @@ struct ib_uverbs_create_xsrq {
 	__u32 max_num_tags;
 	__u32 xrcd_handle;
 	__u32 cq_handle;
-	__u64 driver_data[0];
+	__aligned_u64 driver_data[0];
 };
 
 struct ib_uverbs_create_srq_resp {
@@ -1067,14 +1067,14 @@ struct ib_uverbs_modify_srq {
 	__u32 attr_mask;
 	__u32 max_wr;
 	__u32 srq_limit;
-	__u64 driver_data[0];
+	__aligned_u64 driver_data[0];
 };
 
 struct ib_uverbs_query_srq {
-	__u64 response;
+	__aligned_u64 response;
 	__u32 srq_handle;
 	__u32 reserved;
-	__u64 driver_data[0];
+	__aligned_u64 driver_data[0];
 };
 
 struct ib_uverbs_query_srq_resp {
@@ -1085,7 +1085,7 @@ struct ib_uverbs_query_srq_resp {
 };
 
 struct ib_uverbs_destroy_srq {
-	__u64 response;
+	__aligned_u64 response;
 	__u32 srq_handle;
 	__u32 reserved;
 };
@@ -1097,7 +1097,7 @@ struct ib_uverbs_destroy_srq_resp {
 struct ib_uverbs_ex_create_wq  {
 	__u32 comp_mask;
 	__u32 wq_type;
-	__u64 user_handle;
+	__aligned_u64 user_handle;
 	__u32 pd_handle;
 	__u32 cq_handle;
 	__u32 max_wr;
--- a/include/uapi/rdma/mlx4-abi.h
+++ b/include/uapi/rdma/mlx4-abi.h
@@ -76,8 +76,8 @@ struct mlx4_ib_alloc_pd_resp {
 };
 
 struct mlx4_ib_create_cq {
-	__u64	buf_addr;
-	__u64	db_addr;
+	__aligned_u64 buf_addr;
+	__aligned_u64 db_addr;
 };
 
 struct mlx4_ib_create_cq_resp {
@@ -86,12 +86,12 @@ struct mlx4_ib_create_cq_resp {
 };
 
 struct mlx4_ib_resize_cq {
-	__u64	buf_addr;
+	__aligned_u64 buf_addr;
 };
 
 struct mlx4_ib_create_srq {
-	__u64	buf_addr;
-	__u64	db_addr;
+	__aligned_u64 buf_addr;
+	__aligned_u64 db_addr;
 };
 
 struct mlx4_ib_create_srq_resp {
@@ -100,7 +100,7 @@ struct mlx4_ib_create_srq_resp {
 };
 
 struct mlx4_ib_create_qp_rss {
-	__u64   rx_hash_fields_mask; /* Use  enum mlx4_ib_rx_hash_fields */
+	__aligned_u64 rx_hash_fields_mask; /* Use  enum mlx4_ib_rx_hash_fields */
 	__u8    rx_hash_function; /* Use enum mlx4_ib_rx_hash_function_flags */
 	__u8    reserved[7];
 	__u8    rx_hash_key[40];
@@ -109,8 +109,8 @@ struct mlx4_ib_create_qp_rss {
 };
 
 struct mlx4_ib_create_qp {
-	__u64	buf_addr;
-	__u64	db_addr;
+	__aligned_u64 buf_addr;
+	__aligned_u64 db_addr;
 	__u8	log_sq_bb_count;
 	__u8	log_sq_stride;
 	__u8	sq_no_prefetch;
@@ -119,8 +119,8 @@ struct mlx4_ib_create_qp {
 };
 
 struct mlx4_ib_create_wq {
-	__u64	buf_addr;
-	__u64	db_addr;
+	__aligned_u64 buf_addr;
+	__aligned_u64 db_addr;
 	__u8	log_range_size;
 	__u8	reserved[3];
 	__u32   comp_mask;
@@ -160,7 +160,7 @@ enum mlx4_ib_rx_hash_fields {
 };
 
 struct mlx4_ib_rss_caps {
-	__u64 rx_hash_fields_mask; /* enum mlx4_ib_rx_hash_fields */
+	__aligned_u64 rx_hash_fields_mask; /* enum mlx4_ib_rx_hash_fields */
 	__u8 rx_hash_function; /* enum mlx4_ib_rx_hash_function_flags */
 	__u8 reserved[7];
 };
@@ -180,7 +180,7 @@ struct mlx4_ib_tso_caps {
 struct mlx4_uverbs_ex_query_device_resp {
 	__u32			comp_mask;
 	__u32			response_length;
-	__u64			hca_core_clock_offset;
+	__aligned_u64		hca_core_clock_offset;
 	__u32			max_inl_recv_sz;
 	__u32			reserved;
 	struct mlx4_ib_rss_caps	rss_caps;
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -83,7 +83,7 @@ struct mlx5_ib_alloc_ucontext_req_v2 {
 	__u8	reserved0;
 	__u16	reserved1;
 	__u32	reserved2;
-	__u64	lib_caps;
+	__aligned_u64 lib_caps;
 };
 
 enum mlx5_ib_alloc_ucontext_resp_mask {
@@ -124,7 +124,7 @@ struct mlx5_ib_alloc_ucontext_resp {
 	__u8	cmds_supp_uhw;
 	__u8	eth_min_inline;
 	__u8	clock_info_versions;
-	__u64	hca_core_clock_offset;
+	__aligned_u64 hca_core_clock_offset;
 	__u32	log_uar_size;
 	__u32	num_uars_per_page;
 	__u32	num_dyn_bfregs;
@@ -146,7 +146,7 @@ struct mlx5_ib_tso_caps {
 };
 
 struct mlx5_ib_rss_caps {
-	__u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
+	__aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
 	__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
 	__u8 reserved[7];
 };
@@ -247,8 +247,8 @@ enum mlx5_ib_create_cq_flags {
 };
 
 struct mlx5_ib_create_cq {
-	__u64	buf_addr;
-	__u64	db_addr;
+	__aligned_u64 buf_addr;
+	__aligned_u64 db_addr;
 	__u32	cqe_size;
 	__u8    cqe_comp_en;
 	__u8    cqe_comp_res_format;
@@ -261,15 +261,15 @@ struct mlx5_ib_create_cq_resp {
 };
 
 struct mlx5_ib_resize_cq {
-	__u64	buf_addr;
+	__aligned_u64 buf_addr;
 	__u16	cqe_size;
 	__u16	reserved0;
 	__u32	reserved1;
 };
 
 struct mlx5_ib_create_srq {
-	__u64	buf_addr;
-	__u64	db_addr;
+	__aligned_u64 buf_addr;
+	__aligned_u64 db_addr;
 	__u32	flags;
 	__u32	reserved0; /* explicit padding (optional on i386) */
 	__u32	uidx;
@@ -282,8 +282,8 @@ struct mlx5_ib_create_srq_resp {
 };
 
 struct mlx5_ib_create_qp {
-	__u64	buf_addr;
-	__u64	db_addr;
+	__aligned_u64 buf_addr;
+	__aligned_u64 db_addr;
 	__u32	sq_wqe_count;
 	__u32	rq_wqe_count;
 	__u32	rq_wqe_shift;
@@ -291,8 +291,8 @@ struct mlx5_ib_create_qp {
 	__u32	uidx;
 	__u32	bfreg_index;
 	union {
-		__u64	sq_buf_addr;
-		__u64	access_key;
+		__aligned_u64 sq_buf_addr;
+		__aligned_u64 access_key;
 	};
 };
 
@@ -323,7 +323,7 @@ enum mlx5_rx_hash_fields {
 };
 
 struct mlx5_ib_create_qp_rss {
-	__u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
+	__aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
 	__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
 	__u8 rx_key_len; /* valid only for Toeplitz */
 	__u8 reserved[6];
@@ -348,8 +348,8 @@ enum mlx5_ib_create_wq_mask {
 };
 
 struct mlx5_ib_create_wq {
-	__u64   buf_addr;
-	__u64   db_addr;
+	__aligned_u64 buf_addr;
+	__aligned_u64 db_addr;
 	__u32   rq_wqe_count;
 	__u32   rq_wqe_shift;
 	__u32   user_index;
@@ -401,13 +401,13 @@ struct mlx5_ib_modify_wq {
 struct mlx5_ib_clock_info {
 	__u32 sign;
 	__u32 resv;
-	__u64 nsec;
-	__u64 cycles;
-	__u64 frac;
+	__aligned_u64 nsec;
+	__aligned_u64 cycles;
+	__aligned_u64 frac;
 	__u32 mult;
 	__u32 shift;
-	__u64 mask;
-	__u64 overflow_period;
+	__aligned_u64 mask;
+	__aligned_u64 overflow_period;
 };
 
 enum mlx5_ib_mmap_cmd {
--- a/include/uapi/rdma/mthca-abi.h
+++ b/include/uapi/rdma/mthca-abi.h
@@ -73,8 +73,8 @@ struct mthca_reg_mr {
 struct mthca_create_cq {
 	__u32 lkey;
 	__u32 pdn;
-	__u64 arm_db_page;
-	__u64 set_db_page;
+	__aligned_u64 arm_db_page;
+	__aligned_u64 set_db_page;
 	__u32 arm_db_index;
 	__u32 set_db_index;
 };
@@ -92,7 +92,7 @@ struct mthca_resize_cq {
 struct mthca_create_srq {
 	__u32 lkey;
 	__u32 db_index;
-	__u64 db_page;
+	__aligned_u64 db_page;
 };
 
 struct mthca_create_srq_resp {
@@ -103,8 +103,8 @@ struct mthca_create_srq_resp {
 struct mthca_create_qp {
 	__u32 lkey;
 	__u32 reserved;
-	__u64 sq_db_page;
-	__u64 rq_db_page;
+	__aligned_u64 sq_db_page;
+	__aligned_u64 rq_db_page;
 	__u32 sq_db_index;
 	__u32 rq_db_index;
 };
--- a/include/uapi/rdma/nes-abi.h
+++ b/include/uapi/rdma/nes-abi.h
@@ -71,14 +71,14 @@ struct nes_alloc_pd_resp {
 };
 
 struct nes_create_cq_req {
-	__u64 user_cq_buffer;
+	__aligned_u64 user_cq_buffer;
 	__u32 mcrqf;
 	__u8 reserved[4];
 };
 
 struct nes_create_qp_req {
-	__u64 user_wqe_buffers;
-	__u64 user_qp_buffer;
+	__aligned_u64 user_wqe_buffers;
+	__aligned_u64 user_qp_buffer;
 };
 
 enum iwnes_memreg_type {
--- a/include/uapi/rdma/ocrdma-abi.h
+++ b/include/uapi/rdma/ocrdma-abi.h
@@ -54,13 +54,13 @@ struct ocrdma_alloc_ucontext_resp {
 	__u32 wqe_size;
 	__u32 max_inline_data;
 	__u32 dpp_wqe_size;
-	__u64 ah_tbl_page;
+	__aligned_u64 ah_tbl_page;
 	__u32 ah_tbl_len;
 	__u32 rqe_size;
 	__u8 fw_ver[32];
 	/* for future use/new features in progress */
-	__u64 rsvd1;
-	__u64 rsvd2;
+	__aligned_u64 rsvd1;
+	__aligned_u64 rsvd2;
 };
 
 struct ocrdma_alloc_pd_ureq {
@@ -86,13 +86,13 @@ struct ocrdma_create_cq_uresp {
 	__u32 page_size;
 	__u32 num_pages;
 	__u32 max_hw_cqe;
-	__u64 page_addr[MAX_CQ_PAGES];
-	__u64 db_page_addr;
+	__aligned_u64 page_addr[MAX_CQ_PAGES];
+	__aligned_u64 db_page_addr;
 	__u32 db_page_size;
 	__u32 phase_change;
 	/* for future use/new features in progress */
-	__u64 rsvd1;
-	__u64 rsvd2;
+	__aligned_u64 rsvd1;
+	__aligned_u64 rsvd2;
 };
 
 #define MAX_QP_PAGES 8
@@ -114,9 +114,9 @@ struct ocrdma_create_qp_uresp {
 	__u32 rq_page_size;
 	__u32 num_sq_pages;
 	__u32 num_rq_pages;
-	__u64 sq_page_addr[MAX_QP_PAGES];
-	__u64 rq_page_addr[MAX_QP_PAGES];
-	__u64 db_page_addr;
+	__aligned_u64 sq_page_addr[MAX_QP_PAGES];
+	__aligned_u64 rq_page_addr[MAX_QP_PAGES];
+	__aligned_u64 db_page_addr;
 	__u32 db_page_size;
 	__u32 dpp_credit;
 	__u32 dpp_offset;
@@ -125,7 +125,7 @@ struct ocrdma_create_qp_uresp {
 	__u32 db_sq_offset;
 	__u32 db_rq_offset;
 	__u32 db_shift;
-	__u64 rsvd[11];
+	__aligned_u64 rsvd[11];
 };
 
 struct ocrdma_create_srq_uresp {
@@ -136,16 +136,16 @@ struct ocrdma_create_srq_uresp {
 	__u32 rq_page_size;
 	__u32 num_rq_pages;
 
-	__u64 rq_page_addr[MAX_QP_PAGES];
-	__u64 db_page_addr;
+	__aligned_u64 rq_page_addr[MAX_QP_PAGES];
+	__aligned_u64 db_page_addr;
 
 	__u32 db_page_size;
 	__u32 num_rqe_allocated;
 	__u32 db_rq_offset;
 	__u32 db_shift;
 
-	__u64 rsvd2;
-	__u64 rsvd3;
+	__aligned_u64 rsvd2;
+	__aligned_u64 rsvd3;
 };
 
 #endif	/* OCRDMA_ABI_USER_H */
--- a/include/uapi/rdma/qedr-abi.h
+++ b/include/uapi/rdma/qedr-abi.h
@@ -39,7 +39,7 @@
 /* user kernel communication data structures. */
 
 struct qedr_alloc_ucontext_resp {
-	__u64 db_pa;
+	__aligned_u64 db_pa;
 	__u32 db_size;
 
 	__u32 max_send_wr;
@@ -56,7 +56,7 @@ struct qedr_alloc_ucontext_resp {
 };
 
 struct qedr_alloc_pd_ureq {
-	__u64 rsvd1;
+	__aligned_u64 rsvd1;
 };
 
 struct qedr_alloc_pd_uresp {
@@ -65,8 +65,8 @@ struct qedr_alloc_pd_uresp {
 };
 
 struct qedr_create_cq_ureq {
-	__u64 addr;
-	__u64 len;
+	__aligned_u64 addr;
+	__aligned_u64 len;
 };
 
 struct qedr_create_cq_uresp {
@@ -81,17 +81,17 @@ struct qedr_create_qp_ureq {
 
 	/* SQ */
 	/* user space virtual address of SQ buffer */
-	__u64 sq_addr;
+	__aligned_u64 sq_addr;
 
 	/* length of SQ buffer */
-	__u64 sq_len;
+	__aligned_u64 sq_len;
 
 	/* RQ */
 	/* user space virtual address of RQ buffer */
-	__u64 rq_addr;
+	__aligned_u64 rq_addr;
 
 	/* length of RQ buffer */
-	__u64 rq_len;
+	__aligned_u64 rq_len;
 };
 
 struct qedr_create_qp_uresp {
--- a/include/uapi/rdma/rdma_user_cm.h
+++ b/include/uapi/rdma/rdma_user_cm.h
@@ -79,8 +79,8 @@ struct rdma_ucm_cmd_hdr {
 };
 
 struct rdma_ucm_create_id {
-	__u64 uid;
-	__u64 response;
+	__aligned_u64 uid;
+	__aligned_u64 response;
 	__u16 ps;
 	__u8  qp_type;
 	__u8  reserved[5];
@@ -91,7 +91,7 @@ struct rdma_ucm_create_id_resp {
 };
 
 struct rdma_ucm_destroy_id {
-	__u64 response;
+	__aligned_u64 response;
 	__u32 id;
 	__u32 reserved;
 };
@@ -101,7 +101,7 @@ struct rdma_ucm_destroy_id_resp {
 };
 
 struct rdma_ucm_bind_ip {
-	__u64 response;
+	__aligned_u64 response;
 	struct sockaddr_in6 addr;
 	__u32 id;
 };
@@ -142,13 +142,13 @@ enum {
 };
 
 struct rdma_ucm_query {
-	__u64 response;
+	__aligned_u64 response;
 	__u32 id;
 	__u32 option;
 };
 
 struct rdma_ucm_query_route_resp {
-	__u64 node_guid;
+	__aligned_u64 node_guid;
 	struct ib_user_path_rec ib_route[2];
 	struct sockaddr_in6 src_addr;
 	struct sockaddr_in6 dst_addr;
@@ -158,7 +158,7 @@ struct rdma_ucm_query_route_resp {
 };
 
 struct rdma_ucm_query_addr_resp {
-	__u64 node_guid;
+	__aligned_u64 node_guid;
 	__u8  port_num;
 	__u8  reserved;
 	__u16 pkey;
@@ -209,7 +209,7 @@ struct rdma_ucm_listen {
 };
 
 struct rdma_ucm_accept {
-	__u64 uid;
+	__aligned_u64 uid;
 	struct rdma_ucm_conn_param conn_param;
 	__u32 id;
 	__u32 reserved;
@@ -227,7 +227,7 @@ struct rdma_ucm_disconnect {
 };
 
 struct rdma_ucm_init_qp_attr {
-	__u64 response;
+	__aligned_u64 response;
 	__u32 id;
 	__u32 qp_state;
 };
@@ -238,8 +238,8 @@ struct rdma_ucm_notify {
 };
 
 struct rdma_ucm_join_ip_mcast {
-	__u64 response;		/* rdma_ucm_create_id_resp */
-	__u64 uid;
+	__aligned_u64 response;		/* rdma_ucm_create_id_resp */
+	__aligned_u64 uid;
 	struct sockaddr_in6 addr;
 	__u32 id;
 };
@@ -252,8 +252,8 @@ enum {
 };
 
 struct rdma_ucm_join_mcast {
-	__u64 response;		/* rdma_ucma_create_id_resp */
-	__u64 uid;
+	__aligned_u64 response;		/* rdma_ucma_create_id_resp */
+	__aligned_u64 uid;
 	__u32 id;
 	__u16 addr_size;
 	__u16 join_flags;
@@ -261,11 +261,11 @@ struct rdma_ucm_join_mcast {
 };
 
 struct rdma_ucm_get_event {
-	__u64 response;
+	__aligned_u64 response;
 };
 
 struct rdma_ucm_event_resp {
-	__u64 uid;
+	__aligned_u64 uid;
 	__u32 id;
 	__u32 event;
 	__u32 status;
@@ -295,7 +295,7 @@ enum {
 };
 
 struct rdma_ucm_set_option {
-	__u64 optval;
+	__aligned_u64 optval;
 	__u32 id;
 	__u32 level;
 	__u32 optname;
@@ -303,7 +303,7 @@ struct rdma_ucm_set_option {
 };
 
 struct rdma_ucm_migrate_id {
-	__u64 response;
+	__aligned_u64 response;
 	__u32 id;
 	__u32 fd;
 };
--- a/include/uapi/rdma/rdma_user_rxe.h
+++ b/include/uapi/rdma/rdma_user_rxe.h
@@ -67,7 +67,7 @@ struct rxe_av {
 };
 
 struct rxe_send_wr {
-	__u64			wr_id;
+	__aligned_u64		wr_id;
 	__u32			num_sge;
 	__u32			opcode;
 	__u32			send_flags;
@@ -77,14 +77,14 @@ struct rxe_send_wr {
 	} ex;
 	union {
 		struct {
-			__u64	remote_addr;
+			__aligned_u64 remote_addr;
 			__u32	rkey;
 			__u32	reserved;
 		} rdma;
 		struct {
-			__u64	remote_addr;
-			__u64	compare_add;
-			__u64	swap;
+			__aligned_u64 remote_addr;
+			__aligned_u64 compare_add;
+			__aligned_u64 swap;
 			__u32	rkey;
 			__u32	reserved;
 		} atomic;
@@ -97,7 +97,7 @@ struct rxe_send_wr {
 		struct {
 			union {
 				struct ib_mr *mr;
-				__u64 reserved;
+				__aligned_u64 reserved;
 			};
 			__u32        key;
 			__u32        access;
@@ -106,13 +106,13 @@ struct rxe_send_wr {
 };
 
 struct rxe_sge {
-	__u64	addr;
+	__aligned_u64 addr;
 	__u32	length;
 	__u32	lkey;
 };
 
 struct mminfo {
-	__u64			offset;
+	__aligned_u64  		offset;
 	__u32			size;
 	__u32			pad;
 };
@@ -135,7 +135,7 @@ struct rxe_send_wqe {
 	struct rxe_av		av;
 	__u32			status;
 	__u32			state;
-	__u64			iova;
+	__aligned_u64		iova;
 	__u32			mask;
 	__u32			first_psn;
 	__u32			last_psn;
@@ -146,7 +146,7 @@ struct rxe_send_wqe {
 };
 
 struct rxe_recv_wqe {
-	__u64			wr_id;
+	__aligned_u64		wr_id;
 	__u32			num_sge;
 	__u32			padding;
 	struct rxe_dma_info	dma;
@@ -172,7 +172,7 @@ struct rxe_create_srq_resp {
 };
 
 struct rxe_modify_srq_cmd {
-	__u64 mmap_info_addr;
+	__aligned_u64 mmap_info_addr;
 };
 
 #endif /* RDMA_USER_RXE_H */
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -142,7 +142,7 @@ struct pvrdma_alloc_pd_resp {
 };
 
 struct pvrdma_create_cq {
-	__u64 buf_addr;
+	__aligned_u64 buf_addr;
 	__u32 buf_size;
 	__u32 reserved;
 };
@@ -153,13 +153,13 @@ struct pvrdma_create_cq_resp {
 };
 
 struct pvrdma_resize_cq {
-	__u64 buf_addr;
+	__aligned_u64 buf_addr;
 	__u32 buf_size;
 	__u32 reserved;
 };
 
 struct pvrdma_create_srq {
-	__u64 buf_addr;
+	__aligned_u64 buf_addr;
 	__u32 buf_size;
 	__u32 reserved;
 };
@@ -170,25 +170,25 @@ struct pvrdma_create_srq_resp {
 };
 
 struct pvrdma_create_qp {
-	__u64 rbuf_addr;
-	__u64 sbuf_addr;
+	__aligned_u64 rbuf_addr;
+	__aligned_u64 sbuf_addr;
 	__u32 rbuf_size;
 	__u32 sbuf_size;
-	__u64 qp_addr;
+	__aligned_u64 qp_addr;
 };
 
 /* PVRDMA masked atomic compare and swap */
 struct pvrdma_ex_cmp_swap {
-	__u64 swap_val;
-	__u64 compare_val;
-	__u64 swap_mask;
-	__u64 compare_mask;
+	__aligned_u64 swap_val;
+	__aligned_u64 compare_val;
+	__aligned_u64 swap_mask;
+	__aligned_u64 compare_mask;
 };
 
 /* PVRDMA masked atomic fetch and add */
 struct pvrdma_ex_fetch_add {
-	__u64 add_val;
-	__u64 field_boundary;
+	__aligned_u64 add_val;
+	__aligned_u64 field_boundary;
 };
 
 /* PVRDMA address vector. */
@@ -206,14 +206,14 @@ struct pvrdma_av {
 
 /* PVRDMA scatter/gather entry */
 struct pvrdma_sge {
-	__u64   addr;
+	__aligned_u64 addr;
 	__u32   length;
 	__u32   lkey;
 };
 
 /* PVRDMA receive queue work request */
 struct pvrdma_rq_wqe_hdr {
-	__u64 wr_id;		/* wr id */
+	__aligned_u64 wr_id;		/* wr id */
 	__u32 num_sge;		/* size of s/g array */
 	__u32 total_len;	/* reserved */
 };
@@ -221,7 +221,7 @@ struct pvrdma_rq_wqe_hdr {
 
 /* PVRDMA send queue work request */
 struct pvrdma_sq_wqe_hdr {
-	__u64 wr_id;		/* wr id */
+	__aligned_u64 wr_id;		/* wr id */
 	__u32 num_sge;		/* size of s/g array */
 	__u32 total_len;	/* reserved */
 	__u32 opcode;		/* operation type */
@@ -233,19 +233,19 @@ struct pvrdma_sq_wqe_hdr {
 	__u32 reserved;
 	union {
 		struct {
-			__u64 remote_addr;
+			__aligned_u64 remote_addr;
 			__u32 rkey;
 			__u8 reserved[4];
 		} rdma;
 		struct {
-			__u64 remote_addr;
-			__u64 compare_add;
-			__u64 swap;
+			__aligned_u64 remote_addr;
+			__aligned_u64 compare_add;
+			__aligned_u64 swap;
 			__u32 rkey;
 			__u32 reserved;
 		} atomic;
 		struct {
-			__u64 remote_addr;
+			__aligned_u64 remote_addr;
 			__u32 log_arg_sz;
 			__u32 rkey;
 			union {
@@ -254,8 +254,8 @@ struct pvrdma_sq_wqe_hdr {
 			} wr_data;
 		} masked_atomics;
 		struct {
-			__u64 iova_start;
-			__u64 pl_pdir_dma;
+			__aligned_u64 iova_start;
+			__aligned_u64 pl_pdir_dma;
 			__u32 page_shift;
 			__u32 page_list_len;
 			__u32 length;
@@ -274,8 +274,8 @@ struct pvrdma_sq_wqe_hdr {
 
 /* Completion queue element. */
 struct pvrdma_cqe {
-	__u64 wr_id;
-	__u64 qp;
+	__aligned_u64 wr_id;
+	__aligned_u64 qp;
 	__u32 opcode;
 	__u32 status;
 	__u32 byte_len;