/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/socket.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>

#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <asm/uaccess.h>

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

extern union ib_gid zgid;

struct ib_gid_attr {
	struct net_device	*ndev;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA		= 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC,
	RDMA_NODE_USNIC,
	RDMA_NODE_USNIC_UDP,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
	IB_DEVICE_LOCAL_DMA_LKEY	= (1<<15),
	IB_DEVICE_RESERVED		= (1<<16), /* old SEND_W_INV */
	IB_DEVICE_MEM_WINDOW		= (1<<17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksums on outgoing UD IPoIB
	 * messages and can verify the validity of checksums on
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM		= (1<<18),
	IB_DEVICE_UD_TSO		= (1<<19),
	IB_DEVICE_XRC			= (1<<20),
	IB_DEVICE_MEM_MGT_EXTENSIONS	= (1<<21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A	= (1<<23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B	= (1<<24),
	IB_DEVICE_RC_IP_CSUM		= (1<<25),
	IB_DEVICE_RAW_IP_CSUM		= (1<<26),
	IB_DEVICE_MANAGED_FLOW_STEERING	= (1<<29),
	IB_DEVICE_SIGNATURE_HANDOVER	= (1<<30),
	IB_DEVICE_ON_DEMAND_PAGING	= (1<<31),
};
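
/*
 * Example (illustrative sketch, not part of the original header): a ULP
 * would typically test a capability bit from the queried device attributes
 * before relying on it. "device_attr" and "netdev" are assumed locals here:
 *
 *	if (device_attr.device_cap_flags & IB_DEVICE_UD_IP_CSUM)
 *		netdev->features |= NETIF_F_IP_CSUM;
 */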

enum ib_signature_prot_cap {
	IB_PROT_T10DIF_TYPE_1 = 1,
	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
	IB_GUARD_T10DIF_CRC	= 1,
	IB_GUARD_T10DIF_CSUM	= 1 << 1,
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT = 1 << 0,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND	= 1 << 0,
	IB_ODP_SUPPORT_RECV	= 1 << 1,
	IB_ODP_SUPPORT_WRITE	= 1 << 2,
	IB_ODP_SUPPORT_READ	= 1 << 3,
	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t rc_odp_caps;
		uint32_t uc_odp_caps;
		uint32_t ud_odp_caps;
	} per_transport_caps;
};
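
/*
 * Example (illustrative sketch): deciding whether on-demand paging can be
 * used for RDMA READ on an RC QP from the queried capabilities. "odp_caps"
 * is an assumed local of type struct ib_odp_caps:
 *
 *	bool rc_odp_read = (odp_caps.general_caps & IB_ODP_SUPPORT) &&
 *			   (odp_caps.per_transport_caps.rc_odp_caps &
 *			    IB_ODP_SUPPORT_READ);
 */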

enum ib_cq_creation_flags {
	IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
};

struct ib_cq_init_attr {
	unsigned int	cqe;
	int		comp_vector;
	u32		flags;
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	int			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	unsigned int		max_fast_reg_page_list_len;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
	int			sig_prot_cap;
	int			sig_guard_cap;
	struct ib_odp_caps	odp_caps;
	uint64_t		timestamp_mask;
	uint64_t		hca_core_clock; /* in KHZ */
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP		= 1 << 14,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25,
	IB_PORT_IP_BASED_GIDS			= 1 << 26,
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32
};

struct ib_protocol_stats {
	/* TBD... */
};

struct iw_protocol_stats {
	u64	ipInReceives;
	u64	ipInHdrErrors;
	u64	ipInTooBigErrors;
	u64	ipInNoRoutes;
	u64	ipInAddrErrors;
	u64	ipInUnknownProtos;
	u64	ipInTruncatedPkts;
	u64	ipInDiscards;
	u64	ipInDelivers;
	u64	ipOutForwDatagrams;
	u64	ipOutRequests;
	u64	ipOutDiscards;
	u64	ipOutNoRoutes;
	u64	ipReasmTimeout;
	u64	ipReasmReqds;
	u64	ipReasmOKs;
	u64	ipReasmFails;
	u64	ipFragOKs;
	u64	ipFragFails;
	u64	ipFragCreates;
	u64	ipInMcastPkts;
	u64	ipOutMcastPkts;
	u64	ipInBcastPkts;
	u64	ipOutBcastPkts;

	u64	tcpRtoAlgorithm;
	u64	tcpRtoMin;
	u64	tcpRtoMax;
	u64	tcpMaxConn;
	u64	tcpActiveOpens;
	u64	tcpPassiveOpens;
	u64	tcpAttemptFails;
	u64	tcpEstabResets;
	u64	tcpCurrEstab;
	u64	tcpInSegs;
	u64	tcpOutSegs;
	u64	tcpRetransSegs;
	u64	tcpInErrs;
	u64	tcpOutRsts;
};

union rdma_protocol_stats {
	struct ib_protocol_stats	ib;
	struct iw_protocol_stats	iw;
};

/* Define bits for the various pieces of functionality a port needs the
 * core to support.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD            0x00000001
#define RDMA_CORE_CAP_IB_SMI            0x00000002
#define RDMA_CORE_CAP_IB_CM             0x00000004
#define RDMA_CORE_CAP_IW_CM             0x00000008
#define RDMA_CORE_CAP_IB_SA             0x00000010
#define RDMA_CORE_CAP_OPA_MAD           0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB             0x00001000
#define RDMA_CORE_CAP_ETH_AH            0x00002000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB           0x00100000
#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
#define RDMA_CORE_CAP_PROT_IWARP        0x00400000

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM  \
					| RDMA_CORE_CAP_IB_SA  \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
					| RDMA_CORE_CAP_OPA_MAD)

struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u16			lid;
	u16			sm_lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8			phys_state;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN	= 1,
	IB_PORT_INIT_TYPE	= (1<<2),
	IB_PORT_RESET_QKEY_CNTR	= (1<<3)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void		(*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
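
/*
 * Example (illustrative sketch): registering an asynchronous event handler.
 * ib_register_event_handler() is declared later in this header; "my_handler"
 * and "handler" are assumed to be defined by the caller:
 *
 *	static void my_handler(struct ib_event_handler *h,
 *			       struct ib_event *event)
 *	{
 *		pr_info("async event %d on %s\n", event->event,
 *			event->device->name);
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&handler, device, my_handler);
 *	ib_register_event_handler(&handler);
 */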

struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS  = 11,
	IB_RATE_56_GBPS  = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS  = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
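
/*
 * Example (illustrative): ib_rate_to_mult(IB_RATE_20_GBPS) returns 8,
 * since 20 Gbit/sec is 8 * 2.5 Gbit/sec; mult_to_ib_rate(8), declared
 * further below, performs the inverse mapping back to IB_RATE_20_GBPS.
 */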

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 *                            normal registration
 * @IB_MR_TYPE_SIGNATURE:     memory region that is used for
 *                            signature operations (data-integrity
 *                            capable regions)
 */
enum ib_mr_type {
	IB_MR_TYPE_MEM_REG,
	IB_MR_TYPE_SIGNATURE,
};

/**
 * Signature types
 * IB_SIG_TYPE_NONE: Unprotected.
 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
 */
enum ib_signature_type {
	IB_SIG_TYPE_NONE,
	IB_SIG_TYPE_T10_DIF,
};

/**
 * Signature T10-DIF block-guard types
 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
	IB_T10DIF_CRC,
	IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
 *     domain.
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @ref_remap: Indicates whether the reftag increments each block
 * @app_escape: Indicates to skip block check if apptag=0xffff
 * @ref_escape: Indicates to skip block check if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
	enum ib_t10_dif_bg_type bg_type;
	u16			pi_interval;
	u16			bg;
	u16			app_tag;
	u32			ref_tag;
	bool			ref_remap;
	bool			app_escape;
	bool			ref_escape;
	u16			apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
	enum ib_signature_type sig_type;
	union {
		struct ib_t10_dif_domain dif;
	} sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
	u8			check_mask;
	struct ib_sig_domain	mem;
	struct ib_sig_domain	wire;
};
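
/*
 * Example (illustrative sketch): filling in signature attributes for a
 * T10-DIF protected memory domain with an unprotected wire domain.
 * "sig_attrs" is an assumed local of type struct ib_sig_attrs:
 *
 *	memset(&sig_attrs, 0, sizeof(sig_attrs));
 *	sig_attrs.mem.sig_type            = IB_SIG_TYPE_T10_DIF;
 *	sig_attrs.mem.sig.dif.bg_type     = IB_T10DIF_CRC;
 *	sig_attrs.mem.sig.dif.pi_interval = 512;
 *	sig_attrs.mem.sig.dif.ref_remap   = true;
 *	sig_attrs.wire.sig_type           = IB_SIG_TYPE_NONE;
 */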

enum ib_sig_err_type {
	IB_SIG_BAD_GUARD,
	IB_SIG_BAD_REFTAG,
	IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
	enum ib_sig_err_type	err_type;
	u32			expected;
	u32			actual;
	u64			sig_err_offset;
	u32			key;
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32		    fail_status;
	struct ib_sig_err   sig_err;
};
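
/*
 * Example (illustrative sketch): after a signature handover operation, a
 * consumer can query the MR for integrity errors with ib_check_mr_status(),
 * declared later in this header:
 *
 *	struct ib_mr_status mr_status;
 *
 *	if (!ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status) &&
 *	    (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS))
 *		pr_err("signature error %d\n", mr_status.sig_err.err_type);
 */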

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;
	u8			port_num;
	u8			dmac[ETH_ALEN];
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};
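
/*
 * Example (illustrative): the receive test mentioned above covers both
 * IB_WC_RECV and IB_WC_RECV_RDMA_WITH_IMM with a single mask check:
 *
 *	if (wc->opcode & IB_WC_RECV)
 *		handle_recv(wc);
 *
 * where handle_recv() is an assumed consumer helper.
 */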

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
	IB_WC_IP_CSUM_OK	= (1<<3),
	IB_WC_WITH_SMAC		= (1<<4),
	IB_WC_WITH_VLAN		= (1<<5),
};

struct ib_wc {
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	u32			src_qp;
	int			wc_flags;
	u16			pkey_index;
	u16			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
	u8			smac[ETH_ALEN];
	u16			vlan_id;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
		} xrc;
	} ext;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
	IB_QP_CREATE_NETIF_QP			= 1 << 5,
	IB_QP_CREATE_SIGNATURE_EN		= 1 << 6,
	IB_QP_CREATE_USE_GFP_NOIO		= 1 << 7,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;	  /* XRC TGT QPs only */
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	enum ib_qp_create_flags	create_flags;
	u8			port_num; /* special QP types only */
};

struct ib_qp_open_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20),
	IB_QP_RESERVED1			= (1<<21),
	IB_QP_RESERVED2			= (1<<22),
	IB_QP_RESERVED3			= (1<<23),
	IB_QP_RESERVED4			= (1<<24),
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO,
	IB_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV,
	IB_WR_REG_MR,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
	IB_WR_BIND_MW,
	IB_WR_REG_SIG_MR,
	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START	= (1 << 26),
	IB_SEND_RESERVED_END	= (1 << 31),
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

/**
 * struct ib_mw_bind_info - Parameters for a memory window bind operation.
 * @mr: A memory region to bind the memory window to.
 * @addr: The address where the memory window should begin.
 * @length: The length of the memory window, in bytes.
 * @mw_access_flags: Access flags from enum ib_access_flags for the window.
 *
 * This struct contains the shared parameters for type 1 and type 2
 * memory window bind operations.
 */
struct ib_mw_bind_info {
	struct ib_mr   *mr;
	u64		addr;
	u64		length;
	int		mw_access_flags;
};

struct ib_cqe {
	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};
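
/*
 * Example (illustrative sketch): with the wr_cqe completion model, a
 * consumer embeds a struct ib_cqe in its own request structure and recovers
 * the request from the completion via container_of(). "my_request" is an
 * assumed consumer type, not part of this header:
 *
 *	struct my_request {
 *		struct ib_cqe	cqe;
 *	};
 *
 *	static void my_done(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct my_request *req =
 *			container_of(wc->wr_cqe, struct my_request, cqe);
 *	}
 *
 * Before posting, the consumer sets req->cqe.done = my_done and points the
 * work request's wr_cqe at &req->cqe.
 */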

struct ib_send_wr {
	struct ib_send_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
};

struct ib_rdma_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u32			rkey;
};

static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}
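
/*
 * Example (illustrative sketch): building an RDMA WRITE work request with
 * the typed WR wrappers. "sge", "raddr", "rkey", "qp" and "bad_wr" are
 * assumed locals; ib_post_send() is declared later in this header:
 *
 *	struct ib_rdma_wr wr = {};
 *
 *	wr.wr.opcode	 = IB_WR_RDMA_WRITE;
 *	wr.wr.sg_list	 = &sge;
 *	wr.wr.num_sge	 = 1;
 *	wr.wr.send_flags = IB_SEND_SIGNALED;
 *	wr.remote_addr	 = raddr;
 *	wr.rkey		 = rkey;
 *	ret = ib_post_send(qp, &wr.wr, &bad_wr);
 */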

struct ib_atomic_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u64			compare_add;
	u64			swap;
	u64			compare_add_mask;
	u64			swap_mask;
	u32			rkey;
};

static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
	struct ib_send_wr	wr;
	struct ib_ah	       *ah;
	void		       *header;
	int			hlen;
	int			mss;
	u32			remote_qpn;
	u32			remote_qkey;
	u16			pkey_index; /* valid for GSI only */
	u8			port_num;   /* valid for DR SMPs on switch only */
};

static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
	struct ib_send_wr	wr;
	struct ib_mr	       *mr;
	u32			key;
	int			access;
};

static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_reg_wr, wr);
}

struct ib_bind_mw_wr {
	struct ib_send_wr	wr;
	struct ib_mw	       *mw;
	/* The new rkey for the memory window. */
	u32			rkey;
	struct ib_mw_bind_info	bind_info;
};

static inline struct ib_bind_mw_wr *bind_mw_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_bind_mw_wr, wr);
}

struct ib_sig_handover_wr {
	struct ib_send_wr	wr;
	struct ib_sig_attrs    *sig_attrs;
	struct ib_mr	       *sig_mr;
	int			access_flags;
	struct ib_sge	       *prot;
};

static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_sig_handover_wr, wr);
}

struct ib_recv_wr {
	struct ib_recv_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
};
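
/*
 * Example (illustrative sketch): posting a single-SGE receive. "qp", "sge",
 * "my_buf" and "bad_wr" are assumed locals; ib_post_recv() is declared
 * later in this header:
 *
 *	struct ib_recv_wr wr = {};
 *
 *	wr.sg_list = &sge;
 *	wr.num_sge = 1;
 *	wr.wr_id   = (uintptr_t)my_buf;
 *	ret = ib_post_recv(qp, &wr, &bad_wr);
 */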

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4),
	IB_ZERO_BASED		= (1<<5),
	IB_ACCESS_ON_DEMAND	= (1<<6),
};

struct ib_phys_buf {
	u64	addr;
	u64	size;
};

struct ib_mr_attr {
	struct ib_pd   *pd;
	u64		device_virt_addr;
	u64		size;
	int		mr_access_flags;
	u32		lkey;
	u32		rkey;
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2),
	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
};

/**
 * struct ib_mw_bind - Parameters for a type 1 memory window bind operation.
 * @wr_id:      Work request id.
 * @send_flags: Flags from ib_send_flags enum.
 * @bind_info:  More parameters of the bind operation.
 */
struct ib_mw_bind {
	u64			wr_id;
	int			send_flags;
	struct ib_mw_bind_info	bind_info;
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};

struct ib_umem;

struct ib_ucontext {
	struct ib_device       *device;
	struct list_head	pd_list;
	struct list_head	mr_list;
	struct list_head	mw_list;
	struct list_head	cq_list;
	struct list_head	qp_list;
	struct list_head	srq_list;
	struct list_head	ah_list;
	struct list_head	xrcd_list;
	struct list_head	rule_list;
	int			closing;

	struct pid	       *tgid;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct rb_root		umem_tree;
	/*
	 * Protects the umem_tree rbtree, as well as odp_mrs_count and
	 * mmu notifier registration.
	 */
	struct rw_semaphore	umem_rwsem;
	void (*invalidate_range)(struct ib_umem *umem,
				 unsigned long start, unsigned long end);

	struct mmu_notifier	mn;
	atomic_t		notifier_count;
	/* A list of umems that don't have private mmu notifier counters yet. */
	struct list_head	no_private_counters;
	int			odp_mrs_count;
#endif
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	int			id;		/* index into kernel idr */
	struct kref		ref;
	struct rw_semaphore	mutex;		/* protects .live */
	int			live;
};

struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t	     inlen;
	size_t	     outlen;
};

struct ib_pd {
	u32			local_dma_lkey;
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt; /* count all resources */
	struct ib_mr	       *local_mr;
};

struct ib_xrcd {
	struct ib_device       *device;
	atomic_t		usecnt; /* count all exposed resources */
	struct inode	       *inode;

	struct mutex		tgt_qp_mutex;
	struct list_head	tgt_qp_list;
};

struct ib_ah {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

enum ib_poll_context {
	IB_POLL_DIRECT,	   /* caller context, no hw completions */
	IB_POLL_SOFTIRQ,   /* poll from softirq context */
	IB_POLL_WORKQUEUE, /* poll from workqueue */
};
| 1329 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1330 | struct ib_cq { |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1331 | struct ib_device *device; |
| 1332 | struct ib_uobject *uobject; |
| 1333 | ib_comp_handler comp_handler; |
| 1334 | void (*event_handler)(struct ib_event *, void *); |
Dotan Barak | 4deccd6 | 2008-07-14 23:48:44 -0700 | [diff] [blame] | 1335 | void *cq_context; |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1336 | int cqe; |
| 1337 | atomic_t usecnt; /* count number of work queues */ |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame^] | 1338 | enum ib_poll_context poll_ctx; |
| 1339 | struct ib_wc *wc; |
| 1340 | union { |
| 1341 | struct irq_poll iop; |
| 1342 | struct work_struct work; |
| 1343 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1344 | }; |
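
/*
 * Minimal usage sketch, assuming the ib_alloc_cq() helper that pairs with
 * enum ib_poll_context (declared elsewhere in the verbs API): the consumer
 * picks at allocation time how completions are reaped.  IB_POLL_DIRECT
 * leaves polling entirely to the caller, IB_POLL_SOFTIRQ reaps via the
 * irq_poll machinery, and IB_POLL_WORKQUEUE via the embedded work_struct.
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_alloc_cq(device, priv, nr_cqe, comp_vector, IB_POLL_SOFTIRQ);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */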

struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	enum ib_srq_type	srq_type;
	atomic_t		usecnt;

	union {
		struct {
			struct ib_xrcd *xrcd;
			struct ib_cq   *cq;
			u32		srq_num;
		} xrc;
	} ext;
};

struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;		/* XRC TGT QPs only */
	struct list_head	xrcd_list;
	/* count times opened, mcast attaches, flow attaches */
	atomic_t		usecnt;
	struct list_head	open_list;
	struct ib_qp	       *real_qp;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

struct ib_mr {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	u32			lkey;
	u32			rkey;
	u64			iova;
	u32			length;
	unsigned int		page_size;
	atomic_t		usecnt;		/* count number of MWs */
};

struct ib_mw {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	u32			rkey;
	enum ib_mw_type		type;
};

struct ib_fmr {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};

/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers */
	IB_FLOW_SPEC_ETH	= 0x20,
	IB_FLOW_SPEC_IB		= 0x22,
	/* L3 header */
	IB_FLOW_SPEC_IPV4	= 0x30,
	/* L4 headers */
	IB_FLOW_SPEC_TCP	= 0x40,
	IB_FLOW_SPEC_UDP	= 0x41
};

#define IB_FLOW_SPEC_LAYER_MASK		0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS	4

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM /* Must be last */
};

struct ib_flow_eth_filter {
	u8	dst_mac[6];
	u8	src_mac[6];
	__be16	ether_type;
	__be16	vlan_tag;
};

struct ib_flow_spec_eth {
	enum ib_flow_spec_type		type;
	u16				size;
	struct ib_flow_eth_filter	val;
	struct ib_flow_eth_filter	mask;
};

struct ib_flow_ib_filter {
	__be16	dlid;
	__u8	sl;
};

struct ib_flow_spec_ib {
	enum ib_flow_spec_type		type;
	u16				size;
	struct ib_flow_ib_filter	val;
	struct ib_flow_ib_filter	mask;
};

struct ib_flow_ipv4_filter {
	__be32	src_ip;
	__be32	dst_ip;
};

struct ib_flow_spec_ipv4 {
	enum ib_flow_spec_type		type;
	u16				size;
	struct ib_flow_ipv4_filter	val;
	struct ib_flow_ipv4_filter	mask;
};

struct ib_flow_tcp_udp_filter {
	__be16	dst_port;
	__be16	src_port;
};

struct ib_flow_spec_tcp_udp {
	enum ib_flow_spec_type		type;
	u16				size;
	struct ib_flow_tcp_udp_filter	val;
	struct ib_flow_tcp_udp_filter	mask;
};

union ib_flow_spec {
	struct {
		enum ib_flow_spec_type	type;
		u16			size;
	};
	struct ib_flow_spec_eth		eth;
	struct ib_flow_spec_ib		ib;
	struct ib_flow_spec_ipv4	ipv4;
	struct ib_flow_spec_tcp_udp	tcp_udp;
};

struct ib_flow_attr {
	enum ib_flow_attr_type type;
	u16	     size;
	u16	     priority;
	u32	     flags;
	u8	     num_of_specs;
	u8	     port;
	/* num_of_specs optional layer specs follow this header back to
	 * back, each a struct ib_flow_spec_xxx of one of the types above,
	 * as requested by the user (see the sketch after struct ib_flow
	 * below).
	 */
};

struct ib_flow {
	struct ib_qp	       *qp;
	struct ib_uobject      *uobject;
};
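
/*
 * Illustrative sketch: a consumer builds an IB_FLOW_ATTR_NORMAL rule with
 * a single Ethernet spec appended directly after the ib_flow_attr header
 * and attaches it with ib_create_flow(), which is assumed from further
 * down in the verbs API.  qp is a hypothetical existing queue pair.
 *
 *	struct {
 *		struct ib_flow_attr	attr;
 *		struct ib_flow_spec_eth	eth;
 *	} fr = {
 *		.attr = {
 *			.type	      = IB_FLOW_ATTR_NORMAL,
 *			.size	      = sizeof(fr),
 *			.num_of_specs = 1,
 *			.port	      = 1,
 *		},
 *		.eth = {
 *			.type		 = IB_FLOW_SPEC_ETH,
 *			.size		 = sizeof(fr.eth),
 *			.val.ether_type	 = cpu_to_be16(ETH_P_IP),
 *			.mask.ether_type = cpu_to_be16(0xffff),
 *		},
 *	};
 *	struct ib_flow *flow = ib_create_flow(qp, &fr.attr, IB_FLOW_DOMAIN_USER);
 */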

struct ib_mad_hdr;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
	rwlock_t                lock;
	struct ib_event_handler event_handler;
	struct ib_pkey_cache  **pkey_cache;
	struct ib_gid_table   **gid_cache;
	u8                     *lmc_cache;
};

struct ib_dma_mapping_ops {
	int		(*mapping_error)(struct ib_device *dev,
					 u64 dma_addr);
	u64		(*map_single)(struct ib_device *dev,
				      void *ptr, size_t size,
				      enum dma_data_direction direction);
	void		(*unmap_single)(struct ib_device *dev,
					u64 addr, size_t size,
					enum dma_data_direction direction);
	u64		(*map_page)(struct ib_device *dev,
				    struct page *page, unsigned long offset,
				    size_t size,
				    enum dma_data_direction direction);
	void		(*unmap_page)(struct ib_device *dev,
				      u64 addr, size_t size,
				      enum dma_data_direction direction);
	int		(*map_sg)(struct ib_device *dev,
				  struct scatterlist *sg, int nents,
				  enum dma_data_direction direction);
	void		(*unmap_sg)(struct ib_device *dev,
				    struct scatterlist *sg, int nents,
				    enum dma_data_direction direction);
	void		(*sync_single_for_cpu)(struct ib_device *dev,
					       u64 dma_handle,
					       size_t size,
					       enum dma_data_direction dir);
	void		(*sync_single_for_device)(struct ib_device *dev,
						  u64 dma_handle,
						  size_t size,
						  enum dma_data_direction dir);
	void	       *(*alloc_coherent)(struct ib_device *dev,
					  size_t size,
					  u64 *dma_handle,
					  gfp_t flag);
	void		(*free_coherent)(struct ib_device *dev,
					 size_t size, void *cpu_addr,
					 u64 dma_handle);
};
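
/*
 * Sketch of the dispatch pattern used by the ib_dma_* wrappers defined
 * later in this header (shown here for orientation, not as new API):
 * when a device installs dma_ops the wrapper calls through it, otherwise
 * it falls back to the generic DMA API on dev->dma_device.
 *
 *	static inline u64 ib_dma_map_single(struct ib_device *dev,
 *					    void *cpu_addr, size_t size,
 *					    enum dma_data_direction direction)
 *	{
 *		if (dev->dma_ops)
 *			return dev->dma_ops->map_single(dev, cpu_addr,
 *							size, direction);
 *		return dma_map_single(dev->dma_device, cpu_addr,
 *				      size, direction);
 *	}
 */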

struct iw_cm_verbs;

struct ib_port_immutable {
	int			pkey_tbl_len;
	int			gid_tbl_len;
	u32			core_cap_flags;
	u32			max_mad_size;
};

struct ib_device {
	struct device	       *dma_device;

	char			name[IB_DEVICE_NAME_MAX];

	struct list_head	event_handler_list;
	spinlock_t		event_handler_lock;

	spinlock_t		client_data_lock;
	struct list_head	core_list;
	/* Access to the client_data_list is protected by the client_data_lock
	 * spinlock and the lists_rwsem read-write semaphore */
	struct list_head	client_data_list;

	struct ib_cache		cache;
	/**
	 * port_immutable is indexed by port number
	 */
	struct ib_port_immutable *port_immutable;

	int			num_comp_vectors;

	struct iw_cm_verbs     *iwcm;

	int		      (*get_protocol_stats)(struct ib_device *device,
						    union rdma_protocol_stats *stats);
	int		      (*query_device)(struct ib_device *device,
					      struct ib_device_attr *device_attr,
					      struct ib_udata *udata);
	int		      (*query_port)(struct ib_device *device,
					    u8 port_num,
					    struct ib_port_attr *port_attr);
	enum rdma_link_layer  (*get_link_layer)(struct ib_device *device,
						u8 port_num);
	/* When calling get_netdev, the HW vendor's driver should return the
	 * net device of device @device at port @port_num or NULL if such
	 * a net device doesn't exist. The vendor driver should call dev_hold
	 * on this net device. The HW vendor's device driver must guarantee
	 * that this function returns NULL before the net device reaches
	 * NETDEV_UNREGISTER_FINAL state.
	 */
	struct net_device    *(*get_netdev)(struct ib_device *device,
					    u8 port_num);
	int		      (*query_gid)(struct ib_device *device,
					   u8 port_num, int index,
					   union ib_gid *gid);
	/* When calling add_gid, the HW vendor's driver should add the gid
	 * of device @device at gid index @index of port @port_num to be
	 * @gid. Meta-info of that gid (for example, the network device
	 * related to this gid) is available at @attr. @context allows the
	 * HW vendor driver to store extra information together with a GID
	 * entry. The HW vendor may allocate memory to contain this
	 * information and store it in @context when a new GID entry is
	 * written. Params are consistent until the next call of add_gid
	 * or delete_gid. The function should return 0 on success or error
	 * otherwise. The function could be called concurrently for
	 * different ports. This function is only called when
	 * roce_gid_table is used.
	 */
	int		      (*add_gid)(struct ib_device *device,
					 u8 port_num,
					 unsigned int index,
					 const union ib_gid *gid,
					 const struct ib_gid_attr *attr,
					 void **context);
	/* When calling del_gid, the HW vendor's driver should delete the
	 * gid of device @device at gid index @index of port @port_num.
	 * Upon the deletion of a GID entry, the HW vendor must free any
	 * allocated memory. The caller will clear @context afterwards.
	 * This function is only called when roce_gid_table is used.
	 */
	int		      (*del_gid)(struct ib_device *device,
					 u8 port_num,
					 unsigned int index,
					 void **context);
	int		      (*query_pkey)(struct ib_device *device,
					    u8 port_num, u16 index, u16 *pkey);
	int		      (*modify_device)(struct ib_device *device,
					       int device_modify_mask,
					       struct ib_device_modify *device_modify);
	int		      (*modify_port)(struct ib_device *device,
					     u8 port_num, int port_modify_mask,
					     struct ib_port_modify *port_modify);
	struct ib_ucontext   *(*alloc_ucontext)(struct ib_device *device,
						struct ib_udata *udata);
	int		      (*dealloc_ucontext)(struct ib_ucontext *context);
	int		      (*mmap)(struct ib_ucontext *context,
				      struct vm_area_struct *vma);
	struct ib_pd	     *(*alloc_pd)(struct ib_device *device,
					  struct ib_ucontext *context,
					  struct ib_udata *udata);
	int		      (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah	     *(*create_ah)(struct ib_pd *pd,
					   struct ib_ah_attr *ah_attr);
	int		      (*modify_ah)(struct ib_ah *ah,
					   struct ib_ah_attr *ah_attr);
	int		      (*query_ah)(struct ib_ah *ah,
					  struct ib_ah_attr *ah_attr);
	int		      (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq	     *(*create_srq)(struct ib_pd *pd,
					    struct ib_srq_init_attr *srq_init_attr,
					    struct ib_udata *udata);
	int		      (*modify_srq)(struct ib_srq *srq,
					    struct ib_srq_attr *srq_attr,
					    enum ib_srq_attr_mask srq_attr_mask,
					    struct ib_udata *udata);
	int		      (*query_srq)(struct ib_srq *srq,
					   struct ib_srq_attr *srq_attr);
	int		      (*destroy_srq)(struct ib_srq *srq);
	int		      (*post_srq_recv)(struct ib_srq *srq,
					       struct ib_recv_wr *recv_wr,
					       struct ib_recv_wr **bad_recv_wr);
	struct ib_qp	     *(*create_qp)(struct ib_pd *pd,
					   struct ib_qp_init_attr *qp_init_attr,
					   struct ib_udata *udata);
	int		      (*modify_qp)(struct ib_qp *qp,
					   struct ib_qp_attr *qp_attr,
					   int qp_attr_mask,
					   struct ib_udata *udata);
	int		      (*query_qp)(struct ib_qp *qp,
					  struct ib_qp_attr *qp_attr,
					  int qp_attr_mask,
					  struct ib_qp_init_attr *qp_init_attr);
	int		      (*destroy_qp)(struct ib_qp *qp);
	int		      (*post_send)(struct ib_qp *qp,
					   struct ib_send_wr *send_wr,
					   struct ib_send_wr **bad_send_wr);
	int		      (*post_recv)(struct ib_qp *qp,
					   struct ib_recv_wr *recv_wr,
					   struct ib_recv_wr **bad_recv_wr);
	struct ib_cq	     *(*create_cq)(struct ib_device *device,
					   const struct ib_cq_init_attr *attr,
					   struct ib_ucontext *context,
					   struct ib_udata *udata);
	int		      (*modify_cq)(struct ib_cq *cq, u16 cq_count,
					   u16 cq_period);
	int		      (*destroy_cq)(struct ib_cq *cq);
	int		      (*resize_cq)(struct ib_cq *cq, int cqe,
					   struct ib_udata *udata);
	int		      (*poll_cq)(struct ib_cq *cq, int num_entries,
					 struct ib_wc *wc);
	int		      (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int		      (*req_notify_cq)(struct ib_cq *cq,
					       enum ib_cq_notify_flags flags);
	int		      (*req_ncomp_notif)(struct ib_cq *cq,
						 int wc_cnt);
	struct ib_mr	     *(*get_dma_mr)(struct ib_pd *pd,
					    int mr_access_flags);
	struct ib_mr	     *(*reg_phys_mr)(struct ib_pd *pd,
					     struct ib_phys_buf *phys_buf_array,
					     int num_phys_buf,
					     int mr_access_flags,
					     u64 *iova_start);
	struct ib_mr	     *(*reg_user_mr)(struct ib_pd *pd,
					     u64 start, u64 length,
					     u64 virt_addr,
					     int mr_access_flags,
					     struct ib_udata *udata);
	int		      (*rereg_user_mr)(struct ib_mr *mr,
					       int flags,
					       u64 start, u64 length,
					       u64 virt_addr,
					       int mr_access_flags,
					       struct ib_pd *pd,
					       struct ib_udata *udata);
	int		      (*query_mr)(struct ib_mr *mr,
					  struct ib_mr_attr *mr_attr);
	int		      (*dereg_mr)(struct ib_mr *mr);
	struct ib_mr	     *(*alloc_mr)(struct ib_pd *pd,
					  enum ib_mr_type mr_type,
					  u32 max_num_sg);
	int		      (*map_mr_sg)(struct ib_mr *mr,
					   struct scatterlist *sg,
					   int sg_nents);
	int		      (*rereg_phys_mr)(struct ib_mr *mr,
					       int mr_rereg_mask,
					       struct ib_pd *pd,
					       struct ib_phys_buf *phys_buf_array,
					       int num_phys_buf,
					       int mr_access_flags,
					       u64 *iova_start);
	struct ib_mw	     *(*alloc_mw)(struct ib_pd *pd,
					  enum ib_mw_type type);
	int		      (*bind_mw)(struct ib_qp *qp,
					 struct ib_mw *mw,
					 struct ib_mw_bind *mw_bind);
	int		      (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr	     *(*alloc_fmr)(struct ib_pd *pd,
					   int mr_access_flags,
					   struct ib_fmr_attr *fmr_attr);
	int		      (*map_phys_fmr)(struct ib_fmr *fmr,
					      u64 *page_list, int list_len,
					      u64 iova);
	int		      (*unmap_fmr)(struct list_head *fmr_list);
	int		      (*dealloc_fmr)(struct ib_fmr *fmr);
	int		      (*attach_mcast)(struct ib_qp *qp,
					      union ib_gid *gid,
					      u16 lid);
	int		      (*detach_mcast)(struct ib_qp *qp,
					      union ib_gid *gid,
					      u16 lid);
	int		      (*process_mad)(struct ib_device *device,
					     int process_mad_flags,
					     u8 port_num,
					     const struct ib_wc *in_wc,
					     const struct ib_grh *in_grh,
					     const struct ib_mad_hdr *in_mad,
					     size_t in_mad_size,
					     struct ib_mad_hdr *out_mad,
					     size_t *out_mad_size,
					     u16 *out_mad_pkey_index);
	struct ib_xrcd	     *(*alloc_xrcd)(struct ib_device *device,
					    struct ib_ucontext *ucontext,
					    struct ib_udata *udata);
	int		      (*dealloc_xrcd)(struct ib_xrcd *xrcd);
	struct ib_flow	     *(*create_flow)(struct ib_qp *qp,
					     struct ib_flow_attr *flow_attr,
					     int domain);
	int		      (*destroy_flow)(struct ib_flow *flow_id);
	int		      (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
						 struct ib_mr_status *mr_status);
	void		      (*disassociate_ucontext)(struct ib_ucontext *ibcontext);

	struct ib_dma_mapping_ops   *dma_ops;

	struct module	       *owner;
	struct device		dev;
	struct kobject	       *ports_parent;
	struct list_head	port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	} reg_state;

	int			uverbs_abi_ver;
	u64			uverbs_cmd_mask;
	u64			uverbs_ex_cmd_mask;

	char			node_desc[64];
	__be64			node_guid;
	u32			local_dma_lkey;
	u16			is_switch:1;
	u8			node_type;
	u8			phys_port_cnt;

	/**
	 * The following mandatory functions are used only at device
	 * registration.  Keep functions such as these at the end of this
	 * structure to avoid cache line misses when accessing struct ib_device
	 * in fast paths.
	 */
	int		      (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
};

struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *, void *client_data);

	/* Returns the net_dev belonging to this ib_client and matching the
	 * given parameters.
	 * @dev:	 An RDMA device that the net_dev uses for communication.
	 * @port:	 A physical port number on the RDMA device.
	 * @pkey:	 P_Key that the net_dev uses if applicable.
	 * @gid:	 A GID that the net_dev uses to communicate.
	 * @addr:	 An IP address the net_dev is configured with.
	 * @client_data: The device's client data set by ib_set_client_data().
	 *
	 * An ib_client that implements a net_dev on top of RDMA devices
	 * (such as IP over IB) should implement this callback, allowing the
	 * rdma_cm module to find the right net_dev for a given request.
	 *
	 * The caller is responsible for calling dev_put on the returned
	 * netdev.
	 */
	struct net_device *(*get_net_dev_by_params)(
			struct ib_device *dev,
			u8 port,
			u16 pkey,
			const union ib_gid *gid,
			const struct sockaddr *addr,
			void *client_data);
	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *));
void ib_unregister_device(struct ib_device *device);

int ib_register_client(struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data);
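
/*
 * Minimal registration sketch (all my_* names are hypothetical): a module
 * that wants to be told about every RDMA device registers an ib_client
 * and keeps per-device state via the client data hooks.
 *
 *	static struct ib_client my_client;
 *
 *	static void my_add(struct ib_device *device)
 *	{
 *		struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *		ib_set_client_data(device, &my_client, st);
 *	}
 *
 *	static void my_remove(struct ib_device *device, void *client_data)
 *	{
 *		kfree(client_data);
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name	= "my_client",
 *		.add	= my_add,
 *		.remove	= my_remove,
 *	};
 *
 *	ret = ib_register_client(&my_client);
 */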

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
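
/*
 * Illustrative sketch: a driver verb copies its private ABI structures
 * through the udata buffers.  struct my_cmd and struct my_resp are
 * hypothetical driver-specific structures.
 *
 *	struct my_cmd cmd;
 *	struct my_resp resp = {};
 *	int ret;
 *
 *	ret = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
 *	if (ret)
 *		return ret;
 *	...
 *	ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
 *	if (ret)
 *		return ret;
 */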

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 * @ll: link layer of port
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll);
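
/*
 * Illustrative sketch of the intended call site (the driver-side variable
 * names are hypothetical): a driver's modify_qp method validates the
 * consumer's mask before programming hardware.
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				attr_mask,
 *				rdma_port_get_link_layer(ibqp->device, port)))
 *		return -EINVAL;
 */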

int ib_register_event_handler(struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					      u8 port_num);

/**
 * rdma_cap_ib_switch - Check if the device is an IB switch
 * @device: Device to check
 *
 * The device driver is responsible for setting the is_switch bit in the
 * ib_device structure at init time.
 *
 * Return: true if the device is an IB switch.
 */
static inline bool rdma_cap_ib_switch(const struct ib_device *device)
{
	return device->is_switch;
}

/**
 * rdma_start_port - Return the first valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return start port number
 */
static inline u8 rdma_start_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : 1;
}

/**
 * rdma_end_port - Return the last valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return last port number
 */
static inline u8 rdma_end_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
}
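
/*
 * Illustrative sketch: these helpers make port iteration uniform across
 * HCAs (ports 1..phys_port_cnt) and switches (the single port 0).
 * do_something_with_port() is a hypothetical helper.
 *
 *	u8 port;
 *
 *	for (port = rdma_start_port(device);
 *	     port <= rdma_end_port(device); port++)
 *		do_something_with_port(device, port);
 */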

static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
}

static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
}

static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
}

static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
		(RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE);
}

/**
 * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Management Datagrams (MAD) are a required part of the InfiniBand
 * specification and are supported on all InfiniBand devices.  A slightly
 * extended version is also supported on OPA interfaces.
 *
 * Return: true if the port supports sending/receiving of MAD packets.
 */
static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
}

/**
 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Intel OmniPath devices extend and/or replace the InfiniBand Management
 * datagrams with their own versions.  These OPA MADs share many but not all
 * of the characteristics of InfiniBand MADs.
 *
 * OPA MADs differ in the following ways:
 *
 *    1) MADs are variable size up to 2K
 *       IBTA defined MADs remain fixed at 256 bytes
 *    2) OPA SMPs must carry valid PKeys
 *    3) OPA SMP packets are a different format
 *
 * Return: true if the port supports OPA MAD packet formats.
 */
static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
{
	return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
		== RDMA_CORE_CAP_OPA_MAD;
}

/**
 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Each InfiniBand node is required to provide a Subnet Management Agent
 * that the subnet manager can access.  Prior to the fabric being fully
 * configured by the subnet manager, the SMA is accessed via a well known
 * interface called the Subnet Management Interface (SMI).  This interface
 * uses directed route packets to communicate with the SM to get around the
 * chicken and egg problem of the SM needing to know what's on the fabric
 * in order to configure the fabric, and needing to configure the fabric in
 * order to send packets to the devices on the fabric.  These directed
 * route packets do not need the fabric fully configured in order to reach
 * their destination.  The SMI is the only method allowed to send directed
 * route packets on an InfiniBand fabric.
 *
 * Return: true if the port provides an SMI.
 */
static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
}

/**
 * rdma_cap_ib_cm - Check if the port of device has the capability InfiniBand
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * The InfiniBand Communication Manager is one of many pre-defined General
 * Service Agents (GSA) that are accessed via the General Service
 * Interface (GSI).  Its role is to facilitate establishment of connections
 * between nodes as well as other management related tasks for established
 * connections.
 *
 * Return: true if the port supports an IB CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
}

/**
 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Similar to above, but specific to iWARP connections which have a different
 * management protocol than InfiniBand.
 *
 * Return: true if the port supports an iWARP CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
}

/**
 * rdma_cap_ib_sa - Check if the port of device has the capability InfiniBand
 * Subnet Administration.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
 * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
 * fabrics, devices should resolve routes to other hosts by contacting the
 * SA to query the proper route.
 *
 * Return: true if the port should act as a client to the fabric Subnet
 * Administration interface.  This does not imply that the SA service is
 * running locally.
 */
static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
}

/**
 * rdma_cap_ib_mcast - Check if the port of device has the capability InfiniBand
 * Multicast.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand multicast registration is more complex than normal IPv4 or
 * IPv6 multicast registration.  Each Host Channel Adapter must register
 * with the Subnet Manager when it wishes to join a multicast group.  It
 * should do so only once regardless of how many queue pairs it subscribes
 * to this group.  And it should leave the group only after all queue pairs
 * attached to the group have been detached.
 *
 * Return: true if the port must undertake the additional administrative
 * overhead of registering/unregistering with the SM and tracking of the
 * total number of queue pairs attached to the multicast group.
 */
static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
{
	return rdma_cap_ib_sa(device, port_num);
}

/**
 * rdma_cap_af_ib - Check if the port of device has the capability
 * Native InfiniBand Address.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
 * GID.  RoCE uses a different mechanism, but still generates a GID via
 * a prescribed mechanism and port-specific data.
 *
 * Return: true if the port uses a GID address to identify devices on the
 * network.
 */
static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
}

/**
 * rdma_cap_eth_ah - Check if the port of device has the capability
 * Ethernet Address Handle.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE is InfiniBand over Ethernet, and it uses a well-defined technique
 * to fabricate GIDs over Ethernet/IP-specific addresses native to the
 * port.  Normally, packet headers are generated by the sending host
 * adapter, but when sending connectionless datagrams, we must manually
 * inject the proper headers for the fabric we are communicating over.
 *
 * Return: true if we are running as a RoCE port and must force the
 * addition of a Global Route Header built from our Ethernet Address
 * Handle into our header list for connectionless packets.
 */
static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
}

/**
 * rdma_max_mad_size - Return the max MAD size required by this RDMA port.
 *
 * @device: Device
 * @port_num: Port number
 *
 * This MAD size includes the MAD headers and MAD payload.  No other headers
 * are included.
 *
 * Return the max MAD size required by the port.  Will return 0 if the port
 * does not support MADs.
 */
static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].max_mad_size;
}

/**
 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE GID table mechanism manages the various GIDs for a device.
 *
 * NOTE: if allocating the port's GID table has failed, this call will still
 * return true, but any RoCE GID table API will fail.
 *
 * Return: true if the port uses RoCE GID table mechanism in order to manage
 * its GIDs.
 */
static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
					   u8 port_num)
{
	return rdma_protocol_roce(device, port_num) &&
		device->add_gid && device->del_gid;
}
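
/*
 * Illustrative sketch: core code gates the RoCE GID table machinery on
 * this capability and only then drives the device's add_gid/del_gid
 * hooks.  update_gid_table() is a hypothetical stand-in for the real
 * cache maintenance path.
 *
 *	if (rdma_cap_roce_gid_table(device, port))
 *		update_gid_table(device, port);
 */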

int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid,
		 struct ib_gid_attr *attr);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		struct net_device *ndev, u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);

struct ib_pd *ib_alloc_pd(struct ib_device *device);

void ib_dealloc_pd(struct ib_pd *pd);
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2257 | |
| 2258 | /** |
| 2259 | * ib_create_ah - Creates an address handle for the given address vector. |
| 2260 | * @pd: The protection domain associated with the address handle. |
| 2261 | * @ah_attr: The attributes of the address vector. |
| 2262 | * |
| 2263 | * The address handle is used to reference a local or global destination |
| 2264 | * in all UD QP post sends. |
| 2265 | */ |
| 2266 | struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); |
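
/*
 * Illustrative sketch (hypothetical helper): fill a minimal ib_ah_attr
 * for a LID-routed destination and create the address handle.  The
 * dlid/sl values are placeholders supplied by the caller.
 */
static inline struct ib_ah *example_create_ah(struct ib_pd *pd, u16 dlid,
					      u8 sl, u8 port_num)
{
	struct ib_ah_attr ah_attr;

	memset(&ah_attr, 0, sizeof(ah_attr));
	ah_attr.dlid	 = dlid;	/* destination LID */
	ah_attr.sl	 = sl;		/* service level */
	ah_attr.port_num = port_num;	/* local port to send from */

	return ib_create_ah(pd, &ah_attr);	/* ERR_PTR() on failure */
}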
| 2267 | |
| 2268 | /** |
Sean Hefty | 4e00d69 | 2006-06-17 20:37:39 -0700 | [diff] [blame] | 2269 | * ib_init_ah_from_wc - Initializes address handle attributes from a |
| 2270 | * work completion. |
| 2271 | * @device: Device on which the received message arrived. |
| 2272 | * @port_num: Port on which the received message arrived. |
| 2273 | * @wc: Work completion associated with the received message. |
| 2274 | * @grh: References the received global route header. This parameter is |
| 2275 | * ignored unless the work completion indicates that the GRH is valid. |
| 2276 | * @ah_attr: Returned attributes that can be used when creating an address |
| 2277 | * handle for replying to the message. |
| 2278 | */ |
Ira Weiny | 73cdaae | 2015-05-31 17:15:31 -0400 | [diff] [blame] | 2279 | int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, |
| 2280 | const struct ib_wc *wc, const struct ib_grh *grh, |
| 2281 | struct ib_ah_attr *ah_attr); |
Sean Hefty | 4e00d69 | 2006-06-17 20:37:39 -0700 | [diff] [blame] | 2282 | |
| 2283 | /** |
Hal Rosenstock | 513789e | 2005-07-27 11:45:34 -0700 | [diff] [blame] | 2284 | * ib_create_ah_from_wc - Creates an address handle associated with the |
| 2285 | * sender of the specified work completion. |
| 2286 | * @pd: The protection domain associated with the address handle. |
| 2287 | * @wc: Work completion information associated with a received message. |
| 2288 | * @grh: References the received global route header. This parameter is |
| 2289 | * ignored unless the work completion indicates that the GRH is valid. |
| 2290 | * @port_num: The outbound port number to associate with the address. |
| 2291 | * |
| 2292 | * The address handle is used to reference a local or global destination |
| 2293 | * in all UD QP post sends. |
| 2294 | */ |
Ira Weiny | 73cdaae | 2015-05-31 17:15:31 -0400 | [diff] [blame] | 2295 | struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, |
| 2296 | const struct ib_grh *grh, u8 port_num); |
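
/*
 * Illustrative sketch (hypothetical helper): in a UD receive path,
 * create an AH addressing the sender so a reply can be posted.
 * Assumes @buf points at the head of the receive buffer, where the
 * GRH (if any) was placed; it is ignored unless IB_WC_GRH is set.
 */
static inline struct ib_ah *example_reply_ah(struct ib_pd *pd,
					     const struct ib_wc *wc,
					     void *buf, u8 port_num)
{
	const struct ib_grh *grh = buf;

	return ib_create_ah_from_wc(pd, wc, grh, port_num);
}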
Hal Rosenstock | 513789e | 2005-07-27 11:45:34 -0700 | [diff] [blame] | 2297 | |
| 2298 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2299 | * ib_modify_ah - Modifies the address vector associated with an address |
| 2300 | * handle. |
| 2301 | * @ah: The address handle to modify. |
| 2302 | * @ah_attr: The new address vector attributes to associate with the |
| 2303 | * address handle. |
| 2304 | */ |
| 2305 | int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); |
| 2306 | |
| 2307 | /** |
| 2308 | * ib_query_ah - Queries the address vector associated with an address |
| 2309 | * handle. |
| 2310 | * @ah: The address handle to query. |
| 2311 | * @ah_attr: The address vector attributes associated with the address |
| 2312 | * handle. |
| 2313 | */ |
| 2314 | int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); |
| 2315 | |
| 2316 | /** |
| 2317 | * ib_destroy_ah - Destroys an address handle. |
| 2318 | * @ah: The address handle to destroy. |
| 2319 | */ |
| 2320 | int ib_destroy_ah(struct ib_ah *ah); |
| 2321 | |
| 2322 | /** |
Roland Dreier | d41fcc6 | 2005-08-18 12:23:08 -0700 | [diff] [blame] | 2323 |  * ib_create_srq - Creates an SRQ associated with the specified protection |
| 2324 | * domain. |
| 2325 | * @pd: The protection domain associated with the SRQ. |
Dotan Barak | abb6e9b | 2006-02-23 12:13:51 -0800 | [diff] [blame] | 2326 | * @srq_init_attr: A list of initial attributes required to create the |
| 2327 | * SRQ. If SRQ creation succeeds, then the attributes are updated to |
| 2328 | * the actual capabilities of the created SRQ. |
Roland Dreier | d41fcc6 | 2005-08-18 12:23:08 -0700 | [diff] [blame] | 2329 | * |
| 2330 |  * srq_attr->max_wr and srq_attr->max_sge are read to determine the |
| 2331 | * requested size of the SRQ, and set to the actual values allocated |
| 2332 | * on return. If ib_create_srq() succeeds, then max_wr and max_sge |
| 2333 | * will always be at least as large as the requested values. |
| 2334 | */ |
| 2335 | struct ib_srq *ib_create_srq(struct ib_pd *pd, |
| 2336 | struct ib_srq_init_attr *srq_init_attr); |
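
/*
 * Illustrative sketch (hypothetical helper): request an SRQ with
 * placeholder capacities.  On success the attr fields are updated to
 * what was actually allocated, which may exceed the request.
 */
static inline struct ib_srq *example_create_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr srq_init_attr;

	memset(&srq_init_attr, 0, sizeof(srq_init_attr));
	srq_init_attr.attr.max_wr  = 128;	/* placeholder queue depth */
	srq_init_attr.attr.max_sge = 1;		/* placeholder SGEs per WR */

	return ib_create_srq(pd, &srq_init_attr);	/* ERR_PTR() on failure */
}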
| 2337 | |
| 2338 | /** |
| 2339 | * ib_modify_srq - Modifies the attributes for the specified SRQ. |
| 2340 | * @srq: The SRQ to modify. |
| 2341 | * @srq_attr: On input, specifies the SRQ attributes to modify. On output, |
| 2342 | * the current values of selected SRQ attributes are returned. |
| 2343 | * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ |
| 2344 | * are being modified. |
| 2345 | * |
| 2346 | * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or |
| 2347 | * IB_SRQ_LIMIT to set the SRQ's limit and request notification when |
| 2348 | * the number of receives queued drops below the limit. |
| 2349 | */ |
| 2350 | int ib_modify_srq(struct ib_srq *srq, |
| 2351 | struct ib_srq_attr *srq_attr, |
| 2352 | enum ib_srq_attr_mask srq_attr_mask); |
| 2353 | |
| 2354 | /** |
| 2355 | * ib_query_srq - Returns the attribute list and current values for the |
| 2356 | * specified SRQ. |
| 2357 | * @srq: The SRQ to query. |
| 2358 | * @srq_attr: The attributes of the specified SRQ. |
| 2359 | */ |
| 2360 | int ib_query_srq(struct ib_srq *srq, |
| 2361 | struct ib_srq_attr *srq_attr); |
| 2362 | |
| 2363 | /** |
| 2364 | * ib_destroy_srq - Destroys the specified SRQ. |
| 2365 | * @srq: The SRQ to destroy. |
| 2366 | */ |
| 2367 | int ib_destroy_srq(struct ib_srq *srq); |
| 2368 | |
| 2369 | /** |
| 2370 | * ib_post_srq_recv - Posts a list of work requests to the specified SRQ. |
| 2371 | * @srq: The SRQ to post the work request on. |
| 2372 | * @recv_wr: A list of work requests to post on the receive queue. |
| 2373 | * @bad_recv_wr: On an immediate failure, this parameter will reference |
| 2374 |  *   the work request that failed to be posted on the SRQ. |
| 2375 | */ |
| 2376 | static inline int ib_post_srq_recv(struct ib_srq *srq, |
| 2377 | struct ib_recv_wr *recv_wr, |
| 2378 | struct ib_recv_wr **bad_recv_wr) |
| 2379 | { |
| 2380 | return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr); |
| 2381 | } |
| 2382 | |
| 2383 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2384 | * ib_create_qp - Creates a QP associated with the specified protection |
| 2385 | * domain. |
| 2386 | * @pd: The protection domain associated with the QP. |
Dotan Barak | abb6e9b | 2006-02-23 12:13:51 -0800 | [diff] [blame] | 2387 | * @qp_init_attr: A list of initial attributes required to create the |
| 2388 | * QP. If QP creation succeeds, then the attributes are updated to |
| 2389 | * the actual capabilities of the created QP. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2390 | */ |
| 2391 | struct ib_qp *ib_create_qp(struct ib_pd *pd, |
| 2392 | struct ib_qp_init_attr *qp_init_attr); |
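
/*
 * Illustrative sketch (hypothetical helper): create an RC QP with
 * placeholder capacities; send_cq and recv_cq must already exist.
 */
static inline struct ib_qp *example_create_rc_qp(struct ib_pd *pd,
						 struct ib_cq *send_cq,
						 struct ib_cq *recv_cq)
{
	struct ib_qp_init_attr qp_init_attr;

	memset(&qp_init_attr, 0, sizeof(qp_init_attr));
	qp_init_attr.send_cq	      = send_cq;
	qp_init_attr.recv_cq	      = recv_cq;
	qp_init_attr.qp_type	      = IB_QPT_RC;
	qp_init_attr.sq_sig_type      = IB_SIGNAL_REQ_WR;
	qp_init_attr.cap.max_send_wr  = 64;	/* placeholder */
	qp_init_attr.cap.max_recv_wr  = 64;	/* placeholder */
	qp_init_attr.cap.max_send_sge = 1;
	qp_init_attr.cap.max_recv_sge = 1;

	return ib_create_qp(pd, &qp_init_attr);	/* ERR_PTR() on failure */
}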
| 2393 | |
| 2394 | /** |
| 2395 | * ib_modify_qp - Modifies the attributes for the specified QP and then |
| 2396 | * transitions the QP to the given state. |
| 2397 | * @qp: The QP to modify. |
| 2398 | * @qp_attr: On input, specifies the QP attributes to modify. On output, |
| 2399 | * the current values of selected QP attributes are returned. |
| 2400 | * @qp_attr_mask: A bit-mask used to specify which attributes of the QP |
| 2401 | * are being modified. |
| 2402 | */ |
| 2403 | int ib_modify_qp(struct ib_qp *qp, |
| 2404 | struct ib_qp_attr *qp_attr, |
| 2405 | int qp_attr_mask); |
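
/*
 * Illustrative sketch (hypothetical helper): the first ib_modify_qp()
 * call when bringing up a new QP, moving it from RESET to INIT.  The
 * pkey index and access flags are placeholders.
 */
static inline int example_qp_to_init(struct ib_qp *qp, u8 port_num)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state	= IB_QPS_INIT;
	qp_attr.pkey_index	= 0;		/* placeholder */
	qp_attr.port_num	= port_num;
	qp_attr.qp_access_flags	= IB_ACCESS_REMOTE_READ |
				  IB_ACCESS_REMOTE_WRITE;

	return ib_modify_qp(qp, &qp_attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}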
| 2406 | |
| 2407 | /** |
| 2408 | * ib_query_qp - Returns the attribute list and current values for the |
| 2409 | * specified QP. |
| 2410 | * @qp: The QP to query. |
| 2411 | * @qp_attr: The attributes of the specified QP. |
| 2412 | * @qp_attr_mask: A bit-mask used to select specific attributes to query. |
| 2413 | * @qp_init_attr: Additional attributes of the selected QP. |
| 2414 | * |
| 2415 | * The qp_attr_mask may be used to limit the query to gathering only the |
| 2416 | * selected attributes. |
| 2417 | */ |
| 2418 | int ib_query_qp(struct ib_qp *qp, |
| 2419 | struct ib_qp_attr *qp_attr, |
| 2420 | int qp_attr_mask, |
| 2421 | struct ib_qp_init_attr *qp_init_attr); |
| 2422 | |
| 2423 | /** |
| 2424 | * ib_destroy_qp - Destroys the specified QP. |
| 2425 | * @qp: The QP to destroy. |
| 2426 | */ |
| 2427 | int ib_destroy_qp(struct ib_qp *qp); |
| 2428 | |
| 2429 | /** |
Sean Hefty | 0e0ec7e | 2011-08-08 15:31:51 -0700 | [diff] [blame] | 2430 | * ib_open_qp - Obtain a reference to an existing sharable QP. |
| 2431 |  * @xrcd: XRC domain |
| 2432 | * @qp_open_attr: Attributes identifying the QP to open. |
| 2433 | * |
| 2434 | * Returns a reference to a sharable QP. |
| 2435 | */ |
| 2436 | struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, |
| 2437 | struct ib_qp_open_attr *qp_open_attr); |
| 2438 | |
| 2439 | /** |
| 2440 | * ib_close_qp - Release an external reference to a QP. |
Sean Hefty | d3d72d9 | 2011-05-26 23:06:44 -0700 | [diff] [blame] | 2441 | * @qp: The QP handle to release |
| 2442 | * |
Sean Hefty | 0e0ec7e | 2011-08-08 15:31:51 -0700 | [diff] [blame] | 2443 | * The opened QP handle is released by the caller. The underlying |
| 2444 | * shared QP is not destroyed until all internal references are released. |
Sean Hefty | d3d72d9 | 2011-05-26 23:06:44 -0700 | [diff] [blame] | 2445 | */ |
Sean Hefty | 0e0ec7e | 2011-08-08 15:31:51 -0700 | [diff] [blame] | 2446 | int ib_close_qp(struct ib_qp *qp); |
Sean Hefty | d3d72d9 | 2011-05-26 23:06:44 -0700 | [diff] [blame] | 2447 | |
| 2448 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2449 | * ib_post_send - Posts a list of work requests to the send queue of |
| 2450 | * the specified QP. |
| 2451 | * @qp: The QP to post the work request on. |
| 2452 | * @send_wr: A list of work requests to post on the send queue. |
| 2453 | * @bad_send_wr: On an immediate failure, this parameter will reference |
| 2454 | * the work request that failed to be posted on the QP. |
Bart Van Assche | 55464d4 | 2009-12-09 14:20:04 -0800 | [diff] [blame] | 2455 | * |
| 2456 |  * While IBA Vol. 1 section 11.4.1.1 specifies that the QP state |
| 2457 |  * shall not be affected if an immediate error is returned, |
| 2458 |  * ib_post_send() will return an immediate error after having |
| 2459 |  * already queued any earlier work requests in the list. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2460 | */ |
| 2461 | static inline int ib_post_send(struct ib_qp *qp, |
| 2462 | struct ib_send_wr *send_wr, |
| 2463 | struct ib_send_wr **bad_send_wr) |
| 2464 | { |
| 2465 | return qp->device->post_send(qp, send_wr, bad_send_wr); |
| 2466 | } |
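
/*
 * Illustrative sketch (hypothetical helper): post a single signaled
 * SEND of one pre-registered, DMA-mapped buffer described by @sge.
 * On failure, bad_wr points at the WR that could not be queued.
 */
static inline int example_post_one_send(struct ib_qp *qp,
					struct ib_sge *sge, u64 wr_id)
{
	struct ib_send_wr wr, *bad_wr;

	memset(&wr, 0, sizeof(wr));
	wr.wr_id      = wr_id;		/* opaque cookie echoed in the wc */
	wr.sg_list    = sge;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(qp, &wr, &bad_wr);
}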
| 2467 | |
| 2468 | /** |
| 2469 | * ib_post_recv - Posts a list of work requests to the receive queue of |
| 2470 | * the specified QP. |
| 2471 | * @qp: The QP to post the work request on. |
| 2472 | * @recv_wr: A list of work requests to post on the receive queue. |
| 2473 | * @bad_recv_wr: On an immediate failure, this parameter will reference |
| 2474 | * the work request that failed to be posted on the QP. |
| 2475 | */ |
| 2476 | static inline int ib_post_recv(struct ib_qp *qp, |
| 2477 | struct ib_recv_wr *recv_wr, |
| 2478 | struct ib_recv_wr **bad_recv_wr) |
| 2479 | { |
| 2480 | return qp->device->post_recv(qp, recv_wr, bad_recv_wr); |
| 2481 | } |
| 2482 | |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame^] | 2483 | struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private, |
| 2484 | int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx); |
| 2485 | void ib_free_cq(struct ib_cq *cq); |
| 2486 | int ib_process_cq_direct(struct ib_cq *cq, int budget); |
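
/*
 * Illustrative sketch of the ib_alloc_cq() interface: the consumer
 * embeds a struct ib_cqe in its per-request context and receives the
 * completion through the cqe's done callback instead of demultiplexing
 * wr_id values by hand.  example_req/example_done are hypothetical.
 */
struct example_req {
	struct ib_cqe	cqe;
	/* ... per-request state ... */
};

static void example_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct example_req *req = container_of(wc->wr_cqe,
					       struct example_req, cqe);

	/* complete @req; always check wc->status first */
	(void)req;
}

/*
 * Set req->cqe.done = example_done and post WRs with
 * wr.wr_cqe = &req->cqe.  Completions are then processed from softirq
 * context for a CQ allocated like this:
 *
 *	cq = ib_alloc_cq(dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
 */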
| 2487 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2488 | /** |
| 2489 | * ib_create_cq - Creates a CQ on the specified device. |
| 2490 | * @device: The device on which to create the CQ. |
| 2491 | * @comp_handler: A user-specified callback that is invoked when a |
| 2492 | * completion event occurs on the CQ. |
| 2493 | * @event_handler: A user-specified callback that is invoked when an |
| 2494 | * asynchronous event not associated with a completion occurs on the CQ. |
| 2495 | * @cq_context: Context associated with the CQ returned to the user via |
| 2496 | * the associated completion and event handlers. |
Matan Barak | 8e37210 | 2015-06-11 16:35:21 +0300 | [diff] [blame] | 2497 |  * @cq_attr: The attributes with which to create the CQ. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2498 | * |
| 2499 | * Users can examine the cq structure to determine the actual CQ size. |
| 2500 | */ |
| 2501 | struct ib_cq *ib_create_cq(struct ib_device *device, |
| 2502 | ib_comp_handler comp_handler, |
| 2503 | void (*event_handler)(struct ib_event *, void *), |
Matan Barak | 8e37210 | 2015-06-11 16:35:21 +0300 | [diff] [blame] | 2504 | void *cq_context, |
| 2505 | const struct ib_cq_init_attr *cq_attr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2506 | |
| 2507 | /** |
| 2508 | * ib_resize_cq - Modifies the capacity of the CQ. |
| 2509 | * @cq: The CQ to resize. |
| 2510 | * @cqe: The minimum size of the CQ. |
| 2511 | * |
| 2512 | * Users can examine the cq structure to determine the actual CQ size. |
| 2513 | */ |
| 2514 | int ib_resize_cq(struct ib_cq *cq, int cqe); |
| 2515 | |
| 2516 | /** |
Eli Cohen | 2dd5716 | 2008-04-16 21:09:33 -0700 | [diff] [blame] | 2517 |  * ib_modify_cq - Modifies the moderation parameters of the CQ |
| 2518 | * @cq: The CQ to modify. |
| 2519 | * @cq_count: number of CQEs that will trigger an event |
| 2520 | * @cq_period: max period of time in usec before triggering an event |
| 2521 | * |
| 2522 | */ |
| 2523 | int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); |
| 2524 | |
| 2525 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2526 | * ib_destroy_cq - Destroys the specified CQ. |
| 2527 | * @cq: The CQ to destroy. |
| 2528 | */ |
| 2529 | int ib_destroy_cq(struct ib_cq *cq); |
| 2530 | |
| 2531 | /** |
| 2532 | * ib_poll_cq - poll a CQ for completion(s) |
| 2533 |  * @cq: the CQ being polled |
| 2534 |  * @num_entries: maximum number of completions to return |
| 2535 |  * @wc: array of at least @num_entries &struct ib_wc where completions |
| 2536 | * will be returned |
| 2537 | * |
| 2538 | * Poll a CQ for (possibly multiple) completions. If the return value |
| 2539 | * is < 0, an error occurred. If the return value is >= 0, it is the |
| 2540 | * number of completions returned. If the return value is |
| 2541 | * non-negative and < num_entries, then the CQ was emptied. |
| 2542 | */ |
| 2543 | static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, |
| 2544 | struct ib_wc *wc) |
| 2545 | { |
| 2546 | return cq->device->poll_cq(cq, num_entries, wc); |
| 2547 | } |
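
/*
 * Illustrative sketch (hypothetical helper): drain a CQ in fixed-size
 * batches.  A short batch (n < the array size) means the CQ is empty.
 */
static inline int example_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc[16];
	int i, n, total = 0;

	while ((n = ib_poll_cq(cq, 16, wc)) > 0) {
		for (i = 0; i < n; i++) {
			/* handle wc[i]; check wc[i].status first */
		}
		total += n;
		if (n < 16)
			break;		/* CQ emptied */
	}

	return n < 0 ? n : total;	/* negative means a poll error */
}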
| 2548 | |
| 2549 | /** |
| 2550 | * ib_peek_cq - Returns the number of unreaped completions currently |
| 2551 | * on the specified CQ. |
| 2552 | * @cq: The CQ to peek. |
| 2553 | * @wc_cnt: A minimum number of unreaped completions to check for. |
| 2554 | * |
| 2555 | * If the number of unreaped completions is greater than or equal to wc_cnt, |
| 2556 | * this function returns wc_cnt, otherwise, it returns the actual number of |
| 2557 | * unreaped completions. |
| 2558 | */ |
| 2559 | int ib_peek_cq(struct ib_cq *cq, int wc_cnt); |
| 2560 | |
| 2561 | /** |
| 2562 | * ib_req_notify_cq - Request completion notification on a CQ. |
| 2563 | * @cq: The CQ to generate an event for. |
Roland Dreier | ed23a72 | 2007-05-06 21:02:48 -0700 | [diff] [blame] | 2564 | * @flags: |
| 2565 | * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP |
| 2566 | * to request an event on the next solicited event or next work |
| 2567 |  *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS |
| 2568 | * may also be |ed in to request a hint about missed events, as |
| 2569 | * described below. |
| 2570 | * |
| 2571 | * Return Value: |
| 2572 | * < 0 means an error occurred while requesting notification |
| 2573 | * == 0 means notification was requested successfully, and if |
| 2574 | * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events |
| 2575 | * were missed and it is safe to wait for another event. In |
| 2576 |  *   this case it is guaranteed that any work completions added |
| 2577 | * to the CQ since the last CQ poll will trigger a completion |
| 2578 | * notification event. |
| 2579 | * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed |
| 2580 | * in. It means that the consumer must poll the CQ again to |
| 2581 | * make sure it is empty to avoid missing an event because of a |
| 2582 | * race between requesting notification and an entry being |
| 2583 | * added to the CQ. This return value means it is possible |
| 2584 | * (but not guaranteed) that a work completion has been added |
| 2585 | * to the CQ since the last poll without triggering a |
| 2586 | * completion notification event. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2587 | */ |
| 2588 | static inline int ib_req_notify_cq(struct ib_cq *cq, |
Roland Dreier | ed23a72 | 2007-05-06 21:02:48 -0700 | [diff] [blame] | 2589 | enum ib_cq_notify_flags flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2590 | { |
Roland Dreier | ed23a72 | 2007-05-06 21:02:48 -0700 | [diff] [blame] | 2591 | return cq->device->req_notify_cq(cq, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2592 | } |
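
/*
 * Illustrative sketch of the re-arm pattern described above, using the
 * hypothetical example_drain_cq() helper sketched earlier: after
 * emptying the CQ, re-arm with IB_CQ_REPORT_MISSED_EVENTS; a positive
 * return means a completion may have slipped in, so poll again before
 * sleeping.  (Error returns are ignored here for brevity.)
 */
static inline void example_rearm_and_repoll(struct ib_cq *cq)
{
	do {
		example_drain_cq(cq);
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
}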
| 2593 | |
| 2594 | /** |
| 2595 | * ib_req_ncomp_notif - Request completion notification when there are |
| 2596 | * at least the specified number of unreaped completions on the CQ. |
| 2597 | * @cq: The CQ to generate an event for. |
| 2598 | * @wc_cnt: The number of unreaped completions that should be on the |
| 2599 | * CQ before an event is generated. |
| 2600 | */ |
| 2601 | static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) |
| 2602 | { |
| 2603 | return cq->device->req_ncomp_notif ? |
| 2604 | cq->device->req_ncomp_notif(cq, wc_cnt) : |
| 2605 | -ENOSYS; |
| 2606 | } |
| 2607 | |
| 2608 | /** |
| 2609 | * ib_get_dma_mr - Returns a memory region for system memory that is |
| 2610 | * usable for DMA. |
| 2611 | * @pd: The protection domain associated with the memory region. |
| 2612 | * @mr_access_flags: Specifies the memory access rights. |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2613 | * |
| 2614 | * Note that the ib_dma_*() functions defined below must be used |
| 2615 | * to create/destroy addresses used with the Lkey or Rkey returned |
| 2616 | * by ib_get_dma_mr(). |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2617 | */ |
| 2618 | struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags); |
| 2619 | |
| 2620 | /** |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2621 | * ib_dma_mapping_error - check a DMA addr for error |
| 2622 | * @dev: The device for which the dma_addr was created |
| 2623 | * @dma_addr: The DMA address to check |
| 2624 | */ |
| 2625 | static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) |
| 2626 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 2627 | if (dev->dma_ops) |
| 2628 | return dev->dma_ops->mapping_error(dev, dma_addr); |
FUJITA Tomonori | 8d8bb39 | 2008-07-25 19:44:49 -0700 | [diff] [blame] | 2629 | return dma_mapping_error(dev->dma_device, dma_addr); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2630 | } |
| 2631 | |
| 2632 | /** |
| 2633 | * ib_dma_map_single - Map a kernel virtual address to DMA address |
| 2634 | * @dev: The device for which the dma_addr is to be created |
| 2635 | * @cpu_addr: The kernel virtual address |
| 2636 | * @size: The size of the region in bytes |
| 2637 | * @direction: The direction of the DMA |
| 2638 | */ |
| 2639 | static inline u64 ib_dma_map_single(struct ib_device *dev, |
| 2640 | void *cpu_addr, size_t size, |
| 2641 | enum dma_data_direction direction) |
| 2642 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 2643 | if (dev->dma_ops) |
| 2644 | return dev->dma_ops->map_single(dev, cpu_addr, size, direction); |
| 2645 | return dma_map_single(dev->dma_device, cpu_addr, size, direction); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2646 | } |
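
/*
 * Illustrative sketch (hypothetical helper): map a kernel buffer for
 * device reads and validate the mapping.  Every successful mapping
 * must later be balanced by ib_dma_unmap_single() with the same size
 * and direction; 0 is used as the failure sentinel in this sketch.
 */
static inline u64 example_map_buf(struct ib_device *dev, void *buf,
				  size_t len)
{
	u64 addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (ib_dma_mapping_error(dev, addr))
		return 0;

	return addr;
}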
| 2647 | |
| 2648 | /** |
| 2649 | * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single() |
| 2650 | * @dev: The device for which the DMA address was created |
| 2651 | * @addr: The DMA address |
| 2652 | * @size: The size of the region in bytes |
| 2653 | * @direction: The direction of the DMA |
| 2654 | */ |
| 2655 | static inline void ib_dma_unmap_single(struct ib_device *dev, |
| 2656 | u64 addr, size_t size, |
| 2657 | enum dma_data_direction direction) |
| 2658 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 2659 | if (dev->dma_ops) |
| 2660 | dev->dma_ops->unmap_single(dev, addr, size, direction); |
| 2661 | else |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2662 | dma_unmap_single(dev->dma_device, addr, size, direction); |
| 2663 | } |
| 2664 | |
Arthur Kepner | cb9fbc5 | 2008-04-29 01:00:34 -0700 | [diff] [blame] | 2665 | static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, |
| 2666 | void *cpu_addr, size_t size, |
| 2667 | enum dma_data_direction direction, |
| 2668 | struct dma_attrs *attrs) |
| 2669 | { |
| 2670 | return dma_map_single_attrs(dev->dma_device, cpu_addr, size, |
| 2671 | direction, attrs); |
| 2672 | } |
| 2673 | |
| 2674 | static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, |
| 2675 | u64 addr, size_t size, |
| 2676 | enum dma_data_direction direction, |
| 2677 | struct dma_attrs *attrs) |
| 2678 | { |
| 2679 | return dma_unmap_single_attrs(dev->dma_device, addr, size, |
| 2680 | direction, attrs); |
| 2681 | } |
| 2682 | |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2683 | /** |
| 2684 | * ib_dma_map_page - Map a physical page to DMA address |
| 2685 | * @dev: The device for which the dma_addr is to be created |
| 2686 | * @page: The page to be mapped |
| 2687 | * @offset: The offset within the page |
| 2688 | * @size: The size of the region in bytes |
| 2689 | * @direction: The direction of the DMA |
| 2690 | */ |
| 2691 | static inline u64 ib_dma_map_page(struct ib_device *dev, |
| 2692 | struct page *page, |
| 2693 | unsigned long offset, |
| 2694 | size_t size, |
| 2695 | enum dma_data_direction direction) |
| 2696 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 2697 | if (dev->dma_ops) |
| 2698 | return dev->dma_ops->map_page(dev, page, offset, size, direction); |
| 2699 | return dma_map_page(dev->dma_device, page, offset, size, direction); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2700 | } |
| 2701 | |
| 2702 | /** |
| 2703 | * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page() |
| 2704 | * @dev: The device for which the DMA address was created |
| 2705 | * @addr: The DMA address |
| 2706 | * @size: The size of the region in bytes |
| 2707 | * @direction: The direction of the DMA |
| 2708 | */ |
| 2709 | static inline void ib_dma_unmap_page(struct ib_device *dev, |
| 2710 | u64 addr, size_t size, |
| 2711 | enum dma_data_direction direction) |
| 2712 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 2713 | if (dev->dma_ops) |
| 2714 | dev->dma_ops->unmap_page(dev, addr, size, direction); |
| 2715 | else |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2716 | dma_unmap_page(dev->dma_device, addr, size, direction); |
| 2717 | } |
| 2718 | |
| 2719 | /** |
| 2720 | * ib_dma_map_sg - Map a scatter/gather list to DMA addresses |
| 2721 | * @dev: The device for which the DMA addresses are to be created |
| 2722 | * @sg: The array of scatter/gather entries |
| 2723 | * @nents: The number of scatter/gather entries |
| 2724 | * @direction: The direction of the DMA |
| 2725 | */ |
| 2726 | static inline int ib_dma_map_sg(struct ib_device *dev, |
| 2727 | struct scatterlist *sg, int nents, |
| 2728 | enum dma_data_direction direction) |
| 2729 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 2730 | if (dev->dma_ops) |
| 2731 | return dev->dma_ops->map_sg(dev, sg, nents, direction); |
| 2732 | return dma_map_sg(dev->dma_device, sg, nents, direction); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2733 | } |
| 2734 | |
| 2735 | /** |
| 2736 | * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses |
| 2737 | * @dev: The device for which the DMA addresses were created |
| 2738 | * @sg: The array of scatter/gather entries |
| 2739 | * @nents: The number of scatter/gather entries |
| 2740 | * @direction: The direction of the DMA |
| 2741 | */ |
| 2742 | static inline void ib_dma_unmap_sg(struct ib_device *dev, |
| 2743 | struct scatterlist *sg, int nents, |
| 2744 | enum dma_data_direction direction) |
| 2745 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 2746 | if (dev->dma_ops) |
| 2747 | dev->dma_ops->unmap_sg(dev, sg, nents, direction); |
| 2748 | else |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2749 | dma_unmap_sg(dev->dma_device, sg, nents, direction); |
| 2750 | } |
| 2751 | |
Arthur Kepner | cb9fbc5 | 2008-04-29 01:00:34 -0700 | [diff] [blame] | 2752 | static inline int ib_dma_map_sg_attrs(struct ib_device *dev, |
| 2753 | struct scatterlist *sg, int nents, |
| 2754 | enum dma_data_direction direction, |
| 2755 | struct dma_attrs *attrs) |
| 2756 | { |
| 2757 | return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs); |
| 2758 | } |
| 2759 | |
| 2760 | static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, |
| 2761 | struct scatterlist *sg, int nents, |
| 2762 | enum dma_data_direction direction, |
| 2763 | struct dma_attrs *attrs) |
| 2764 | { |
| 2765 | dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs); |
| 2766 | } |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2767 | /** |
| 2768 | * ib_sg_dma_address - Return the DMA address from a scatter/gather entry |
| 2769 | * @dev: The device for which the DMA addresses were created |
| 2770 | * @sg: The scatter/gather entry |
Mike Marciniszyn | ea58a59 | 2014-03-28 13:26:59 -0400 | [diff] [blame] | 2771 | * |
| 2772 | * Note: this function is obsolete. To do: change all occurrences of |
| 2773 | * ib_sg_dma_address() into sg_dma_address(). |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2774 | */ |
| 2775 | static inline u64 ib_sg_dma_address(struct ib_device *dev, |
| 2776 | struct scatterlist *sg) |
| 2777 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 2778 | return sg_dma_address(sg); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2779 | } |
| 2780 | |
| 2781 | /** |
| 2782 | * ib_sg_dma_len - Return the DMA length from a scatter/gather entry |
| 2783 | * @dev: The device for which the DMA addresses were created |
| 2784 | * @sg: The scatter/gather entry |
Mike Marciniszyn | ea58a59 | 2014-03-28 13:26:59 -0400 | [diff] [blame] | 2785 | * |
| 2786 | * Note: this function is obsolete. To do: change all occurrences of |
| 2787 | * ib_sg_dma_len() into sg_dma_len(). |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2788 | */ |
| 2789 | static inline unsigned int ib_sg_dma_len(struct ib_device *dev, |
| 2790 | struct scatterlist *sg) |
| 2791 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 2792 | return sg_dma_len(sg); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2793 | } |
| 2794 | |
| 2795 | /** |
| 2796 | * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU |
| 2797 | * @dev: The device for which the DMA address was created |
| 2798 | * @addr: The DMA address |
| 2799 | * @size: The size of the region in bytes |
| 2800 | * @dir: The direction of the DMA |
| 2801 | */ |
| 2802 | static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, |
| 2803 | u64 addr, |
| 2804 | size_t size, |
| 2805 | enum dma_data_direction dir) |
| 2806 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 2807 | if (dev->dma_ops) |
| 2808 | dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir); |
| 2809 | else |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2810 | dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); |
| 2811 | } |
| 2812 | |
| 2813 | /** |
| 2814 | * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device |
| 2815 | * @dev: The device for which the DMA address was created |
| 2816 | * @addr: The DMA address |
| 2817 | * @size: The size of the region in bytes |
| 2818 | * @dir: The direction of the DMA |
| 2819 | */ |
| 2820 | static inline void ib_dma_sync_single_for_device(struct ib_device *dev, |
| 2821 | u64 addr, |
| 2822 | size_t size, |
| 2823 | enum dma_data_direction dir) |
| 2824 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 2825 | if (dev->dma_ops) |
| 2826 | dev->dma_ops->sync_single_for_device(dev, addr, size, dir); |
| 2827 | else |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2828 | dma_sync_single_for_device(dev->dma_device, addr, size, dir); |
| 2829 | } |
| 2830 | |
| 2831 | /** |
| 2832 | * ib_dma_alloc_coherent - Allocate memory and map it for DMA |
| 2833 | * @dev: The device for which the DMA address is requested |
| 2834 | * @size: The size of the region to allocate in bytes |
| 2835 | * @dma_handle: A pointer for returning the DMA address of the region |
| 2836 | * @flag: memory allocator flags |
| 2837 | */ |
| 2838 | static inline void *ib_dma_alloc_coherent(struct ib_device *dev, |
| 2839 | size_t size, |
| 2840 | u64 *dma_handle, |
| 2841 | gfp_t flag) |
| 2842 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 2843 | if (dev->dma_ops) |
| 2844 | return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag); |
Roland Dreier | c59a3da | 2006-12-15 13:57:26 -0800 | [diff] [blame] | 2845 | else { |
| 2846 | dma_addr_t handle; |
| 2847 | void *ret; |
| 2848 | |
| 2849 | ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag); |
| 2850 | *dma_handle = handle; |
| 2851 | return ret; |
| 2852 | } |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2853 | } |
| 2854 | |
| 2855 | /** |
| 2856 | * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent() |
| 2857 | * @dev: The device for which the DMA addresses were allocated |
| 2858 | * @size: The size of the region |
| 2859 | * @cpu_addr: the address returned by ib_dma_alloc_coherent() |
| 2860 | * @dma_handle: the DMA address returned by ib_dma_alloc_coherent() |
| 2861 | */ |
| 2862 | static inline void ib_dma_free_coherent(struct ib_device *dev, |
| 2863 | size_t size, void *cpu_addr, |
| 2864 | u64 dma_handle) |
| 2865 | { |
Ben Collins | d1998ef | 2006-12-13 22:10:05 -0500 | [diff] [blame] | 2866 | if (dev->dma_ops) |
| 2867 | dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); |
| 2868 | else |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 2869 | dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); |
| 2870 | } |
| 2871 | |
| 2872 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2873 | * ib_query_mr - Retrieves information about a specific memory region. |
| 2874 | * @mr: The memory region to retrieve information about. |
| 2875 | * @mr_attr: The attributes of the specified memory region. |
| 2876 | */ |
| 2877 | int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr); |
| 2878 | |
| 2879 | /** |
| 2880 | * ib_dereg_mr - Deregisters a memory region and removes it from the |
| 2881 | * HCA translation table. |
| 2882 | * @mr: The memory region to deregister. |
Shani Michaeli | 7083e42 | 2013-02-06 16:19:12 +0000 | [diff] [blame] | 2883 | * |
| 2884 |  * This function can fail if the memory region has memory windows bound to it. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2885 | */ |
| 2886 | int ib_dereg_mr(struct ib_mr *mr); |
| 2887 | |
Sagi Grimberg | 9bee178 | 2015-07-30 10:32:35 +0300 | [diff] [blame] | 2888 | struct ib_mr *ib_alloc_mr(struct ib_pd *pd, |
| 2889 | enum ib_mr_type mr_type, |
| 2890 | u32 max_num_sg); |
Steve Wise | 00f7ec3 | 2008-07-14 23:48:45 -0700 | [diff] [blame] | 2891 | |
| 2892 | /** |
Steve Wise | 00f7ec3 | 2008-07-14 23:48:45 -0700 | [diff] [blame] | 2893 | * ib_update_fast_reg_key - updates the key portion of the fast_reg MR |
| 2894 | * R_Key and L_Key. |
| 2895 |  * @mr: struct ib_mr pointer to be updated. |
| 2896 |  * @newkey: new key to be used. |
| 2897 | */ |
| 2898 | static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey) |
| 2899 | { |
| 2900 | mr->lkey = (mr->lkey & 0xffffff00) | newkey; |
| 2901 | mr->rkey = (mr->rkey & 0xffffff00) | newkey; |
| 2902 | } |
| 2903 | |
| 2904 | /** |
Shani Michaeli | 7083e42 | 2013-02-06 16:19:12 +0000 | [diff] [blame] | 2905 | * ib_inc_rkey - increments the key portion of the given rkey. Can be used |
| 2906 | * for calculating a new rkey for type 2 memory windows. |
| 2907 |  * @rkey: the rkey to increment. |
| 2908 | */ |
| 2909 | static inline u32 ib_inc_rkey(u32 rkey) |
| 2910 | { |
| 2911 | const u32 mask = 0x000000ff; |
| 2912 | return ((rkey + 1) & mask) | (rkey & ~mask); |
| 2913 | } |
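
/*
 * Illustrative sketch (hypothetical helper): refresh the consumer-owned
 * key byte of a fast-reg MR before re-registering it, so stale remote
 * references cannot match the new registration.  Passing the u32 from
 * ib_inc_rkey() truncates to the low byte, which is exactly the
 * consumer-owned portion of the key.
 */
static inline void example_refresh_mr_key(struct ib_mr *mr)
{
	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
}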
| 2914 | |
| 2915 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2916 | * ib_alloc_mw - Allocates a memory window. |
| 2917 | * @pd: The protection domain associated with the memory window. |
Shani Michaeli | 7083e42 | 2013-02-06 16:19:12 +0000 | [diff] [blame] | 2918 | * @type: The type of the memory window (1 or 2). |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2919 | */ |
Shani Michaeli | 7083e42 | 2013-02-06 16:19:12 +0000 | [diff] [blame] | 2920 | struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2921 | |
| 2922 | /** |
| 2923 | * ib_bind_mw - Posts a work request to the send queue of the specified |
| 2924 | * QP, which binds the memory window to the given address range and |
| 2925 | * remote access attributes. |
| 2926 | * @qp: QP to post the bind work request on. |
| 2927 | * @mw: The memory window to bind. |
| 2928 | * @mw_bind: Specifies information about the memory window, including |
| 2929 | * its address range, remote access rights, and associated memory region. |
Shani Michaeli | 7083e42 | 2013-02-06 16:19:12 +0000 | [diff] [blame] | 2930 | * |
| 2931 | * If there is no immediate error, the function will update the rkey member |
| 2932 | * of the mw parameter to its new value. The bind operation can still fail |
| 2933 | * asynchronously. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2934 | */ |
| 2935 | static inline int ib_bind_mw(struct ib_qp *qp, |
| 2936 | struct ib_mw *mw, |
| 2937 | struct ib_mw_bind *mw_bind) |
| 2938 | { |
| 2939 | /* XXX reference counting in corresponding MR? */ |
| 2940 | return mw->device->bind_mw ? |
| 2941 | mw->device->bind_mw(qp, mw, mw_bind) : |
| 2942 | -ENOSYS; |
| 2943 | } |
| 2944 | |
| 2945 | /** |
| 2946 | * ib_dealloc_mw - Deallocates a memory window. |
| 2947 | * @mw: The memory window to deallocate. |
| 2948 | */ |
| 2949 | int ib_dealloc_mw(struct ib_mw *mw); |
| 2950 | |
| 2951 | /** |
| 2952 |  * ib_alloc_fmr - Allocates an unmapped fast memory region. |
| 2953 | * @pd: The protection domain associated with the unmapped region. |
| 2954 | * @mr_access_flags: Specifies the memory access rights. |
| 2955 | * @fmr_attr: Attributes of the unmapped region. |
| 2956 | * |
| 2957 | * A fast memory region must be mapped before it can be used as part of |
| 2958 | * a work request. |
| 2959 | */ |
| 2960 | struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, |
| 2961 | int mr_access_flags, |
| 2962 | struct ib_fmr_attr *fmr_attr); |
| 2963 | |
| 2964 | /** |
| 2965 | * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region. |
| 2966 | * @fmr: The fast memory region to associate with the pages. |
| 2967 | * @page_list: An array of physical pages to map to the fast memory region. |
| 2968 | * @list_len: The number of pages in page_list. |
| 2969 | * @iova: The I/O virtual address to use with the mapped region. |
| 2970 | */ |
| 2971 | static inline int ib_map_phys_fmr(struct ib_fmr *fmr, |
| 2972 | u64 *page_list, int list_len, |
| 2973 | u64 iova) |
| 2974 | { |
| 2975 | return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); |
| 2976 | } |
| 2977 | |
| 2978 | /** |
| 2979 | * ib_unmap_fmr - Removes the mapping from a list of fast memory regions. |
| 2980 | * @fmr_list: A linked list of fast memory regions to unmap. |
| 2981 | */ |
| 2982 | int ib_unmap_fmr(struct list_head *fmr_list); |
| 2983 | |
| 2984 | /** |
| 2985 | * ib_dealloc_fmr - Deallocates a fast memory region. |
| 2986 | * @fmr: The fast memory region to deallocate. |
| 2987 | */ |
| 2988 | int ib_dealloc_fmr(struct ib_fmr *fmr); |
| 2989 | |
| 2990 | /** |
| 2991 | * ib_attach_mcast - Attaches the specified QP to a multicast group. |
| 2992 | * @qp: QP to attach to the multicast group. The QP must be type |
| 2993 | * IB_QPT_UD. |
| 2994 | * @gid: Multicast group GID. |
| 2995 | * @lid: Multicast group LID in host byte order. |
| 2996 | * |
| 2997 | * In order to send and receive multicast packets, subnet |
| 2998 | * administration must have created the multicast group and configured |
| 2999 | * the fabric appropriately. The port associated with the specified |
| 3000 | * QP must also be a member of the multicast group. |
| 3001 | */ |
| 3002 | int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); |
| 3003 | |
| 3004 | /** |
| 3005 | * ib_detach_mcast - Detaches the specified QP from a multicast group. |
| 3006 | * @qp: QP to detach from the multicast group. |
| 3007 | * @gid: Multicast group GID. |
| 3008 | * @lid: Multicast group LID in host byte order. |
| 3009 | */ |
| 3010 | int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); |
| 3011 | |
Sean Hefty | 59991f9 | 2011-05-23 17:52:46 -0700 | [diff] [blame] | 3012 | /** |
| 3013 | * ib_alloc_xrcd - Allocates an XRC domain. |
| 3014 | * @device: The device on which to allocate the XRC domain. |
| 3015 | */ |
| 3016 | struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device); |
| 3017 | |
| 3018 | /** |
| 3019 | * ib_dealloc_xrcd - Deallocates an XRC domain. |
| 3020 | * @xrcd: The XRC domain to deallocate. |
| 3021 | */ |
| 3022 | int ib_dealloc_xrcd(struct ib_xrcd *xrcd); |
| 3023 | |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 3024 | struct ib_flow *ib_create_flow(struct ib_qp *qp, |
| 3025 | struct ib_flow_attr *flow_attr, int domain); |
| 3026 | int ib_destroy_flow(struct ib_flow *flow_id); |
| 3027 | |
Eli Cohen | 1c636f8 | 2013-10-31 15:26:32 +0200 | [diff] [blame] | 3028 | static inline int ib_check_mr_access(int flags) |
| 3029 | { |
| 3030 | /* |
| 3031 | * Local write permission is required if remote write or |
| 3032 | * remote atomic permission is also requested. |
| 3033 | */ |
| 3034 | if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) && |
| 3035 | !(flags & IB_ACCESS_LOCAL_WRITE)) |
| 3036 | return -EINVAL; |
| 3037 | |
| 3038 | return 0; |
| 3039 | } |
| 3040 | |
Sagi Grimberg | 1b01d33 | 2014-02-23 14:19:05 +0200 | [diff] [blame] | 3041 | /** |
| 3042 | * ib_check_mr_status: lightweight check of MR status. |
| 3043 | * This routine may provide status checks on a selected |
| 3044 | * ib_mr. first use is for signature status check. |
| 3045 | * |
| 3046 | * @mr: A memory region. |
| 3047 | * @check_mask: Bitmask of which checks to perform from |
| 3048 | * ib_mr_status_check enumeration. |
| 3049 | * @mr_status: The container of relevant status checks. |
| 3050 |  *     Failed checks will be indicated in the status bitmask |
| 3051 |  *     and the relevant info shall be in the error item. |
| 3052 | */ |
| 3053 | int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, |
| 3054 | struct ib_mr_status *mr_status); |
| 3055 | |
Yotam Kenneth | 9268f72 | 2015-07-30 17:50:15 +0300 | [diff] [blame] | 3056 | struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port, |
| 3057 | u16 pkey, const union ib_gid *gid, |
| 3058 | const struct sockaddr *addr); |
| 3059 | |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 3060 | int ib_map_mr_sg(struct ib_mr *mr, |
| 3061 | struct scatterlist *sg, |
| 3062 | int sg_nents, |
| 3063 | unsigned int page_size); |
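
/*
 * Illustrative sketch (hypothetical helper): map an already DMA-mapped
 * scatterlist into an MR from ib_alloc_mr() and post the registration
 * as an IB_WR_REG_MR work request, using struct ib_reg_wr declared
 * earlier in this header.  The access flags are placeholders.
 */
static inline int example_fast_reg(struct ib_qp *qp, struct ib_mr *mr,
				   struct scatterlist *sg, int sg_nents)
{
	struct ib_reg_wr wr;
	struct ib_send_wr *bad_wr;
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, PAGE_SIZE);
	if (n < sg_nents)
		return n < 0 ? n : -EINVAL;	/* could not map every entry */

	/* Bump the key byte so stale rkeys cannot match (see above). */
	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

	memset(&wr, 0, sizeof(wr));
	wr.wr.opcode	 = IB_WR_REG_MR;
	wr.wr.send_flags = IB_SEND_SIGNALED;
	wr.mr		 = mr;
	wr.key		 = mr->rkey;
	wr.access	 = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	return ib_post_send(qp, &wr.wr, &bad_wr);
}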
| 3064 | |
| 3065 | static inline int |
| 3066 | ib_map_mr_sg_zbva(struct ib_mr *mr, |
| 3067 | struct scatterlist *sg, |
| 3068 | int sg_nents, |
| 3069 | unsigned int page_size) |
| 3070 | { |
| 3071 | int n; |
| 3072 | |
| 3073 | n = ib_map_mr_sg(mr, sg, sg_nents, page_size); |
| 3074 | mr->iova = 0; |
| 3075 | |
| 3076 | return n; |
| 3077 | } |
| 3078 | |
| 3079 | int ib_sg_to_pages(struct ib_mr *mr, |
| 3080 | struct scatterlist *sgl, |
| 3081 | int sg_nents, |
| 3082 | int (*set_page)(struct ib_mr *, u64)); |
| 3083 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3084 | #endif /* IB_VERBS_H */ |