struct ib_ah *ah;
struct ib_mad_send_wr_private *mad_send_wr;
- if (device->node_type == RDMA_NODE_IB_SWITCH)
+ if (rdma_cap_ib_switch(device))
port_priv = ib_get_agent_port(device, 0);
else
port_priv = ib_get_agent_port(device, port_num);
memcpy(send_buf->mad, mad_hdr, resp_mad_len);
send_buf->ah = ah;
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
+ if (rdma_cap_ib_switch(device)) {
mad_send_wr = container_of(send_buf,
struct ib_mad_send_wr_private,
send_buf);
struct ib_device *ib_device;
struct device *device;
u8 ack_delay;
+ int going_down;
struct cm_port *port[0];
};
{
int wait_time;
unsigned long flags;
+ struct cm_device *cm_dev;
+
+ cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
+ if (!cm_dev)
+ return;
spin_lock_irqsave(&cm.lock, flags);
cm_cleanup_timewait(cm_id_priv->timewait_info);
*/
cm_id_priv->id.state = IB_CM_TIMEWAIT;
wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
- queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
- msecs_to_jiffies(wait_time));
+
+ /* Check if the device started its remove_one */
+ spin_lock_irq(&cm.lock);
+ if (!cm_dev->going_down)
+ queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
+ msecs_to_jiffies(wait_time));
+ spin_unlock_irq(&cm.lock);
+
cm_id_priv->timewait_info = NULL;
}
struct cm_work *work;
unsigned long flags;
int ret = 0;
+ struct cm_device *cm_dev;
+
+ cm_dev = ib_get_client_data(cm_id->device, &cm_client);
+ if (!cm_dev)
+ return -ENODEV;
work = kmalloc(sizeof *work, GFP_ATOMIC);
if (!work)
work->remote_id = cm_id->remote_id;
work->mad_recv_wc = NULL;
work->cm_event.event = IB_CM_USER_ESTABLISHED;
- queue_delayed_work(cm.wq, &work->work, 0);
+
+ /* Check if the device started its remove_one */
+ spin_lock_irq(&cm.lock);
+ if (!cm_dev->going_down) {
+ queue_delayed_work(cm.wq, &work->work, 0);
+ } else {
+ kfree(work);
+ ret = -ENODEV;
+ }
+ spin_unlock_irq(&cm.lock);
+
out:
return ret;
}
enum ib_cm_event_type event;
u16 attr_id;
int paths = 0;
+ int going_down = 0;
switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
case CM_REQ_ATTR_ID:
work->cm_event.event = event;
work->mad_recv_wc = mad_recv_wc;
work->port = port;
- queue_delayed_work(cm.wq, &work->work, 0);
+
+ /* Check if the device started its remove_one */
+ spin_lock_irq(&cm.lock);
+ if (!port->cm_dev->going_down)
+ queue_delayed_work(cm.wq, &work->work, 0);
+ else
+ going_down = 1;
+ spin_unlock_irq(&cm.lock);
+
+ if (going_down) {
+ kfree(work);
+ ib_free_recv_mad(mad_recv_wc);
+ }
}
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
cm_dev->ib_device = ib_device;
cm_get_ack_delay(cm_dev);
-
+ cm_dev->going_down = 0;
cm_dev->device = device_create(&cm_class, &ib_device->dev,
MKDEV(0, 0), NULL,
"%s", ib_device->name);
list_del(&cm_dev->list);
write_unlock_irqrestore(&cm.device_lock, flags);
+ spin_lock_irq(&cm.lock);
+ cm_dev->going_down = 1;
+ spin_unlock_irq(&cm.lock);
+
for (i = 1; i <= ib_device->phys_port_cnt; i++) {
if (!rdma_cap_ib_cm(ib_device, i))
continue;
port = cm_dev->port[i-1];
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
- ib_unregister_mad_agent(port->mad_agent);
+ /*
+ * We flush the queue here after going_down has been set; this
+ * guarantees that no new work is queued by the recv handler, so
+ * it is safe to call ib_unregister_mad_agent() afterwards.
+ */
flush_workqueue(cm.wq);
+ ib_unregister_mad_agent(port->mad_agent);
cm_remove_port_fs(port);
}
device_unregister(cm_dev->device);
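For clarity, a distilled sketch of the teardown ordering this hunk relies on (foo_remove_one(), struct foo_dev and the foo globals are hypothetical, not the cm.c code itself): raise the flag under the same lock the queueing paths take, flush the workqueue so nothing queued before the flip is still in flight, and only then unregister the MAD agent.

struct foo_dev {
	int going_down;
	struct ib_mad_agent *mad_agent;
};

static struct {
	spinlock_t lock;
	struct workqueue_struct *wq;
} foo;

static void foo_remove_one(struct foo_dev *dev)
{
	/* 1. Stop new deferred work: queueing paths test this flag
	 *    under foo.lock before calling queue_delayed_work(). */
	spin_lock_irq(&foo.lock);
	dev->going_down = 1;
	spin_unlock_irq(&foo.lock);

	/* 2. Drain anything queued before the flag was set. */
	flush_workqueue(foo.wq);

	/* 3. No queued work can reference the agent any more. */
	ib_unregister_mad_agent(dev->mad_agent);
}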
err_str = "Invalid port mapper client";
goto pid_query_error;
}
- if (iwpm_registered_client(nl_client))
+ if (iwpm_check_registration(nl_client, IWPM_REG_VALID) ||
+ iwpm_user_pid == IWPM_PID_UNAVAILABLE)
return 0;
skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REG_PID, &nlh, nl_client);
if (!skb) {
ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */
- iwpm_set_registered(nl_client, 1);
iwpm_user_pid = IWPM_PID_UNAVAILABLE;
err_str = "Unable to send a nlmsg";
goto pid_query_error;
err_str = "Invalid port mapper client";
goto add_mapping_error;
}
- if (!iwpm_registered_client(nl_client)) {
+ if (!iwpm_valid_pid())
+ return 0;
+ if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
err_str = "Unregistered port mapper client";
goto add_mapping_error;
}
- if (!iwpm_valid_pid())
- return 0;
skb = iwpm_create_nlmsg(RDMA_NL_IWPM_ADD_MAPPING, &nlh, nl_client);
if (!skb) {
err_str = "Unable to create a nlmsg";
err_str = "Invalid port mapper client";
goto query_mapping_error;
}
- if (!iwpm_registered_client(nl_client)) {
+ if (!iwpm_valid_pid())
+ return 0;
+ if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
err_str = "Unregistered port mapper client";
goto query_mapping_error;
}
- if (!iwpm_valid_pid())
- return 0;
ret = -ENOMEM;
skb = iwpm_create_nlmsg(RDMA_NL_IWPM_QUERY_MAPPING, &nlh, nl_client);
if (!skb) {
err_str = "Invalid port mapper client";
goto remove_mapping_error;
}
- if (!iwpm_registered_client(nl_client)) {
+ if (!iwpm_valid_pid())
+ return 0;
+ if (iwpm_check_registration(nl_client, IWPM_REG_UNDEF)) {
err_str = "Unregistered port mapper client";
goto remove_mapping_error;
}
- if (!iwpm_valid_pid())
- return 0;
skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REMOVE_MAPPING, &nlh, nl_client);
if (!skb) {
ret = -ENOMEM;
pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
__func__, iwpm_user_pid);
if (iwpm_valid_client(nl_client))
- iwpm_set_registered(nl_client, 1);
+ iwpm_set_registration(nl_client, IWPM_REG_VALID);
register_pid_response_exit:
nlmsg_request->request_done = 1;
/* always for found nlmsg_request */
{
struct nlattr *nltb[IWPM_NLA_MAPINFO_REQ_MAX];
const char *msg_type = "Mapping Info response";
- int iwpm_pid;
u8 nl_client;
char *iwpm_name;
u16 iwpm_version;
__func__, nl_client);
return ret;
}
- iwpm_set_registered(nl_client, 0);
+ iwpm_set_registration(nl_client, IWPM_REG_INCOMPL);
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
+ iwpm_user_pid = cb->nlh->nlmsg_pid;
if (!iwpm_mapinfo_available())
return 0;
- iwpm_pid = cb->nlh->nlmsg_pid;
pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
- __func__, iwpm_pid);
- ret = iwpm_send_mapinfo(nl_client, iwpm_pid);
+ __func__, iwpm_user_pid);
+ ret = iwpm_send_mapinfo(nl_client, iwpm_user_pid);
return ret;
}
EXPORT_SYMBOL(iwpm_mapping_info_cb);
mutex_unlock(&iwpm_admin_lock);
if (!ret) {
iwpm_set_valid(nl_client, 1);
+ iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
pr_debug("%s: Mapinfo and reminfo tables are created\n",
__func__);
}
}
mutex_unlock(&iwpm_admin_lock);
iwpm_set_valid(nl_client, 0);
+ iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
return 0;
}
EXPORT_SYMBOL(iwpm_exit);
}
/* valid client */
-int iwpm_registered_client(u8 nl_client)
+u32 iwpm_get_registration(u8 nl_client)
{
return iwpm_admin.reg_list[nl_client];
}
/* valid client */
-void iwpm_set_registered(u8 nl_client, int reg)
+void iwpm_set_registration(u8 nl_client, u32 reg)
{
iwpm_admin.reg_list[nl_client] = reg;
}
+/* valid client */
+u32 iwpm_check_registration(u8 nl_client, u32 reg)
+{
+ return (iwpm_get_registration(nl_client) & reg);
+}
+
int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr,
struct sockaddr_storage *b_sockaddr)
{
#define IWPM_PID_UNDEFINED -1
#define IWPM_PID_UNAVAILABLE -2
+#define IWPM_REG_UNDEF 0x01
+#define IWPM_REG_VALID 0x02
+#define IWPM_REG_INCOMPL 0x04
+
struct iwpm_nlmsg_request {
struct list_head inprocess_list;
__u32 nlmsg_seq;
atomic_t refcount;
atomic_t nlmsg_seq;
int client_list[RDMA_NL_NUM_CLIENTS];
- int reg_list[RDMA_NL_NUM_CLIENTS];
+ u32 reg_list[RDMA_NL_NUM_CLIENTS];
};
/**
void iwpm_set_valid(u8 nl_client, int valid);
/**
- * iwpm_registered_client - Check if the port mapper client is registered
+ * iwpm_check_registration - Check if the client registration
+ * matches the given one
* @nl_client: The index of the netlink client
+ * @reg: The given registration type to compare with
*
* Call iwpm_register_pid() to register a client
+ * Returns true if the client registration matches reg,
+ * otherwise returns false
+ */
+u32 iwpm_check_registration(u8 nl_client, u32 reg);
+
+/**
+ * iwpm_set_registration - Set the client registration
+ * @nl_client: The index of the netlink client
+ * @reg: Registration type to set
*/
-int iwpm_registered_client(u8 nl_client);
+void iwpm_set_registration(u8 nl_client, u32 reg);
/**
- * iwpm_set_registered - Set the port mapper client to registered or not
+ * iwpm_get_registration - Get the registration type of the client
* @nl_client: The index of the netlink client
- * @reg: 1 if registered or 0 if not
+ *
+ * Returns the client registration type
*/
-void iwpm_set_registered(u8 nl_client, int reg);
+u32 iwpm_get_registration(u8 nl_client);
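As an illustration of how these accessors combine (foo_try_add_mapping() is a hypothetical caller, not part of the patch): a request is sent to the userspace port mapper only once the daemon's pid is known and the client has completed a full registration; a client still in IWPM_REG_UNDEF or left in IWPM_REG_INCOMPL after a port mapper restart is rejected.

static int foo_try_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
{
	if (!iwpm_valid_pid())
		return 0;		/* no userspace port mapper running yet */
	if (!iwpm_check_registration(nl_client, IWPM_REG_VALID))
		return -EINVAL;		/* unregistered or incomplete registration */

	return iwpm_add_mapping(pm_msg, nl_client);
}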
/**
* iwpm_send_mapinfo - Send local and mapped IPv4/IPv6 address info of
bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
mad_agent_priv->qp_info->port_priv->port_num);
- if (device->node_type == RDMA_NODE_IB_SWITCH &&
+ if (rdma_cap_ib_switch(device) &&
smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
port_num = send_wr->wr.ud.port_num;
else
if ((opa_get_smp_direction(opa_smp)
? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
OPA_LID_PERMISSIVE &&
- opa_smi_handle_dr_smp_send(opa_smp, device->node_type,
+ opa_smi_handle_dr_smp_send(opa_smp,
+ rdma_cap_ib_switch(device),
port_num) == IB_SMI_DISCARD) {
ret = -EINVAL;
dev_err(&device->dev, "OPA Invalid directed route\n");
goto out;
}
opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
- if (opa_drslid != OPA_LID_PERMISSIVE &&
+ if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
opa_drslid & 0xffff0000) {
ret = -EINVAL;
dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
} else {
if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
IB_LID_PERMISSIVE &&
- smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
+ smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
IB_SMI_DISCARD) {
ret = -EINVAL;
dev_err(&device->dev, "Invalid directed route\n");
struct ib_smp *smp = (struct ib_smp *)recv->mad;
if (smi_handle_dr_smp_recv(smp,
- port_priv->device->node_type,
+ rdma_cap_ib_switch(port_priv->device),
port_num,
port_priv->device->phys_port_cnt) ==
IB_SMI_DISCARD)
if (retsmi == IB_SMI_SEND) { /* don't forward */
if (smi_handle_dr_smp_send(smp,
- port_priv->device->node_type,
+ rdma_cap_ib_switch(port_priv->device),
port_num) == IB_SMI_DISCARD)
return IB_SMI_DISCARD;
if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
return IB_SMI_DISCARD;
- } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
+ } else if (rdma_cap_ib_switch(port_priv->device)) {
/* forward case for switches */
memcpy(response, recv, mad_priv_size(response));
response->header.recv_wc.wc = &response->header.wc;
struct opa_smp *smp = (struct opa_smp *)recv->mad;
if (opa_smi_handle_dr_smp_recv(smp,
- port_priv->device->node_type,
+ rdma_cap_ib_switch(port_priv->device),
port_num,
port_priv->device->phys_port_cnt) ==
IB_SMI_DISCARD)
if (retsmi == IB_SMI_SEND) { /* don't forward */
if (opa_smi_handle_dr_smp_send(smp,
- port_priv->device->node_type,
+ rdma_cap_ib_switch(port_priv->device),
port_num) == IB_SMI_DISCARD)
return IB_SMI_DISCARD;
IB_SMI_DISCARD)
return IB_SMI_DISCARD;
- } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
+ } else if (rdma_cap_ib_switch(port_priv->device)) {
/* forward case for switches */
memcpy(response, recv, mad_priv_size(response));
response->header.recv_wc.wc = &response->header.wc;
goto out;
}
- if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
+ if (rdma_cap_ib_switch(port_priv->device))
port_num = wc->port_num;
else
port_num = port_priv->port_num;
static void ib_mad_init_device(struct ib_device *device)
{
- int start, end, i;
+ int start, i;
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
- start = 0;
- end = 0;
- } else {
- start = 1;
- end = device->phys_port_cnt;
- }
+ start = rdma_start_port(device);
- for (i = start; i <= end; i++) {
+ for (i = start; i <= rdma_end_port(device); i++) {
if (!rdma_cap_ib_mad(device, i))
continue;
static void ib_mad_remove_device(struct ib_device *device)
{
- int start, end, i;
-
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
- start = 0;
- end = 0;
- } else {
- start = 1;
- end = device->phys_port_cnt;
- }
+ int i;
- for (i = start; i <= end; i++) {
+ for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
if (!rdma_cap_ib_mad(device, i))
continue;
if (!dev)
return;
- if (device->node_type == RDMA_NODE_IB_SWITCH)
- dev->start_port = dev->end_port = 0;
- else {
- dev->start_port = 1;
- dev->end_port = device->phys_port_cnt;
- }
+ dev->start_port = rdma_start_port(device);
+ dev->end_port = rdma_end_port(device);
for (i = 0; i <= dev->end_port - dev->start_port; i++) {
if (!rdma_cap_ib_mcast(device, dev->start_port + i))
#include "smi.h"
-enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type,
+enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
int port_num, int phys_port_cnt);
int opa_smi_get_fwd_port(struct opa_smp *smp);
extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp);
extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
- u8 node_type, int port_num);
+ bool is_switch, int port_num);
/*
* Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
int s, e, i;
int count = 0;
- if (device->node_type == RDMA_NODE_IB_SWITCH)
- s = e = 0;
- else {
- s = 1;
- e = device->phys_port_cnt;
- }
+ s = rdma_start_port(device);
+ e = rdma_end_port(device);
sa_dev = kzalloc(sizeof *sa_dev +
(e - s + 1) * sizeof (struct ib_sa_port),
#include "smi.h"
#include "opa_smi.h"
-static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
+static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num,
u8 *hop_ptr, u8 hop_cnt,
const u8 *initial_path,
const u8 *return_path,
/* C14-9:2 */
if (*hop_ptr && *hop_ptr < hop_cnt) {
- if (node_type != RDMA_NODE_IB_SWITCH)
+ if (!is_switch)
return IB_SMI_DISCARD;
/* return_path set when received */
if (*hop_ptr == hop_cnt) {
/* return_path set when received */
(*hop_ptr)++;
- return (node_type == RDMA_NODE_IB_SWITCH ||
+ return (is_switch ||
dr_dlid_is_permissive ?
IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-13:2 */
if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) {
- if (node_type != RDMA_NODE_IB_SWITCH)
+ if (!is_switch)
return IB_SMI_DISCARD;
(*hop_ptr)--;
if (*hop_ptr == 1) {
(*hop_ptr)--;
/* C14-13:3 -- SMPs destined for SM shouldn't be here */
- return (node_type == RDMA_NODE_IB_SWITCH ||
+ return (is_switch ||
dr_slid_is_permissive ?
IB_SMI_HANDLE : IB_SMI_DISCARD);
}
* Return IB_SMI_DISCARD if the SMP should be discarded
*/
enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
- u8 node_type, int port_num)
+ bool is_switch, int port_num)
{
- return __smi_handle_dr_smp_send(node_type, port_num,
+ return __smi_handle_dr_smp_send(is_switch, port_num,
&smp->hop_ptr, smp->hop_cnt,
smp->initial_path,
smp->return_path,
}
enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
- u8 node_type, int port_num)
+ bool is_switch, int port_num)
{
- return __smi_handle_dr_smp_send(node_type, port_num,
+ return __smi_handle_dr_smp_send(is_switch, port_num,
&smp->hop_ptr, smp->hop_cnt,
smp->route.dr.initial_path,
smp->route.dr.return_path,
OPA_LID_PERMISSIVE);
}
-static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
+static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num,
int phys_port_cnt,
u8 *hop_ptr, u8 hop_cnt,
const u8 *initial_path,
/* C14-9:2 -- intermediate hop */
if (*hop_ptr && *hop_ptr < hop_cnt) {
- if (node_type != RDMA_NODE_IB_SWITCH)
+ if (!is_switch)
return IB_SMI_DISCARD;
return_path[*hop_ptr] = port_num;
return_path[*hop_ptr] = port_num;
/* hop_ptr updated when sending */
- return (node_type == RDMA_NODE_IB_SWITCH ||
+ return (is_switch ||
dr_dlid_is_permissive ?
IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-13:2 */
if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) {
- if (node_type != RDMA_NODE_IB_SWITCH)
+ if (!is_switch)
return IB_SMI_DISCARD;
/* hop_ptr updated when sending */
return IB_SMI_HANDLE;
}
/* hop_ptr updated when sending */
- return (node_type == RDMA_NODE_IB_SWITCH ?
- IB_SMI_HANDLE : IB_SMI_DISCARD);
+ return (is_switch ? IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-13:4 -- hop_ptr = 0 -> give to SM */
* Adjust information for a received SMP
* Return IB_SMI_DISCARD if the SMP should be dropped
*/
-enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
+enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
int port_num, int phys_port_cnt)
{
- return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt,
+ return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
&smp->hop_ptr, smp->hop_cnt,
smp->initial_path,
smp->return_path,
* Adjust information for a received SMP
* Return IB_SMI_DISCARD if the SMP should be dropped
*/
-enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type,
+enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
int port_num, int phys_port_cnt)
{
- return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt,
+ return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
&smp->hop_ptr, smp->hop_cnt,
smp->route.dr.initial_path,
smp->route.dr.return_path,
IB_SMI_FORWARD /* SMP should be forwarded (for switches only) */
};
-enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
+enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
int port_num, int phys_port_cnt);
int smi_get_fwd_port(struct ib_smp *smp);
extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp);
extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
- u8 node_type, int port_num);
+ bool is_switch, int port_num);
/*
* Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
goto err_put;
}
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
+ if (rdma_cap_ib_switch(device)) {
ret = add_port(device, 0, port_callback);
if (ret)
goto err_put;
return 0;
}
+static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
static void ib_ucm_release_dev(struct device *dev)
{
struct ib_ucm_device *ucm_dev;
if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
clear_bit(ucm_dev->devnum, dev_map);
else
- clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, dev_map);
+ clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, overflow_map);
kfree(ucm_dev);
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
static dev_t overflow_maj;
-static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
static int find_overflow_devnum(void)
{
int ret;
/* Acquire mutex's based on pointer comparison to prevent deadlock. */
if (file1 < file2) {
mutex_lock(&file1->mut);
- mutex_lock(&file2->mut);
+ mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&file2->mut);
- mutex_lock(&file1->mut);
+ mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
}
}
device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
misc_deregister(&ucma_misc);
idr_destroy(&ctx_idr);
+ idr_destroy(&multicast_idr);
}
module_init(ucma_init);
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
- BUG_ON(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad));
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
return IB_MAD_RESULT_FAILURE;
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
- BUG_ON(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad));
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
spin_lock_init(&idev->qp_table.lock);
spin_lock_init(&idev->lk_table.lock);
- idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE);
+ idev->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
/* Set the prefix to the default value (see ch. 4.1.1) */
- idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL);
+ idev->gid_prefix = cpu_to_be64(0xfe80000000000000ULL);
ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
if (ret)
struct mlx4_ib_dev *dev = to_mdev(ibdev);
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
+ enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);
- BUG_ON(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad));
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
- switch (rdma_port_get_link_layer(ibdev, port_num)) {
- case IB_LINK_LAYER_INFINIBAND:
- if (!mlx4_is_slave(dev->dev))
- return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
- case IB_LINK_LAYER_ETHERNET:
- return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
- default:
- return -EINVAL;
+ /*
+ * iboe_process_mad(), which uses the HCA flow counters to implement
+ * IB PMA queries, should be called only by VFs and only for that
+ * specific purpose.
+ */
+ if (link == IB_LINK_LAYER_INFINIBAND) {
+ if (mlx4_is_slave(dev->dev) &&
+ in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+ in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS)
+ return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+ in_grh, in_mad, out_mad);
+
+ return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
+ in_grh, in_mad, out_mad);
}
+
+ if (link == IB_LINK_LAYER_ETHERNET)
+ return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+ in_grh, in_mad, out_mad);
+
+ return -EINVAL;
}
static void send_handler(struct ib_mad_agent *agent,
props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
props->timestamp_mask = 0xFFFFFFFFFFFFULL;
- err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
- if (err)
- goto out;
+ if (!mlx4_is_slave(dev->dev))
+ err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
- resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
resp.response_length += sizeof(resp.hca_core_clock_offset);
- resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
+ if (!err && !mlx4_is_slave(dev->dev)) {
+ resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
+ resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
+ }
}
if (uhw->outlen) {
dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
if (!dm) {
pr_err("failed to allocate memory for tunneling qp update\n");
- goto out;
+ return;
}
for (i = 0; i < ports; i++) {
dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
if (!dm[i]) {
pr_err("failed to allocate memory for tunneling qp update work struct\n");
- for (i = 0; i < dev->caps.num_ports; i++) {
- if (dm[i])
- kfree(dm[i]);
- }
+ while (--i >= 0)
+ kfree(dm[i]);
goto out;
}
- }
- /* initialize or tear down tunnel QPs for the slave */
- for (i = 0; i < ports; i++) {
INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
dm[i]->port = first_port + i + 1;
dm[i]->slave = slave;
dm[i]->do_init = do_init;
dm[i]->dev = ibdev;
- spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
- if (!ibdev->sriov.is_going_down)
+ }
+ /* initialize or tear down tunnel QPs for the slave */
+ spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
+ if (!ibdev->sriov.is_going_down) {
+ for (i = 0; i < ports; i++)
queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+ } else {
+ spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+ for (i = 0; i < ports; i++)
+ kfree(dm[i]);
}
out:
kfree(dm);
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
- BUG_ON(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad));
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
- BUG_ON(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad));
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
/* Forward locally generated traps to the SM */
if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
int rc = arpindex;
struct net_device *netdev;
struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
+ __be32 dst_ipaddr = htonl(dst_ip);
- rt = ip_route_output(&init_net, htonl(dst_ip), 0, 0, 0);
+ rt = ip_route_output(&init_net, dst_ipaddr, nesvnic->local_ipaddr, 0, 0);
if (IS_ERR(rt)) {
printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n",
__func__, dst_ip);
else
netdev = nesvnic->netdev;
- neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
+ neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
rcu_read_lock();
if (neigh) {
(((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) |
(((u32)mac_addr[4]) << 8) | (u32)mac_addr[5]);
cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32(
- (((u32)mac_addr[0]) << 16) | (u32)mac_addr[1]);
+ (((u32)mac_addr[0]) << 8) | (u32)mac_addr[1]);
} else {
cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = 0;
cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = 0;
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
- BUG_ON(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad));
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_PERF_MGMT:
ocrdma_unregister_inet6addr_notifier();
ocrdma_unregister_inetaddr_notifier();
ocrdma_rem_debugfs();
+ idr_destroy(&ocrdma_dev_id);
}
module_init(ocrdma_init_module);
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
- BUG_ON(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad));
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
struct net_device *dev;
struct ipoib_neigh *neigh;
struct ipoib_path *path;
- struct ipoib_cm_tx_buf *tx_ring;
+ struct ipoib_tx_buf *tx_ring;
unsigned tx_head;
unsigned tx_tail;
unsigned long flags;
void ipoib_mcast_dev_down(struct net_device *dev);
void ipoib_mcast_dev_flush(struct net_device *dev);
+int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req);
+void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
+ struct ipoib_tx_buf *tx_req);
+
+static inline void ipoib_build_sge(struct ipoib_dev_priv *priv,
+ struct ipoib_tx_buf *tx_req)
+{
+ int i, off;
+ struct sk_buff *skb = tx_req->skb;
+ skb_frag_t *frags = skb_shinfo(skb)->frags;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ u64 *mapping = tx_req->mapping;
+
+ if (skb_headlen(skb)) {
+ priv->tx_sge[0].addr = mapping[0];
+ priv->tx_sge[0].length = skb_headlen(skb);
+ off = 1;
+ } else
+ off = 0;
+
+ for (i = 0; i < nr_frags; ++i) {
+ priv->tx_sge[i + off].addr = mapping[i + off];
+ priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
+ }
+ priv->tx_wr.num_sge = nr_frags + off;
+}
+
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev);
int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter);
static inline int post_send(struct ipoib_dev_priv *priv,
struct ipoib_cm_tx *tx,
unsigned int wr_id,
- u64 addr, int len)
+ struct ipoib_tx_buf *tx_req)
{
struct ib_send_wr *bad_wr;
- priv->tx_sge[0].addr = addr;
- priv->tx_sge[0].length = len;
+ ipoib_build_sge(priv, tx_req);
- priv->tx_wr.num_sge = 1;
priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM;
return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct ipoib_cm_tx_buf *tx_req;
- u64 addr;
+ struct ipoib_tx_buf *tx_req;
int rc;
if (unlikely(skb->len > tx->mtu)) {
*/
tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
tx_req->skb = skb;
- addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
- if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
+
+ if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
++dev->stats.tx_errors;
dev_kfree_skb_any(skb);
return;
}
- tx_req->mapping = addr;
-
skb_orphan(skb);
skb_dst_drop(skb);
- rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
- addr, skb->len);
+ rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
if (unlikely(rc)) {
ipoib_warn(priv, "post_send failed, error %d\n", rc);
++dev->stats.tx_errors;
- ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
+ ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(skb);
} else {
dev->trans_start = jiffies;
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_tx *tx = wc->qp->qp_context;
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
- struct ipoib_cm_tx_buf *tx_req;
+ struct ipoib_tx_buf *tx_req;
unsigned long flags;
ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
tx_req = &tx->tx_ring[wr_id];
- ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
+ ipoib_dma_unmap_tx(priv, tx_req);
/* FIXME: is this right? Shouldn't we only increment on success? */
++dev->stats.tx_packets;
struct ib_qp *tx_qp;
+ if (dev->features & NETIF_F_SG)
+ attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+
tx_qp = ib_create_qp(priv->pd, &attr);
if (PTR_ERR(tx_qp) == -EINVAL) {
ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
struct ipoib_dev_priv *priv = netdev_priv(p->dev);
- struct ipoib_cm_tx_buf *tx_req;
+ struct ipoib_tx_buf *tx_req;
unsigned long begin;
ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
while ((int) p->tx_tail - (int) p->tx_head < 0) {
tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
- ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
- DMA_TO_DEVICE);
+ ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(tx_req->skb);
++p->tx_tail;
netif_tx_lock_bh(p->dev);
spin_unlock_irq(&priv->lock);
}
-
static ssize_t show_mode(struct device *d, struct device_attribute *attr,
char *buf)
{
"for buf %d\n", wr_id);
}
-static int ipoib_dma_map_tx(struct ib_device *ca,
- struct ipoib_tx_buf *tx_req)
+int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
{
struct sk_buff *skb = tx_req->skb;
u64 *mapping = tx_req->mapping;
return -EIO;
}
-static void ipoib_dma_unmap_tx(struct ib_device *ca,
- struct ipoib_tx_buf *tx_req)
+void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
+ struct ipoib_tx_buf *tx_req)
{
struct sk_buff *skb = tx_req->skb;
u64 *mapping = tx_req->mapping;
int off;
if (skb_headlen(skb)) {
- ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+ ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
+ DMA_TO_DEVICE);
off = 1;
} else
off = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag),
- DMA_TO_DEVICE);
+ ib_dma_unmap_page(priv->ca, mapping[i + off],
+ skb_frag_size(frag), DMA_TO_DEVICE);
}
}
tx_req = &priv->tx_ring[wr_id];
- ipoib_dma_unmap_tx(priv->ca, tx_req);
+ ipoib_dma_unmap_tx(priv, tx_req);
++dev->stats.tx_packets;
dev->stats.tx_bytes += tx_req->skb->len;
void *head, int hlen)
{
struct ib_send_wr *bad_wr;
- int i, off;
struct sk_buff *skb = tx_req->skb;
- skb_frag_t *frags = skb_shinfo(skb)->frags;
- int nr_frags = skb_shinfo(skb)->nr_frags;
- u64 *mapping = tx_req->mapping;
- if (skb_headlen(skb)) {
- priv->tx_sge[0].addr = mapping[0];
- priv->tx_sge[0].length = skb_headlen(skb);
- off = 1;
- } else
- off = 0;
+ ipoib_build_sge(priv, tx_req);
- for (i = 0; i < nr_frags; ++i) {
- priv->tx_sge[i + off].addr = mapping[i + off];
- priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
- }
- priv->tx_wr.num_sge = nr_frags + off;
priv->tx_wr.wr_id = wr_id;
priv->tx_wr.wr.ud.remote_qpn = qpn;
priv->tx_wr.wr.ud.ah = address;
ipoib_warn(priv, "post_send failed, error %d\n", rc);
++dev->stats.tx_errors;
--priv->tx_outstanding;
- ipoib_dma_unmap_tx(priv->ca, tx_req);
+ ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(skb);
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
tx_req = &priv->tx_ring[priv->tx_tail &
(ipoib_sendq_size - 1)];
- ipoib_dma_unmap_tx(priv->ca, tx_req);
+ ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail;
--priv->tx_outstanding;
}
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
- enum ipoib_flush_level level)
+ enum ipoib_flush_level level,
+ int nesting)
{
struct ipoib_dev_priv *cpriv;
struct net_device *dev = priv->dev;
int result;
- down_read(&priv->vlan_rwsem);
+ down_read_nested(&priv->vlan_rwsem, nesting);
/*
* Flush any child interfaces too -- they might be up even if
* the parent is down.
*/
list_for_each_entry(cpriv, &priv->child_intfs, list)
- __ipoib_ib_dev_flush(cpriv, level);
+ __ipoib_ib_dev_flush(cpriv, level, nesting + 1);
up_read(&priv->vlan_rwsem);
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_light);
- __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
}
void ipoib_ib_dev_flush_normal(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_normal);
- __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
}
void ipoib_ib_dev_flush_heavy(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_heavy);
- __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
+ __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
}
void ipoib_ib_dev_cleanup(struct net_device *dev)
struct ipoib_dev_priv *priv = netdev_priv(dev);
if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
- features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
+ features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
return features;
}
ipoib_warn(priv, "enabling connected mode "
"will cause multicast packet drops\n");
netdev_update_features(dev);
+ dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
rtnl_unlock();
priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
SET_NETDEV_DEV(priv->dev, hca->dma_device);
priv->dev->dev_id = port - 1;
- if (!ib_query_port(hca, port, &attr))
+ result = ib_query_port(hca, port, &attr);
+ if (!result)
priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
else {
printk(KERN_WARNING "%s: ib_query_port %d failed\n",
goto device_init_failed;
}
- if (ipoib_set_dev_features(priv, hca))
+ result = ipoib_set_dev_features(priv, hca);
+ if (result)
goto device_init_failed;
/*
struct list_head *dev_list;
struct net_device *dev;
struct ipoib_dev_priv *priv;
- int s, e, p;
+ int p;
int count = 0;
dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
INIT_LIST_HEAD(dev_list);
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
- s = 0;
- e = 0;
- } else {
- s = 1;
- e = device->phys_port_cnt;
- }
-
- for (p = s; p <= e; ++p) {
+ for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
if (!rdma_protocol_ib(device, p))
continue;
dev = ipoib_add_port("ib%d", device, p);
{
int tmo, res;
- if (strncmp(val, "off", 3) != 0) {
- res = kstrtoint(val, 0, &tmo);
- if (res)
- goto out;
- } else {
- tmo = -1;
- }
+ res = srp_parse_tmo(&tmo, val);
+ if (res)
+ goto out;
+
if (kp->arg == &srp_reconnect_delay)
res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
srp_dev_loss_tmo);
struct srp_device *srp_dev;
struct ib_device_attr *dev_attr;
struct srp_host *host;
- int mr_page_shift, s, e, p;
+ int mr_page_shift, p;
u64 max_pages_per_mr;
dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
if (IS_ERR(srp_dev->mr))
goto err_pd;
- if (device->node_type == RDMA_NODE_IB_SWITCH) {
- s = 0;
- e = 0;
- } else {
- s = 1;
- e = device->phys_port_cnt;
- }
-
- for (p = s; p <= e; ++p) {
+ for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
host = srp_add_port(srp_dev, p);
if (host)
list_add_tail(&host->list, &srp_dev->dev_list);
int i;
ioui = (struct ib_dm_iou_info *)mad->data;
- ioui->change_id = __constant_cpu_to_be16(1);
+ ioui->change_id = cpu_to_be16(1);
ioui->max_controllers = 16;
/* set present for slot 1 and empty for the rest */
if (!slot || slot > 16) {
mad->mad_hdr.status
- = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+ = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
return;
}
if (slot > 2) {
mad->mad_hdr.status
- = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+ = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
return;
}
iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
iocp->subsys_device_id = 0x0;
- iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
- iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS);
- iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL);
- iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION);
+ iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
+ iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
+ iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
+ iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
iocp->rdma_read_depth = 4;
iocp->send_size = cpu_to_be32(srp_max_req_size);
if (!slot || slot > 16) {
mad->mad_hdr.status
- = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+ = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
return;
}
if (slot > 2 || lo > hi || hi > 1) {
mad->mad_hdr.status
- = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+ = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
return;
}
break;
default:
rsp_mad->mad_hdr.status =
- __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+ cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
break;
}
}
break;
case IB_MGMT_METHOD_SET:
dm_mad->mad_hdr.status =
- __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+ cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
break;
default:
dm_mad->mad_hdr.status =
- __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
+ cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
break;
}
memset(srp_rsp, 0, sizeof *srp_rsp);
srp_rsp->opcode = SRP_RSP;
srp_rsp->req_lim_delta =
- __constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
+ cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
srp_rsp->tag = tag;
srp_rsp->status = status;
memset(srp_rsp, 0, sizeof *srp_rsp);
srp_rsp->opcode = SRP_RSP;
- srp_rsp->req_lim_delta = __constant_cpu_to_be32(1
- + atomic_xchg(&ch->req_lim_delta, 0));
+ srp_rsp->req_lim_delta =
+ cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
srp_rsp->tag = tag;
srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
switch (len) {
case 8:
if ((*((__be64 *)lun) &
- __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
+ cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
goto out_err;
break;
case 4:
}
if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
ret = -EINVAL;
pr_err("rejected SRP_LOGIN_REQ because its"
" length (%d bytes) is out of range (%d .. %d)\n",
}
if (!sport->enabled) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
ret = -EINVAL;
pr_err("rejected SRP_LOGIN_REQ because the target port"
" has not yet been enabled\n");
if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
|| *(__be64 *)(req->target_port_id + 8) !=
cpu_to_be64(srpt_service_guid)) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
ret = -ENOMEM;
pr_err("rejected SRP_LOGIN_REQ because it"
" has an invalid target port identifier.\n");
ch = kzalloc(sizeof *ch, GFP_KERNEL);
if (!ch) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_err("rejected SRP_LOGIN_REQ because no memory.\n");
ret = -ENOMEM;
goto reject;
ret = srpt_create_ch_ib(ch);
if (ret) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_err("rejected SRP_LOGIN_REQ because creating"
" a new RDMA channel failed.\n");
goto free_ring;
ret = srpt_ch_qp_rtr(ch, ch->qp);
if (ret) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_err("rejected SRP_LOGIN_REQ because enabling"
" RTR failed (error code = %d)\n", ret);
goto destroy_ib;
if (!nacl) {
pr_info("Rejected login because no ACL has been"
" configured yet for initiator %s.\n", ch->sess_name);
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
goto destroy_ib;
}
ch->sess = transport_init_session(TARGET_PROT_NORMAL);
if (IS_ERR(ch->sess)) {
- rej->reason = __constant_cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_debug("Failed to create session\n");
goto deregister_session;
}
rsp->max_it_iu_len = req->req_it_iu_len;
rsp->max_ti_iu_len = req->req_it_iu_len;
ch->max_ti_iu_len = it_iu_len;
- rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
- | SRP_BUF_FORMAT_INDIRECT);
+ rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
+ | SRP_BUF_FORMAT_INDIRECT);
rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
atomic_set(&ch->req_lim, ch->rq_size);
atomic_set(&ch->req_lim_delta, 0);
reject:
rej->opcode = SRP_LOGIN_REJ;
rej->tag = req->tag;
- rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
- | SRP_BUF_FORMAT_INDIRECT);
+ rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
+ | SRP_BUF_FORMAT_INDIRECT);
ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
(void *)rej, sizeof *rej);
return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n");
}
-static int srp_parse_tmo(int *tmo, const char *buf)
+int srp_parse_tmo(int *tmo, const char *buf)
{
int res = 0;
return res;
}
+EXPORT_SYMBOL(srp_parse_tmo);
static ssize_t show_reconnect_delay(struct device *dev,
struct device_attribute *attr, char *buf)
char node_desc[64];
__be64 node_guid;
u32 local_dma_lkey;
+ u16 is_switch:1;
u8 node_type;
u8 phys_port_cnt;
enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
u8 port_num);
+/**
+ * rdma_cap_ib_switch - Check if the device is an IB switch
+ * @device: Device to check
+ *
+ * The device driver is responsible for setting the is_switch bit in
+ * the ib_device structure at init time.
+ *
+ * Return: true if the device is an IB switch.
+ */
+static inline bool rdma_cap_ib_switch(const struct ib_device *device)
+{
+ return device->is_switch;
+}
+
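A minimal sketch of that driver responsibility (foo_register_ibdev() and struct foo_dev are hypothetical, not taken from any in-tree driver): the bit is set before ib_register_device() so that rdma_cap_ib_switch() and rdma_start_port()/rdma_end_port() report switch semantics from the start.

static int foo_register_ibdev(struct foo_dev *fdev)
{
	struct ib_device *ibdev = &fdev->ib_dev;

	ibdev->node_type     = RDMA_NODE_IB_SWITCH;
	ibdev->is_switch     = 1;	/* consumed by rdma_cap_ib_switch() */
	ibdev->phys_port_cnt = fdev->num_ports;

	return ib_register_device(ibdev, NULL);
}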
/**
* rdma_start_port - Return the first valid port number for the device
* specified
*/
static inline u8 rdma_start_port(const struct ib_device *device)
{
- return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
+ return rdma_cap_ib_switch(device) ? 0 : 1;
}
/**
*/
static inline u8 rdma_end_port(const struct ib_device *device)
{
- return (device->node_type == RDMA_NODE_IB_SWITCH) ?
- 0 : device->phys_port_cnt;
+ return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
}
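A hypothetical helper (foo_scan_ports() is not part of the patch) showing the iteration pattern the rest of this series converts callers to: rdma_start_port()/rdma_end_port() yield 0..0 for a switch and 1..phys_port_cnt for a CA/RNIC, so callers no longer test node_type themselves.

static void foo_scan_ports(struct ib_device *device)
{
	u8 p;

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		if (!rdma_cap_ib_mad(device, p))
			continue;
		/* per-port setup would go here */
	}
}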
static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
extern void srp_rport_del(struct srp_rport *);
extern int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo,
int dev_loss_tmo);
+int srp_parse_tmo(int *tmo, const char *buf);
extern int srp_reconnect_rport(struct srp_rport *rport);
extern void srp_start_tl_fail_timers(struct srp_rport *rport);
extern void srp_remove_host(struct Scsi_Host *);
}
ibmr = rds_ib_alloc_fmr(rds_ibdev);
- if (IS_ERR(ibmr))
+ if (IS_ERR(ibmr)) {
+ rds_ib_dev_put(rds_ibdev);
return ibmr;
+ }
ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
if (ret == 0)