From e19c0d237873be2426dac45887edf293da13c339 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 3 Apr 2018 07:52:02 +0300 Subject: RDMA/rdma_cm: Remove process_req and timer sorting Now that the work queue is used directly to launch and track the work there is no need for the second processing function to do 'all list entries'. Just schedule all entries onto the main work queue directly. We can also drop all of the useless list sorting now, as the workqueue sorts by expiration time automatically. This change requires switching lock to a spinlock as netdev notifiers are called in an atomic context, this is now easy since the lock does not need to be held across the lookup, that is already single threaded due to the work queue. Signed-off-by: Leon Romanovsky Reviewed-by: Parav Pandit Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/addr.c | 96 +++++++++++------------------------------- 1 file changed, 25 insertions(+), 71 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 88a7542d8c7b..8ef4b98e6a3a 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -68,11 +68,8 @@ struct addr_req { static atomic_t ib_nl_addr_request_seq = ATOMIC_INIT(0); -static void process_req(struct work_struct *work); - -static DEFINE_MUTEX(lock); +static DEFINE_SPINLOCK(lock); static LIST_HEAD(req_list); -static DECLARE_DELAYED_WORK(work, process_req); static struct workqueue_struct *addr_wq; static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = { @@ -112,7 +109,7 @@ static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh) memcpy(&gid, nla_data(curr), nla_len(curr)); } - mutex_lock(&lock); + spin_lock_bh(&lock); list_for_each_entry(req, &req_list, list) { if (nlh->nlmsg_seq != req->seq) continue; @@ -122,7 +119,7 @@ static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh) found = 1; break; } - mutex_unlock(&lock); + spin_unlock_bh(&lock); if (!found) pr_info("Couldn't find request waiting for DGID: %pI6\n", @@ -302,7 +299,7 @@ int rdma_translate_ip(const struct sockaddr *addr, } EXPORT_SYMBOL(rdma_translate_ip); -static void set_timeout(struct delayed_work *delayed_work, unsigned long time) +static void set_timeout(struct addr_req *req, unsigned long time) { unsigned long delay; @@ -310,23 +307,15 @@ static void set_timeout(struct delayed_work *delayed_work, unsigned long time) if ((long)delay < 0) delay = 0; - mod_delayed_work(addr_wq, delayed_work, delay); + mod_delayed_work(addr_wq, &req->work, delay); } static void queue_req(struct addr_req *req) { - struct addr_req *temp_req; - - mutex_lock(&lock); - list_for_each_entry_reverse(temp_req, &req_list, list) { - if (time_after_eq(req->timeout, temp_req->timeout)) - break; - } - - list_add(&req->list, &temp_req->list); - - set_timeout(&req->work, req->timeout); - mutex_unlock(&lock); + spin_lock_bh(&lock); + list_add_tail(&req->list, &req_list); + set_timeout(req, req->timeout); + spin_unlock_bh(&lock); } static int ib_nl_fetch_ha(const struct dst_entry *dst, @@ -584,7 +573,6 @@ static void process_one_req(struct work_struct *_work) struct addr_req *req; struct sockaddr *src_in, *dst_in; - mutex_lock(&lock); req = container_of(_work, struct addr_req, work.work); if (req->status == -ENODATA) { @@ -596,13 +584,15 @@ static void process_one_req(struct work_struct *_work) req->status = -ETIMEDOUT; } else if (req->status == -ENODATA) { /* requeue the work for retrying again */ - set_timeout(&req->work, req->timeout); - 
mutex_unlock(&lock); + spin_lock_bh(&lock); + set_timeout(req, req->timeout); + spin_unlock_bh(&lock); return; } } + spin_lock_bh(&lock); list_del(&req->list); - mutex_unlock(&lock); + spin_unlock_bh(&lock); /* * Although the work will normally have been canceled by the @@ -619,47 +609,6 @@ static void process_one_req(struct work_struct *_work) kfree(req); } -static void process_req(struct work_struct *work) -{ - struct addr_req *req, *temp_req; - struct sockaddr *src_in, *dst_in; - struct list_head done_list; - - INIT_LIST_HEAD(&done_list); - - mutex_lock(&lock); - list_for_each_entry_safe(req, temp_req, &req_list, list) { - if (req->status == -ENODATA) { - src_in = (struct sockaddr *) &req->src_addr; - dst_in = (struct sockaddr *) &req->dst_addr; - req->status = addr_resolve(src_in, dst_in, req->addr, - true, req->seq); - if (req->status && time_after_eq(jiffies, req->timeout)) - req->status = -ETIMEDOUT; - else if (req->status == -ENODATA) { - set_timeout(&req->work, req->timeout); - continue; - } - } - list_move_tail(&req->list, &done_list); - } - - mutex_unlock(&lock); - - list_for_each_entry_safe(req, temp_req, &done_list, list) { - list_del(&req->list); - /* It is safe to cancel other work items from this work item - * because at a time there can be only one work item running - * with this single threaded work queue. - */ - cancel_delayed_work(&req->work); - req->callback(req->status, (struct sockaddr *) &req->src_addr, - req->addr, req->context); - put_client(req->client); - kfree(req); - } -} - int rdma_resolve_ip(struct rdma_addr_client *client, struct sockaddr *src_addr, struct sockaddr *dst_addr, struct rdma_dev_addr *addr, int timeout_ms, @@ -743,17 +692,16 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr) { struct addr_req *req, *temp_req; - mutex_lock(&lock); + spin_lock_bh(&lock); list_for_each_entry_safe(req, temp_req, &req_list, list) { if (req->addr == addr) { req->status = -ECANCELED; req->timeout = jiffies; - list_move(&req->list, &req_list); - set_timeout(&req->work, req->timeout); + set_timeout(req, req->timeout); break; } } - mutex_unlock(&lock); + spin_unlock_bh(&lock); } EXPORT_SYMBOL(rdma_addr_cancel); @@ -810,11 +758,17 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid, static int netevent_callback(struct notifier_block *self, unsigned long event, void *ctx) { + struct addr_req *req; + if (event == NETEVENT_NEIGH_UPDATE) { struct neighbour *neigh = ctx; - if (neigh->nud_state & NUD_VALID) - set_timeout(&work, jiffies); + if (neigh->nud_state & NUD_VALID) { + spin_lock_bh(&lock); + list_for_each_entry(req, &req_list, list) + set_timeout(req, jiffies); + spin_unlock_bh(&lock); + } } return 0; } -- cgit v1.2.3 From 44e75052bc2ae4d39386c1d9e218861639905873 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 3 Apr 2018 07:52:03 +0300 Subject: RDMA/rdma_cm: Make rdma_addr_cancel into a fence Currently rdma_addr_cancel does not prevent the callback from being used, this is surprising and hard to reason about. There does not appear to be a bug here as the only user of this API does refcount properly, fixing it only to increase clarity. 
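To illustrate the caller pattern this fence semantic enables, here is a minimal sketch; the context structure and teardown helper are hypothetical, and only rdma_addr_cancel() is the real API:

    #include <linux/slab.h>
    #include <rdma/ib_addr.h>

    struct my_resolve_ctx {                 /* hypothetical per-request state */
            struct rdma_dev_addr dev_addr;
            void *private_data;
    };

    static void my_teardown(struct my_resolve_ctx *ctx)
    {
            /*
             * With the fence semantic, once rdma_addr_cancel() returns the
             * resolve callback has either already run or will never run,
             * so the per-request state can be freed immediately.
             */
            rdma_addr_cancel(&ctx->dev_addr);
            kfree(ctx);
    }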
Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/addr.c | 58 +++++++++++++++++++++++++++++------------- 1 file changed, 40 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 8ef4b98e6a3a..9756cfbdef0e 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -585,28 +585,30 @@ static void process_one_req(struct work_struct *_work) } else if (req->status == -ENODATA) { /* requeue the work for retrying again */ spin_lock_bh(&lock); - set_timeout(req, req->timeout); + if (!list_empty(&req->list)) + set_timeout(req, req->timeout); spin_unlock_bh(&lock); return; } } - spin_lock_bh(&lock); - list_del(&req->list); - spin_unlock_bh(&lock); - - /* - * Although the work will normally have been canceled by the - * workqueue, it can still be requeued as long as it is on the - * req_list, so it could have been requeued before we grabbed &lock. - * We need to cancel it after it is removed from req_list to really be - * sure it is safe to free. - */ - cancel_delayed_work(&req->work); req->callback(req->status, (struct sockaddr *)&req->src_addr, req->addr, req->context); - put_client(req->client); - kfree(req); + req->callback = NULL; + + spin_lock_bh(&lock); + if (!list_empty(&req->list)) { + /* + * Although the work will normally have been canceled by the + * workqueue, it can still be requeued as long as it is on the + * req_list. + */ + cancel_delayed_work(&req->work); + list_del_init(&req->list); + put_client(req->client); + kfree(req); + } + spin_unlock_bh(&lock); } int rdma_resolve_ip(struct rdma_addr_client *client, @@ -691,17 +693,37 @@ int rdma_resolve_ip_route(struct sockaddr *src_addr, void rdma_addr_cancel(struct rdma_dev_addr *addr) { struct addr_req *req, *temp_req; + struct addr_req *found = NULL; spin_lock_bh(&lock); list_for_each_entry_safe(req, temp_req, &req_list, list) { if (req->addr == addr) { - req->status = -ECANCELED; - req->timeout = jiffies; - set_timeout(req, req->timeout); + /* + * Removing from the list means we take ownership of + * the req + */ + list_del_init(&req->list); + found = req; break; } } spin_unlock_bh(&lock); + + if (!found) + return; + + /* + * sync canceling the work after removing it from the req_list + * guarentees no work is running and none will be started. + */ + cancel_delayed_work_sync(&found->work); + + if (found->callback) + found->callback(-ECANCELED, (struct sockaddr *)&found->src_addr, + found->addr, found->context); + + put_client(found->client); + kfree(found); } EXPORT_SYMBOL(rdma_addr_cancel); -- cgit v1.2.3 From ee6548d1d98df7df3b9c8103a42cf68b31c29417 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 3 Apr 2018 07:52:04 +0300 Subject: RDMA/rdma_cm: Delete rdma_addr_client The only thing it does is block module unload while work is posted from rdma_resolve_ip(). However, this is not the right place to do this. The users of rdma_resolve_ip() must ensure their own module does not unload until rdma_resolve_ip() calls the callback, or until rdma_addr_cancel() is called. Similarly callers to rdma_addr_find_l2_eth_by_grh() must ensure their module does not unload while they are calling code. The only two users are already safe, so there is no need for this. 
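To show how a caller meets this requirement on its own, here is a sketch modeled on the rdma_addr_find_l2_eth_by_grh() pattern visible in the diff below; the wrapper and context names are hypothetical:

    #include <linux/completion.h>
    #include <rdma/ib_addr.h>

    struct my_resolve_ctx {                 /* hypothetical */
            struct completion comp;
            int status;
    };

    static void my_resolve_cb(int status, struct sockaddr *src_addr,
                              struct rdma_dev_addr *addr, void *context)
    {
            struct my_resolve_ctx *ctx = context;

            ctx->status = status;
            complete(&ctx->comp);
    }

    static int my_resolve_sync(struct sockaddr *src, struct sockaddr *dst,
                               struct rdma_dev_addr *dev_addr)
    {
            struct my_resolve_ctx ctx;
            int ret;

            init_completion(&ctx.comp);
            ret = rdma_resolve_ip(src, dst, dev_addr, 1000, my_resolve_cb, &ctx);
            if (ret)
                    return ret;

            /* The callback completes before we return, so the module that
             * owns my_resolve_cb() cannot be unloaded while it is needed. */
            wait_for_completion(&ctx.comp);
            return ctx.status;
    }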
Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/addr.c | 38 ++++---------------------------------- drivers/infiniband/core/cma.c | 6 +----- 2 files changed, 5 insertions(+), 39 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 9756cfbdef0e..4f32c4062fb6 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -56,7 +56,6 @@ struct addr_req { struct sockaddr_storage src_addr; struct sockaddr_storage dst_addr; struct rdma_dev_addr *addr; - struct rdma_addr_client *client; void *context; void (*callback)(int status, struct sockaddr *src_addr, struct rdma_dev_addr *addr, void *context); @@ -220,28 +219,6 @@ int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr) } EXPORT_SYMBOL(rdma_addr_size_kss); -static struct rdma_addr_client self; - -void rdma_addr_register_client(struct rdma_addr_client *client) -{ - atomic_set(&client->refcount, 1); - init_completion(&client->comp); -} -EXPORT_SYMBOL(rdma_addr_register_client); - -static inline void put_client(struct rdma_addr_client *client) -{ - if (atomic_dec_and_test(&client->refcount)) - complete(&client->comp); -} - -void rdma_addr_unregister_client(struct rdma_addr_client *client) -{ - put_client(client); - wait_for_completion(&client->comp); -} -EXPORT_SYMBOL(rdma_addr_unregister_client); - void rdma_copy_addr(struct rdma_dev_addr *dev_addr, const struct net_device *dev, const unsigned char *dst_dev_addr) @@ -605,14 +582,12 @@ static void process_one_req(struct work_struct *_work) */ cancel_delayed_work(&req->work); list_del_init(&req->list); - put_client(req->client); kfree(req); } spin_unlock_bh(&lock); } -int rdma_resolve_ip(struct rdma_addr_client *client, - struct sockaddr *src_addr, struct sockaddr *dst_addr, +int rdma_resolve_ip(struct sockaddr *src_addr, struct sockaddr *dst_addr, struct rdma_dev_addr *addr, int timeout_ms, void (*callback)(int status, struct sockaddr *src_addr, struct rdma_dev_addr *addr, void *context), @@ -644,8 +619,6 @@ int rdma_resolve_ip(struct rdma_addr_client *client, req->addr = addr; req->callback = callback; req->context = context; - req->client = client; - atomic_inc(&client->refcount); INIT_DELAYED_WORK(&req->work, process_one_req); req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq); @@ -661,7 +634,6 @@ int rdma_resolve_ip(struct rdma_addr_client *client, break; default: ret = req->status; - atomic_dec(&client->refcount); goto err; } return ret; @@ -722,7 +694,6 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr) found->callback(-ECANCELED, (struct sockaddr *)&found->src_addr, found->addr, found->context); - put_client(found->client); kfree(found); } EXPORT_SYMBOL(rdma_addr_cancel); @@ -761,8 +732,8 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid, dev_addr.net = &init_net; init_completion(&ctx.comp); - ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr, - &dev_addr, 1000, resolve_cb, &ctx); + ret = rdma_resolve_ip(&sgid_addr._sockaddr, &dgid_addr._sockaddr, + &dev_addr, 1000, resolve_cb, &ctx); if (ret) return ret; @@ -806,14 +777,13 @@ int addr_init(void) return -ENOMEM; register_netevent_notifier(&nb); - rdma_addr_register_client(&self); return 0; } void addr_cleanup(void) { - rdma_addr_unregister_client(&self); unregister_netevent_notifier(&nb); destroy_workqueue(addr_wq); + WARN_ON(!list_empty(&req_list)); } diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 51a641002e10..48300838e354 
100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -156,7 +156,6 @@ static struct ib_client cma_client = { }; static struct ib_sa_client sa_client; -static struct rdma_addr_client addr_client; static LIST_HEAD(dev_list); static LIST_HEAD(listen_any_list); static DEFINE_MUTEX(lock); @@ -2910,7 +2909,7 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, if (dst_addr->sa_family == AF_IB) { ret = cma_resolve_ib_addr(id_priv); } else { - ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv), + ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr, &id->route.addr.dev_addr, timeout_ms, addr_handler, id_priv); } @@ -4547,7 +4546,6 @@ static int __init cma_init(void) goto err_wq; ib_sa_register_client(&sa_client); - rdma_addr_register_client(&addr_client); register_netdevice_notifier(&cma_nb); ret = ib_register_client(&cma_client); @@ -4561,7 +4559,6 @@ static int __init cma_init(void) err: unregister_netdevice_notifier(&cma_nb); - rdma_addr_unregister_client(&addr_client); ib_sa_unregister_client(&sa_client); err_wq: destroy_workqueue(cma_wq); @@ -4574,7 +4571,6 @@ static void __exit cma_cleanup(void) rdma_nl_unregister(RDMA_NL_RDMA_CM); ib_unregister_client(&cma_client); unregister_netdevice_notifier(&cma_nb); - rdma_addr_unregister_client(&addr_client); ib_sa_unregister_client(&sa_client); unregister_pernet_subsys(&cma_pernet_operations); destroy_workqueue(cma_wq); -- cgit v1.2.3 From 0f02ba7ed16acdbc8c4f0b46a6fee81bb94f3407 Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Tue, 3 Apr 2018 20:08:20 -0400 Subject: IB/rxe: make the variable static The variable rxe_net_notifier is only used in the file rxe_net.c. So remove it from rxe_net.h file and make it static in the file rxe_net.c. CC: Srinivas Eeda CC: Junxiao Bi Signed-off-by: Zhu Yanjun Reviewed-by: Bart Van Assche Reviewed-by: Yuval Shaia Signed-off-by: Jason Gunthorpe --- drivers/infiniband/sw/rxe/rxe_net.c | 2 +- drivers/infiniband/sw/rxe/rxe_net.h | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 9da6e37fb70c..241762606a66 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -707,7 +707,7 @@ out: return NOTIFY_OK; } -struct notifier_block rxe_net_notifier = { +static struct notifier_block rxe_net_notifier = { .notifier_call = rxe_notify, }; diff --git a/drivers/infiniband/sw/rxe/rxe_net.h b/drivers/infiniband/sw/rxe/rxe_net.h index 728d8c71b36a..15a0caf98629 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.h +++ b/drivers/infiniband/sw/rxe/rxe_net.h @@ -43,7 +43,6 @@ struct rxe_recv_sockets { struct socket *sk6; }; -extern struct notifier_block rxe_net_notifier; void rxe_release_udp_tunnel(struct socket *sk); struct rxe_dev *rxe_net_add(struct net_device *ndev); -- cgit v1.2.3 From 76be04500be2d3e3c0ed7e13d3bc4453b985355b Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Sat, 14 Apr 2018 07:44:44 -0400 Subject: IB/rxe: avoid export symbols The functions rxe_set_mtu, rxe_add and rxe_remove are only used in their own module. So it is not necessary to export them. 
CC: Srinivas Eeda CC: Junxiao Bi Signed-off-by: Zhu Yanjun Reviewed-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/sw/rxe/rxe.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c index e493fdbd61c6..4d1d96805ca5 100644 --- a/drivers/infiniband/sw/rxe/rxe.c +++ b/drivers/infiniband/sw/rxe/rxe.c @@ -306,7 +306,6 @@ int rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu) return 0; } -EXPORT_SYMBOL(rxe_set_mtu); /* called by ifc layer to create new rxe device. * The caller should allocate memory for rxe by calling ib_alloc_device. @@ -335,7 +334,6 @@ err1: rxe_dev_put(rxe); return err; } -EXPORT_SYMBOL(rxe_add); /* called by the ifc layer to remove a device */ void rxe_remove(struct rxe_dev *rxe) @@ -344,7 +342,6 @@ void rxe_remove(struct rxe_dev *rxe) rxe_dev_put(rxe); } -EXPORT_SYMBOL(rxe_remove); static int __init rxe_module_init(void) { -- cgit v1.2.3 From 39e487faaf706fa94bab4d0cf9f543a3430c746e Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Wed, 11 Apr 2018 15:32:25 +0800 Subject: infiniband: i40iw: Replace GFP_ATOMIC with GFP_KERNEL in i40iw_add_mqh_4 i40iw_add_mqh_4() is never called in atomic context, because it calls rtnl_lock(), which can sleep. Despite never getting called from atomic context, i40iw_add_mqh_4() calls kzalloc() with GFP_ATOMIC, which does not sleep for allocation. GFP_ATOMIC is not necessary and can be replaced with GFP_KERNEL, which can sleep and improve the likelihood of successful allocation. This was found by a static analysis tool named DCNS that I wrote, and I also checked it manually. Signed-off-by: Jia-Ju Bai Acked-by: Shiraz Saleem Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/i40iw/i40iw_cm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index 4cfa8f4647e2..8310d2488681 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -1788,7 +1788,7 @@ static enum i40iw_status_code i40iw_add_mqh_4( &ifa->ifa_address, rdma_vlan_dev_vlan_id(dev), dev->dev_addr); - child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC); + child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL); cm_parent_listen_node->cm_core->stats_listen_nodes_created++; i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, -- cgit v1.2.3 From f9af8730143a0fdc572f90b8a388795ee812cd74 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Wed, 11 Apr 2018 15:32:48 +0800 Subject: infiniband: i40iw: Replace GFP_ATOMIC with GFP_KERNEL in i40iw_make_listen_node i40iw_make_listen_node() is never called in atomic context. i40iw_make_listen_node() is only called by i40iw_create_listen, which is set as ".create_listen" in struct iw_cm_verbs. Despite never getting called from atomic context, i40iw_make_listen_node() calls kzalloc() with GFP_ATOMIC, which does not sleep for allocation. GFP_ATOMIC is not necessary and can be replaced with GFP_KERNEL, which can sleep and improve the likelihood of successful allocation. This was found by a static analysis tool named DCNS that I wrote, and I also checked it manually.
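As a generic illustration of the rule these patches apply (the helper and structure below are hypothetical, not i40iw code): use GFP_KERNEL whenever the caller may sleep, and keep GFP_ATOMIC for contexts that cannot, such as interrupt handlers or regions under a spinlock.

    #include <linux/slab.h>
    #include <linux/types.h>

    struct my_node {                        /* hypothetical */
            struct list_head entry;
    };

    static struct my_node *my_alloc_node(bool can_sleep)
    {
            /*
             * GFP_KERNEL may block and lets memory reclaim make progress,
             * so it succeeds far more often under pressure; GFP_ATOMIC
             * never sleeps and is reserved for atomic context.
             */
            return kzalloc(sizeof(struct my_node),
                           can_sleep ? GFP_KERNEL : GFP_ATOMIC);
    }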
Signed-off-by: Jia-Ju Bai Acked-by: Shiraz Saleem Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/i40iw/i40iw_cm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index 8310d2488681..0243ec48e4b5 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -2872,7 +2872,7 @@ static struct i40iw_cm_listener *i40iw_make_listen_node( if (!listener) { /* create a CM listen node (1/2 node to compare incoming traffic to) */ - listener = kzalloc(sizeof(*listener), GFP_ATOMIC); + listener = kzalloc(sizeof(*listener), GFP_KERNEL); if (!listener) return NULL; cm_core->stats_listen_nodes_created++; -- cgit v1.2.3 From 4e56569cee1505846b3dcb15fbf400f6a7e9f015 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Wed, 11 Apr 2018 15:33:06 +0800 Subject: infiniband: i40iw: Replace GFP_ATOMIC with GFP_KERNEL in i40iw_l2param_change i40iw_l2param_change() is never called in atomic context. i40iw_l2param_change() is only set as ".l2_param_change" in struct i40e_client_ops, and this function pointer is not called in atomic context. Despite never getting called from atomic context, i40iw_l2param_change() calls kzalloc() with GFP_ATOMIC, which does not sleep for allocation. GFP_ATOMIC is not necessary and can be replaced with GFP_KERNEL, which can sleep and improve the likelihood of successful allocation. This was found by a static analysis tool named DCNS that I wrote, and I also checked it manually. Signed-off-by: Jia-Ju Bai Acked-by: Shiraz Saleem Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/i40iw/i40iw_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index 9cd0d3ef9057..a220794dcdb0 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c @@ -1758,7 +1758,7 @@ static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *cli return; - work = kzalloc(sizeof(*work), GFP_ATOMIC); + work = kzalloc(sizeof(*work), GFP_KERNEL); if (!work) return; -- cgit v1.2.3 From 8f1a72c815cf121f8a842c60c837f0d7605cdad4 Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Mon, 9 Apr 2018 08:19:18 -0400 Subject: IB/rxe: make rxe_release_udp_tunnel static The function rxe_release_udp_tunnel is only used in rxe_net.c, so it should be made static.
CC: Srinivas Eeda CC: Junxiao Bi Signed-off-by: Zhu Yanjun Reviewed-by: Yuval Shaia Signed-off-by: Doug Ledford --- drivers/infiniband/sw/rxe/rxe_net.c | 2 +- drivers/infiniband/sw/rxe/rxe_net.h | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 241762606a66..97e128579137 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -315,7 +315,7 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port, return sock; } -void rxe_release_udp_tunnel(struct socket *sk) +static void rxe_release_udp_tunnel(struct socket *sk) { if (sk) udp_tunnel_sock_release(sk); diff --git a/drivers/infiniband/sw/rxe/rxe_net.h b/drivers/infiniband/sw/rxe/rxe_net.h index 15a0caf98629..106c586dbb26 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.h +++ b/drivers/infiniband/sw/rxe/rxe_net.h @@ -43,8 +43,6 @@ struct rxe_recv_sockets { struct socket *sk6; }; -void rxe_release_udp_tunnel(struct socket *sk); - struct rxe_dev *rxe_net_add(struct net_device *ndev); int rxe_net_init(void); -- cgit v1.2.3 From 2e47350789ebbc002b06d4549f60b5f9cba326ea Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Tue, 10 Apr 2018 00:47:15 -0400 Subject: IB/rxe: optimize the function duplicate_request In the function duplicate_request, the reference count of the skb can be increased instead of calling skb_clone. This improves rxe performance and saves memory. CC: Srinivas Eeda CC: Junxiao Bi Signed-off-by: Zhu Yanjun Reviewed-by: Bart Van Assche Signed-off-by: Doug Ledford --- drivers/infiniband/sw/rxe/rxe_resp.c | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index a65c9969f7fc..c4172edf1f07 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -1124,24 +1124,13 @@ static enum resp_states duplicate_request(struct rxe_qp *qp, /* Find the operation in our list of responder resources. */ res = find_resource(qp, pkt->psn); if (res) { - struct sk_buff *skb_copy; - - skb_copy = skb_clone(res->atomic.skb, GFP_ATOMIC); - if (skb_copy) { - rxe_add_ref(qp); /* for the new SKB */ - } else { - pr_warn("Couldn't clone atomic resp\n"); - rc = RESPST_CLEANUP; - goto out; - } - + skb_get(res->atomic.skb); /* Resend the result. */ rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, - pkt, skb_copy); + pkt, res->atomic.skb); if (rc) { pr_err("Failed resending result. This flow is not handled - skb ignored\n"); - rxe_drop_ref(qp); - kfree_skb(skb_copy); + kfree_skb(res->atomic.skb); rc = RESPST_CLEANUP; goto out; } -- cgit v1.2.3 From fe896ceb577252966ec3339d511424e2495e1072 Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Tue, 10 Apr 2018 09:37:39 -0400 Subject: IB/rxe: replace refcount_inc with skb_get Following the advice from Bart, replace the refcount_inc calls introduced by commit 99dae690255e ("IB/rxe: optimize mcast recv process") and commit 86af61764151 ("IB/rxe: remove unnecessary skb_clone") with skb_get.
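A minimal sketch of the pattern these two patches rely on; my_xmit() and the response structure are hypothetical, while skb_get() and kfree_skb() are the real APIs. skb_get() only increments skb->users, so a cached buffer can be handed to the transmit path again without allocating a clone, and the extra reference is dropped on failure.

    #include <linux/skbuff.h>

    struct my_resp {                        /* hypothetical cached response */
            struct sk_buff *cached_skb;
    };

    int my_xmit(struct sk_buff *skb);       /* hypothetical; consumes one reference */

    static void my_resend_cached(struct my_resp *resp)
    {
            struct sk_buff *skb = resp->cached_skb;

            skb_get(skb);                   /* +1 user, no new allocation */
            if (my_xmit(skb))
                    kfree_skb(skb);         /* drop the extra reference on error */
    }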
CC: Srinivas Eeda CC: Junxiao Bi Suggested-by: Bart Van Assche Signed-off-by: Zhu Yanjun Signed-off-by: Doug Ledford --- drivers/infiniband/sw/rxe/rxe_recv.c | 2 +- drivers/infiniband/sw/rxe/rxe_resp.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c index dd80c7d9074a..c4c9f3be33d9 100644 --- a/drivers/infiniband/sw/rxe/rxe_recv.c +++ b/drivers/infiniband/sw/rxe/rxe_recv.c @@ -311,7 +311,7 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb) * increase the users of the skb then post to the next qp */ if (mce->qp_list.next != &mcg->qp_list) - refcount_inc(&skb->users); + skb_get(skb); pkt->qp = qp; rxe_add_ref(qp); diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index c4172edf1f07..ed402f028471 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -990,7 +990,7 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, memset((unsigned char *)SKB_TO_PKT(skb) + sizeof(ack_pkt), 0, sizeof(skb->cb) - sizeof(ack_pkt)); - refcount_inc(&skb->users); + skb_get(skb); res->type = RXE_ATOMIC_MASK; res->atomic.skb = skb; res->first_psn = ack_pkt.psn; -- cgit v1.2.3 From d819734126ce705784ca2cd847ad7623825f1a08 Mon Sep 17 00:00:00 2001 From: Souptick Joarder Date: Tue, 17 Apr 2018 19:53:58 +0530 Subject: infiniband: hw: hfi1: Change return type to vm_fault_t Use new return type vm_fault_t for fault handler. For now, this is just documenting that the function returns a VM_FAULT value rather than an errno. Once all instances are converted, vm_fault_t will become a distinct type. Reference id -> 1c8f422059ae ("mm: change return type to vm_fault_t") Signed-off-by: Souptick Joarder Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/file_ops.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index da4aa1a95b11..1b778fd16a32 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -110,7 +110,7 @@ static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg); static int ctxt_reset(struct hfi1_ctxtdata *uctxt); static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt, unsigned long arg); -static int vma_fault(struct vm_fault *vmf); +static vm_fault_t vma_fault(struct vm_fault *vmf); static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, unsigned long arg); @@ -591,7 +591,7 @@ done: * Local (non-chip) user memory is not mapped right away but as it is * accessed by the user-level code. */ -static int vma_fault(struct vm_fault *vmf) +static vm_fault_t vma_fault(struct vm_fault *vmf) { struct page *page; -- cgit v1.2.3 From 7991d96dd137408385f425cdf8ff815738ea2b49 Mon Sep 17 00:00:00 2001 From: Souptick Joarder Date: Tue, 17 Apr 2018 20:04:28 +0530 Subject: infiniband: hw: qib: Change return type to vm_fault_t Use new return type vm_fault_t for fault handler. For now, this is just documenting that the function returns a VM_FAULT value rather than an errno. Once all instances are converted, vm_fault_t will become a distinct type. 
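A simplified sketch of the converted handler shape; the driver context and page-lookup helper are hypothetical. The handler returns vm_fault_t codes such as VM_FAULT_SIGBUS rather than an errno, and it still slots into .fault because vm_fault_t is, for now, a typedef of the old return type.

    #include <linux/mm.h>

    struct my_ctx;                          /* hypothetical */
    struct page *my_lookup_page(struct my_ctx *ctx, pgoff_t pgoff);

    static vm_fault_t my_vma_fault(struct vm_fault *vmf)
    {
            struct my_ctx *ctx = vmf->vma->vm_private_data;
            struct page *page;

            page = my_lookup_page(ctx, vmf->pgoff);
            if (!page)
                    return VM_FAULT_SIGBUS; /* no errno values here */

            get_page(page);
            vmf->page = page;
            return 0;
    }

    static const struct vm_operations_struct my_vm_ops = {
            .fault = my_vma_fault,
    };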
Reference id -> 1c8f422059ae ("mm: change return type to vm_fault_t") Signed-off-by: Souptick Joarder Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qib/qib_file_ops.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 6a8800b65047..bbb720bfd030 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -868,7 +868,7 @@ bail: /* * qib_file_vma_fault - handle a VMA page fault. */ -static int qib_file_vma_fault(struct vm_fault *vmf) +static vm_fault_t qib_file_vma_fault(struct vm_fault *vmf) { struct page *page; -- cgit v1.2.3 From 10c47d560603a8ba9d74889028cf6728a2d845ca Mon Sep 17 00:00:00 2001 From: Yuval Shaia Date: Fri, 20 Apr 2018 17:05:03 +0300 Subject: IB/rxe: Change rxe_rcv to return void It always returns 0. Change return type to void. Signed-off-by: Yuval Shaia Reviewed-by: Zhu Yanjun Reviewed-by: Johannes Thumshirn Signed-off-by: Doug Ledford --- drivers/infiniband/sw/rxe/rxe.h | 2 +- drivers/infiniband/sw/rxe/rxe_loc.h | 5 +++-- drivers/infiniband/sw/rxe/rxe_net.c | 9 ++++++--- drivers/infiniband/sw/rxe/rxe_recv.c | 5 ++--- 4 files changed, 12 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h index 561ad307c6ec..1275fde6503a 100644 --- a/drivers/infiniband/sw/rxe/rxe.h +++ b/drivers/infiniband/sw/rxe/rxe.h @@ -98,7 +98,7 @@ int rxe_add(struct rxe_dev *rxe, unsigned int mtu); void rxe_remove(struct rxe_dev *rxe); void rxe_remove_all(void); -int rxe_rcv(struct sk_buff *skb); +void rxe_rcv(struct sk_buff *skb); static inline void rxe_dev_put(struct rxe_dev *rxe) { diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h index b71023c1c58b..2f8ab8eebcb1 100644 --- a/drivers/infiniband/sw/rxe/rxe_loc.h +++ b/drivers/infiniband/sw/rxe/rxe_loc.h @@ -143,7 +143,7 @@ void rxe_mem_cleanup(struct rxe_pool_entry *arg); int advance_dma_data(struct rxe_dma_info *dma, unsigned int length); /* rxe_net.c */ -int rxe_loopback(struct sk_buff *skb); +void rxe_loopback(struct sk_buff *skb); int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb); struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av, int paylen, struct rxe_pkt_info *pkt); @@ -268,7 +268,8 @@ static inline int rxe_xmit_packet(struct rxe_dev *rxe, struct rxe_qp *qp, if (pkt->mask & RXE_LOOPBACK_MASK) { memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt)); - err = rxe_loopback(skb); + rxe_loopback(skb); + err = 0; } else { err = rxe_send(pkt, skb); } diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 97e128579137..fca13a6281f0 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -276,9 +276,12 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb) pkt->mask = RXE_GRH_MASK; pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph); - return rxe_rcv(skb); + rxe_rcv(skb); + + return 0; drop: kfree_skb(skb); + return 0; } @@ -517,9 +520,9 @@ int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb) return 0; } -int rxe_loopback(struct sk_buff *skb) +void rxe_loopback(struct sk_buff *skb) { - return rxe_rcv(skb); + rxe_rcv(skb); } static inline int addr_same(struct rxe_dev *rxe, struct rxe_av *av) diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c index c4c9f3be33d9..dfba44a40f0b 100644 --- 
a/drivers/infiniband/sw/rxe/rxe_recv.c +++ b/drivers/infiniband/sw/rxe/rxe_recv.c @@ -345,7 +345,7 @@ static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb) } /* rxe_rcv is called from the interface driver */ -int rxe_rcv(struct sk_buff *skb) +void rxe_rcv(struct sk_buff *skb) { int err; struct rxe_pkt_info *pkt = SKB_TO_PKT(skb); @@ -403,12 +403,11 @@ int rxe_rcv(struct sk_buff *skb) else rxe_rcv_pkt(rxe, pkt, skb); - return 0; + return; drop: if (pkt->qp) rxe_drop_ref(pkt->qp); kfree_skb(skb); - return 0; } -- cgit v1.2.3 From 0dff463a6a867072f2c779e2fed651b498901801 Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Fri, 20 Apr 2018 10:30:54 -0400 Subject: IB/rxe: change rxe_set_mtu function type to void The function rxe_set_mtu always returns zero. So this function type is changed to void. CC: Srinivas Eeda CC: Junxiao Bi Signed-off-by: Zhu Yanjun Reviewed-by: Yuval Shaia Signed-off-by: Doug Ledford --- drivers/infiniband/sw/rxe/rxe.c | 8 ++------ drivers/infiniband/sw/rxe/rxe.h | 2 +- 2 files changed, 3 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c index 4d1d96805ca5..7121e1b1eb89 100644 --- a/drivers/infiniband/sw/rxe/rxe.c +++ b/drivers/infiniband/sw/rxe/rxe.c @@ -291,7 +291,7 @@ err1: return err; } -int rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu) +void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu) { struct rxe_port *port = &rxe->port; enum ib_mtu mtu; @@ -303,8 +303,6 @@ int rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu) port->attr.active_mtu = mtu; port->mtu_cap = ib_mtu_enum_to_int(mtu); - - return 0; } /* called by ifc layer to create new rxe device. @@ -320,9 +318,7 @@ int rxe_add(struct rxe_dev *rxe, unsigned int mtu) if (err) goto err1; - err = rxe_set_mtu(rxe, mtu); - if (err) - goto err1; + rxe_set_mtu(rxe, mtu); err = rxe_register_device(rxe); if (err) diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h index 1275fde6503a..d9ec2de68738 100644 --- a/drivers/infiniband/sw/rxe/rxe.h +++ b/drivers/infiniband/sw/rxe/rxe.h @@ -92,7 +92,7 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe, return retval; } -int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu); +void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu); int rxe_add(struct rxe_dev *rxe, unsigned int mtu); void rxe_remove(struct rxe_dev *rxe); -- cgit v1.2.3 From e12ee8ce51435c4d24f437f10e0fce773505c674 Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Mon, 23 Apr 2018 03:57:58 -0400 Subject: IB/rxe: remove unused function variable In the functions rxe_mem_init_dma, rxe_mem_init_user, rxe_mem_init_fast and copy_data, the function variable rxe is not used. So this function variable rxe is removed. 
CC: Srinivas Eeda CC: Junxiao Bi Signed-off-by: Zhu Yanjun Reviewed-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/sw/rxe/rxe_comp.c | 6 ++---- drivers/infiniband/sw/rxe/rxe_loc.h | 8 ++++---- drivers/infiniband/sw/rxe/rxe_mr.c | 13 ++++++------- drivers/infiniband/sw/rxe/rxe_req.c | 2 +- drivers/infiniband/sw/rxe/rxe_resp.c | 3 +-- drivers/infiniband/sw/rxe/rxe_verbs.c | 6 +++--- 6 files changed, 17 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c index 6cdc40ed8a9f..a285978aa7fe 100644 --- a/drivers/infiniband/sw/rxe/rxe_comp.c +++ b/drivers/infiniband/sw/rxe/rxe_comp.c @@ -355,10 +355,9 @@ static inline enum comp_state do_read(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe) { - struct rxe_dev *rxe = to_rdev(qp->ibqp.device); int ret; - ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE, + ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &wqe->dma, payload_addr(pkt), payload_size(pkt), to_mem_obj, NULL); if (ret) @@ -374,12 +373,11 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe) { - struct rxe_dev *rxe = to_rdev(qp->ibqp.device); int ret; u64 atomic_orig = atmack_orig(pkt); - ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE, + ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &wqe->dma, &atomic_orig, sizeof(u64), to_mem_obj, NULL); if (ret) diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h index 2f8ab8eebcb1..a51ece596c43 100644 --- a/drivers/infiniband/sw/rxe/rxe_loc.h +++ b/drivers/infiniband/sw/rxe/rxe_loc.h @@ -106,20 +106,20 @@ enum copy_direction { from_mem_obj, }; -int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd, +int rxe_mem_init_dma(struct rxe_pd *pd, int access, struct rxe_mem *mem); -int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start, +int rxe_mem_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova, int access, struct ib_udata *udata, struct rxe_mem *mr); -int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd, +int rxe_mem_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mem *mem); int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length, enum copy_direction dir, u32 *crcp); -int copy_data(struct rxe_dev *rxe, struct rxe_pd *pd, int access, +int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma, void *addr, int length, enum copy_direction dir, u32 *crcp); diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c index 5c2684bf430f..dff605fdf60f 100644 --- a/drivers/infiniband/sw/rxe/rxe_mr.c +++ b/drivers/infiniband/sw/rxe/rxe_mr.c @@ -107,7 +107,7 @@ void rxe_mem_cleanup(struct rxe_pool_entry *arg) } } -static int rxe_mem_alloc(struct rxe_dev *rxe, struct rxe_mem *mem, int num_buf) +static int rxe_mem_alloc(struct rxe_mem *mem, int num_buf) { int i; int num_map; @@ -145,7 +145,7 @@ err1: return -ENOMEM; } -int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd, +int rxe_mem_init_dma(struct rxe_pd *pd, int access, struct rxe_mem *mem) { rxe_mem_init(access, mem); @@ -158,7 +158,7 @@ int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd, return 0; } -int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start, +int rxe_mem_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova, int access, struct ib_udata *udata, struct rxe_mem *mem) { @@ -184,7 +184,7 @@ int rxe_mem_init_user(struct rxe_dev *rxe, 
struct rxe_pd *pd, u64 start, rxe_mem_init(access, mem); - err = rxe_mem_alloc(rxe, mem, num_buf); + err = rxe_mem_alloc(mem, num_buf); if (err) { pr_warn("err %d from rxe_mem_alloc\n", err); ib_umem_release(umem); @@ -236,7 +236,7 @@ err1: return err; } -int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd, +int rxe_mem_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mem *mem) { int err; @@ -246,7 +246,7 @@ int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd, /* In fastreg, we also set the rkey */ mem->ibmr.rkey = mem->ibmr.lkey; - err = rxe_mem_alloc(rxe, mem, max_pages); + err = rxe_mem_alloc(mem, max_pages); if (err) goto err1; @@ -434,7 +434,6 @@ err1: * under the control of a dma descriptor */ int copy_data( - struct rxe_dev *rxe, struct rxe_pd *pd, int access, struct rxe_dma_info *dma, diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index 7bdaf71b8221..957826dde94f 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -490,7 +490,7 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe, wqe->dma.resid -= paylen; wqe->dma.sge_offset += paylen; } else { - err = copy_data(rxe, qp->pd, 0, &wqe->dma, + err = copy_data(qp->pd, 0, &wqe->dma, payload_addr(pkt), paylen, from_mem_obj, &crc); diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index ed402f028471..c45c1ff24497 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -511,9 +511,8 @@ static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr, int data_len) { int err; - struct rxe_dev *rxe = to_rdev(qp->ibqp.device); - err = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma, + err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma, data_addr, data_len, to_mem_obj, NULL); if (unlikely(err)) return (err == -ENOSPC) ? RESPST_ERR_LENGTH diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 2cb52fd48cf1..c5206148243c 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -1011,7 +1011,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access) rxe_add_ref(pd); - err = rxe_mem_init_dma(rxe, pd, access, mr); + err = rxe_mem_init_dma(pd, access, mr); if (err) goto err2; @@ -1046,7 +1046,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, rxe_add_ref(pd); - err = rxe_mem_init_user(rxe, pd, start, length, iova, + err = rxe_mem_init_user(pd, start, length, iova, access, udata, mr); if (err) goto err3; @@ -1094,7 +1094,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, rxe_add_ref(pd); - err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr); + err = rxe_mem_init_fast(pd, max_num_sg, mr); if (err) goto err2; -- cgit v1.2.3 From 2f6e51365727a1428d281821ec928904c723e47d Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Thu, 26 Apr 2018 10:56:34 +0300 Subject: IB/core: Use CONFIG_SECURITY_INFINIBAND to compile out security code Make security.c depends on CONFIG_SECURITY_INFINIBAND. 
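As an illustration of the pattern (the function name below is hypothetical): when an object file is made conditional in the Makefile, the usual companion is a private header that declares the real functions when the option is enabled and provides static inline no-op stubs when it is not, so callers compile unchanged either way.

    #include <rdma/ib_verbs.h>

    /* hypothetical entry point used by the rest of ib_core */
    #ifdef CONFIG_SECURITY_INFINIBAND
    int my_sec_check(struct ib_qp *qp);     /* implemented in security.c */
    #else
    static inline int my_sec_check(struct ib_qp *qp)
    {
            return 0;                       /* security support compiled out */
    }
    #endif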
Reviewed-by: Daniel Jurgens Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/core/Makefile | 3 ++- drivers/infiniband/core/security.c | 4 ---- 2 files changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile index dda9e856e3fa..1cfedc469b23 100644 --- a/drivers/infiniband/core/Makefile +++ b/drivers/infiniband/core/Makefile @@ -12,8 +12,9 @@ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \ device.o fmr_pool.o cache.o netlink.o \ roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \ multicast.o mad.o smi.o agent.o mad_rmpp.o \ - security.o nldev.o restrack.o + nldev.o restrack.o +ib_core-$(CONFIG_SECURITY_INFINIBAND) += security.o ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c index b61dda6b04fc..9b0bea8303e0 100644 --- a/drivers/infiniband/core/security.c +++ b/drivers/infiniband/core/security.c @@ -30,8 +30,6 @@ * SOFTWARE. */ -#ifdef CONFIG_SECURITY_INFINIBAND - #include #include #include @@ -751,5 +749,3 @@ int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index) pkey_index, map->agent.security); } - -#endif /* CONFIG_SECURITY_INFINIBAND */ -- cgit v1.2.3 From ecb238f6a7f369b5e0eece4e913c9d671208860c Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Sat, 28 Apr 2018 15:31:06 +0800 Subject: IB/cxgb4: use skb_put_zero()/__skb_put_zero Use the recently introduced helper to replace the pattern of skb_put_zero/__skb_put() && memset(). Signed-off-by: YueHaibing Reviewed-by: Steve Wise Signed-off-by: Doug Ledford --- drivers/infiniband/hw/cxgb4/qp.c | 9 +++------ drivers/infiniband/sw/rxe/rxe_net.c | 4 +--- 2 files changed, 4 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index de77b6027d69..2dc94997ea11 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -1297,8 +1297,7 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe, set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); - wqe = __skb_put(skb, sizeof(*wqe)); - memset(wqe, 0, sizeof *wqe); + wqe = __skb_put_zero(skb, sizeof(*wqe)); wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR)); wqe->flowid_len16 = cpu_to_be32( FW_WR_FLOWID_V(qhp->ep->hwtid) | @@ -1421,8 +1420,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); - wqe = __skb_put(skb, sizeof(*wqe)); - memset(wqe, 0, sizeof *wqe); + wqe = __skb_put_zero(skb, sizeof(*wqe)); wqe->op_compl = cpu_to_be32( FW_WR_OP_V(FW_RI_INIT_WR) | FW_WR_COMPL_F); @@ -1487,8 +1485,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) } set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); - wqe = __skb_put(skb, sizeof(*wqe)); - memset(wqe, 0, sizeof *wqe); + wqe = __skb_put_zero(skb, sizeof(*wqe)); wqe->op_compl = cpu_to_be32( FW_WR_OP_V(FW_RI_INIT_WR) | FW_WR_COMPL_F); diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index fca13a6281f0..95e52b3ec757 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -565,11 +565,9 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av, pkt->rxe = rxe; pkt->port_num = port_num; - pkt->hdr = 
skb_put(skb, paylen); + pkt->hdr = skb_put_zero(skb, paylen); pkt->mask |= RXE_GRH_MASK; - memset(pkt->hdr, 0, paylen); - dev_put(ndev); return skb; } -- cgit v1.2.3 From ffab8c89ba59c4e01f9c277f1baaad12bd5a3c0c Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 1 May 2018 09:25:49 +0100 Subject: RDMA/qedr: fix spelling mistake: "failes" -> "fails" Trivial fix to spelling mistake in DP_ERR error message Signed-off-by: Colin Ian King Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qedr/verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 7d3763b2e01c..35f3b6f8fd45 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -2579,7 +2579,7 @@ static int qedr_set_page(struct ib_mr *ibmr, u64 addr) u32 pbes_in_page; if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) { - DP_ERR(mr->dev, "qedr_set_page failes when %d\n", mr->npages); + DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages); return -ENOMEM; } -- cgit v1.2.3 From 25a0ad85156a7b697d4340560fff0d25a3b19243 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Thu, 3 May 2018 08:40:49 -0700 Subject: RDMA/nldev: Add explicit pad attribute Add a specific RDMA_NLDEV_ATTR_PAD attribute to be used for 64b attribute padding. To preserve the ABI, make this attribute equal to RDMA_NLDEV_ATTR_UNSPEC, which has a value of 0, because that has been used up until now as the pad attribute. Change all the previous use of 0 as the pad with this new enum. Signed-off-by: Steve Wise Reviewed-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/core/nldev.c | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index eb567765f45c..6b0c1eb71ea0 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -122,7 +122,8 @@ static int fill_dev_info(struct sk_buff *msg, struct ib_device *device) BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64)); if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, - device->attrs.device_cap_flags, 0)) + device->attrs.device_cap_flags, + RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; ib_get_device_fw_str(device, fw); @@ -131,10 +132,12 @@ static int fill_dev_info(struct sk_buff *msg, struct ib_device *device) return -EMSGSIZE; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID, - be64_to_cpu(device->node_guid), 0)) + be64_to_cpu(device->node_guid), + RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID, - be64_to_cpu(device->attrs.sys_image_guid), 0)) + be64_to_cpu(device->attrs.sys_image_guid), + RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type)) return -EMSGSIZE; @@ -161,11 +164,11 @@ static int fill_port_info(struct sk_buff *msg, BUILD_BUG_ON(sizeof(attr.port_cap_flags) > sizeof(u64)); if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, - (u64)attr.port_cap_flags, 0)) + (u64)attr.port_cap_flags, RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; if (rdma_protocol_ib(device, port) && nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX, - attr.subnet_prefix, 0)) + attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; if (rdma_protocol_ib(device, port)) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid)) @@ -209,8 +212,8 @@ static int fill_res_info_entry(struct sk_buff 
*msg, if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name)) goto err; - if (nla_put_u64_64bit(msg, - RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr, 0)) + if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr, + RDMA_NLDEV_ATTR_PAD)) goto err; nla_nest_end(msg, entry_attr); @@ -409,7 +412,7 @@ static int fill_res_cq_entry(struct sk_buff *msg, struct netlink_callback *cb, if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe)) goto err; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, - atomic_read(&cq->usecnt), 0)) + atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD)) goto err; /* Poll context is only valid for kernel CQs */ @@ -445,11 +448,12 @@ static int fill_res_mr_entry(struct sk_buff *msg, struct netlink_callback *cb, if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey)) goto err; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_IOVA, - mr->iova, 0)) + mr->iova, RDMA_NLDEV_ATTR_PAD)) goto err; } - if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length, 0)) + if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length, + RDMA_NLDEV_ATTR_PAD)) goto err; if (fill_res_name_pid(msg, res)) @@ -484,7 +488,7 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb, goto err; } if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, - atomic_read(&pd->usecnt), 0)) + atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) goto err; if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) && nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, -- cgit v1.2.3 From da5c8507821573b8ed6e3f47e009f273493ffaf7 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Thu, 3 May 2018 08:41:30 -0700 Subject: RDMA/nldev: add driver-specific resource tracking Each driver can register a "fill entry" function with the restrack core. This function will be called when filling out a resource, allowing the driver to add driver-specific details. The details consist of a nltable of nested attributes, that are in the form of tuples. Both key and value attributes are mandatory. The key nlattr must be a string, and the value nlattr can be one of the driver attributes that are generic, but typed, allowing the attributes to be validated. Currently the driver nlattr types include string, s32, u32, s64, and u64. The print-type nlattr allows a driver to specify an alternative display format for user tools displaying the attribute. For example, a u32 attribute will default to "%u", but a print-type attribute can be included for it to be displayed in hex. This allows the user tool to print the number in the format desired by the driver driver. More attrs can be defined as they become needed by drivers. 
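A condensed sketch of how a driver plugs into this; the accessor helpers are hypothetical, and the cxgb4 patch that follows is a complete, real implementation. The driver installs a callback on its ib_device's restrack root and emits name/value pairs with the new rdma_nl_put_driver_* helpers:

    #include <linux/skbuff.h>
    #include <rdma/ib_verbs.h>
    #include <rdma/restrack.h>

    u32 my_sq_size(struct ib_qp *qp);       /* hypothetical accessors */
    u64 my_sq_dma_addr(struct ib_qp *qp);

    static int my_fill_res_entry(struct sk_buff *msg,
                                 struct rdma_restrack_entry *res)
    {
            struct ib_qp *qp;

            if (res->type != RDMA_RESTRACK_QP)
                    return 0;               /* nothing driver-specific to add */

            qp = container_of(res, struct ib_qp, res);
            if (rdma_nl_put_driver_u32(msg, "sq_size", my_sq_size(qp)) ||
                rdma_nl_put_driver_u64_hex(msg, "sq_dma_addr", my_sq_dma_addr(qp)))
                    return -EMSGSIZE;
            return 0;
    }

    /* installed once at registration: ibdev->res.fill_res_entry = my_fill_res_entry; */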
Signed-off-by: Steve Wise Reviewed-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/core/nldev.c | 29 +++++++++++++++++++++++++++++ drivers/infiniband/core/restrack.c | 7 +++++++ 2 files changed, 36 insertions(+) (limited to 'drivers') diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 6b0c1eb71ea0..50efca482a6c 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -98,6 +98,15 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = { [RDMA_NLDEV_ATTR_NDEV_INDEX] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_NDEV_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ }, + [RDMA_NLDEV_ATTR_DRIVER] = { .type = NLA_NESTED }, + [RDMA_NLDEV_ATTR_DRIVER_ENTRY] = { .type = NLA_NESTED }, + [RDMA_NLDEV_ATTR_DRIVER_STRING] = { .type = NLA_NUL_STRING, + .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN }, + [RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE] = { .type = NLA_U8 }, + [RDMA_NLDEV_ATTR_DRIVER_S32] = { .type = NLA_S32 }, + [RDMA_NLDEV_ATTR_DRIVER_U32] = { .type = NLA_U32 }, + [RDMA_NLDEV_ATTR_DRIVER_S64] = { .type = NLA_S64 }, + [RDMA_NLDEV_ATTR_DRIVER_U64] = { .type = NLA_U64 }, }; static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device) @@ -285,6 +294,7 @@ static int fill_res_qp_entry(struct sk_buff *msg, struct netlink_callback *cb, struct rdma_restrack_entry *res, uint32_t port) { struct ib_qp *qp = container_of(res, struct ib_qp, res); + struct rdma_restrack_root *resroot = &qp->device->res; struct ib_qp_init_attr qp_init_attr; struct nlattr *entry_attr; struct ib_qp_attr qp_attr; @@ -334,6 +344,9 @@ static int fill_res_qp_entry(struct sk_buff *msg, struct netlink_callback *cb, if (fill_res_name_pid(msg, res)) goto err; + if (resroot->fill_res_entry(msg, res)) + goto err; + nla_nest_end(msg, entry_attr); return 0; @@ -349,6 +362,7 @@ static int fill_res_cm_id_entry(struct sk_buff *msg, { struct rdma_id_private *id_priv = container_of(res, struct rdma_id_private, res); + struct rdma_restrack_root *resroot = &id_priv->id.device->res; struct rdma_cm_id *cm_id = &id_priv->id; struct nlattr *entry_attr; @@ -390,6 +404,9 @@ static int fill_res_cm_id_entry(struct sk_buff *msg, if (fill_res_name_pid(msg, res)) goto err; + if (resroot->fill_res_entry(msg, res)) + goto err; + nla_nest_end(msg, entry_attr); return 0; @@ -403,6 +420,7 @@ static int fill_res_cq_entry(struct sk_buff *msg, struct netlink_callback *cb, struct rdma_restrack_entry *res, uint32_t port) { struct ib_cq *cq = container_of(res, struct ib_cq, res); + struct rdma_restrack_root *resroot = &cq->device->res; struct nlattr *entry_attr; entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CQ_ENTRY); @@ -423,6 +441,9 @@ static int fill_res_cq_entry(struct sk_buff *msg, struct netlink_callback *cb, if (fill_res_name_pid(msg, res)) goto err; + if (resroot->fill_res_entry(msg, res)) + goto err; + nla_nest_end(msg, entry_attr); return 0; @@ -436,6 +457,7 @@ static int fill_res_mr_entry(struct sk_buff *msg, struct netlink_callback *cb, struct rdma_restrack_entry *res, uint32_t port) { struct ib_mr *mr = container_of(res, struct ib_mr, res); + struct rdma_restrack_root *resroot = &mr->pd->device->res; struct nlattr *entry_attr; entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_MR_ENTRY); @@ -459,6 +481,9 @@ static int fill_res_mr_entry(struct sk_buff *msg, struct netlink_callback *cb, if (fill_res_name_pid(msg, res)) goto err; + if (resroot->fill_res_entry(msg, res)) + goto err; + nla_nest_end(msg, entry_attr); return 0; @@ -472,6 +497,7 @@ static int 
fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb, struct rdma_restrack_entry *res, uint32_t port) { struct ib_pd *pd = container_of(res, struct ib_pd, res); + struct rdma_restrack_root *resroot = &pd->device->res; struct nlattr *entry_attr; entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_PD_ENTRY); @@ -498,6 +524,9 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb, if (fill_res_name_pid(msg, res)) goto err; + if (resroot->fill_res_entry(msg, res)) + goto err; + nla_nest_end(msg, entry_attr); return 0; diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c index efddd13e3edb..172b517dc7b9 100644 --- a/drivers/infiniband/core/restrack.c +++ b/drivers/infiniband/core/restrack.c @@ -12,9 +12,16 @@ #include "cma_priv.h" +static int fill_res_noop(struct sk_buff *msg, + struct rdma_restrack_entry *entry) +{ + return 0; +} + void rdma_restrack_init(struct rdma_restrack_root *res) { init_rwsem(&res->rwsem); + res->fill_res_entry = fill_res_noop; } static const char *type2str(enum rdma_restrack_type type) -- cgit v1.2.3 From 73937e8a030b046c6b0fa73868bee25647a29be4 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Thu, 3 May 2018 08:41:42 -0700 Subject: RDMA/nldev: helper functions to add driver attributes These help rdma drivers to fill out the driver entries. Signed-off-by: Steve Wise Signed-off-by: Doug Ledford --- drivers/infiniband/core/nldev.c | 66 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) (limited to 'drivers') diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 50efca482a6c..8674ca2d8f91 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -109,6 +109,72 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = { [RDMA_NLDEV_ATTR_DRIVER_U64] = { .type = NLA_U64 }, }; +static int put_driver_name_print_type(struct sk_buff *msg, const char *name, + enum rdma_nldev_print_type print_type) +{ + if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name)) + return -EMSGSIZE; + if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC && + nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type)) + return -EMSGSIZE; + + return 0; +} + +static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, + enum rdma_nldev_print_type print_type, + u32 value) +{ + if (put_driver_name_print_type(msg, name, print_type)) + return -EMSGSIZE; + if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value)) + return -EMSGSIZE; + + return 0; +} + +static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, + enum rdma_nldev_print_type print_type, + u64 value) +{ + if (put_driver_name_print_type(msg, name, print_type)) + return -EMSGSIZE; + if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value, + RDMA_NLDEV_ATTR_PAD)) + return -EMSGSIZE; + + return 0; +} + +int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value) +{ + return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC, + value); +} +EXPORT_SYMBOL(rdma_nl_put_driver_u32); + +int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name, + u32 value) +{ + return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX, + value); +} +EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex); + +int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value) +{ + return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC, + value); +} +EXPORT_SYMBOL(rdma_nl_put_driver_u64); + +int 
rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value) +{ + return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX, + value); +} +EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex); + static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index)) -- cgit v1.2.3 From 056f9c7f39bf517d58f32797f1eb1465bb6f6ef2 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Thu, 3 May 2018 08:41:49 -0700 Subject: iw_cxgb4: dump detailed driver-specific QP information Provide a cxgb4-specific function to fill in qp state details. This allows dumping important c4iw_qp state useful for debugging. Included in the dump are the t4_sq, t4_rq structs, plus a dump of the t4_swsqe and t4swrqe descriptors for the first and last pending entries. Signed-off-by: Steve Wise Signed-off-by: Doug Ledford --- drivers/infiniband/hw/cxgb4/Makefile | 3 +- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 5 + drivers/infiniband/hw/cxgb4/provider.c | 8 ++ drivers/infiniband/hw/cxgb4/restrack.c | 248 +++++++++++++++++++++++++++++++++ 4 files changed, 263 insertions(+), 1 deletion(-) create mode 100644 drivers/infiniband/hw/cxgb4/restrack.c (limited to 'drivers') diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile index fa40b685831b..9edd92023e18 100644 --- a/drivers/infiniband/hw/cxgb4/Makefile +++ b/drivers/infiniband/