/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"
struct ib_pkey_cache {
        int             table_len;
        u16             table[0];
};

struct ib_update_work {
        struct work_struct work;
        struct ib_device  *device;
        u8                 port_num;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

static const struct ib_gid_attr zattr;
enum gid_attr_find_mask {
        GID_ATTR_FIND_MASK_GID          = 1UL << 0,
        GID_ATTR_FIND_MASK_NETDEV       = 1UL << 1,
        GID_ATTR_FIND_MASK_DEFAULT      = 1UL << 2,
};

enum gid_table_entry_props {
        GID_TABLE_ENTRY_INVALID         = 1UL << 0,
        GID_TABLE_ENTRY_DEFAULT         = 1UL << 1,
};

enum gid_table_write_action {
        GID_TABLE_WRITE_ACTION_ADD,
        GID_TABLE_WRITE_ACTION_DEL,
        /* MODIFY only updates the GID table. Currently only used by
         * ib_cache_update.
         */
        GID_TABLE_WRITE_ACTION_MODIFY
};
struct ib_gid_table_entry {
        /* This lock protects an entry from being
         * read and written simultaneously.
         */
        rwlock_t            lock;
        unsigned long       props;
        union ib_gid        gid;
        struct ib_gid_attr  attr;
        void               *context;
};

struct ib_gid_table {
        int                  sz;
        /* In RoCE, adding a GID to the table requires:
         * (a) Find if this GID already exists.
         * (b) Find a free slot.
         * (c) Write the new GID.
         *
         * Deletion requires a different set of operations:
         * (a) Find the GID.
         * (b) Delete it.
         *
         * Add/delete should be carried out atomically.
         * This is done by locking this mutex from multiple
         * writers. We don't need this lock for IB, as the MAD
         * layer replaces all entries. All data_vec entries
         * are locked by this lock.
         */
        struct mutex         lock;
        struct ib_gid_table_entry *data_vec;
};
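/*
 * Illustrative sketch (added comment, not from the original source): the
 * add flow the comment above describes, as a writer drives it under the
 * table mutex. Error handling is elided; ib_cache_gid_add() below
 * implements exactly this sequence.
 *
 *      mutex_lock(&table->lock);
 *      ix = find_gid(table, gid, attr, false,          // (a) already there?
 *                    GID_ATTR_FIND_MASK_GID | GID_ATTR_FIND_MASK_NETDEV);
 *      if (ix < 0)
 *              ix = find_gid(table, &zgid, NULL, false,// (b) find free slot
 *                            GID_ATTR_FIND_MASK_GID |
 *                            GID_ATTR_FIND_MASK_DEFAULT);
 *      if (ix >= 0)
 *              add_gid(ib_dev, port, table, ix,        // (c) write new GID
 *                      gid, attr, false);
 *      mutex_unlock(&table->lock);
 */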
static int write_gid(struct ib_device *ib_dev, u8 port,
                     struct ib_gid_table *table, int ix,
                     const union ib_gid *gid,
                     const struct ib_gid_attr *attr,
                     enum gid_table_write_action action,
                     bool  default_gid)
{
        int ret = 0;
        struct net_device *old_net_dev;
        unsigned long flags;

        /* When rdma_cap_roce_gid_table() holds, this function must be
         * called under a sleepable lock (the table mutex), since the
         * provider's add_gid/del_gid callbacks below may sleep.
         */
        write_lock_irqsave(&table->data_vec[ix].lock, flags);

        if (rdma_cap_roce_gid_table(ib_dev, port)) {
                table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
                write_unlock_irqrestore(&table->data_vec[ix].lock, flags);
                /* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
                 * RoCE providers and thus only updates the cache.
                 */
                if (action == GID_TABLE_WRITE_ACTION_ADD)
                        ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
                                              &table->data_vec[ix].context);
                else if (action == GID_TABLE_WRITE_ACTION_DEL)
                        ret = ib_dev->del_gid(ib_dev, port, ix,
                                              &table->data_vec[ix].context);
                write_lock_irqsave(&table->data_vec[ix].lock, flags);
        }

        old_net_dev = table->data_vec[ix].attr.ndev;
        if (old_net_dev && old_net_dev != attr->ndev)
                dev_put(old_net_dev);
        /* If the provider call failed, just delete the old GID. */
        if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
                gid = &zgid;
                attr = &zattr;
                table->data_vec[ix].context = NULL;
        }
        if (default_gid)
                table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
        memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
        memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
        if (table->data_vec[ix].attr.ndev &&
            table->data_vec[ix].attr.ndev != old_net_dev)
                dev_hold(table->data_vec[ix].attr.ndev);

        table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

        write_unlock_irqrestore(&table->data_vec[ix].lock, flags);

        if (!ret && rdma_cap_roce_gid_table(ib_dev, port)) {
                struct ib_event event;

                event.device            = ib_dev;
                event.element.port_num  = port;
                event.event             = IB_EVENT_GID_CHANGE;

                ib_dispatch_event(&event);
        }
        return ret;
}
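/*
 * Illustrative sketch (added comment, not from the original source): a
 * consumer that wants to react to the IB_EVENT_GID_CHANGE dispatched
 * above registers an event handler, much like ib_cache_event() at the
 * bottom of this file:
 *
 *      static void my_event_handler(struct ib_event_handler *handler,
 *                                   struct ib_event *event)
 *      {
 *              if (event->event == IB_EVENT_GID_CHANGE)
 *                      ;       // re-read the affected port's GID table
 *      }
 *
 *      INIT_IB_EVENT_HANDLER(&h, device, my_event_handler);
 *      ib_register_event_handler(&h);
 */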
static int add_gid(struct ib_device *ib_dev, u8 port,
                   struct ib_gid_table *table, int ix,
                   const union ib_gid *gid,
                   const struct ib_gid_attr *attr,
                   bool  default_gid) {
        return write_gid(ib_dev, port, table, ix, gid, attr,
                         GID_TABLE_WRITE_ACTION_ADD, default_gid);
}

static int modify_gid(struct ib_device *ib_dev, u8 port,
                      struct ib_gid_table *table, int ix,
                      const union ib_gid *gid,
                      const struct ib_gid_attr *attr,
                      bool  default_gid) {
        return write_gid(ib_dev, port, table, ix, gid, attr,
                         GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}

static int del_gid(struct ib_device *ib_dev, u8 port,
                   struct ib_gid_table *table, int ix,
                   bool  default_gid) {
        return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
                         GID_TABLE_WRITE_ACTION_DEL, default_gid);
}
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
                    const struct ib_gid_attr *val, bool default_gid,
                    unsigned long mask)
{
        int i;

        for (i = 0; i < table->sz; i++) {
                unsigned long flags;
                struct ib_gid_attr *attr = &table->data_vec[i].attr;

                read_lock_irqsave(&table->data_vec[i].lock, flags);

                if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
                        goto next;

                if (mask & GID_ATTR_FIND_MASK_GID &&
                    memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
                        goto next;

                if (mask & GID_ATTR_FIND_MASK_NETDEV &&
                    attr->ndev != val->ndev)
                        goto next;

                if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
                    !!(table->data_vec[i].props & GID_TABLE_ENTRY_DEFAULT) !=
                    default_gid)
                        goto next;

                read_unlock_irqrestore(&table->data_vec[i].lock, flags);
                return i;
next:
                read_unlock_irqrestore(&table->data_vec[i].lock, flags);
        }

        return -1;
}
static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
        gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
        addrconf_ifid_eui48(&gid->raw[8], dev);
}
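/*
 * Worked example (added comment, not from the original source): for a
 * netdev with MAC 00:11:22:33:44:55, addrconf_ifid_eui48() flips the
 * universal/local bit of the first octet and inserts 0xff 0xfe in the
 * middle, so the default GID becomes the link-local style address
 *
 *      fe80:0000:0000:0000:0211:22ff:fe33:4455
 *
 * i.e. the fe80::/64 subnet prefix set above plus the modified EUI-64
 * interface identifier in the low 8 bytes.
 */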
int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        int ix;
        int ret = 0;
        struct net_device *idev;

        table = ports_table[port - rdma_start_port(ib_dev)];

        if (!memcmp(gid, &zgid, sizeof(*gid)))
                return -EINVAL;

        if (ib_dev->get_netdev) {
                idev = ib_dev->get_netdev(ib_dev, port);
                if (idev && attr->ndev != idev) {
                        union ib_gid default_gid;

                        /* Adding default GIDs is not permitted */
                        make_default_gid(idev, &default_gid);
                        if (!memcmp(gid, &default_gid, sizeof(*gid))) {
                                dev_put(idev);
                                return -EPERM;
                        }
                }
                if (idev)
                        dev_put(idev);
        }

        mutex_lock(&table->lock);

        ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
                      GID_ATTR_FIND_MASK_NETDEV);
        if (ix >= 0)
                goto out_unlock;

        ix = find_gid(table, &zgid, NULL, false, GID_ATTR_FIND_MASK_GID |
                      GID_ATTR_FIND_MASK_DEFAULT);
        if (ix < 0) {
                ret = -ENOSPC;
                goto out_unlock;
        }

        add_gid(ib_dev, port, table, ix, gid, attr, false);

out_unlock:
        mutex_unlock(&table->lock);
        return ret;
}
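/*
 * Usage sketch (assumed caller, added comment, not shown in this file):
 * the RoCE GID management code (roce_gid_mgmt.c) invokes this entry
 * point when an IP address appears on a netdev bound to a port, roughly:
 *
 *      struct ib_gid_attr gid_attr = { .ndev = ndev };
 *      union ib_gid gid;
 *
 *      // gid derived from the IP address, e.g. via
 *      // ipv6_addr_set_v4mapped() for IPv4
 *      ib_cache_gid_add(ib_dev, port, &gid, &gid_attr);
 */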
int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        int ix;

        table = ports_table[port - rdma_start_port(ib_dev)];

        mutex_lock(&table->lock);

        ix = find_gid(table, gid, attr, false,
                      GID_ATTR_FIND_MASK_GID     |
                      GID_ATTR_FIND_MASK_NETDEV  |
                      GID_ATTR_FIND_MASK_DEFAULT);
        if (ix < 0)
                goto out_unlock;

        del_gid(ib_dev, port, table, ix, false);

out_unlock:
        mutex_unlock(&table->lock);
        return 0;
}
int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                     struct net_device *ndev)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        int ix;

        table = ports_table[port - rdma_start_port(ib_dev)];

        mutex_lock(&table->lock);

        for (ix = 0; ix < table->sz; ix++)
                if (table->data_vec[ix].attr.ndev == ndev)
                        del_gid(ib_dev, port, table, ix, false);

        mutex_unlock(&table->lock);
        return 0;
}
static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
                              union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        unsigned long flags;

        table = ports_table[port - rdma_start_port(ib_dev)];

        if (index < 0 || index >= table->sz)
                return -EINVAL;

        read_lock_irqsave(&table->data_vec[index].lock, flags);
        if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID) {
                read_unlock_irqrestore(&table->data_vec[index].lock, flags);
                return -EAGAIN;
        }

        memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
        if (attr) {
                memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
                if (attr->ndev)
                        dev_hold(attr->ndev);
        }

        read_unlock_irqrestore(&table->data_vec[index].lock, flags);
        return 0;
}
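/*
 * Note for callers (added comment, not from the original source): when
 * attr is non-NULL and the entry carries a netdev, the function above
 * takes a reference on it, so the caller owns a dev_put():
 *
 *      struct ib_gid_attr attr;
 *      union ib_gid gid;
 *
 *      if (!__ib_cache_gid_get(ib_dev, port, index, &gid, &attr)) {
 *              // ... use gid/attr ...
 *              if (attr.ndev)
 *                      dev_put(attr.ndev);
 *      }
 */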
static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
                                    const union ib_gid *gid,
                                    const struct ib_gid_attr *val,
                                    unsigned long mask,
                                    u8 *port, u16 *index)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        u8 p;
        int local_index;

        for (p = 0; p < ib_dev->phys_port_cnt; p++) {
                table = ports_table[p];
                local_index = find_gid(table, gid, val, false, mask);
                if (local_index >= 0) {
                        if (index)
                                *index = local_index;
                        if (port)
                                *port = p + rdma_start_port(ib_dev);
                        return 0;
                }
        }

        return -ENOENT;
}
static int ib_cache_gid_find(struct ib_device *ib_dev,
                             const union ib_gid *gid,
                             struct net_device *ndev, u8 *port,
                             u16 *index)
{
        unsigned long mask = GID_ATTR_FIND_MASK_GID;
        struct ib_gid_attr gid_attr_val = {.ndev = ndev};

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
                                        mask, port, index);
}
int ib_cache_gid_find_by_port(struct ib_device *ib_dev,
                              const union ib_gid *gid,
                              u8 port, struct net_device *ndev,
                              u16 *index)
{
        int local_index;
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        unsigned long mask = GID_ATTR_FIND_MASK_GID;
        struct ib_gid_attr val = {.ndev = ndev};

        if (port < rdma_start_port(ib_dev) ||
            port > rdma_end_port(ib_dev))
                return -ENOENT;

        table = ports_table[port - rdma_start_port(ib_dev)];

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        local_index = find_gid(table, gid, &val, false, mask);
        if (local_index >= 0) {
                if (index)
                        *index = local_index;
                return 0;
        }

        return -ENOENT;
}
static struct ib_gid_table *alloc_gid_table(int sz)
{
        unsigned int i;
        struct ib_gid_table *table =
                kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

        if (!table)
                return NULL;

        table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
        if (!table->data_vec)
                goto err_free_table;

        mutex_init(&table->lock);

        table->sz = sz;

        for (i = 0; i < sz; i++)
                rwlock_init(&table->data_vec[i].lock);

        return table;

err_free_table:
        kfree(table);
        return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
        if (table) {
                kfree(table->data_vec);
                kfree(table);
        }
}
static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
                                   struct ib_gid_table *table)
{
        int i;

        if (!table)
                return;

        for (i = 0; i < table->sz; ++i) {
                if (memcmp(&table->data_vec[i].gid, &zgid,
                           sizeof(table->data_vec[i].gid)))
                        del_gid(ib_dev, port, table, i,
                                table->data_vec[i].props &
                                GID_TABLE_ENTRY_DEFAULT);
        }
}
void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
                                  struct net_device *ndev,
                                  enum ib_cache_gid_default_mode mode)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        union ib_gid gid;
        struct ib_gid_attr gid_attr;
        struct ib_gid_table *table;
        int ix;
        union ib_gid current_gid;
        struct ib_gid_attr current_gid_attr = {};

        table = ports_table[port - rdma_start_port(ib_dev)];

        make_default_gid(ndev, &gid);
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        mutex_lock(&table->lock);
        ix = find_gid(table, NULL, NULL, true, GID_ATTR_FIND_MASK_DEFAULT);

        /* Couldn't find the default GID location */
        WARN_ON(ix < 0);

        if (!__ib_cache_gid_get(ib_dev, port, ix,
                                &current_gid, &current_gid_attr) &&
            mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
            !memcmp(&gid, &current_gid, sizeof(gid)) &&
            !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
                goto unlock;

        if ((memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
             memcmp(&current_gid_attr, &zattr,
                    sizeof(current_gid_attr))) &&
            del_gid(ib_dev, port, table, ix, true)) {
                pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
                        ix, gid.raw);
                goto unlock;
        }

        if (mode == IB_CACHE_GID_DEFAULT_MODE_SET)
                if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
                        pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
                                gid.raw);

unlock:
        if (current_gid_attr.ndev)
                dev_put(current_gid_attr.ndev);
        mutex_unlock(&table->lock);
}
static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
                                     struct ib_gid_table *table)
{
        if (rdma_protocol_roce(ib_dev, port)) {
                struct ib_gid_table_entry *entry = &table->data_vec[0];

                entry->props |= GID_TABLE_ENTRY_DEFAULT;
        }

        return 0;
}
static int _gid_table_setup_one(struct ib_device *ib_dev)
{
        u8 port;
        struct ib_gid_table **table;
        int err = 0;

        table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);

        if (!table) {
                pr_warn("failed to allocate ib gid cache for %s\n",
                        ib_dev->name);
                return -ENOMEM;
        }

        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                u8 rdma_port = port + rdma_start_port(ib_dev);

                table[port] =
                        alloc_gid_table(
                                ib_dev->port_immutable[rdma_port].gid_tbl_len);
                if (!table[port]) {
                        err = -ENOMEM;
                        goto rollback_table_setup;
                }

                err = gid_table_reserve_default(ib_dev,
                                                port + rdma_start_port(ib_dev),
                                                table[port]);
                if (err)
                        goto rollback_table_setup;
        }

        ib_dev->cache.gid_cache = table;
        return 0;

rollback_table_setup:
        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table[port]);
                release_gid_table(table[port]);
        }

        kfree(table);
        return err;
}
static void gid_table_release_one(struct ib_device *ib_dev)
{
        struct ib_gid_table **table = ib_dev->cache.gid_cache;
        u8 port;

        if (!table)
                return;

        for (port = 0; port < ib_dev->phys_port_cnt; port++)
                release_gid_table(table[port]);

        kfree(table);
        ib_dev->cache.gid_cache = NULL;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
        struct ib_gid_table **table = ib_dev->cache.gid_cache;
        u8 port;

        if (!table)
                return;

        for (port = 0; port < ib_dev->phys_port_cnt; port++)
                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table[port]);
}
static int gid_table_setup_one(struct ib_device *ib_dev)
{
        int err;

        err = _gid_table_setup_one(ib_dev);

        if (err)
                return err;

        err = roce_rescan_device(ib_dev);

        if (err) {
                gid_table_cleanup_one(ib_dev);
                gid_table_release_one(ib_dev);
        }

        return err;
}
int ib_get_cached_gid(struct ib_device *device,
                      u8                port_num,
                      int               index,
                      union ib_gid     *gid)
{
        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        return __ib_cache_gid_get(device, port_num, index, gid, NULL);
}
EXPORT_SYMBOL(ib_get_cached_gid);
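/*
 * Usage sketch (hypothetical caller, added comment, not from the
 * original source): a ULP that needs the source GID at index 0 of a
 * port reads the cache instead of querying the device each time:
 *
 *      union ib_gid sgid;
 *
 *      if (!ib_get_cached_gid(device, port_num, 0, &sgid))
 *              build_ah_from_gid(&sgid);       // hypothetical helper
 */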
int ib_find_cached_gid(struct ib_device *device,
                       const union ib_gid *gid,
                       u8               *port_num,
                       u16              *index)
{
        return ib_cache_gid_find(device, gid, NULL, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);
int ib_get_cached_pkey(struct ib_device *device,
                       u8                port_num,
                       int               index,
                       u16              *pkey)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int ret = 0;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
        else
                *pkey = cache->table[index];

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);
int ib_find_cached_pkey(struct ib_device *device,
                        u8                port_num,
                        u16               pkey,
                        u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;
        int partial_ix = -1;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
                        if (cache->table[i] & 0x8000) {
                                *index = i;
                                ret = 0;
                                break;
                        } else
                                partial_ix = i;
                }

        if (ret && partial_ix >= 0) {
                *index = partial_ix;
                ret = 0;
        }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
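/*
 * Worked example (added comment, not from the original source): bit 15
 * of a P_Key is the full-membership bit, so 0x8001 (full member) and
 * 0x0001 (limited member) name the same partition. Searching for pkey
 * 0x8001 in a table holding { 0x0001, 0x8001 } returns index 1, since
 * the full-member entry wins; searching a table holding only { 0x0001 }
 * falls back to partial_ix and returns index 0.
 */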
int ib_find_exact_cached_pkey(struct ib_device *device,
                              u8                port_num,
                              u16               pkey,
                              u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if (cache->table[i] == pkey) {
                        *index = i;
                        ret = 0;
                        break;
                }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);
int ib_get_cached_lmc(struct ib_device *device,
                      u8                port_num,
                      u8               *lmc)
{
        unsigned long flags;
        int ret = 0;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);
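/*
 * Note (added comment, not from the original source): the LMC (LID Mask
 * Control) value cached here means the port answers to 2^lmc consecutive
 * LIDs; e.g. lmc == 2 with base LID 0x10 covers LIDs 0x10..0x13. Callers
 * use it, for instance, to decide whether a path's source LID belongs to
 * this port.
 */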
static void ib_cache_update(struct ib_device *device,
                            u8                port)
{
        struct ib_port_attr       *tprops = NULL;
        struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
        struct ib_gid_cache {
                int             table_len;
                union ib_gid    table[0];
        }                         *gid_cache = NULL;
        int                        i;
        int                        ret;
        struct ib_gid_table       *table;
        struct ib_gid_table      **ports_table = device->cache.gid_cache;
        bool                       use_roce_gid_table =
                                        rdma_cap_roce_gid_table(device, port);

        if (port < rdma_start_port(device) || port > rdma_end_port(device))
                return;

        table = ports_table[port - rdma_start_port(device)];

        tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
        if (!tprops)
                return;

        ret = ib_query_port(device, port, tprops);
        if (ret) {
                printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
                       ret, device->name);
                goto err;
        }

        pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
                             sizeof *pkey_cache->table, GFP_KERNEL);
        if (!pkey_cache)
                goto err;

        pkey_cache->table_len = tprops->pkey_tbl_len;

        if (!use_roce_gid_table) {
                gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
                            sizeof(*gid_cache->table), GFP_KERNEL);
                if (!gid_cache)
                        goto err;

                gid_cache->table_len = tprops->gid_tbl_len;
        }

        for (i = 0; i < pkey_cache->table_len; ++i) {
                ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
                if (ret) {
                        printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
                               ret, device->name, i);
                        goto err;
                }
        }

        if (!use_roce_gid_table) {
                for (i = 0; i < gid_cache->table_len; ++i) {
                        ret = ib_query_gid(device, port, i,
                                           gid_cache->table + i);
                        if (ret) {
                                printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
                                       ret, device->name, i);
                                goto err;
                        }
                }
        }

        write_lock_irq(&device->cache.lock);

        old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];

        device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
        if (!use_roce_gid_table) {
                for (i = 0; i < gid_cache->table_len; i++) {
                        modify_gid(device, port, table, i, gid_cache->table + i,
                                   &zattr, false);
                }
        }

        device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

        write_unlock_irq(&device->cache.lock);

        kfree(gid_cache);
        kfree(old_pkey_cache);
        kfree(tprops);
        return;

err:
        kfree(pkey_cache);
        kfree(gid_cache);
        kfree(tprops);
}
static void ib_cache_task(struct work_struct *_work)
{
        struct ib_update_work *work =
                container_of(_work, struct ib_update_work, work);

        ib_cache_update(work->device, work->port_num);
        kfree(work);
}
static void ib_cache_event(struct ib_event_handler *handler,
                           struct ib_event *event)
{
        struct ib_update_work *work;

        if (event->event == IB_EVENT_PORT_ERR    ||
            event->event == IB_EVENT_PORT_ACTIVE ||
            event->event == IB_EVENT_LID_CHANGE  ||
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_SM_CHANGE   ||
            event->event == IB_EVENT_CLIENT_REREGISTER ||
            event->event == IB_EVENT_GID_CHANGE) {
                /* GFP_ATOMIC: event handlers may run in atomic context,
                 * so defer the (sleeping) update to the workqueue.
                 */
                work = kmalloc(sizeof *work, GFP_ATOMIC);
                if (work) {
                        INIT_WORK(&work->work, ib_cache_task);
                        work->device   = event->device;
                        work->port_num = event->element.port_num;
                        queue_work(ib_wq, &work->work);
                }
        }
}
int ib_cache_setup_one(struct ib_device *device)
{
        int p;
        int err;

        rwlock_init(&device->cache.lock);

        device->cache.pkey_cache =
                kzalloc(sizeof *device->cache.pkey_cache *
                        (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
        device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
                                          (rdma_end_port(device) -
                                           rdma_start_port(device) + 1),
                                          GFP_KERNEL);
        if (!device->cache.pkey_cache ||
            !device->cache.lmc_cache) {
                printk(KERN_WARNING "Couldn't allocate cache "
                       "for %s\n", device->name);
                return -ENOMEM;
        }

        err = gid_table_setup_one(device);
        if (err)
                /* Allocated memory will be cleaned in the release function */
                return err;

        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                ib_cache_update(device, p + rdma_start_port(device));

        INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
                              device, ib_cache_event);
        err = ib_register_event_handler(&device->cache.event_handler);
        if (err)
                goto err;

        return 0;

err:
        gid_table_cleanup_one(device);
        return err;
}
void ib_cache_release_one(struct ib_device *device)
{
        int p;

        /*
         * The release function frees all the cache elements.
         * This function should be called as part of freeing
         * all the device's resources when the cache could no
         * longer be accessed.
         */
        if (device->cache.pkey_cache)
                for (p = 0;
                     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                        kfree(device->cache.pkey_cache[p]);

        gid_table_release_one(device);
        kfree(device->cache.pkey_cache);
        kfree(device->cache.lmc_cache);
}
void ib_cache_cleanup_one(struct ib_device *device)
{
        /* The cleanup function unregisters the event handler,
         * waits for all in-progress workqueue elements and cleans
         * up the GID cache. This function should be called after
         * the device was removed from the devices list and all
         * clients were removed, so the cache exists but is
         * non-functional and shouldn't be updated anymore.
         */
        ib_unregister_event_handler(&device->cache.event_handler);
        flush_workqueue(ib_wq);
        gid_table_cleanup_one(device);
}
void __init ib_cache_setup(void)
{
        roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
        roce_gid_mgmt_cleanup();
}