/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *	    Santiago Leon <santil@linux.vnet.ibm.com>
 *	    Brian King <brking@linux.vnet.ibm.com>
 *	    Robert Jennings <rcj@linux.vnet.ibm.com>
 *	    Anton Blanchard <anton@au.ibm.com>
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>

#include "ibmveth.h"
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);

static struct kobj_type ktype_veth_pool;

static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.04"

MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);
static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
	"Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
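
/*
 * Example usage (illustrative values): all three knobs are ordinary 0644
 * module parameters, so they can be set at load time,
 *
 *	modprobe ibmveth rx_copybreak=256 tx_copybreak=256 rx_flush=1
 *
 * or adjusted afterwards through
 * /sys/module/ibmveth/parameters/{rx_copybreak,tx_copybreak,rx_flush}.
 */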
struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure",
			IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success",
			IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
};
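
/*
 * Sketch of how the table above is consumed (see
 * ibmveth_get_ethtool_stats() below): each counter is read straight out
 * of the adapter struct by byte offset, e.g. for "rx_no_buffer":
 *
 *	u64 v = IBMVETH_GET_STAT(adapter, IBMVETH_STAT_OFF(rx_no_buffer));
 *
 * so adding a counter only needs a new adapter field plus one line in
 * ibmveth_stats[].
 */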
/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
			IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}
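
/*
 * Note on the receive queue protocol behind the helpers above: each
 * entry's correlator packs the owning pool in the high 32 bits and the
 * buffer index in the low 32 bits (written by
 * ibmveth_replenish_buffer_pool() below),
 *
 *	correlator = ((u64)pool->index << 32) | index;
 *
 * and the toggle bit in flags_off flips each time the firmware wraps
 * the ring, so an entry is "pending" exactly when its toggle matches
 * the driver's rx_queue.toggle.
 */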
/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
				     u32 pool_index, u32 pool_size,
				     u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size * 7 / 8;
	pool->active = pool_active;
}
/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

	if (!pool->free_map)
		return -1;

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if (!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);

	if (!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}
static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
	unsigned long offset;

	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
		asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
}
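
/*
 * Note: dcbfl is a PowerPC data-cache block flush, so the loop above
 * pushes the buffer back to memory one SMP_CACHE_BYTES-sized line at a
 * time. This helper is what the rx_flush module parameter enables.
 */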
/* replenish the buffers for a pool. note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
					  struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;
	struct sk_buff *skb;
	unsigned int free_index, index;
	u64 correlator;
	unsigned long lpar_rc;
	dma_addr_t dma_addr;

	mb();

	for (i = 0; i < count; ++i) {
		union ibmveth_buf_desc desc;

		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);

		if (!skb) {
			netdev_dbg(adapter->netdev,
				   "replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index++;
		if (pool->consumer_index >= pool->size)
			pool->consumer_index = 0;
		index = pool->free_map[free_index];

		BUG_ON(index == IBM_VETH_INVALID_MAP);
		BUG_ON(pool->skbuff[index] != NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto failure;

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		correlator = ((u64)pool->index << 32) | index;
		*(u64 *)skb->data = correlator;

		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		if (rx_flush) {
			unsigned int len = min(pool->buff_size,
						adapter->netdev->mtu +
						IBMVETH_BUFF_OH);
			ibmveth_flush_buffer(skb->data, len);
		}
		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
						   desc.desc);

		if (lpar_rc != H_SUCCESS) {
			goto failure;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
	return;

failure:
	pool->free_map[free_index] = index;
	pool->skbuff[index] = NULL;
	if (pool->consumer_index == 0)
		pool->consumer_index = pool->size - 1;
	else
		pool->consumer_index--;
	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
		dma_unmap_single(&adapter->vdev->dev,
				 pool->dma_addr[index], pool->buff_size,
				 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;

	mb();
	atomic_add(buffers_added, &(pool->available));
}
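
/*
 * Summary of the free_map scheme used above: free_map is a ring of free
 * buffer indices. The replenish path consumes entries at consumer_index
 * and marks them IBM_VETH_INVALID_MAP while the buffer is on loan to
 * the hypervisor; ibmveth_remove_buffer_from_pool() returns indices at
 * producer_index. The atomic "available" count is what
 * ibmveth_replenish_task() compares against each pool's threshold.
 */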
/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

		if (pool->active &&
		    (atomic_read(&pool->available) < pool->threshold))
			ibmveth_replenish_buffer_pool(adapter, pool);
	}

	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
						4096 - 8);
}
/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
				     struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if (skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if (pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if (pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}
/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
					    u64 correlator)
{
	unsigned int pool  = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	BUG_ON(skb == NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index++;
	if (adapter->rx_buff_pool[pool].producer_index >=
	    adapter->rx_buff_pool[pool].size)
		adapter->rx_buff_pool[pool].producer_index = 0;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}
/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}
/* recycle the current buffer on the rx queue */
static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;
	int ret = 1;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	if (!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		goto out;
	}

	desc.fields.flags_len = IBMVETH_BUF_VALID |
		adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if (lpar_rc != H_SUCCESS) {
		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
			   "during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
		ret = 0;
	}

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}

out:
	return ret;
}
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;
	struct device *dev = &adapter->vdev->dev;

	if (adapter->buffer_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
					 DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if (adapter->filter_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
					 DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if (adapter->rx_queue.queue_addr != NULL) {
		dma_free_coherent(dev, adapter->rx_queue.queue_len,
				  adapter->rx_queue.queue_addr,
				  adapter->rx_queue.queue_dma);
		adapter->rx_queue.queue_addr = NULL;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);

	if (adapter->bounce_buffer != NULL) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->bounce_buffer_dma,
					 adapter->netdev->mtu + IBMVETH_BUFF_OH,
					 DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}
}
static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
        union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
	int rc, try_again = 1;

	/*
	 * After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once.
	 */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address = 0;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	netdev_dbg(netdev, "open starting\n");

	napi_enable(&adapter->napi);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void *) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void *) get_zeroed_page(GFP_KERNEL);

	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		netdev_err(netdev, "unable to allocate filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	dev = &adapter->vdev->dev;

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
						rxq_entries;
	adapter->rx_queue.queue_addr =
		dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
				   &adapter->rx_queue.queue_dma, GFP_KERNEL);

	if (!adapter->rx_queue.queue_addr) {
		netdev_err(netdev, "unable to allocate rx queue pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
	    (dma_mapping_error(dev, adapter->filter_list_dma))) {
		netdev_err(netdev, "unable to map filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
					adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
	netdev_dbg(netdev, "receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
			   lpar_rc);
		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
			   "desc:0x%llx MAC:0x%llx\n",
			   adapter->buffer_list_dma,
			   adapter->filter_list_dma,
			   rxq_desc.desc,
			   mac_address);
		rc = -ENONET;
		goto err_out;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			netdev_err(netdev, "unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			rc = -ENOMEM;
			goto err_out;
		}
	}

	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
			 netdev);
	if (rc != 0) {
		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
			   netdev->irq, rc);
		do {
			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

		goto err_out;
	}

	adapter->bounce_buffer =
		kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer) {
		netdev_err(netdev, "unable to allocate bounce buffer\n");
		rc = -ENOMEM;
		goto err_out_free_irq;
	}
	adapter->bounce_buffer_dma =
		dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			       netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		netdev_err(netdev, "unable to map bounce buffer\n");
		rc = -ENOMEM;
		goto err_out_free_irq;
	}

	netdev_dbg(netdev, "initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	netdev_dbg(netdev, "open complete\n");

	return 0;

err_out_free_irq:
	free_irq(netdev->irq, netdev);
err_out:
	ibmveth_cleanup(adapter);
	napi_disable(&adapter->napi);
	return rc;
}
static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long lpar_rc;

	netdev_dbg(netdev, "close starting\n");

	napi_disable(&adapter->napi);

	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
			   "continuing with close\n", lpar_rc);
	}

	free_irq(netdev->irq, netdev);

	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
						4096 - 8);

	ibmveth_cleanup(adapter);

	netdev_dbg(netdev, "close complete\n");

	return 0;
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
				SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
				ADVERTISED_FIBRE);
	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}
static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
	strncpy(info->version, ibmveth_driver_version,
		sizeof(info->version) - 1);
}
static netdev_features_t ibmveth_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/*
	 * Since the ibmveth firmware interface does not have the
	 * concept of separate tx/rx checksum offload enable, if rx
	 * checksum is disabled we also have to disable tx checksum
	 * offload. Once we disable rx checksum offload, we are no
	 * longer allowed to send tx buffers that are not properly
	 * checksummed.
	 */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_ALL_CSUM;

	return features;
}
static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	unsigned long set_attr6, clr_attr6;
	long ret, ret4, ret6;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;
	set_attr6 = 0;
	clr_attr6 = 0;

	if (data) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	} else {
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	}

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret4 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv4 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret4);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IP_CSUM;

		} else {
			adapter->fw_ipv4_csum_support = data;
		}

		ret6 = h_illan_attributes(adapter->vdev->unit_address,
					  clr_attr6, set_attr6, &ret_attr);

		if (ret6 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv6 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret6);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr6, clr_attr6, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IPV6_CSUM;

		} else {
			adapter->fw_ipv6_csum_support = data;
		}

		if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
			adapter->rx_csum = data;
		else
			rc1 = -EIO;
	} else {
		rc1 = -EIO;
		netdev_err(dev, "unable to change checksum offload settings."
				" %d rc=%ld ret_attr=%lx\n", data, ret,
				ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}
static int ibmveth_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	int rx_csum = !!(features & NETIF_F_RXCSUM);
	int rc;

	if (rx_csum == adapter->rx_csum)
		return 0;

	rc = ibmveth_set_csum_offload(dev, rx_csum);
	if (rc && !adapter->rx_csum)
		dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);

	return rc;
}
static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}
static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmveth_stats);
	default:
		return -EOPNOTSUPP;
	}
}
static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ibmveth_get_strings,
	.get_sset_count		= ibmveth_get_sset_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
};
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
static int ibmveth_send(struct ibmveth_adapter *adapter,
			union ibmveth_buf_desc *descs)
{
	unsigned long correlator;
	unsigned int retry_count;
	unsigned long ret;

	/*
	 * The retry count sets a maximum for the number of broadcast and
	 * multicast destinations within the system.
	 */
	retry_count = 1024;
	correlator = 0;
	do {
		ret = h_send_logical_lan(adapter->vdev->unit_address,
					 descs[0].desc, descs[1].desc,
					 descs[2].desc, descs[3].desc,
					 descs[4].desc, descs[5].desc,
					 correlator, &correlator);
	} while ((ret == H_BUSY) && (retry_count--));

	if (ret != H_SUCCESS && ret != H_DROPPED) {
		netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
			   "with rc=%ld\n", ret);
		return 1;
	}

	return 0;
}
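
/*
 * Note: h_send_logical_lan() takes exactly six buffer descriptors, which
 * is why ibmveth_start_xmit() below linearizes any skb with more than
 * five fragments - the header plus five frags fills descs[0..5].
 */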
static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags;
	union ibmveth_buf_desc descs[6];
	int last, i;
	int force_bounce = 0;
	dma_addr_t dma_addr;

	/*
	 * veth handles a maximum of 6 segments including the header, so
	 * we have to linearize the skb if there are more than this.
	 */
	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ((skb->protocol == htons(ETH_P_IP) &&
	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
	     (skb->protocol == htons(ETH_P_IPV6) &&
	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
	    skb_checksum_help(skb)) {

		netdev_err(netdev, "tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
						skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

retry_bounce:
	memset(descs, 0, sizeof(descs));

	/*
	 * If a linear packet is below the rx threshold then
	 * copy it into the static bounce buffer. This avoids the
	 * cost of a TCE insert and remove.
	 */
	if (force_bounce || (!skb_is_nonlinear(skb) &&
				(skb->len < tx_copybreak))) {
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);

		descs[0].fields.flags_len = desc_flags | skb->len;
		descs[0].fields.address = adapter->bounce_buffer_dma;

		if (ibmveth_send(adapter, descs)) {
			adapter->tx_send_failed++;
			netdev->stats.tx_dropped++;
		} else {
			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += skb->len;
		}

		goto out;
	}

	/* Map the header */
	dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
		goto map_failed;

	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
	descs[0].fields.address = dma_addr;

	/* Map the frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
					    skb_frag_size(frag), DMA_TO_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto map_failed_frags;

		descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
		descs[i+1].fields.address = dma_addr;
	}

	if (ibmveth_send(adapter, descs)) {
		adapter->tx_send_failed++;
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

	dma_unmap_single(&adapter->vdev->dev,
			 descs[0].fields.address,
			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			 DMA_TO_DEVICE);

	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

out:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;

map_failed_frags:
	last = i+1;
	for (i = 0; i < last; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

map_failed:
	if (!firmware_has_feature(FW_FEATURE_CMO))
		netdev_err(netdev, "tx: unable to map xmit buffer\n");
	adapter->tx_map_failed++;
	skb_linearize(skb);
	force_bounce = 1;
	goto retry_bounce;
}
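
/*
 * Design note on the transmit paths above: copying a small linear skb
 * into the pre-mapped bounce buffer trades one memcpy for a TCE (IOMMU)
 * map and unmap per packet, which is a win below tx_copybreak. The
 * map_failed/retry_bounce path reuses the same bounce buffer when
 * mapping fails, e.g. under CMO memory pressure.
 */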
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

restart_poll:
	do {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			netif_receive_skb(skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		napi_complete(napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if (napi_schedule_prep(&adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		BUG_ON(lpar_rc != H_SUCCESS);
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}
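
/*
 * Note: the handler just quiesces the interrupt source and defers to
 * NAPI; h_vio_signal(..., VIO_IRQ_DISABLE) masks further events until
 * ibmveth_poll() drains the queue and re-enables them with
 * VIO_IRQ_ENABLE.
 */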
static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if ((netdev->flags & IFF_PROMISC) ||
	    (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "entering promisc mode\n", lpar_rc);
		}
	} else {
		struct netdev_hw_addr *ha;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "attempting to clear filter table\n",
				   lpar_rc);
		}
		/* add the addresses to the filter table */
		netdev_for_each_mc_addr(ha, netdev) {
			/* add the multicast address to the filter table */
			unsigned long mcast_addr = 0;
			memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if (lpar_rc != H_SUCCESS) {
				netdev_err(netdev, "h_multicast_ctrl rc=%ld "
					   "when adding an entry to the filter "
					   "table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "enabling filtering\n", lpar_rc);
		}
	}
}
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct vio_dev *viodev = adapter->vdev;
	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
	int i, rc;
	int need_restart = 0;

	if (new_mtu < IBMVETH_MIN_MTU)
		return -EINVAL;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
			break;

	if (i == IBMVETH_NUM_BUFF_POOLS)
		return -EINVAL;

	/* Deactivate all the buffer pools so that the next loop can activate
	   only the buffer pools necessary to hold the new MTU */
	if (netif_running(adapter->netdev)) {
		need_restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(adapter->netdev);
		adapter->pool_config = 0;
	}

	/* Look for an active buffer pool that can hold the new MTU */
	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		adapter->rx_buff_pool[i].active = 1;

		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
			dev->mtu = new_mtu;
			vio_cmo_set_dev_desired(viodev,
						ibmveth_get_desired_dma
						(viodev));
			if (need_restart)
				return ibmveth_open(adapter->netdev);
			return 0;
		}
	}

	if (need_restart && (rc = ibmveth_open(adapter->netdev)))
		return rc;

	return -EINVAL;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
	ibmveth_replenish_task(netdev_priv(dev));
	ibmveth_interrupt(dev->irq, dev);
}
#endif
/**
 * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
 *
 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
 *
 * Return value:
 *	Number of bytes of IO data the driver will need to perform well.
 */
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmveth_adapter *adapter;
	unsigned long ret;
	int i;
	int rxqentries = 1;

	/* netdev inits at probe time along with the structures we need below */
	if (netdev == NULL)
		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);

	adapter = netdev_priv(netdev);

	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
	ret += IOMMU_PAGE_ALIGN(netdev->mtu);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		/* add the size of the active receive buffers */
		if (adapter->rx_buff_pool[i].active)
			ret +=
			    adapter->rx_buff_pool[i].size *
			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
					     buff_size);
		rxqentries += adapter->rx_buff_pool[i].size;
	}
	/* add the size of the receive queue entries */
	ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));

	return ret;
}
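
/*
 * Worked example (illustrative numbers only, assuming 4K IOMMU pages):
 * with netdev->mtu = 1500 and a single active pool of 512 x 2048-byte
 * buffers, the estimate above is roughly
 *
 *	8192 (buffer + filter lists) + 4096 (page-aligned MTU)
 *	+ 512 * 4096 (each buffer rounds up to a page) + rx queue pages
 *
 * i.e. a little over 2MB of IO entitlement.
 */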
static const struct net_device_ops ibmveth_netdev_ops = {
	.ndo_open		= ibmveth_open,
	.ndo_stop		= ibmveth_close,
	.ndo_start_xmit		= ibmveth_start_xmit,
	.ndo_set_rx_mode	= ibmveth_set_multicast_list,
	.ndo_do_ioctl		= ibmveth_ioctl,
	.ndo_change_mtu		= ibmveth_change_mtu,
	.ndo_fix_features	= ibmveth_fix_features,
	.ndo_set_features	= ibmveth_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmveth_poll_controller,
#endif
};
static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	int rc, i;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter;
	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;

	dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
							NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
		return -EINVAL;
	}

	mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
						VETH_MCAST_FILTER_SIZE, NULL);
	if (!mcastFilterSize_p) {
		dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
			"attribute\n");
		return -EINVAL;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize = *mcastFilterSize_p;
	adapter->pool_config = 0;

	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

	/*
	 * Some older boxes running PHYP non-natively have an OF that returns
	 * an 8-byte local-mac-address field (and the first 2 bytes have to be
	 * ignored) while newer boxes' OF return a 6-byte field. Note that
	 * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
	 * The RPA doc specifies that the first byte must be 10b, so we'll
	 * just look for it to solve this 8 vs. 6 byte field issue
	 */
	if ((*mac_addr_p & 0x3) != 0x02)
		mac_addr_p += 2;

	adapter->mac_addr = 0;
	memcpy(&adapter->mac_addr, mac_addr_p, 6);

	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmveth_netdev_ops;
	netdev->ethtool_ops = &netdev_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);
	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	netdev->features |= netdev->hw_features;

	memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
		int error;

		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);
		error = kobject_init_and_add(kobj, &ktype_veth_pool,
					     &dev->dev.kobj, "pool%d", i);
		if (!error)
			kobject_uevent(kobj, KOBJ_ADD);
	}

	netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	netdev_dbg(netdev, "registering netdev...\n");

	ibmveth_set_features(netdev, netdev->features);

	rc = register_netdev(netdev);

	if (rc) {
		netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	netdev_dbg(netdev, "registered\n");

	return 0;
}
static int ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	int i;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		kobject_put(&adapter->rx_buff_pool[i].kobj);

	unregister_netdev(netdev);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

static ssize_t veth_pool_show(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}
static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	struct net_device *netdev = dev_get_drvdata(
	    container_of(kobj->parent, struct device, kobj));
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			if (netif_running(netdev)) {
				if (ibmveth_alloc_buffer_pool(pool)) {
					netdev_err(netdev,
						   "unable to alloc pool\n");
					return -ENOMEM;
				}
				pool->active = 1;
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->active = 1;
			}
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;
			/* Make sure there is a buffer pool with buffers that
			   can hold a packet of the size of the MTU */
			for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}

			if (i == IBMVETH_NUM_BUFF_POOLS) {
				netdev_err(netdev, "no active pool >= MTU\n");
				return -EPERM;
			}

			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				pool->active = 0;
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			}
			pool->active = 0;
		}
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->size = value;
			}
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->buff_size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->buff_size = value;
			}
		}
	}

	/* kick the interrupt handler to allocate/deallocate pools */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;
}
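
/*
 * Example usage (the unit address 30000002 is hypothetical): each pool
 * appears as a pool%d directory under the vio device with the
 * attributes defined below, e.g.
 *
 *	echo 1    > /sys/devices/vio/30000002/pool3/active
 *	echo 768  > /sys/devices/vio/30000002/pool2/num
 *	echo 4096 > /sys/devices/vio/30000002/pool2/size
 *
 * ("num" is the buffer count, "size" the per-buffer size). Writes on a
 * running device briefly close and reopen it, as implemented above.
 */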
#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
	.name = __stringify(_name), .mode = _mode,	\
	};

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);

static struct attribute *veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};

static const struct sysfs_ops veth_pool_ops = {
	.show   = veth_pool_show,
	.store  = veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
	.release        = NULL,
	.sysfs_ops      = &veth_pool_ops,
	.default_attrs  = veth_pool_attrs,
};
static int ibmveth_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	ibmveth_interrupt(netdev->irq, netdev);
	return 0;
}

static struct vio_device_id ibmveth_device_table[] = {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
static struct dev_pm_ops ibmveth_pm_ops = {
	.resume = ibmveth_resume
};

static struct vio_driver ibmveth_driver = {
	.id_table	= ibmveth_device_table,
	.probe		= ibmveth_probe,
	.remove		= ibmveth_remove,
	.get_desired_dma = ibmveth_get_desired_dma,
	.name		= ibmveth_driver_name,
	.pm		= &ibmveth_pm_ops,
};
static int __init ibmveth_module_init(void)
{
	printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
	       ibmveth_driver_string, ibmveth_driver_version);

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);