/*********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2010 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 *********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/ratelimit.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */
#include <linux/atomic.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-tx.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-wqe.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-helper.h>

#include <asm/octeon/cvmx-gmxx-defs.h>

#define CVM_OCT_SKB_CB(skb)	((u64 *)((skb)->cb))
/*
 * You can define GET_SKBUFF_QOS() to override how the skbuff output
 * function determines which output queue is used. The default
 * implementation always uses the base queue for the port. If, for
 * example, you wanted to use the skb->priority field, define
 * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority)
 */
#ifndef GET_SKBUFF_QOS
#define GET_SKBUFF_QOS(skb) 0
#endif
static void cvm_oct_tx_do_cleanup(unsigned long arg);
static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);

/* Maximum number of SKBs to try to free per xmit packet. */
#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)
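
/*
 * The per-queue FAU counter is decremented by the PKO hardware once per
 * completed packet (see the reg0/size0/subone0 setup in cvm_oct_xmit()),
 * so its value is minus the number of skbs that are ready to be freed.
 * cvm_oct_adjust_skb_to_free() runs after a speculative fetch-and-add of
 * MAX_SKB_TO_FREE: it undoes whatever part of that add was not needed
 * and converts the fetched value into a positive count capped at
 * MAX_SKB_TO_FREE.  Worked example: if the counter was -3, the fetch
 * leaves it at MAX_SKB_TO_FREE - 3, the undo of MAX_SKB_TO_FREE - 3
 * returns it to 0, and 3 is reported as freeable.
 */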
static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
{
	int32_t undo;

	undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
						   MAX_SKB_TO_FREE;
	if (undo > 0)
		cvmx_fau_atomic_add32(fau, -undo);
	skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE :
						       -skb_to_free;
	return skb_to_free;
}
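
/*
 * Arm CIU timer 1 as a one-shot so that cvm_oct_tx_cleanup_watchdog()
 * below eventually fires and schedules the cleanup tasklet, ensuring
 * queued TX skbs are reclaimed even if no further transmits occur.
 */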
static void cvm_oct_kick_tx_poll_watchdog(void)
{
	union cvmx_ciu_timx ciu_timx;

	ciu_timx.u64 = 0;
	ciu_timx.s.one_shot = 1;
	ciu_timx.s.len = cvm_oct_tx_poll_interval;
	cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
}
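
/*
 * Reclaim skbs whose transmission has completed: for each queue, read
 * the hardware's completion count from the FAU, unlink that many skbs
 * from the free list under the lock, and free them outside the lock.
 */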
static void cvm_oct_free_tx_skbs(struct net_device *dev)
{
	int32_t skb_to_free;
	int qos, queues_per_port;
	int total_freed = 0;
	int total_remaining = 0;
	unsigned long flags;
	struct octeon_ethernet *priv = netdev_priv(dev);

	queues_per_port = cvmx_pko_get_num_queues(priv->port);
	/* Drain any pending packets in the free list */
	for (qos = 0; qos < queues_per_port; qos++) {
		if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
			continue;
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
						       MAX_SKB_TO_FREE);
		skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
							 priv->fau + qos * 4);

		total_freed += skb_to_free;
		if (skb_to_free > 0) {
			struct sk_buff *to_free_list = NULL;

			spin_lock_irqsave(&priv->tx_free_list[qos].lock,
					  flags);
			while (skb_to_free > 0) {
				struct sk_buff *t;

				t = __skb_dequeue(&priv->tx_free_list[qos]);
				t->next = to_free_list;
				to_free_list = t;
				skb_to_free--;
			}
			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
					       flags);
			/* Do the actual freeing outside of the lock. */
			while (to_free_list) {
				struct sk_buff *t = to_free_list;

				to_free_list = to_free_list->next;
				dev_kfree_skb_any(t);
			}
		}
		total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
	}
	if (total_freed >= 0 && netif_queue_stopped(dev))
		netif_wake_queue(dev);
	if (total_remaining)
		cvm_oct_kick_tx_poll_watchdog();
}
/**
 * cvm_oct_xmit - transmit a packet
 * @skb:    Packet to send
 * @dev:    Device info structure
 *
 * Returns: Always NETDEV_TX_OK
 */
int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
	cvmx_pko_command_word0_t pko_command;
	union cvmx_buf_ptr hw_buffer;
	uint64_t old_scratch;
	uint64_t old_scratch2;
	int qos;
	int i;
	enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
	struct octeon_ethernet *priv = netdev_priv(dev);
	struct sk_buff *to_free_list;
	int32_t skb_to_free;
	int32_t buffers_to_free;
	u32 total_to_clean;
	unsigned long flags;
#if REUSE_SKBUFFS_WITHOUT_FREE
	unsigned char *fpa_head;
#endif
	/*
	 * Prefetch the private data structure. It is larger than one
	 * cache line.
	 */
	prefetch(priv);

	/*
	 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
	 * completely remove "qos" in the event neither interface
	 * supports multiple queues per port.
	 */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_SKBUFF_QOS(skb);
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else
		qos = 0;
	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);

		/*
		 * Fetch and increment the number of packets to be
		 * freed.
		 */
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
					       FAU_NUM_PACKET_BUFFERS_TO_FREE,
					       0);
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
					       priv->fau + qos * 4,
					       MAX_SKB_TO_FREE);
	}
	/*
	 * We have space for 6 segment pointers. If there will be more
	 * than that, we must linearize.
	 */
	if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
		if (unlikely(__skb_linearize(skb))) {
			queue_type = QUEUE_DROP;
			if (USE_ASYNC_IOBDMA) {
				/*
				 * Get the number of skbuffs in use
				 * by the hardware
				 */
				CVMX_SYNCIOBDMA;
				skb_to_free =
					cvmx_scratch_read64(CVMX_SCR_SCRATCH);
			} else {
				/*
				 * Get the number of skbuffs in use
				 * by the hardware
				 */
				skb_to_free = cvmx_fau_fetch_and_add32(
					priv->fau + qos * 4, MAX_SKB_TO_FREE);
			}
			skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
							priv->fau + qos * 4);
			spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
			goto skip_xmit;
		}
	}
	/*
	 * The CN3XXX series of parts has an errata (GMX-401) which
	 * causes the GMX block to hang if a collision occurs towards
	 * the end of a <68 byte packet. As a workaround for this, we
	 * pad packets to be 68 bytes whenever we are in half duplex
	 * mode. We don't handle the case of having a small packet but
	 * no room to add the padding. The kernel should always give
	 * us at least a cache line.
	 */
	if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad packet in half duplex mode */
			gmx_prt_cfg.u64 =
			    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				int add_bytes = 64 - skb->len;

				if ((skb_tail_pointer(skb) + add_bytes) <=
				    skb_end_pointer(skb))
					memset(__skb_put(skb, add_bytes), 0,
					       add_bytes);
			}
		}
	}
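
	/*
	 * Note that the code pads the frame data to 64 bytes while the
	 * errata above talks about 68; the remaining 4 bytes are
	 * presumably the FCS that the GMX appends on the wire.
	 */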
	/* Build the PKO command */
	pko_command.u64 = 0;
	pko_command.s.n2 = 1;	/* Don't pollute L2 with the outgoing packet */
	pko_command.s.segs = 1;
	pko_command.s.total_bytes = skb->len;
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;

	pko_command.s.dontfree = 1;
	/* Build the PKO buffer pointer */
	hw_buffer.u64 = 0;
	if (skb_shinfo(skb)->nr_frags == 0) {
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = skb->len;
	} else {
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = skb_headlen(skb);
		CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;

			hw_buffer.s.addr = XKPHYS_TO_PHYS(
				(u64)(page_address(fs->page.p) +
				fs->page_offset));
			hw_buffer.s.size = fs->size;
			CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
		}
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb));
		hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
		pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
		pko_command.s.gather = 1;
		goto dont_put_skbuff_in_hw;
	}
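
	/*
	 * A note on the gather list just built: skb->cb is 48 bytes, i.e.
	 * room for exactly six u64 buffer pointers via CVM_OCT_SKB_CB().
	 * Entry 0 describes the linear header and entries 1..nr_frags the
	 * page fragments, which is why more than five fragments force the
	 * linearization above.
	 */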
	/*
	 * See if we can put this skb in the FPA pool. Any strange
	 * behavior from the Linux networking stack will most likely
	 * be caused by a bug in the following code. If some field is
	 * in use by the network stack and gets carried over when a
	 * buffer is reused, bad things may happen. If in doubt and
	 * you don't need the absolute best performance, disable the
	 * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has
	 * shown a 25% increase in performance under some loads.
	 */
#if REUSE_SKBUFFS_WITHOUT_FREE
	fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
	if (unlikely(skb->data < fpa_head)) {
		/*
		 * printk("TX buffer beginning can't meet FPA
		 * alignment constraints\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely
	    ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
		/*
		 * printk("TX buffer isn't large enough for the FPA\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shared(skb))) {
		/*
		 * printk("TX buffer sharing data with someone else\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_cloned(skb))) {
		/*
		 * printk("TX buffer has been cloned\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_header_cloned(skb))) {
		/*
		 * printk("TX buffer header has been cloned\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb->destructor)) {
		/*
		 * printk("TX buffer has a destructor\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shinfo(skb)->nr_frags)) {
		/*
		 * printk("TX buffer has fragments\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely
	    (skb->truesize !=
	     sizeof(*skb) + skb_end_offset(skb))) {
		/*
		 * printk("TX buffer truesize has been changed\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	/*
	 * We can use this buffer in the FPA. We don't need the FAU
	 * update anymore.
	 */
	pko_command.s.dontfree = 0;

	hw_buffer.s.back = ((unsigned long)skb->data >> 7) -
			   ((unsigned long)fpa_head >> 7);

	*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;
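
	/*
	 * The back field is expressed in 128-byte cache lines (hence the
	 * shifts by 7): it tells the FPA how far before skb->data the
	 * buffer actually starts, e.g. back = 2 when skb->data sits 256
	 * bytes past the 128-byte-aligned fpa_head.
	 */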
	/*
	 * The skbuff will be reused without ever being freed. We must
	 * cleanup a bunch of core things.
	 */
	dst_release(skb_dst(skb));
	skb_dst_set(skb, NULL);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
	skb->sp = NULL;
#endif
	nf_reset(skb);

#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif /* CONFIG_NET_CLS_ACT */
#endif /* CONFIG_NET_SCHED */
#endif /* REUSE_SKBUFFS_WITHOUT_FREE */
dont_put_skbuff_in_hw:

	/* Check if we can use the hardware checksumming */
	if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) &&
	    ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14))
	    && ((ip_hdr(skb)->protocol == IPPROTO_TCP)
		|| (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
		/* Use hardware checksum calc */
		pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
	}
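
	/*
	 * As the field name suggests, ipoffp1 is the IP header offset
	 * plus one (zero disables hardware checksumming).  The value
	 * written is sizeof(struct ethhdr) + 1 = 15, i.e. the IP header
	 * begins 14 bytes into the frame.
	 */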
	if (USE_ASYNC_IOBDMA) {
		/* Get the number of skbuffs in use by the hardware */
		CVMX_SYNCIOBDMA;
		skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
	} else {
		/* Get the number of skbuffs in use by the hardware */
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
						       MAX_SKB_TO_FREE);
		buffers_to_free =
		    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
	}

	skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
						 priv->fau + qos * 4);
	/*
	 * If we're sending faster than the receive path can free them,
	 * then don't do the HW free.
	 */
	if ((buffers_to_free < -100) && !pko_command.s.dontfree)
		pko_command.s.dontfree = 1;

	if (pko_command.s.dontfree) {
		queue_type = QUEUE_CORE;
		pko_command.s.reg0 = priv->fau + qos * 4;
	} else {
		queue_type = QUEUE_HW;
	}
	if (USE_ASYNC_IOBDMA)
		cvmx_fau_async_fetch_and_add32(
				CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
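
	/*
	 * QUEUE_CORE: the skb is kept on tx_free_list and freed by the
	 * kernel after PKO decrements the FAU counter named in reg0.
	 * QUEUE_HW: the buffer was donated to the FPA pool above, so the
	 * hardware frees it and only the global accounting in
	 * FAU_NUM_PACKET_BUFFERS_TO_FREE needs to be maintained.
	 */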
	spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);

	/* Drop this packet if we have too many already queued to the HW */
	if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >=
		     MAX_OUT_QUEUE_DEPTH)) {
		if (dev->tx_queue_len != 0) {
			/* Drop the lock when notifying the core. */
			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
					       flags);
			netif_stop_queue(dev);
			spin_lock_irqsave(&priv->tx_free_list[qos].lock,
					  flags);
		} else {
			/* If not using normal queueing. */
			queue_type = QUEUE_DROP;
			goto skip_xmit;
		}
	}
	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
				     CVMX_PKO_LOCK_NONE);

	/* Send the packet to the output queue */
	if (unlikely(cvmx_pko_send_packet_finish(priv->port,
						 priv->queue + qos,
						 pko_command, hw_buffer,
						 CVMX_PKO_LOCK_NONE))) {
		printk_ratelimited("%s: Failed to send the packet\n",
				   dev->name);
		queue_type = QUEUE_DROP;
	}
skip_xmit:
	to_free_list = NULL;
	switch (queue_type) {
	case QUEUE_DROP:
		skb->next = to_free_list;
		to_free_list = skb;
		priv->stats.tx_dropped++;
		break;
	case QUEUE_HW:
		cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
		break;
	case QUEUE_CORE:
		__skb_queue_tail(&priv->tx_free_list[qos], skb);
		break;
	default:
		BUG();
	}

	while (skb_to_free > 0) {
		struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);

		t->next = to_free_list;
		to_free_list = t;
		skb_to_free--;
	}

	spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);

	/* Do the actual freeing outside of the lock. */
	while (to_free_list) {
		struct sk_buff *t = to_free_list;

		to_free_list = to_free_list->next;
		dev_kfree_skb_any(t);
	}
	if (USE_ASYNC_IOBDMA) {
		CVMX_SYNCIOBDMA;
		total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
		cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
	} else {
		total_to_clean = cvmx_fau_fetch_and_add32(
				FAU_TOTAL_TX_TO_CLEAN, 1);
	}

	if (total_to_clean & 0x3ff) {
		/*
		 * Schedule the cleanup tasklet every 1024 packets for
		 * the pathological case of high traffic on one port
		 * delaying clean up of packets on a different port
		 * that is blocked waiting for the cleanup.
		 */
		tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
	}

	cvm_oct_kick_tx_poll_watchdog();

	return NETDEV_TX_OK;
}
/**
 * cvm_oct_xmit_pow - transmit a packet to the POW
 * @skb:    Packet to send
 * @dev:    Device info structure
 *
 * Returns: Always zero
 */
int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	void *packet_buffer;
	void *copy_location;

	/* Get a work queue entry */
	cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);

	if (unlikely(work == NULL)) {
		printk_ratelimited("%s: Failed to allocate a work queue entry\n",
				   dev->name);
		priv->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return 0;
	}
	/* Get a packet buffer */
	packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
	if (unlikely(packet_buffer == NULL)) {
		printk_ratelimited("%s: Failed to allocate a packet buffer\n",
				   dev->name);
		cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
		priv->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return 0;
	}
	/*
	 * Calculate where we need to copy the data to. We need to
	 * leave 8 bytes for a next pointer (unused). We also need to
	 * include any configured skip. Then we need to align the IP
	 * packet src and dest into the same 64bit word. The below
	 * calculation may add a little extra, but that doesn't
	 * matter.
	 */
	copy_location = packet_buffer + sizeof(uint64_t);
	copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;
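
	/*
	 * Worked through: 8 bytes for the next pointer, the first-mbuff
	 * skip rounded up to a multiple of 8, then 6 more bytes.  Starting
	 * the Ethernet header 6 bytes into a 64-bit word puts the IP
	 * header at byte 20, so the IP source (header offset 12, bytes
	 * 32-35) and destination (offset 16, bytes 36-39) share one
	 * aligned 64-bit word.
	 */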
	/*
	 * We have to copy the packet since whoever processes this
	 * packet will free it to a hardware pool. We can't use the
	 * trick of counting outstanding packets like in
	 * cvm_oct_xmit().
	 */
	memcpy(copy_location, skb->data, skb->len);
	/*
	 * Fill in some of the work queue fields. We may need to add
	 * more if the software at the other end needs them.
	 */
	work->hw_chksum = skb->csum;
	work->len = skb->len;
	work->ipprt = priv->port;
	work->qos = priv->port & 0x7;
	work->grp = pow_send_group;
	work->tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
	work->tag = pow_send_group;	/* FIXME */
	/* Default to zero. Sets of zero later are commented out */
	work->word2.u64 = 0;
	work->word2.s.bufs = 1;
	work->packet_ptr.u64 = 0;
	work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
	work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
	work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
	work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;
	if (skb->protocol == htons(ETH_P_IP)) {
		work->word2.s.ip_offset = 14;
#if 0
		work->word2.s.vlan_valid = 0;	/* FIXME */
		work->word2.s.vlan_cfi = 0;	/* FIXME */
		work->word2.s.vlan_id = 0;	/* FIXME */
		work->word2.s.dec_ipcomp = 0;	/* FIXME */
#endif
		work->word2.s.tcp_or_udp =
		    (ip_hdr(skb)->protocol == IPPROTO_TCP)
		    || (ip_hdr(skb)->protocol == IPPROTO_UDP);
#if 0
		/* No error, packet is internal */
		work->word2.s.dec_ipsec = 0;
		/* We only support IPv4 right now */
		work->word2.s.is_v6 = 0;
		/* Hardware would set to zero */
		work->word2.s.software = 0;
		/* No error, packet is internal */
		work->word2.s.L4_error = 0;
#endif
		work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0)
					  || (ip_hdr(skb)->frag_off ==
					      1 << 14));
#if 0
		/* Assume Linux is sending a good packet */
		work->word2.s.IP_exc = 0;
#endif
		work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
		work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
#if 0
		/* This is an IP packet */
		work->word2.s.not_IP = 0;
		/* No error, packet is internal */
		work->word2.s.rcv_error = 0;
		/* No error, packet is internal */
		work->word2.s.err_code = 0;
#endif

		/*
		 * When copying the data, include 4 bytes of the
		 * ethernet header to align the same way hardware
		 * does.
		 */
		memcpy(work->packet_data, skb->data + 10,
		       sizeof(work->packet_data));
	} else {
#if 0
		work->word2.snoip.vlan_valid = 0;	/* FIXME */
		work->word2.snoip.vlan_cfi = 0;	/* FIXME */
		work->word2.snoip.vlan_id = 0;	/* FIXME */
		work->word2.snoip.software = 0;	/* Hardware would set to zero */
#endif
		work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
		work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
		work->word2.snoip.is_bcast =
		    (skb->pkt_type == PACKET_BROADCAST);
		work->word2.snoip.is_mcast =
		    (skb->pkt_type == PACKET_MULTICAST);
		work->word2.snoip.not_IP = 1;	/* IP was done up above */
#if 0
		/* No error, packet is internal */
		work->word2.snoip.rcv_error = 0;
		/* No error, packet is internal */
		work->word2.snoip.err_code = 0;
#endif
		memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
	}
	/* Submit the packet to the POW */
	cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos,
			     work->grp);
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += skb->len;
	dev_consume_skb_any(skb);
	return 0;
}
/**
 * cvm_oct_tx_shutdown_dev - free all skbs that are currently queued for TX.
 * @dev:    Device being shutdown
 */
void cvm_oct_tx_shutdown_dev(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	unsigned long flags;
	int qos;

	for (qos = 0; qos < 16; qos++) {
		spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
		while (skb_queue_len(&priv->tx_free_list[qos]))
			dev_kfree_skb_any(__skb_dequeue
					  (&priv->tx_free_list[qos]));
		spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
	}
}
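
/*
 * Tasklet body: walk all ports and reclaim completed TX skbs.  It is
 * scheduled both periodically from cvm_oct_xmit() and from the CIU
 * timer watchdog below.
 */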
static void cvm_oct_tx_do_cleanup(unsigned long arg)
{
	int port;

	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];

			cvm_oct_free_tx_skbs(dev);
		}
	}
}
static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id)
{
	/* Disable the interrupt. */
	cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
	/* Do the work in the tasklet. */
	tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
	return IRQ_HANDLED;
}
void cvm_oct_tx_initialize(void)
{
	int i;

	/* Disable the interrupt. */
	cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
	/* Register an IRQ handler to receive CIU_TIMX(1) interrupts */
	i = request_irq(OCTEON_IRQ_TIMER1,
			cvm_oct_tx_cleanup_watchdog, 0,
			"Ethernet", cvm_oct_device);

	if (i)
		panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1);
}
void cvm_oct_tx_shutdown(void)
{
	/* Free the interrupt handler */
	free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device);
}