/*******************************************************************************
  This contains the functions to handle the enhanced descriptors.

  Copyright (C) 2007-2009  STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
#include <linux/stmmac.h>
#include "common.h"
#include "descs_com.h"
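
/* Parse the transmit status word (TDES0) once the DMA has closed the
 * descriptor: account the error counters in stats/extra stats and flush
 * the TX FIFO on the errors that require it.
 */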
static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
                                  struct dma_desc *p, void __iomem *ioaddr)
{
        int ret = 0;
        struct net_device_stats *stats = (struct net_device_stats *)data;

        if (unlikely(p->des01.etx.error_summary)) {
                if (unlikely(p->des01.etx.jabber_timeout))
                        x->tx_jabber++;
                if (unlikely(p->des01.etx.frame_flushed)) {
                        x->tx_frame_flushed++;
                        dwmac_dma_flush_tx_fifo(ioaddr);
                }
                if (unlikely(p->des01.etx.loss_carrier)) {
                        x->tx_losscarrier++;
                        stats->tx_carrier_errors++;
                }
                if (unlikely(p->des01.etx.no_carrier)) {
                        x->tx_carrier++;
                        stats->tx_carrier_errors++;
                }
                if (unlikely(p->des01.etx.late_collision))
                        stats->collisions += p->des01.etx.collision_count;
                if (unlikely(p->des01.etx.excessive_collisions))
                        stats->collisions += p->des01.etx.collision_count;
                if (unlikely(p->des01.etx.excessive_deferral))
                        x->tx_deferred++;
                if (unlikely(p->des01.etx.underflow_error)) {
                        dwmac_dma_flush_tx_fifo(ioaddr);
                        x->tx_underflow++;
                }
                if (unlikely(p->des01.etx.ip_header_error))
                        x->tx_ip_header_error++;
                if (unlikely(p->des01.etx.payload_error)) {
                        x->tx_payload_error++;
                        dwmac_dma_flush_tx_fifo(ioaddr);
                }
                ret = -1;
        }

        if (unlikely(p->des01.etx.deferred))
                x->tx_deferred++;

#ifdef STMMAC_VLAN_TAG_USED
        if (p->des01.etx.vlan_frame)
                x->tx_vlan++;
#endif

        return ret;
}
static int enh_desc_get_tx_len(struct dma_desc *p)
{
        return p->des01.etx.buffer1_size;
}
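
/* Map the RDES0 checksum offload bits (frame type, IP header error,
 * payload error) onto the frame status codes used by the RX path:
 * good_frame, csum_none, llc_snap or discard_frame.
 */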
static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
{
        int ret = good_frame;
        u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;

        /* bits 5 7 0 | Frame status
         * ----------------------------------------------------------
         *      0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
         *      1 0 0 | IPv4/6 No CSUM errors.
         *      1 0 1 | IPv4/6 CSUM PAYLOAD error
         *      1 1 0 | IPv4/6 CSUM IP HR error
         *      1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errors
         *      0 0 1 | IPv4/6 unsupported IP PAYLOAD
         *      0 1 1 | COE bypassed.. no IPv4/6 frame
         *      0 1 0 | Reserved.
         */
        if (status == 0x0)
                ret = llc_snap;
        else if (status == 0x4)
                ret = good_frame;
        else if (status == 0x5)
                ret = csum_none;
        else if (status == 0x6)
                ret = csum_none;
        else if (status == 0x7)
                ret = csum_none;
        else if (status == 0x1)
                ret = discard_frame;
        else if (status == 0x3)
                ret = discard_frame;
        return ret;
}
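
/* Parse the extended receive status word (RDES4): IP checksum outcome,
 * received PTP message type, AV traffic and L3/L4 filter results.
 */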
static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
                                    struct dma_extended_desc *p)
{
        if (unlikely(p->basic.des01.erx.rx_mac_addr)) {
                if (p->des4.erx.ip_hdr_err)
                        x->ip_hdr_err++;
                if (p->des4.erx.ip_payload_err)
                        x->ip_payload_err++;
                if (p->des4.erx.ip_csum_bypassed)
                        x->ip_csum_bypassed++;
                if (p->des4.erx.ipv4_pkt_rcvd)
                        x->ipv4_pkt_rcvd++;
                if (p->des4.erx.ipv6_pkt_rcvd)
                        x->ipv6_pkt_rcvd++;
                if (p->des4.erx.msg_type == RDES_EXT_SYNC)
                        x->rx_msg_type_sync++;
                else if (p->des4.erx.msg_type == RDES_EXT_FOLLOW_UP)
                        x->rx_msg_type_follow_up++;
                else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ)
                        x->rx_msg_type_delay_req++;
                else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
                        x->rx_msg_type_delay_resp++;
                else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ)
                        x->rx_msg_type_pdelay_req++;
                else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
                        x->rx_msg_type_pdelay_resp++;
                else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_FOLLOW_UP)
                        x->rx_msg_type_pdelay_follow_up++;
                else
                        x->rx_msg_type_ext_no_ptp++;
                if (p->des4.erx.ptp_frame_type)
                        x->ptp_frame_type++;
                if (p->des4.erx.ptp_ver)
                        x->ptp_ver++;
                if (p->des4.erx.timestamp_dropped)
                        x->timestamp_dropped++;
                if (p->des4.erx.av_pkt_rcvd)
                        x->av_pkt_rcvd++;
                if (p->des4.erx.av_tagged_pkt_rcvd)
                        x->av_tagged_pkt_rcvd++;
                if (p->des4.erx.vlan_tag_priority_val)
                        x->vlan_tag_priority_val++;
                if (p->des4.erx.l3_filter_match)
                        x->l3_filter_match++;
                if (p->des4.erx.l4_filter_match)
                        x->l4_filter_match++;
                if (p->des4.erx.l3_l4_filter_no_match)
                        x->l3_l4_filter_no_match++;
        }
}
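
/* Parse the receive status word (RDES0): account the error counters and
 * return whether the frame is good, must be dropped, or needs its
 * checksum completed in software.
 */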
static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
                                  struct dma_desc *p)
{
        int ret = good_frame;
        struct net_device_stats *stats = (struct net_device_stats *)data;

        if (unlikely(p->des01.erx.error_summary)) {
                if (unlikely(p->des01.erx.descriptor_error)) {
                        x->rx_desc++;
                        stats->rx_length_errors++;
                }
                if (unlikely(p->des01.erx.overflow_error))
                        x->rx_gmac_overflow++;
                if (unlikely(p->des01.erx.ipc_csum_error))
                        pr_err("\tIPC Csum Error/Giant frame\n");
                if (unlikely(p->des01.erx.late_collision)) {
                        stats->collisions++;
                }
                if (unlikely(p->des01.erx.receive_watchdog))
                        x->rx_watchdog++;
                if (unlikely(p->des01.erx.error_gmii))
                        x->rx_mii++;
                if (unlikely(p->des01.erx.crc_error)) {
                        x->rx_crc++;
                        stats->rx_crc_errors++;
                }
                ret = discard_frame;
        }

        /* After a payload csum error, the ES bit is set.
         * It does not match the information reported in the databook.
         * At any rate, we need to understand if the CSUM hw computation is ok
         * and report this info to the upper layers. */
        ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
                                 p->des01.erx.frame_type, p->des01.erx.rx_mac_addr);

        if (unlikely(p->des01.erx.dribbling))
                x->dribbling_bit++;

        if (unlikely(p->des01.erx.sa_filter_fail)) {
                x->sa_rx_filter_fail++;
                ret = discard_frame;
        }
        if (unlikely(p->des01.erx.da_filter_fail)) {
                x->da_rx_filter_fail++;
                ret = discard_frame;
        }
        if (unlikely(p->des01.erx.length_error)) {
                x->rx_length++;
                ret = discard_frame;
        }
#ifdef STMMAC_VLAN_TAG_USED
        if (p->des01.erx.vlan_tag)
                x->rx_vlan++;
#endif

        return ret;
}
static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
                                  int mode, int end)
{
        p->des01.all_flags = 0;
        p->des01.erx.own = 1;
        p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;

        if (mode == STMMAC_CHAIN_MODE)
                ehn_desc_rx_set_on_chain(p, end);
        else
                ehn_desc_rx_set_on_ring(p, end);

        if (disable_rx_ic)
                p->des01.erx.disable_ic = 1;
}
static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
        p->des01.all_flags = 0;
        if (mode == STMMAC_CHAIN_MODE)
                ehn_desc_tx_set_on_chain(p, end);
        else
                ehn_desc_tx_set_on_ring(p, end);
}
static int enh_desc_get_tx_owner(struct dma_desc *p)
{
        return p->des01.etx.own;
}

static int enh_desc_get_rx_owner(struct dma_desc *p)
{
        return p->des01.erx.own;
}

static void enh_desc_set_tx_owner(struct dma_desc *p)
{
        p->des01.etx.own = 1;
}

static void enh_desc_set_rx_owner(struct dma_desc *p)
{
        p->des01.erx.own = 1;
}

static int enh_desc_get_tx_ls(struct dma_desc *p)
{
        return p->des01.etx.last_segment;
}
static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
{
        int ter = p->des01.etx.end_ring;

        memset(p, 0, offsetof(struct dma_desc, des2));
        if (mode == STMMAC_CHAIN_MODE)
                enh_desc_end_tx_desc_on_chain(p, ter);
        else
                enh_desc_end_tx_desc_on_ring(p, ter);
}
static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
                                     int csum_flag, int mode)
{
        p->des01.etx.first_segment = is_fs;

        if (mode == STMMAC_CHAIN_MODE)
                enh_set_tx_desc_len_on_chain(p, len);
        else
                enh_set_tx_desc_len_on_ring(p, len);

        if (likely(csum_flag))
                p->des01.etx.checksum_insertion = cic_full;
}
static void enh_desc_clear_tx_ic(struct dma_desc *p)
{
        p->des01.etx.interrupt = 0;
}

static void enh_desc_close_tx_desc(struct dma_desc *p)
{
        p->des01.etx.last_segment = 1;
        p->des01.etx.interrupt = 1;
}
static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
{
        /* The type-1 checksum offload engines append the checksum at
         * the end of the frame and the two bytes of checksum are added
         * to the length.
         * Adjust the frame length for that in the type-1 checksum
         * offload case. */
        if (rx_coe_type == STMMAC_RX_COE_TYPE1)
                return p->des01.erx.frame_length - 2;
        else
                return p->des01.erx.frame_length;
}
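
/* Hardware timestamping helpers: the timestamp-enable bit in the TX
 * descriptor requests a snapshot for that frame, and the captured time
 * is written back into the descriptor (des6/des7 with the extended
 * layout, des2/des3 otherwise) as nanoseconds plus seconds.
 */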
static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
{
        p->des01.etx.time_stamp_enable = 1;
}

static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
{
        return p->des01.etx.time_stamp_status;
}
static u64 enh_desc_get_timestamp(void *desc, u32 ats)
{
        u64 ns;

        if (ats) {
                struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
                ns = p->des6;
                /* convert high/sec time stamp value to nanosecond */
                ns += p->des7 * 1000000000ULL;
        } else {
                struct dma_desc *p = (struct dma_desc *)desc;
                ns = p->des2;
                ns += p->des3 * 1000000000ULL;
        }

        return ns;
}
static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
{
        if (ats) {
                struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
                return p->basic.des01.erx.ipc_csum_error;
        } else {
                struct dma_desc *p = (struct dma_desc *)desc;
                if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
                        /* timestamp is corrupted, hence don't store it */
                        return 0;
                else
                        return 1;
        }
}
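
/* Descriptor callbacks used by the stmmac core when the enhanced
 * descriptor layout is in use.
 */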
const struct stmmac_desc_ops enh_desc_ops = {
        .tx_status = enh_desc_get_tx_status,
        .rx_status = enh_desc_get_rx_status,
        .get_tx_len = enh_desc_get_tx_len,
        .init_rx_desc = enh_desc_init_rx_desc,
        .init_tx_desc = enh_desc_init_tx_desc,
        .get_tx_owner = enh_desc_get_tx_owner,
        .get_rx_owner = enh_desc_get_rx_owner,
        .release_tx_desc = enh_desc_release_tx_desc,
        .prepare_tx_desc = enh_desc_prepare_tx_desc,
        .clear_tx_ic = enh_desc_clear_tx_ic,
        .close_tx_desc = enh_desc_close_tx_desc,
        .get_tx_ls = enh_desc_get_tx_ls,
        .set_tx_owner = enh_desc_set_tx_owner,
        .set_rx_owner = enh_desc_set_rx_owner,
        .get_rx_frame_len = enh_desc_get_rx_frame_len,
        .rx_extended_status = enh_desc_get_ext_status,
        .enable_tx_timestamp = enh_desc_enable_tx_timestamp,
        .get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
        .get_timestamp = enh_desc_get_timestamp,
        .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
};