2 * linux/drivers/mmc/card/mmc_test.c
4 * Copyright 2007-2008 Pierre Ossman
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
12 #include <linux/mmc/core.h>
13 #include <linux/mmc/card.h>
14 #include <linux/mmc/host.h>
15 #include <linux/mmc/mmc.h>
16 #include <linux/slab.h>
18 #include <linux/scatterlist.h>
19 #include <linux/swap.h> /* For nr_free_buffer_pages() */
20 #include <linux/list.h>
22 #include <linux/debugfs.h>
23 #include <linux/uaccess.h>
24 #include <linux/seq_file.h>
25 #include <linux/module.h>
29 #define RESULT_UNSUP_HOST 2
30 #define RESULT_UNSUP_CARD 3
32 #define BUFFER_ORDER 2
33 #define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)
35 #define TEST_ALIGN_END 8
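/* Byte offsets 1..TEST_ALIGN_END - 1 are exercised by the badly-aligned tests */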
38 * Limit the test area size to the maximum MMC HC erase group size. Note that
39 * the maximum SD allocation unit size is just 4MiB.
41 #define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
44 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
45 * @page: first page in the allocation
46 * @order: order of the number of pages allocated
48 struct mmc_test_pages {
54 * struct mmc_test_mem - allocated memory.
55 * @arr: array of allocations
56 * @cnt: number of allocations
59 struct mmc_test_pages *arr;
64 * struct mmc_test_area - information for performance tests.
65 * @max_sz: test area size (in bytes)
66 * @dev_addr: address on card at which to do performance tests
67 * @max_tfr: maximum transfer size allowed by driver (in bytes)
68 * @max_segs: maximum segments allowed by driver in scatterlist @sg
69 * @max_seg_sz: maximum segment size allowed by driver
70 * @blocks: number of (512 byte) blocks currently mapped by @sg
71 * @sg_len: length of currently mapped scatterlist @sg
72 * @mem: allocated memory
75 struct mmc_test_area {
77 unsigned int dev_addr;
79 unsigned int max_segs;
80 unsigned int max_seg_sz;
83 struct mmc_test_mem *mem;
84 struct scatterlist *sg;
88 * struct mmc_test_transfer_result - transfer results for performance tests.
89 * @link: doubly-linked list
90 * @count: number of groups of sectors to check
91 * @sectors: number of sectors to check in one group
92 * @ts: time values of transfer
93 * @rate: calculated transfer rate
94 * @iops: I/O operations per second (times 100)
96 struct mmc_test_transfer_result {
97 struct list_head link;
106 * struct mmc_test_general_result - results for tests.
107 * @link: doubly-linked list
108 * @card: card under test
109 * @testcase: number of test case
110 * @result: result of test run
111 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
113 struct mmc_test_general_result {
114 struct list_head link;
115 struct mmc_card *card;
118 struct list_head tr_lst;
122 * struct mmc_test_dbgfs_file - debugfs related file.
123 * @link: doubly-linked list
124 * @card: card under test
125 * @file: file created under debugfs
127 struct mmc_test_dbgfs_file {
128 struct list_head link;
129 struct mmc_card *card;
134 * struct mmc_test_card - test information.
135 * @card: card under test
136 * @scratch: transfer buffer
137 * @buffer: transfer buffer
138 * @highmem: buffer for highmem tests
139 * @area: information for performance tests
140 * @gr: pointer to results of current testcase
142 struct mmc_test_card {
143 struct mmc_card *card;
145 u8 scratch[BUFFER_SIZE];
147 #ifdef CONFIG_HIGHMEM
148 struct page *highmem;
150 struct mmc_test_area area;
151 struct mmc_test_general_result *gr;
154 enum mmc_test_prep_media {
155 MMC_TEST_PREP_NONE = 0,
156 MMC_TEST_PREP_WRITE_FULL = 1 << 0,
157 MMC_TEST_PREP_ERASE = 1 << 1,
160 struct mmc_test_multiple_rw {
161 unsigned int *sg_len;
166 bool do_nonblock_req;
167 enum mmc_test_prep_media prepare;
170 struct mmc_test_async_req {
171 struct mmc_async_req areq;
172 struct mmc_test_card *test;
175 /*******************************************************************/
176 /* General helper functions */
177 /*******************************************************************/
180 * Configure correct block size in card
182 static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
184 return mmc_set_blocklen(test->card, size);
188 * Fill in the mmc_request structure given a set of transfer parameters.
190 static void mmc_test_prepare_mrq(struct mmc_test_card *test,
191 struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
192 unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
194 BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);
197 mrq->cmd->opcode = write ?
198 MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
200 mrq->cmd->opcode = write ?
201 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
204 mrq->cmd->arg = dev_addr;
205 if (!mmc_card_blockaddr(test->card))
208 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
213 mrq->stop->opcode = MMC_STOP_TRANSMISSION;
215 mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
218 mrq->data->blksz = blksz;
219 mrq->data->blocks = blocks;
220 mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
222 mrq->data->sg_len = sg_len;
224 mmc_set_data_timeout(mrq->data, test->card);
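/*
 * The card counts as busy while its R1 status shows it is not ready for
 * data or is still in the programming state.
 */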
227 static int mmc_test_busy(struct mmc_command *cmd)
229 return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
230 (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
234 * Wait for the card to finish the busy state
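 * by polling SEND_STATUS (CMD13) until the R1 status stops indicating busy.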
236 static int mmc_test_wait_busy(struct mmc_test_card *test)
239 struct mmc_command cmd = {0};
243 memset(&cmd, 0, sizeof(struct mmc_command));
245 cmd.opcode = MMC_SEND_STATUS;
246 cmd.arg = test->card->rca << 16;
247 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
249 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
253 if (!busy && mmc_test_busy(&cmd)) {
255 if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
256 pr_info("%s: Warning: Host did not "
257 "wait for busy state to end.\n",
258 mmc_hostname(test->card->host));
260 } while (mmc_test_busy(&cmd));
266 * Transfer a single sector of kernel addressable data
268 static int mmc_test_buffer_transfer(struct mmc_test_card *test,
269 u8 *buffer, unsigned addr, unsigned blksz, int write)
271 struct mmc_request mrq = {0};
272 struct mmc_command cmd = {0};
273 struct mmc_command stop = {0};
274 struct mmc_data data = {0};
276 struct scatterlist sg;
282 sg_init_one(&sg, buffer, blksz);
284 mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
286 mmc_wait_for_req(test->card->host, &mrq);
293 return mmc_test_wait_busy(test);
296 static void mmc_test_free_mem(struct mmc_test_mem *mem)
301 __free_pages(mem->arr[mem->cnt].page,
302 mem->arr[mem->cnt].order);
308 * Allocate a lot of memory, preferably max_sz but at least min_sz. In case
309 * there isn't much memory, do not exceed 1/16th of the total lowmem pages.
310 * Also do not exceed a maximum number of segments and try not to make
311 * segments much bigger than the maximum segment size.
313 static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
314 unsigned long max_sz,
315 unsigned int max_segs,
316 unsigned int max_seg_sz)
318 unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
319 unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
320 unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
321 unsigned long page_cnt = 0;
322 unsigned long limit = nr_free_buffer_pages() >> 4;
323 struct mmc_test_mem *mem;
325 if (max_page_cnt > limit)
326 max_page_cnt = limit;
327 if (min_page_cnt > max_page_cnt)
328 min_page_cnt = max_page_cnt;
330 if (max_seg_page_cnt > max_page_cnt)
331 max_seg_page_cnt = max_page_cnt;
333 if (max_segs > max_page_cnt)
334 max_segs = max_page_cnt;
336 mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
340 mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
345 while (max_page_cnt) {
348 gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
351 order = get_order(max_seg_page_cnt << PAGE_SHIFT);
353 page = alloc_pages(flags, order);
359 if (page_cnt < min_page_cnt)
363 mem->arr[mem->cnt].page = page;
364 mem->arr[mem->cnt].order = order;
366 if (max_page_cnt <= (1UL << order))
368 max_page_cnt -= 1UL << order;
369 page_cnt += 1UL << order;
370 if (mem->cnt >= max_segs) {
371 if (page_cnt < min_page_cnt)
380 mmc_test_free_mem(mem);
385 * Map memory into a scatterlist. Optionally allow the same memory to be
386 * mapped more than once.
388 static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
389 struct scatterlist *sglist, int repeat,
390 unsigned int max_segs, unsigned int max_seg_sz,
391 unsigned int *sg_len, int min_sg_len)
393 struct scatterlist *sg = NULL;
395 unsigned long sz = size;
397 sg_init_table(sglist, max_segs);
398 if (min_sg_len > max_segs)
399 min_sg_len = max_segs;
403 for (i = 0; i < mem->cnt; i++) {
404 unsigned long len = PAGE_SIZE << mem->arr[i].order;
406 if (min_sg_len && (size / min_sg_len < len))
407 len = ALIGN(size / min_sg_len, 512);
410 if (len > max_seg_sz)
418 sg_set_page(sg, mem->arr[i].page, len, 0);
424 } while (sz && repeat);
436 * Map memory into a scatterlist so that no pages are contiguous. Allow the
437 * same memory to be mapped more than once.
439 static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
441 struct scatterlist *sglist,
442 unsigned int max_segs,
443 unsigned int max_seg_sz,
444 unsigned int *sg_len)
446 struct scatterlist *sg = NULL;
447 unsigned int i = mem->cnt, cnt;
449 void *base, *addr, *last_addr = NULL;
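/*
 * Walk each allocation backwards one page at a time and skip any page that
 * is physically contiguous with the previously mapped one, so no two
 * consecutive scatterlist entries cover adjacent pages.
 */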
451 sg_init_table(sglist, max_segs);
455 base = page_address(mem->arr[--i].page);
456 cnt = 1 << mem->arr[i].order;
458 addr = base + PAGE_SIZE * --cnt;
459 if (last_addr && last_addr + PAGE_SIZE == addr)
463 if (len > max_seg_sz)
473 sg_set_page(sg, virt_to_page(addr), len, 0);
488 * Calculate transfer rate in bytes per second.
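 * Both the byte count and the elapsed nanoseconds are halved together until
 * the time fits in 32 bits, so that do_div() can perform the division.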
490 static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
500 while (ns > UINT_MAX) {
508 do_div(bytes, (uint32_t)ns);
514 * Save transfer results for future usage
516 static void mmc_test_save_transfer_result(struct mmc_test_card *test,
517 unsigned int count, unsigned int sectors, struct timespec ts,
518 unsigned int rate, unsigned int iops)
520 struct mmc_test_transfer_result *tr;
525 tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
530 tr->sectors = sectors;
535 list_add_tail(&tr->link, &test->gr->tr_lst);
539 * Print the transfer rate.
541 static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
542 struct timespec *ts1, struct timespec *ts2)
544 unsigned int rate, iops, sectors = bytes >> 9;
547 ts = timespec_sub(*ts2, *ts1);
549 rate = mmc_test_rate(bytes, &ts);
550 iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
552 pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
553 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
554 mmc_hostname(test->card->host), sectors, sectors >> 1,
555 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
556 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
557 iops / 100, iops % 100);
559 mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
563 * Print the average transfer rate.
565 static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
566 unsigned int count, struct timespec *ts1,
567 struct timespec *ts2)
569 unsigned int rate, iops, sectors = bytes >> 9;
570 uint64_t tot = bytes * count;
573 ts = timespec_sub(*ts2, *ts1);
575 rate = mmc_test_rate(tot, &ts);
576 iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
578 pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
579 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
580 "%u.%02u IOPS, sg_len %d)\n",
581 mmc_hostname(test->card->host), count, sectors, count,
582 sectors >> 1, (sectors & 1 ? ".5" : ""),
583 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
584 rate / 1000, rate / 1024, iops / 100, iops % 100,
587 mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
591 * Return the card size in sectors.
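 * Block-addressed (non-SD) cards report the size directly in EXT_CSD; for
 * other cards it is derived from the CSD capacity and read block length.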
593 static unsigned int mmc_test_capacity(struct mmc_card *card)
595 if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
596 return card->ext_csd.sectors;
598 return card->csd.capacity << (card->csd.read_blkbits - 9);
601 /*******************************************************************/
602 /* Test preparation and cleanup */
603 /*******************************************************************/
606 * Fill the first couple of sectors of the card with known data
607 * so that bad reads/writes can be detected
609 static int __mmc_test_prepare(struct mmc_test_card *test, int write)
613 ret = mmc_test_set_blksize(test, 512);
618 memset(test->buffer, 0xDF, 512);
620 for (i = 0;i < 512;i++)
624 for (i = 0;i < BUFFER_SIZE / 512;i++) {
625 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
633 static int mmc_test_prepare_write(struct mmc_test_card *test)
635 return __mmc_test_prepare(test, 1);
638 static int mmc_test_prepare_read(struct mmc_test_card *test)
640 return __mmc_test_prepare(test, 0);
643 static int mmc_test_cleanup(struct mmc_test_card *test)
647 ret = mmc_test_set_blksize(test, 512);
651 memset(test->buffer, 0, 512);
653 for (i = 0;i < BUFFER_SIZE / 512;i++) {
654 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
662 /*******************************************************************/
663 /* Test execution helpers */
664 /*******************************************************************/
667 * Modifies the mmc_request to perform the "short transfer" tests
669 static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
670 struct mmc_request *mrq, int write)
672 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
674 if (mrq->data->blocks > 1) {
675 mrq->cmd->opcode = write ?
676 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
679 mrq->cmd->opcode = MMC_SEND_STATUS;
680 mrq->cmd->arg = test->card->rca << 16;
685 * Checks that a normal transfer didn't have any errors
687 static int mmc_test_check_result(struct mmc_test_card *test,
688 struct mmc_request *mrq)
692 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
696 if (!ret && mrq->cmd->error)
697 ret = mrq->cmd->error;
698 if (!ret && mrq->data->error)
699 ret = mrq->data->error;
700 if (!ret && mrq->stop && mrq->stop->error)
701 ret = mrq->stop->error;
702 if (!ret && mrq->data->bytes_xfered !=
703 mrq->data->blocks * mrq->data->blksz)
707 ret = RESULT_UNSUP_HOST;
712 static int mmc_test_check_result_async(struct mmc_card *card,
713 struct mmc_async_req *areq)
715 struct mmc_test_async_req *test_async =
716 container_of(areq, struct mmc_test_async_req, areq);
718 mmc_test_wait_busy(test_async->test);
720 return mmc_test_check_result(test_async->test, areq->mrq);
724 * Checks that a "short transfer" behaved as expected
726 static int mmc_test_check_broken_result(struct mmc_test_card *test,
727 struct mmc_request *mrq)
731 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
735 if (!ret && mrq->cmd->error)
736 ret = mrq->cmd->error;
737 if (!ret && mrq->data->error == 0)
739 if (!ret && mrq->data->error != -ETIMEDOUT)
740 ret = mrq->data->error;
741 if (!ret && mrq->stop && mrq->stop->error)
742 ret = mrq->stop->error;
743 if (mrq->data->blocks > 1) {
744 if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
747 if (!ret && mrq->data->bytes_xfered > 0)
752 ret = RESULT_UNSUP_HOST;
758 * Tests nonblock transfer with certain parameters
760 static void mmc_test_nonblock_reset(struct mmc_request *mrq,
761 struct mmc_command *cmd,
762 struct mmc_command *stop,
763 struct mmc_data *data)
765 memset(mrq, 0, sizeof(struct mmc_request));
766 memset(cmd, 0, sizeof(struct mmc_command));
767 memset(data, 0, sizeof(struct mmc_data));
768 memset(stop, 0, sizeof(struct mmc_command));
774 static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
775 struct scatterlist *sg, unsigned sg_len,
776 unsigned dev_addr, unsigned blocks,
777 unsigned blksz, int write, int count)
779 struct mmc_request mrq1;
780 struct mmc_command cmd1;
781 struct mmc_command stop1;
782 struct mmc_data data1;
784 struct mmc_request mrq2;
785 struct mmc_command cmd2;
786 struct mmc_command stop2;
787 struct mmc_data data2;
789 struct mmc_test_async_req test_areq[2];
790 struct mmc_async_req *done_areq;
791 struct mmc_async_req *cur_areq = &test_areq[0].areq;
792 struct mmc_async_req *other_areq = &test_areq[1].areq;
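/*
 * Two requests are kept in flight: mmc_start_req() hands back the previously
 * issued request once it completes, that request is reset and re-prepared
 * while the new one runs, and the roles swap on every iteration. A final
 * call with a NULL request flushes the last transfer.
 */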
796 test_areq[0].test = test;
797 test_areq[1].test = test;
799 mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
800 mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);
802 cur_areq->mrq = &mrq1;
803 cur_areq->err_check = mmc_test_check_result_async;
804 other_areq->mrq = &mrq2;
805 other_areq->err_check = mmc_test_check_result_async;
807 for (i = 0; i < count; i++) {
808 mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
809 blocks, blksz, write);
810 done_areq = mmc_start_req(test->card->host, cur_areq, &ret);
812 if (ret || (!done_areq && i > 0))
816 if (done_areq->mrq == &mrq2)
817 mmc_test_nonblock_reset(&mrq2, &cmd2,
820 mmc_test_nonblock_reset(&mrq1, &cmd1,
823 swap(cur_areq, other_areq);
827 done_areq = mmc_start_req(test->card->host, NULL, &ret);
835 * Tests a basic transfer with certain parameters
837 static int mmc_test_simple_transfer(struct mmc_test_card *test,
838 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
839 unsigned blocks, unsigned blksz, int write)
841 struct mmc_request mrq = {0};
842 struct mmc_command cmd = {0};
843 struct mmc_command stop = {0};
844 struct mmc_data data = {0};
850 mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
851 blocks, blksz, write);
853 mmc_wait_for_req(test->card->host, &mrq);
855 mmc_test_wait_busy(test);
857 return mmc_test_check_result(test, &mrq);
861 * Tests a transfer where the card will fail completely or partially
863 static int mmc_test_broken_transfer(struct mmc_test_card *test,
864 unsigned blocks, unsigned blksz, int write)
866 struct mmc_request mrq = {0};
867 struct mmc_command cmd = {0};
868 struct mmc_command stop = {0};
869 struct mmc_data data = {0};
871 struct scatterlist sg;
877 sg_init_one(&sg, test->buffer, blocks * blksz);
879 mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
880 mmc_test_prepare_broken_mrq(test, &mrq, write);
882 mmc_wait_for_req(test->card->host, &mrq);
884 mmc_test_wait_busy(test);
886 return mmc_test_check_broken_result(test, &mrq);
890 * Does a complete transfer test where data is also validated
892 * Note: mmc_test_prepare() must have been done before this call
894 static int mmc_test_transfer(struct mmc_test_card *test,
895 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
896 unsigned blocks, unsigned blksz, int write)
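/*
 * For a write test the scratch buffer is seeded with a counting pattern
 * (0, 1, 2, ...) that is read back from the card and verified below; for a
 * read test the buffer is zeroed and must end up holding the pattern that
 * the prepare stage wrote to the card.
 */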
902 for (i = 0;i < blocks * blksz;i++)
903 test->scratch[i] = i;
905 memset(test->scratch, 0, BUFFER_SIZE);
907 local_irq_save(flags);
908 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
909 local_irq_restore(flags);
911 ret = mmc_test_set_blksize(test, blksz);
915 ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
916 blocks, blksz, write);
923 ret = mmc_test_set_blksize(test, 512);
927 sectors = (blocks * blksz + 511) / 512;
928 if ((sectors * 512) == (blocks * blksz))
931 if ((sectors * 512) > BUFFER_SIZE)
934 memset(test->buffer, 0, sectors * 512);
936 for (i = 0;i < sectors;i++) {
937 ret = mmc_test_buffer_transfer(test,
938 test->buffer + i * 512,
939 dev_addr + i, 512, 0);
944 for (i = 0;i < blocks * blksz;i++) {
945 if (test->buffer[i] != (u8)i)
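/* Any padding up to the next sector boundary must still hold the 0xDF
 * fill pattern written by __mmc_test_prepare(). */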
949 for (;i < sectors * 512;i++) {
950 if (test->buffer[i] != 0xDF)
954 local_irq_save(flags);
955 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
956 local_irq_restore(flags);
957 for (i = 0;i < blocks * blksz;i++) {
958 if (test->scratch[i] != (u8)i)
966 /*******************************************************************/
968 /*******************************************************************/
970 struct mmc_test_case {
973 int (*prepare)(struct mmc_test_card *);
974 int (*run)(struct mmc_test_card *);
975 int (*cleanup)(struct mmc_test_card *);
978 static int mmc_test_basic_write(struct mmc_test_card *test)
981 struct scatterlist sg;
983 ret = mmc_test_set_blksize(test, 512);
987 sg_init_one(&sg, test->buffer, 512);
989 return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
992 static int mmc_test_basic_read(struct mmc_test_card *test)
995 struct scatterlist sg;
997 ret = mmc_test_set_blksize(test, 512);
1001 sg_init_one(&sg, test->buffer, 512);
1003 return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
1006 static int mmc_test_verify_write(struct mmc_test_card *test)
1008 struct scatterlist sg;
1010 sg_init_one(&sg, test->buffer, 512);
1012 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1015 static int mmc_test_verify_read(struct mmc_test_card *test)
1017 struct scatterlist sg;
1019 sg_init_one(&sg, test->buffer, 512);
1021 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1024 static int mmc_test_multi_write(struct mmc_test_card *test)
1027 struct scatterlist sg;
1029 if (test->card->host->max_blk_count == 1)
1030 return RESULT_UNSUP_HOST;
1032 size = PAGE_SIZE * 2;
1033 size = min(size, test->card->host->max_req_size);
1034 size = min(size, test->card->host->max_seg_size);
1035 size = min(size, test->card->host->max_blk_count * 512);
1038 return RESULT_UNSUP_HOST;
1040 sg_init_one(&sg, test->buffer, size);
1042 return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1045 static int mmc_test_multi_read(struct mmc_test_card *test)
1048 struct scatterlist sg;
1050 if (test->card->host->max_blk_count == 1)
1051 return RESULT_UNSUP_HOST;
1053 size = PAGE_SIZE * 2;
1054 size = min(size, test->card->host->max_req_size);
1055 size = min(size, test->card->host->max_seg_size);
1056 size = min(size, test->card->host->max_blk_count * 512);
1059 return RESULT_UNSUP_HOST;
1061 sg_init_one(&sg, test->buffer, size);
1063 return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1066 static int mmc_test_pow2_write(struct mmc_test_card *test)
1069 struct scatterlist sg;
1071 if (!test->card->csd.write_partial)
1072 return RESULT_UNSUP_CARD;
1074 for (i = 1; i < 512;i <<= 1) {
1075 sg_init_one(&sg, test->buffer, i);
1076 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1084 static int mmc_test_pow2_read(struct mmc_test_card *test)
1087 struct scatterlist sg;
1089 if (!test->card->csd.read_partial)
1090 return RESULT_UNSUP_CARD;
1092 for (i = 1; i < 512;i <<= 1) {
1093 sg_init_one(&sg, test->buffer, i);
1094 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1102 static int mmc_test_weird_write(struct mmc_test_card *test)
1105 struct scatterlist sg;
1107 if (!test->card->csd.write_partial)
1108 return RESULT_UNSUP_CARD;
1110 for (i = 3; i < 512;i += 7) {
1111 sg_init_one(&sg, test->buffer, i);
1112 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1120 static int mmc_test_weird_read(struct mmc_test_card *test)
1123 struct scatterlist sg;
1125 if (!test->card->csd.read_partial)
1126 return RESULT_UNSUP_CARD;
1128 for (i = 3; i < 512;i += 7) {
1129 sg_init_one(&sg, test->buffer, i);
1130 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1138 static int mmc_test_align_write(struct mmc_test_card *test)
1141 struct scatterlist sg;
1143 for (i = 1; i < TEST_ALIGN_END; i++) {
1144 sg_init_one(&sg, test->buffer + i, 512);
1145 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1153 static int mmc_test_align_read(struct mmc_test_card *test)
1156 struct scatterlist sg;
1158 for (i = 1; i < TEST_ALIGN_END; i++) {
1159 sg_init_one(&sg, test->buffer + i, 512);
1160 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1168 static int mmc_test_align_multi_write(struct mmc_test_card *test)
1172 struct scatterlist sg;
1174 if (test->card->host->max_blk_count == 1)
1175 return RESULT_UNSUP_HOST;
1177 size = PAGE_SIZE * 2;
1178 size = min(size, test->card->host->max_req_size);
1179 size = min(size, test->card->host->max_seg_size);
1180 size = min(size, test->card->host->max_blk_count * 512);
1183 return RESULT_UNSUP_HOST;
1185 for (i = 1; i < TEST_ALIGN_END; i++) {
1186 sg_init_one(&sg, test->buffer + i, size);
1187 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1195 static int mmc_test_align_multi_read(struct mmc_test_card *test)
1199 struct scatterlist sg;
1201 if (test->card->host->max_blk_count == 1)
1202 return RESULT_UNSUP_HOST;
1204 size = PAGE_SIZE * 2;
1205 size = min(size, test->card->host->max_req_size);
1206 size = min(size, test->card->host->max_seg_size);
1207 size = min(size, test->card->host->max_blk_count * 512);
1210 return RESULT_UNSUP_HOST;
1212 for (i = 1; i < TEST_ALIGN_END; i++) {
1213 sg_init_one(&sg, test->buffer + i, size);
1214 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1222 static int mmc_test_xfersize_write(struct mmc_test_card *test)
1226 ret = mmc_test_set_blksize(test, 512);
1230 return mmc_test_broken_transfer(test, 1, 512, 1);
1233 static int mmc_test_xfersize_read(struct mmc_test_card *test)
1237 ret = mmc_test_set_blksize(test, 512);
1241 return mmc_test_broken_transfer(test, 1, 512, 0);
1244 static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
1248 if (test->card->host->max_blk_count == 1)
1249 return RESULT_UNSUP_HOST;
1251 ret = mmc_test_set_blksize(test, 512);
1255 return mmc_test_broken_transfer(test, 2, 512, 1);
1258 static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
1262 if (test->card->host->max_blk_count == 1)
1263 return RESULT_UNSUP_HOST;
1265 ret = mmc_test_set_blksize(test, 512);
1269 return mmc_test_broken_transfer(test, 2, 512, 0);
1272 #ifdef CONFIG_HIGHMEM
1274 static int mmc_test_write_high(struct mmc_test_card *test)
1276 struct scatterlist sg;
1278 sg_init_table(&sg, 1);
1279 sg_set_page(&sg, test->highmem, 512, 0);
1281 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1284 static int mmc_test_read_high(struct mmc_test_card *test)
1286 struct scatterlist sg;
1288 sg_init_table(&sg, 1);
1289 sg_set_page(&sg, test->highmem, 512, 0);
1291 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1294 static int mmc_test_multi_write_high(struct mmc_test_card *test)
1297 struct scatterlist sg;
1299 if (test->card->host->max_blk_count == 1)
1300 return RESULT_UNSUP_HOST;
1302 size = PAGE_SIZE * 2;
1303 size = min(size, test->card->host->max_req_size);
1304 size = min(size, test->card->host->max_seg_size);
1305 size = min(size, test->card->host->max_blk_count * 512);
1308 return RESULT_UNSUP_HOST;
1310 sg_init_table(&sg, 1);
1311 sg_set_page(&sg, test->highmem, size, 0);
1313 return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1316 static int mmc_test_multi_read_high(struct mmc_test_card *test)
1319 struct scatterlist sg;
1321 if (test->card->host->max_blk_count == 1)
1322 return RESULT_UNSUP_HOST;
1324 size = PAGE_SIZE * 2;
1325 size = min(size, test->card->host->max_req_size);
1326 size = min(size, test->card->host->max_seg_size);
1327 size = min(size, test->card->host->max_blk_count * 512);
1330 return RESULT_UNSUP_HOST;
1332 sg_init_table(&sg, 1);
1333 sg_set_page(&sg, test->highmem, size, 0);
1335 return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1340 static int mmc_test_no_highmem(struct mmc_test_card *test)
1342 pr_info("%s: Highmem not configured - test skipped\n",
1343 mmc_hostname(test->card->host));
1347 #endif /* CONFIG_HIGHMEM */
1350 * Map sz bytes so that they can be transferred.
1352 static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
1353 int max_scatter, int min_sg_len)
1355 struct mmc_test_area *t = &test->area;
1358 t->blocks = sz >> 9;
1361 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1362 t->max_segs, t->max_seg_sz,
1365 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1366 t->max_seg_sz, &t->sg_len, min_sg_len);
1369 pr_info("%s: Failed to map sg list\n",
1370 mmc_hostname(test->card->host));
1375 * Transfer bytes mapped by mmc_test_area_map().
1377 static int mmc_test_area_transfer(struct mmc_test_card *test,
1378 unsigned int dev_addr, int write)
1380 struct mmc_test_area *t = &test->area;
1382 return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
1383 t->blocks, 512, write);
1387 * Map and transfer bytes for multiple transfers.
1389 static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
1390 unsigned int dev_addr, int write,
1391 int max_scatter, int timed, int count,
1392 bool nonblock, int min_sg_len)
1394 struct timespec ts1, ts2;
1397 struct mmc_test_area *t = &test->area;
1400 * In the case of a maximally scattered transfer, the maximum transfer
1401 * size is further limited by using PAGE_SIZE segments.
1404 struct mmc_test_area *t = &test->area;
1405 unsigned long max_tfr;
1407 if (t->max_seg_sz >= PAGE_SIZE)
1408 max_tfr = t->max_segs * PAGE_SIZE;
1410 max_tfr = t->max_segs * t->max_seg_sz;
1415 ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
1420 getnstimeofday(&ts1);
1422 ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
1423 dev_addr, t->blocks, 512, write, count);
1425 for (i = 0; i < count && ret == 0; i++) {
1426 ret = mmc_test_area_transfer(test, dev_addr, write);
1427 dev_addr += sz >> 9;
1434 getnstimeofday(&ts2);
1437 mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
1442 static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1443 unsigned int dev_addr, int write, int max_scatter,
1446 return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
1447 timed, 1, false, 0);
1451 * Write the test area entirely.
1453 static int mmc_test_area_fill(struct mmc_test_card *test)
1455 struct mmc_test_area *t = &test->area;
1457 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
1461 * Erase the test area entirely.
1463 static int mmc_test_area_erase(struct mmc_test_card *test)
1465 struct mmc_test_area *t = &test->area;
1467 if (!mmc_can_erase(test->card))
1470 return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
1475 * Cleanup struct mmc_test_area.
1477 static int mmc_test_area_cleanup(struct mmc_test_card *test)
1479 struct mmc_test_area *t = &test->area;
1482 mmc_test_free_mem(t->mem);
1488 * Initialize an area for testing large transfers. The test area is set to the
1489 * middle of the card because cards may have different characteristics at the
1490 * front (for FAT file system optimization). Optionally, the area is erased
1491 * (if the card supports it) which may improve write performance. Optionally,
1492 * the area is filled with data for subsequent read tests.
1494 static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
1496 struct mmc_test_area *t = &test->area;
1497 unsigned long min_sz = 64 * 1024, sz;
1500 ret = mmc_test_set_blksize(test, 512);
1504 /* Make the test area size about 4MiB */
1505 sz = (unsigned long)test->card->pref_erase << 9;
1507 while (t->max_sz < 4 * 1024 * 1024)
1509 while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
1512 t->max_segs = test->card->host->max_segs;
1513 t->max_seg_sz = test->card->host->max_seg_size;
1514 t->max_seg_sz -= t->max_seg_sz % 512;
1516 t->max_tfr = t->max_sz;
1517 if (t->max_tfr >> 9 > test->card->host->max_blk_count)
1518 t->max_tfr = test->card->host->max_blk_count << 9;
1519 if (t->max_tfr > test->card->host->max_req_size)
1520 t->max_tfr = test->card->host->max_req_size;
1521 if (t->max_tfr / t->max_seg_sz > t->max_segs)
1522 t->max_tfr = t->max_segs * t->max_seg_sz;
1525 * Try to allocate enough memory for a maximum-sized transfer. Less is OK
1526 * because the same memory can be mapped into the scatterlist more than
1527 * once. Also, take into account the limits imposed on scatterlist
1528 * segments by the host driver.
1530 t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
1535 t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
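/*
 * Place the test area halfway into the card and align its start down to a
 * whole multiple of the test area size (in sectors).
 */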
1541 t->dev_addr = mmc_test_capacity(test->card) / 2;
1542 t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
1545 ret = mmc_test_area_erase(test);
1551 ret = mmc_test_area_fill(test);
1559 mmc_test_area_cleanup(test);
1564 * Prepare for large transfers. Do not erase the test area.
1566 static int mmc_test_area_prepare(struct mmc_test_card *test)
1568 return mmc_test_area_init(test, 0, 0);
1572 * Prepare for large transfers. Do erase the test area.
1574 static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
1576 return mmc_test_area_init(test, 1, 0);
1580 * Prepare for large transfers. Erase and fill the test area.
1582 static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
1584 return mmc_test_area_init(test, 1, 1);
1588 * Test best-case performance. Best-case performance is expected from
1589 * a single large transfer.
1591 * An additional option (max_scatter) allows the measurement of the same
1592 * transfer but with no contiguous pages in the scatter list. This tests
1593 * how efficiently DMA handles scattered pages.
1595 static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1598 struct mmc_test_area *t = &test->area;
1600 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
1605 * Best-case read performance.
1607 static int mmc_test_best_read_performance(struct mmc_test_card *test)
1609 return mmc_test_best_performance(test, 0, 0);
1613 * Best-case write performance.
1615 static int mmc_test_best_write_performance(struct mmc_test_card *test)
1617 return mmc_test_best_performance(test, 1, 0);
1621 * Best-case read performance into scattered pages.
1623 static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
1625 return mmc_test_best_performance(test, 0, 1);
1629 * Best-case write performance from scattered pages.
1631 static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
1633 return mmc_test_best_performance(test, 1, 1);
1637 * Single read performance by transfer size.
1639 static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1641 struct mmc_test_area *t = &test->area;
1643 unsigned int dev_addr;
1646 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1647 dev_addr = t->dev_addr + (sz >> 9);
1648 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1653 dev_addr = t->dev_addr;
1654 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1658 * Single write performance by transfer size.
1660 static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1662 struct mmc_test_area *t = &test->area;
1664 unsigned int dev_addr;
1667 ret = mmc_test_area_erase(test);
1670 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1671 dev_addr = t->dev_addr + (sz >> 9);
1672 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1676 ret = mmc_test_area_erase(test);
1680 dev_addr = t->dev_addr;
1681 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1685 * Single trim performance by transfer size.
1687 static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1689 struct mmc_test_area *t = &test->area;
1691 unsigned int dev_addr;
1692 struct timespec ts1, ts2;
1695 if (!mmc_can_trim(test->card))
1696 return RESULT_UNSUP_CARD;
1698 if (!mmc_can_erase(test->card))
1699 return RESULT_UNSUP_HOST;
1701 for (sz = 512; sz < t->max_sz; sz <<= 1) {
1702 dev_addr = t->dev_addr + (sz >> 9);
1703 getnstimeofday(&ts1);
1704 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1707 getnstimeofday(&ts2);
1708 mmc_test_print_rate(test, sz, &ts1, &ts2);
1710 dev_addr = t->dev_addr;
1711 getnstimeofday(&ts1);
1712 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1715 getnstimeofday(&ts2);
1716 mmc_test_print_rate(test, sz, &ts1, &ts2);
1720 static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1722 struct mmc_test_area *t = &test->area;
1723 unsigned int dev_addr, i, cnt;
1724 struct timespec ts1, ts2;
1727 cnt = t->max_sz / sz;
1728 dev_addr = t->dev_addr;
1729 getnstimeofday(&ts1);
1730 for (i = 0; i < cnt; i++) {
1731 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1734 dev_addr += (sz >> 9);
1736 getnstimeofday(&ts2);
1737 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1742 * Consecutive read performance by transfer size.
1744 static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1746 struct mmc_test_area *t = &test->area;
1750 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1751 ret = mmc_test_seq_read_perf(test, sz);
1756 return mmc_test_seq_read_perf(test, sz);
1759 static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1761 struct mmc_test_area *t = &test->area;
1762 unsigned int dev_addr, i, cnt;
1763 struct timespec ts1, ts2;
1766 ret = mmc_test_area_erase(test);
1769 cnt = t->max_sz / sz;
1770 dev_addr = t->dev_addr;
1771 getnstimeofday(&ts1);
1772 for (i = 0; i < cnt; i++) {
1773 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1776 dev_addr += (sz >> 9);
1778 getnstimeofday(&ts2);
1779 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1784 * Consecutive write performance by transfer size.
1786 static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1788 struct mmc_test_area *t = &test->area;
1792 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1793 ret = mmc_test_seq_write_perf(test, sz);
1798 return mmc_test_seq_write_perf(test, sz);
1802 * Consecutive trim performance by transfer size.
1804 static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1806 struct mmc_test_area *t = &test->area;
1808 unsigned int dev_addr, i, cnt;
1809 struct timespec ts1, ts2;
1812 if (!mmc_can_trim(test->card))
1813 return RESULT_UNSUP_CARD;
1815 if (!mmc_can_erase(test->card))
1816 return RESULT_UNSUP_HOST;
1818 for (sz = 512; sz <= t->max_sz; sz <<= 1) {
1819 ret = mmc_test_area_erase(test);
1822 ret = mmc_test_area_fill(test);
1825 cnt = t->max_sz / sz;
1826 dev_addr = t->dev_addr;
1827 getnstimeofday(&ts1);
1828 for (i = 0; i < cnt; i++) {
1829 ret = mmc_erase(test->card, dev_addr, sz >> 9,
1833 dev_addr += (sz >> 9);
1835 getnstimeofday(&ts2);
1836 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1841 static unsigned int rnd_next = 1;
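/*
 * Minimal linear congruential generator (the classic ANSI C rand()
 * constants); mmc_test_rnd_num() maps its output into [0, rnd_cnt).
 */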
1843 static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
1847 rnd_next = rnd_next * 1103515245 + 12345;
1848 r = (rnd_next >> 16) & 0x7fff;
1849 return (r * rnd_cnt) >> 15;
1852 static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
1855 unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
1857 struct timespec ts1, ts2, ts;
1862 rnd_addr = mmc_test_capacity(test->card) / 4;
1863 range1 = rnd_addr / test->card->pref_erase;
1864 range2 = range1 / ssz;
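/*
 * Transfers hit random addresses in the region starting one quarter of the
 * way into the card, and each transfer size is exercised for roughly ten
 * seconds.
 */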
1866 getnstimeofday(&ts1);
1867 for (cnt = 0; cnt < UINT_MAX; cnt++) {
1868 getnstimeofday(&ts2);
1869 ts = timespec_sub(ts2, ts1);
1870 if (ts.tv_sec >= 10)
1872 ea = mmc_test_rnd_num(range1);
1876 dev_addr = rnd_addr + test->card->pref_erase * ea +
1877 ssz * mmc_test_rnd_num(range2);
1878 ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
1883 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1887 static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1889 struct mmc_test_area *t = &test->area;
1894 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1896 * When writing, try to get more consistent results by running
1897 * the test twice with exactly the same I/O but outputting the
1898 * results only for the 2nd run.
1902 ret = mmc_test_rnd_perf(test, write, 0, sz);
1907 ret = mmc_test_rnd_perf(test, write, 1, sz);
1914 ret = mmc_test_rnd_perf(test, write, 0, sz);
1919 return mmc_test_rnd_perf(test, write, 1, sz);
1923 * Random read performance by transfer size.
1925 static int mmc_test_random_read_perf(struct mmc_test_card *test)
1927 return mmc_test_random_perf(test, 0);
1931 * Random write performance by transfer size.
1933 static int mmc_test_random_write_perf(struct mmc_test_card *test)
1935 return mmc_test_random_perf(test, 1);
1938 static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
1939 unsigned int tot_sz, int max_scatter)
1941 struct mmc_test_area *t = &test->area;
1942 unsigned int dev_addr, i, cnt, sz, ssz;
1943 struct timespec ts1, ts2;
1949 * In the case of a maximally scattered transfer, the maximum transfer
1950 * size is further limited by using PAGE_SIZE segments.
1953 unsigned long max_tfr;
1955 if (t->max_seg_sz >= PAGE_SIZE)
1956 max_tfr = t->max_segs * PAGE_SIZE;
1958 max_tfr = t->max_segs * t->max_seg_sz;
1964 dev_addr = mmc_test_capacity(test->card) / 4;
1965 if (tot_sz > dev_addr << 9)
1966 tot_sz = dev_addr << 9;
1968 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
1970 getnstimeofday(&ts1);
1971 for (i = 0; i < cnt; i++) {
1972 ret = mmc_test_area_io(test, sz, dev_addr, write,
1978 getnstimeofday(&ts2);
1980 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1985 static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
1989 for (i = 0; i < 10; i++) {
1990 ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
1994 for (i = 0; i < 5; i++) {
1995 ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
1999 for (i = 0; i < 3; i++) {
2000 ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
2009 * Large sequential read performance.
2011 static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
2013 return mmc_test_large_seq_perf(test, 0);
2017 * Large sequential write performance.
2019 static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
2021 return mmc_test_large_seq_perf(test, 1);
2024 static int mmc_test_rw_multiple(struct mmc_test_card *test,
2025 struct mmc_test_multiple_rw *tdata,
2026 unsigned int reqsize, unsigned int size,
2029 unsigned int dev_addr;
2030 struct mmc_test_area *t = &test->area;
2033 /* Set up test area */
2034 if (size > mmc_test_capacity(test->card) / 2 * 512)
2035 size = mmc_test_capacity(test->card) / 2 * 512;
2036 if (reqsize > t->max_tfr)
2037 reqsize = t->max_tfr;
2038 dev_addr = mmc_test_capacity(test->card) / 4;
2039 if ((dev_addr & 0xffff0000))
2040 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
2042 dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
2049 /* prepare test area */
2050 if (mmc_can_erase(test->card) &&
2051 tdata->prepare & MMC_TEST_PREP_ERASE) {
2052 ret = mmc_erase(test->card, dev_addr,
2053 size / 512, MMC_SECURE_ERASE_ARG);
2055 ret = mmc_erase(test->card, dev_addr,
2056 size / 512, MMC_ERASE_ARG);
2062 ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
2063 tdata->do_write, 0, 1, size / reqsize,
2064 tdata->do_nonblock_req, min_sg_len);
2070 pr_info("[%s] error\n", __func__);
2074 static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
2075 struct mmc_test_multiple_rw *rw)
2079 void *pre_req = test->card->host->ops->pre_req;
2080 void *post_req = test->card->host->ops->post_req;
2082 if (rw->do_nonblock_req &&
2083 ((!pre_req && post_req) || (pre_req && !post_req))) {
2084 pr_info("error: only one of pre/post is defined\n");
2088 for (i = 0 ; i < rw->len && ret == 0; i++) {
2089 ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
2096 static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
2097 struct mmc_test_multiple_rw *rw)
2102 for (i = 0 ; i < rw->len && ret == 0; i++) {
2103 ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
2112 * Multiple blocking write 4k to 4 MB chunks
2114 static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
2116 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2117 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
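/* Request sizes: 4 KiB up to 1 MiB in powers of two, plus 4 MiB */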
2118 struct mmc_test_multiple_rw test_data = {
2120 .size = TEST_AREA_MAX_SIZE,
2121 .len = ARRAY_SIZE(bs),
2123 .do_nonblock_req = false,
2124 .prepare = MMC_TEST_PREP_ERASE,
2127 return mmc_test_rw_multiple_size(test, &test_data);
2131 * Multiple non-blocking write 4k to 4 MB chunks
2133 static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
2135 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2136 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2137 struct mmc_test_multiple_rw test_data = {
2139 .size = TEST_AREA_MAX_SIZE,
2140 .len = ARRAY_SIZE(bs),
2142 .do_nonblock_req = true,
2143 .prepare = MMC_TEST_PREP_ERASE,
2146 return mmc_test_rw_multiple_size(test, &test_data);
2150 * Multiple blocking read 4k to 4 MB chunks
2152 static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
2154 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2155 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2156 struct mmc_test_multiple_rw test_data = {
2158 .size = TEST_AREA_MAX_SIZE,
2159 .len = ARRAY_SIZE(bs),
2161 .do_nonblock_req = false,
2162 .prepare = MMC_TEST_PREP_NONE,
2165 return mmc_test_rw_multiple_size(test, &test_data);
2169 * Multiple non-blocking read 4k to 4 MB chunks
2171 static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
2173 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2174 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2175 struct mmc_test_multiple_rw test_data = {
2177 .size = TEST_AREA_MAX_SIZE,
2178 .len = ARRAY_SIZE(bs),
2180 .do_nonblock_req = true,
2181 .prepare = MMC_TEST_PREP_NONE,
2184 return mmc_test_rw_multiple_size(test, &test_data);
2188 * Multiple blocking write 1 to 512 sg elements
2190 static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
2192 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2193 1 << 7, 1 << 8, 1 << 9};
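/* Scatterlist lengths: 1, then 8 up to 512 in powers of two */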
2194 struct mmc_test_multiple_rw test_data = {
2196 .size = TEST_AREA_MAX_SIZE,
2197 .len = ARRAY_SIZE(sg_len),
2199 .do_nonblock_req = false,
2200 .prepare = MMC_TEST_PREP_ERASE,
2203 return mmc_test_rw_multiple_sg_len(test, &test_data);
2207 * Multiple non-blocking write 1 to 512 sg elements
2209 static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
2211 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2212 1 << 7, 1 << 8, 1 << 9};
2213 struct mmc_test_multiple_rw test_data = {
2215 .size = TEST_AREA_MAX_SIZE,
2216 .len = ARRAY_SIZE(sg_len),
2218 .do_nonblock_req = true,
2219 .prepare = MMC_TEST_PREP_ERASE,
2222 return mmc_test_rw_multiple_sg_len(test, &test_data);
2226 * Multiple blocking read 1 to 512 sg elements
2228 static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
2230 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2231 1 << 7, 1 << 8, 1 << 9};
2232 struct mmc_test_multiple_rw test_data = {
2234 .size = TEST_AREA_MAX_SIZE,
2235 .len = ARRAY_SIZE(sg_len),
2237 .do_nonblock_req = false,
2238 .prepare = MMC_TEST_PREP_NONE,
2241 return mmc_test_rw_multiple_sg_len(test, &test_data);
2245 * Multiple non-blocking read 1 to 512 sg elements
2247 static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
2249 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2250 1 << 7, 1 << 8, 1 << 9};
2251 struct mmc_test_multiple_rw test_data = {
2253 .size = TEST_AREA_MAX_SIZE,
2254 .len = ARRAY_SIZE(sg_len),
2256 .do_nonblock_req = true,
2257 .prepare = MMC_TEST_PREP_NONE,
2260 return mmc_test_rw_multiple_sg_len(test, &test_data);
2264 * eMMC hardware reset.
2266 static int mmc_test_hw_reset(struct mmc_test_card *test)
2268 struct mmc_card *card = test->card;
2269 struct mmc_host *host = card->host;
2272 if (!mmc_card_mmc(card) || !mmc_can_reset(card))
2273 return RESULT_UNSUP_CARD;
2275 err = mmc_hw_reset(host);
2278 else if (err == -EOPNOTSUPP)
2279 return RESULT_UNSUP_HOST;
2284 static const struct mmc_test_case mmc_test_cases[] = {
2286 .name = "Basic write (no data verification)",
2287 .run = mmc_test_basic_write,
2291 .name = "Basic read (no data verification)",
2292 .run = mmc_test_basic_read,
2296 .name = "Basic write (with data verification)",
2297 .prepare = mmc_test_prepare_write,
2298 .run = mmc_test_verify_write,
2299 .cleanup = mmc_test_cleanup,
2303 .name = "Basic read (with data verification)",
2304 .prepare = mmc_test_prepare_read,
2305 .run = mmc_test_verify_read,
2306 .cleanup = mmc_test_cleanup,
2310 .name = "Multi-block write",
2311 .prepare = mmc_test_prepare_write,
2312 .run = mmc_test_multi_write,
2313 .cleanup = mmc_test_cleanup,
2317 .name = "Multi-block read",
2318 .prepare = mmc_test_prepare_read,
2319 .run = mmc_test_multi_read,
2320 .cleanup = mmc_test_cleanup,
2324 .name = "Power of two block writes",
2325 .prepare = mmc_test_prepare_write,
2326 .run = mmc_test_pow2_write,
2327 .cleanup = mmc_test_cleanup,
2331 .name = "Power of two block reads",
2332 .prepare = mmc_test_prepare_read,
2333 .run = mmc_test_pow2_read,
2334 .cleanup = mmc_test_cleanup,
2338 .name = "Weird sized block writes",
2339 .prepare = mmc_test_prepare_write,
2340 .run = mmc_test_weird_write,
2341 .cleanup = mmc_test_cleanup,
2345 .name = "Weird sized block reads",
2346 .prepare = mmc_test_prepare_read,
2347 .run = mmc_test_weird_read,
2348 .cleanup = mmc_test_cleanup,
2352 .name = "Badly aligned write",
2353 .prepare = mmc_test_prepare_write,
2354 .run = mmc_test_align_write,
2355 .cleanup = mmc_test_cleanup,
2359 .name = "Badly aligned read",
2360 .prepare = mmc_test_prepare_read,
2361 .run = mmc_test_align_read,
2362 .cleanup = mmc_test_cleanup,
2366 .name = "Badly aligned multi-block write",
2367 .prepare = mmc_test_prepare_write,
2368 .run = mmc_test_align_multi_write,
2369 .cleanup = mmc_test_cleanup,
2373 .name = "Badly aligned multi-block read",
2374 .prepare = mmc_test_prepare_read,
2375 .run = mmc_test_align_multi_read,
2376 .cleanup = mmc_test_cleanup,
2380 .name = "Correct xfer_size at write (start failure)",
2381 .run = mmc_test_xfersize_write,
2385 .name = "Correct xfer_size at read (start failure)",
2386 .run = mmc_test_xfersize_read,
2390 .name = "Correct xfer_size at write (midway failure)",
2391 .run = mmc_test_multi_xfersize_write,
2395 .name = "Correct xfer_size at read (midway failure)",
2396 .run = mmc_test_multi_xfersize_read,
2399 #ifdef CONFIG_HIGHMEM
2402 .name = "Highmem write",
2403 .prepare = mmc_test_prepare_write,
2404 .run = mmc_test_write_high,
2405 .cleanup = mmc_test_cleanup,
2409 .name = "Highmem read",
2410 .prepare = mmc_test_prepare_read,
2411 .run = mmc_test_read_high,
2412 .cleanup = mmc_test_cleanup,
2416 .name = "Multi-block highmem write",
2417 .prepare = mmc_test_prepare_write,
2418 .run = mmc_test_multi_write_high,
2419 .cleanup = mmc_test_cleanup,
2423 .name = "Multi-block highmem read",
2424 .prepare = mmc_test_prepare_read,
2425 .run = mmc_test_multi_read_high,
2426 .cleanup = mmc_test_cleanup,
2432 .name = "Highmem write",
2433 .run = mmc_test_no_highmem,
2437 .name = "Highmem read",
2438 .run = mmc_test_no_highmem,
2442 .name = "Multi-block highmem write",
2443 .run = mmc_test_no_highmem,
2447 .name = "Multi-block highmem read",
2448 .run = mmc_test_no_highmem,
2451 #endif /* CONFIG_HIGHMEM */
2454 .name = "Best-case read performance",
2455 .prepare = mmc_test_area_prepare_fill,
2456 .run = mmc_test_best_read_performance,
2457 .cleanup = mmc_test_area_cleanup,
2461 .name = "Best-case write performance",
2462 .prepare = mmc_test_area_prepare_erase,
2463 .run = mmc_test_best_write_performance,
2464 .cleanup = mmc_test_area_cleanup,
2468 .name = "Best-case read performance into scattered pages",
2469 .prepare = mmc_test_area_prepare_fill,
2470 .run = mmc_test_best_read_perf_max_scatter,
2471 .cleanup = mmc_test_area_cleanup,
2475 .name = "Best-case write performance from scattered pages",
2476 .prepare = mmc_test_area_prepare_erase,
2477 .run = mmc_test_best_write_perf_max_scatter,
2478 .cleanup = mmc_test_area_cleanup,
2482 .name = "Single read performance by transfer size",
2483 .prepare = mmc_test_area_prepare_fill,
2484 .run = mmc_test_profile_read_perf,
2485 .cleanup = mmc_test_area_cleanup,
2489 .name = "Single write performance by transfer size",
2490 .prepare = mmc_test_area_prepare,
2491 .run = mmc_test_profile_write_perf,
2492 .cleanup = mmc_test_area_cleanup,
2496 .name = "Single trim performance by transfer size",
2497 .prepare = mmc_test_area_prepare_fill,
2498 .run = mmc_test_profile_trim_perf,
2499 .cleanup = mmc_test_area_cleanup,
2503 .name = "Consecutive read performance by transfer size",
2504 .prepare = mmc_test_area_prepare_fill,
2505 .run = mmc_test_profile_seq_read_perf,
2506 .cleanup = mmc_test_area_cleanup,
2510 .name = "Consecutive write performance by transfer size",
2511 .prepare = mmc_test_area_prepare,
2512 .run = mmc_test_profile_seq_write_perf,
2513 .cleanup = mmc_test_area_cleanup,
2517 .name = "Consecutive trim performance by transfer size",
2518 .prepare = mmc_test_area_prepare,
2519 .run = mmc_test_profile_seq_trim_perf,
2520 .cleanup = mmc_test_area_cleanup,
2524 .name = "Random read performance by transfer size",
2525 .prepare = mmc_test_area_prepare,
2526 .run = mmc_test_random_read_perf,
2527 .cleanup = mmc_test_area_cleanup,
2531 .name = "Random write performance by transfer size",
2532 .prepare = mmc_test_area_prepare,
2533 .run = mmc_test_random_write_perf,
2534 .cleanup = mmc_test_area_cleanup,
2538 .name = "Large sequential read into scattered pages",
2539 .prepare = mmc_test_area_prepare,
2540 .run = mmc_test_large_seq_read_perf,
2541 .cleanup = mmc_test_area_cleanup,
2545 .name = "Large sequential write from scattered pages",
2546 .prepare = mmc_test_area_prepare,
2547 .run = mmc_test_large_seq_write_perf,
2548 .cleanup = mmc_test_area_cleanup,
2552 .name = "Write performance with blocking req 4k to 4MB",
2553 .prepare = mmc_test_area_prepare,
2554 .run = mmc_test_profile_mult_write_blocking_perf,
2555 .cleanup = mmc_test_area_cleanup,
2559 .name = "Write performance with non-blocking req 4k to 4MB",
2560 .prepare = mmc_test_area_prepare,
2561 .run = mmc_test_profile_mult_write_nonblock_perf,
2562 .cleanup = mmc_test_area_cleanup,
2566 .name = "Read performance with blocking req 4k to 4MB",
2567 .prepare = mmc_test_area_prepare,
2568 .run = mmc_test_profile_mult_read_blocking_perf,
2569 .cleanup = mmc_test_area_cleanup,
2573 .name = "Read performance with non-blocking req 4k to 4MB",
2574 .prepare = mmc_test_area_prepare,
2575 .run = mmc_test_profile_mult_read_nonblock_perf,
2576 .cleanup = mmc_test_area_cleanup,
2580 .name = "Write performance blocking req 1 to 512 sg elems",
2581 .prepare = mmc_test_area_prepare,
2582 .run = mmc_test_profile_sglen_wr_blocking_perf,
2583 .cleanup = mmc_test_area_cleanup,
2587 .name = "Write performance non-blocking req 1 to 512 sg elems",
2588 .prepare = mmc_test_area_prepare,
2589 .run = mmc_test_profile_sglen_wr_nonblock_perf,
2590 .cleanup = mmc_test_area_cleanup,
2594 .name = "Read performance blocking req 1 to 512 sg elems",
2595 .prepare = mmc_test_area_prepare,
2596 .run = mmc_test_profile_sglen_r_blocking_perf,
2597 .cleanup = mmc_test_area_cleanup,
2601 .name = "Read performance non-blocking req 1 to 512 sg elems",
2602 .prepare = mmc_test_area_prepare,
2603 .run = mmc_test_profile_sglen_r_nonblock_perf,
2604 .cleanup = mmc_test_area_cleanup,
2608 .name = "eMMC hardware reset",
2609 .run = mmc_test_hw_reset,
2613 static DEFINE_MUTEX(mmc_test_lock);
2615 static LIST_HEAD(mmc_test_result);
2617 static void mmc_test_run(struct mmc_test_card *test, int testcase)
2621 pr_info("%s: Starting tests of card %s...\n",
2622 mmc_hostname(test->card->host), mmc_card_id(test->card));
2624 mmc_claim_host(test->card->host);
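/* A testcase number of 0 runs the whole suite; otherwise only the matching case runs. */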
2626 for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
2627 struct mmc_test_general_result *gr;
2629 if (testcase && ((i + 1) != testcase))
2632 pr_info("%s: Test case %d. %s...\n",
2633 mmc_hostname(test->card->host), i + 1,
2634 mmc_test_cases[i].name);
2636 if (mmc_test_cases[i].prepare) {
2637 ret = mmc_test_cases[i].prepare(test);
2639 pr_info("%s: Result: Prepare "
2640 "stage failed! (%d)\n",
2641 mmc_hostname(test->card->host),
2647 gr = kzalloc(sizeof(struct mmc_test_general_result),
2650 INIT_LIST_HEAD(&gr->tr_lst);
2652 /* Fill in the data we already know */
2653 gr->card = test->card;
2656 /* Append the container to the global result list */
2657 list_add_tail(&gr->link, &mmc_test_result);
2660 * Save the pointer to the created container in our private structure
2666 ret = mmc_test_cases[i].run(test);
2669 pr_info("%s: Result: OK\n",
2670 mmc_hostname(test->card->host));
2673 pr_info("%s: Result: FAILED\n",
2674 mmc_hostname(test->card->host));
2676 case RESULT_UNSUP_HOST:
2677 pr_info("%s: Result: UNSUPPORTED "
2679 mmc_hostname(test->card->host));
2681 case RESULT_UNSUP_CARD:
2682 pr_info("%s: Result: UNSUPPORTED "
2684 mmc_hostname(test->card->host));
2687 pr_info("%s: Result: ERROR (%d)\n",
2688 mmc_hostname(test->card->host), ret);
2691 /* Save the result */
2695 if (mmc_test_cases[i].cleanup) {
2696 ret = mmc_test_cases[i].cleanup(test);
2698 pr_info("%s: Warning: Cleanup "
2699 "stage failed! (%d)\n",
2700 mmc_hostname(test->card->host),
2706 mmc_release_host(test->card->host);
2708 pr_info("%s: Tests completed.\n",
2709 mmc_hostname(test->card->host));
2712 static void mmc_test_free_result(struct mmc_card *card)
2714 struct mmc_test_general_result *gr, *grs;
2716 mutex_lock(&mmc_test_lock);
2718 list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
2719 struct mmc_test_transfer_result *tr, *trs;
2721 if (card && gr->card != card)
2724 list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
2725 list_del(&tr->link);
2729 list_del(&gr->link);
2733 mutex_unlock(&mmc_test_lock);
2736 static LIST_HEAD(mmc_test_file_test);
2738 static int mtf_test_show(struct seq_file *sf, void *data)
2740 struct mmc_card *card = (struct mmc_card *)sf->private;
2741 struct mmc_test_general_result *gr;
2743 mutex_lock(&mmc_test_lock);
2745 list_for_each_entry(gr, &mmc_test_result, link) {
2746 struct mmc_test_transfer_result *tr;
2748 if (gr->card != card)
2751 seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
2753 list_for_each_entry(tr, &gr->tr_lst, link) {
2754 seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
2755 tr->count, tr->sectors,
2756 (unsigned long)tr->ts.tv_sec,
2757 (unsigned long)tr->ts.tv_nsec,
2758 tr->rate, tr->iops / 100, tr->iops % 100);
2762 mutex_unlock(&mmc_test_lock);
2767 static int mtf_test_open(struct inode *inode, struct file *file)
2769 return single_open(file, mtf_test_show, inode->i_private);
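/*
 * Writing a test case number to the debugfs "test" file runs that test
 * (0 runs them all); reading the file back returns the stored results.
 */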
2772 static ssize_t mtf_test_write(struct file *file, const char __user *buf,
2773 size_t count, loff_t *pos)
2775 struct seq_file *sf = (struct seq_file *)file->private_data;
2776 struct mmc_card *card = (struct mmc_card *)sf->private;
2777 struct mmc_test_card *test;
2781 ret = kstrtol_from_user(buf, count, 10, &testcase);
2785 test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
2790 * Remove all results associated with the given card so that only the
2791 * data from the last run is kept.
2793 mmc_test_free_result(card);
2797 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
2798 #ifdef CONFIG_HIGHMEM
2799 test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
2802 #ifdef CONFIG_HIGHMEM
2803 if (test->buffer && test->highmem) {
2807 mutex_lock(&mmc_test_lock);
2808 mmc_test_run(test, testcase);
2809 mutex_unlock(&mmc_test_lock);
2812 #ifdef CONFIG_HIGHMEM
2813 __free_pages(test->highmem, BUFFER_ORDER);
2815 kfree(test->buffer);
2821 static const struct file_operations mmc_test_fops_test = {
2822 .open = mtf_test_open,
2824 .write = mtf_test_write,
2825 .llseek = seq_lseek,
2826 .release = single_release,
2829 static int mtf_testlist_show(struct seq_file *sf, void *data)
2833 mutex_lock(&mmc_test_lock);
2835 for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
2836 seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
2838 mutex_unlock(&mmc_test_lock);
2843 static int mtf_testlist_open(struct inode *inode, struct file *file)
2845 return single_open(file, mtf_testlist_show, inode->i_private);
2848 static const struct file_operations mmc_test_fops_testlist = {
2849 .open = mtf_testlist_open,
2851 .llseek = seq_lseek,
2852 .release = single_release,
2855 static void mmc_test_free_dbgfs_file(struct mmc_card *card)
2857 struct mmc_test_dbgfs_file *df, *dfs;
2859 mutex_lock(&mmc_test_lock);
2861 list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
2862 if (card && df->card != card)
2864 debugfs_remove(df->file);
2865 list_del(&df->link);
2869 mutex_unlock(&mmc_test_lock);
2872 static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
2873 const char *name, umode_t mode, const struct file_operations *fops)
2875 struct dentry *file = NULL;
2876 struct mmc_test_dbgfs_file *df;
2878 if (card->debugfs_root)
2879 file = debugfs_create_file(name, mode, card->debugfs_root,
2882 if (IS_ERR_OR_NULL(file)) {
2884 "Can't create %s. Perhaps debugfs is disabled.\n",
2889 df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
2891 debugfs_remove(file);
2893 "Can't allocate memory for internal usage.\n");
2900 list_add(&df->link, &mmc_test_file_test);
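/*
 * Register the per-card debugfs files: "test" (write a test number to run
 * it, read it for the results) and "testlist" (the list of available test
 * cases). For example, assuming debugfs is mounted at /sys/kernel/debug and
 * the card is mmc0:0001:
 *
 *   cat /sys/kernel/debug/mmc0/mmc0:0001/testlist
 *   echo 1 > /sys/kernel/debug/mmc0/mmc0:0001/test
 *   cat /sys/kernel/debug/mmc0/mmc0:0001/test
 */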
2904 static int mmc_test_register_dbgfs_file(struct mmc_card *card)
2908 mutex_lock(&mmc_test_lock);
2910 ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
2911 &mmc_test_fops_test);
2915 ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
2916 &mmc_test_fops_testlist);
2921 mutex_unlock(&mmc_test_lock);
2926 static int mmc_test_probe(struct mmc_card *card)
2930 if (!mmc_card_mmc(card) && !mmc_card_sd(card))
2933 ret = mmc_test_register_dbgfs_file(card);
2937 dev_info(&card->dev, "Card claimed for testing.\n");
2942 static void mmc_test_remove(struct mmc_card *card)
2944 mmc_test_free_result(card);
2945 mmc_test_free_dbgfs_file(card);
2948 static void mmc_test_shutdown(struct mmc_card *card)
2952 static struct mmc_driver mmc_driver = {
2956 .probe = mmc_test_probe,
2957 .remove = mmc_test_remove,
2958 .shutdown = mmc_test_shutdown,
2961 static int __init mmc_test_init(void)
2963 return mmc_register_driver(&mmc_driver);
2966 static void __exit mmc_test_exit(void)
2968 /* Clear stale data if a card is still plugged in */
2969 mmc_test_free_result(NULL);
2970 mmc_test_free_dbgfs_file(NULL);
2972 mmc_unregister_driver(&mmc_driver);
2975 module_init(mmc_test_init);
2976 module_exit(mmc_test_exit);
2978 MODULE_LICENSE("GPL");
2979 MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
2980 MODULE_AUTHOR("Pierre Ossman");