#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/threads.h>
#include <linux/preempt.h>
#include <linux/irqflags.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/device-mapper.h>

#include "dm.h"
#include "dm-stats.h"

#define DM_MSG_PREFIX "stats"
static int dm_stat_need_rcu_barrier;

/*
 * Using 64-bit values to avoid overflow (which is a
 * problem that block/genhd.c's IO accounting has).
 */
struct dm_stat_percpu {
	unsigned long long sectors[2];
	unsigned long long ios[2];
	unsigned long long merges[2];
	unsigned long long ticks[2];
	unsigned long long io_ticks[2];
	unsigned long long io_ticks_total;
	unsigned long long time_in_queue;
	unsigned long long *histogram;
};
struct dm_stat_shared {
	atomic_t in_flight[2];
	unsigned long long stamp;
	struct dm_stat_percpu tmp;
};
struct dm_stat {
	struct list_head list_entry;
	unsigned stat_flags;
	int id;
	size_t n_entries;
	sector_t start;
	sector_t end;
	sector_t step;
	unsigned n_histogram_entries;
	unsigned long long *histogram_boundaries;
	const char *program_id;
	const char *aux_data;
	struct rcu_head rcu_head;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	size_t histogram_alloc_size;
	struct dm_stat_percpu *stat_percpu[NR_CPUS];
	struct dm_stat_shared stat_shared[0];
};
#define STAT_PRECISE_TIMESTAMPS 1

struct dm_stats_last_position {
	sector_t last_sector;
	unsigned last_rw;
};

/*
 * A typo on the command line could possibly make the kernel run out of memory
 * and crash. To prevent the crash we account all used memory. We fail if we
 * exhaust 1/4 of all memory or 1/2 of vmalloc space.
 */
#define DM_STATS_MEMORY_FACTOR		4
#define DM_STATS_VMALLOC_FACTOR		2

static DEFINE_SPINLOCK(shared_memory_lock);

static unsigned long shared_memory_amount;
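
/*
 * Illustrative arithmetic (numbers are hypothetical, not from the original
 * source): with 4 GiB of RAM and DM_STATS_MEMORY_FACTOR == 4, new statistics
 * allocations start failing once the accounted total would exceed 1 GiB;
 * DM_STATS_VMALLOC_FACTOR == 2 similarly caps usage at half of the
 * [VMALLOC_START, VMALLOC_END) range.
 */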
static bool __check_shared_memory(size_t alloc_size)
{
	size_t a;

	a = shared_memory_amount + alloc_size;
	if (a < shared_memory_amount)
		return false;
	if (a >> PAGE_SHIFT > totalram_pages / DM_STATS_MEMORY_FACTOR)
		return false;
#ifdef CONFIG_MMU
	if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
		return false;
#endif
	return true;
}

static bool check_shared_memory(size_t alloc_size)
{
	bool ret;

	spin_lock_irq(&shared_memory_lock);
	ret = __check_shared_memory(alloc_size);
	spin_unlock_irq(&shared_memory_lock);

	return ret;
}
static bool claim_shared_memory(size_t alloc_size)
{
	spin_lock_irq(&shared_memory_lock);
	if (!__check_shared_memory(alloc_size)) {
		spin_unlock_irq(&shared_memory_lock);
		return false;
	}
	shared_memory_amount += alloc_size;
	spin_unlock_irq(&shared_memory_lock);

	return true;
}
static void free_shared_memory(size_t alloc_size)
{
	unsigned long flags;

	spin_lock_irqsave(&shared_memory_lock, flags);
	if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) {
		spin_unlock_irqrestore(&shared_memory_lock, flags);
		DMCRIT("Memory usage accounting bug.");
		return;
	}
	shared_memory_amount -= alloc_size;
	spin_unlock_irqrestore(&shared_memory_lock, flags);
}
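
/*
 * The accounting protocol implied above: claim_shared_memory() must succeed
 * before an allocation is made, and free_shared_memory() must later be called
 * with exactly the same size, otherwise the WARN_ON_ONCE() check trips.
 */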
static void *dm_kvzalloc(size_t alloc_size, int node)
{
	void *p;

	if (!claim_shared_memory(alloc_size))
		return NULL;

	if (alloc_size <= KMALLOC_MAX_SIZE) {
		p = kzalloc_node(alloc_size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN, node);
		if (p)
			return p;
	}
	p = vzalloc_node(alloc_size, node);
	if (p)
		return p;

	free_shared_memory(alloc_size);
	return NULL;
}

static void dm_kvfree(void *ptr, size_t alloc_size)
{
	if (!ptr)
		return;

	free_shared_memory(alloc_size);
	kvfree(ptr);
}
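
/*
 * dm_kvzalloc() prefers kzalloc_node() for requests up to KMALLOC_MAX_SIZE and
 * quietly falls back to vzalloc_node() when the slab allocation fails or the
 * request is too large; kvfree() handles either case on release.  Callers must
 * pass the original size to dm_kvfree(), because the accounting is released by
 * size, not by pointer.
 */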
static void dm_stat_free(struct rcu_head *head)
{
	int cpu;
	struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);

	kfree(s->histogram_boundaries);
	kfree(s->program_id);
	kfree(s->aux_data);
	for_each_possible_cpu(cpu) {
		dm_kvfree(s->stat_percpu[cpu][0].histogram, s->histogram_alloc_size);
		dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
	}
	dm_kvfree(s->stat_shared[0].tmp.histogram, s->histogram_alloc_size);
	dm_kvfree(s, s->shared_alloc_size);
}
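
/*
 * dm_stat_free() runs either synchronously or as an RCU callback (see
 * dm_stats_delete() below); everything is released in reverse order of
 * allocation, ending with the dm_stat structure itself.
 */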
static int dm_stat_in_flight(struct dm_stat_shared *shared)
{
	return atomic_read(&shared->in_flight[READ]) +
	       atomic_read(&shared->in_flight[WRITE]);
}
void dm_stats_init(struct dm_stats *stats)
{
	int cpu;
	struct dm_stats_last_position *last;

	mutex_init(&stats->mutex);
	INIT_LIST_HEAD(&stats->list);
	stats->last = alloc_percpu(struct dm_stats_last_position);
	for_each_possible_cpu(cpu) {
		last = per_cpu_ptr(stats->last, cpu);
		last->last_sector = (sector_t)ULLONG_MAX;
		last->last_rw = UINT_MAX;
	}
}
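
/*
 * last_sector and last_rw start out as ULLONG_MAX/UINT_MAX so that the first
 * bio accounted on a CPU can never be mistaken for a merge candidate by the
 * comparison in dm_stats_account_io().
 */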
void dm_stats_cleanup(struct dm_stats *stats)
{
	size_t ni;
	struct dm_stat *s;
	struct dm_stat_shared *shared;

	while (!list_empty(&stats->list)) {
		s = container_of(stats->list.next, struct dm_stat, list_entry);
		list_del(&s->list_entry);
		for (ni = 0; ni < s->n_entries; ni++) {
			shared = &s->stat_shared[ni];
			if (WARN_ON(dm_stat_in_flight(shared))) {
				DMCRIT("leaked in-flight counter at index %lu "
				       "(start %llu, end %llu, step %llu): reads %d, writes %d",
				       (unsigned long)ni,
				       (unsigned long long)s->start,
				       (unsigned long long)s->end,
				       (unsigned long long)s->step,
				       atomic_read(&shared->in_flight[READ]),
				       atomic_read(&shared->in_flight[WRITE]));
			}
		}
		dm_stat_free(&s->rcu_head);
	}
	free_percpu(stats->last);
}
static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
			   sector_t step, unsigned stat_flags,
			   unsigned n_histogram_entries,
			   unsigned long long *histogram_boundaries,
			   const char *program_id, const char *aux_data,
			   void (*suspend_callback)(struct mapped_device *),
			   void (*resume_callback)(struct mapped_device *),
			   struct mapped_device *md)
{
	struct list_head *l;
	struct dm_stat *s, *tmp_s;
	sector_t n_entries;
	size_t ni;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	size_t histogram_alloc_size;
	struct dm_stat_percpu *p;
	int cpu;
	int ret_id;
	int r;

	if (end < start || !step)
		return -EINVAL;

	n_entries = end - start;
	if (dm_sector_div64(n_entries, step))
		n_entries++;

	if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
		return -EOVERFLOW;

	shared_alloc_size = sizeof(struct dm_stat) + (size_t)n_entries * sizeof(struct dm_stat_shared);
	if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
		return -EOVERFLOW;

	percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu);
	if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
		return -EOVERFLOW;

	histogram_alloc_size = (n_histogram_entries + 1) * (size_t)n_entries * sizeof(unsigned long long);
	if (histogram_alloc_size / (n_histogram_entries + 1) != (size_t)n_entries * sizeof(unsigned long long))
		return -EOVERFLOW;

	if (!check_shared_memory(shared_alloc_size + histogram_alloc_size +
				 num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size)))
		return -ENOMEM;

	s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
	if (!s)
		return -ENOMEM;

	s->stat_flags = stat_flags;
	s->n_entries = n_entries;
	s->start = start;
	s->end = end;
	s->step = step;
	s->shared_alloc_size = shared_alloc_size;
	s->percpu_alloc_size = percpu_alloc_size;
	s->histogram_alloc_size = histogram_alloc_size;

	s->n_histogram_entries = n_histogram_entries;
	s->histogram_boundaries = kmemdup(histogram_boundaries,
					  s->n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL);
	if (!s->histogram_boundaries) {
		r = -ENOMEM;
		goto out;
	}

	s->program_id = kstrdup(program_id, GFP_KERNEL);
	if (!s->program_id) {
		r = -ENOMEM;
		goto out;
	}
	s->aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!s->aux_data) {
		r = -ENOMEM;
		goto out;
	}

	for (ni = 0; ni < n_entries; ni++) {
		atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
		atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
	}

	if (s->n_histogram_entries) {
		unsigned long long *hi;
		hi = dm_kvzalloc(s->histogram_alloc_size, NUMA_NO_NODE);
		if (!hi) {
			r = -ENOMEM;
			goto out;
		}
		for (ni = 0; ni < n_entries; ni++) {
			s->stat_shared[ni].tmp.histogram = hi;
			hi += s->n_histogram_entries + 1;
		}
	}

	for_each_possible_cpu(cpu) {
		p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
		if (!p) {
			r = -ENOMEM;
			goto out;
		}
		s->stat_percpu[cpu] = p;
		if (s->n_histogram_entries) {
			unsigned long long *hi;
			hi = dm_kvzalloc(s->histogram_alloc_size, cpu_to_node(cpu));
			if (!hi) {
				r = -ENOMEM;
				goto out;
			}
			for (ni = 0; ni < n_entries; ni++) {
				p[ni].histogram = hi;
				hi += s->n_histogram_entries + 1;
			}
		}
	}

	/*
	 * Suspend/resume to make sure there is no i/o in flight,
	 * so that newly created statistics will be exact.
	 *
	 * (note: we couldn't suspend earlier because we must not
	 * allocate memory while suspended)
	 */
	suspend_callback(md);

	mutex_lock(&stats->mutex);
	s->id = 0;
	list_for_each(l, &stats->list) {
		tmp_s = container_of(l, struct dm_stat, list_entry);
		if (WARN_ON(tmp_s->id < s->id)) {
			r = -EINVAL;
			goto out_unlock_resume;
		}
		if (tmp_s->id > s->id)
			break;
		if (unlikely(s->id == INT_MAX)) {
			r = -ENFILE;
			goto out_unlock_resume;
		}
		s->id++;
	}
	ret_id = s->id;
	list_add_tail_rcu(&s->list_entry, l);
	mutex_unlock(&stats->mutex);

	resume_callback(md);

	return ret_id;

out_unlock_resume:
	mutex_unlock(&stats->mutex);
	resume_callback(md);
out:
	dm_stat_free(&s->rcu_head);
	return r;
}
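
/*
 * Sizing example (illustrative numbers, not from the original source): a
 * region covering 2097152 sectors (1 GiB) with step 2048 has n_entries ==
 * 1024, so dm_stats_create() allocates 1024 dm_stat_shared entries in the
 * shared block, 1024 dm_stat_percpu entries for every possible CPU, and,
 * when a histogram was requested, (n_histogram_entries + 1) * 1024 extra
 * counters for each of those copies.
 */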
static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
{
	struct dm_stat *s;

	list_for_each_entry(s, &stats->list, list_entry) {
		if (s->id > id)
			break;
		if (s->id == id)
			return s;
	}

	return NULL;
}

static int dm_stats_delete(struct dm_stats *stats, int id)
{
	struct dm_stat *s;
	int cpu;

	mutex_lock(&stats->mutex);
	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	list_del_rcu(&s->list_entry);
	mutex_unlock(&stats->mutex);

	/*
	 * vfree can't be called from RCU callback
	 */
	for_each_possible_cpu(cpu)
		if (is_vmalloc_addr(s->stat_percpu) ||
		    is_vmalloc_addr(s->stat_percpu[cpu][0].histogram))
			goto do_sync_free;
	if (is_vmalloc_addr(s) ||
	    is_vmalloc_addr(s->stat_shared[0].tmp.histogram)) {
do_sync_free:
		synchronize_rcu_expedited();
		dm_stat_free(&s->rcu_head);
	} else {
		ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
		call_rcu(&s->rcu_head, dm_stat_free);
	}
	return 0;
}
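
/*
 * The percpu counter arrays and histograms may live in vmalloc space, and
 * vfree() is not allowed from an RCU callback; that is why the vmalloc case
 * above waits with synchronize_rcu_expedited() and frees synchronously
 * instead of deferring to call_rcu().
 */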
static int dm_stats_list(struct dm_stats *stats, const char *program,
			 char *result, unsigned maxlen)
{
	struct dm_stat *s;
	sector_t len;
	unsigned sz = 0;

	/*
	 * Output format:
	 *   <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
	 */

	mutex_lock(&stats->mutex);
	list_for_each_entry(s, &stats->list, list_entry) {
		if (!program || !strcmp(program, s->program_id)) {
			len = s->end - s->start;
			DMEMIT("%d: %llu+%llu %llu %s %s", s->id,
			       (unsigned long long)s->start,
			       (unsigned long long)len,
			       (unsigned long long)s->step,
			       s->program_id,
			       s->aux_data);
			if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
				DMEMIT(" precise_timestamps");
			if (s->n_histogram_entries) {
				unsigned i;
				DMEMIT(" histogram:");
				for (i = 0; i < s->n_histogram_entries; i++) {
					if (i)
						DMEMIT(",");
					DMEMIT("%llu", s->histogram_boundaries[i]);
				}
			}
			DMEMIT("\n");
		}
	}
	mutex_unlock(&stats->mutex);

	return 1;
}
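
/*
 * Example of one @stats_list line (hypothetical values): "0: 0+2097152 2048
 * dmstats -" describes region 0 covering sectors 0..2097151 in 2048-sector
 * areas, created by program "dmstats" with no auxiliary data.
 */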
static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
			  struct dm_stat_percpu *p)
{
	/*
	 * This is racy, but so is part_round_stats_single.
	 */
	unsigned long long now, difference;
	unsigned in_flight_read, in_flight_write;

	if (likely(!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)))
		now = jiffies;
	else
		now = ktime_to_ns(ktime_get());

	difference = now - shared->stamp;
	if (!difference)
		return;

	in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
	in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
	if (in_flight_read)
		p->io_ticks[READ] += difference;
	if (in_flight_write)
		p->io_ticks[WRITE] += difference;
	if (in_flight_read + in_flight_write) {
		p->io_ticks_total += difference;
		p->time_in_queue += (in_flight_read + in_flight_write) * difference;
	}
	shared->stamp = now;
}
static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
			      unsigned long bi_rw, sector_t len,
			      struct dm_stats_aux *stats_aux, bool end,
			      unsigned long duration_jiffies)
{
	unsigned long idx = bi_rw & REQ_WRITE;
	struct dm_stat_shared *shared = &s->stat_shared[entry];
	struct dm_stat_percpu *p;

	/*
	 * For strict correctness we should use local_irq_save/restore
	 * instead of preempt_disable/enable.
	 *
	 * preempt_disable/enable is racy if the driver finishes bios
	 * from non-interrupt context as well as from interrupt context
	 * or from more different interrupts.
	 *
	 * On 64-bit architectures the race only results in not counting some
	 * events, so it is acceptable. On 32-bit architectures the race could
	 * cause the counter going off by 2^32, so we need to do proper locking
	 * there.
	 *
	 * part_stat_lock()/part_stat_unlock() have this race too.
	 */
#if BITS_PER_LONG == 32
	unsigned long flags;
	local_irq_save(flags);
#else
	preempt_disable();
#endif
	p = &s->stat_percpu[smp_processor_id()][entry];

	if (!end) {
		dm_stat_round(s, shared, p);
		atomic_inc(&shared->in_flight[idx]);
	} else {
		unsigned long long duration;
		dm_stat_round(s, shared, p);
		atomic_dec(&shared->in_flight[idx]);
		p->sectors[idx] += len;
		p->ios[idx] += 1;
		p->merges[idx] += stats_aux->merged;
		if (!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)) {
			p->ticks[idx] += duration_jiffies;
			duration = jiffies_to_msecs(duration_jiffies);
		} else {
			p->ticks[idx] += stats_aux->duration_ns;
			duration = stats_aux->duration_ns;
		}
		if (s->n_histogram_entries) {
			unsigned lo = 0, hi = s->n_histogram_entries + 1;
			while (lo + 1 < hi) {
				unsigned mid = (lo + hi) / 2;
				if (s->histogram_boundaries[mid - 1] > duration)
					hi = mid;
				else
					lo = mid;
			}
			p->histogram[lo]++;
		}
	}

#if BITS_PER_LONG == 32
	local_irq_restore(flags);
#else
	preempt_enable();
#endif
}
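
/*
 * Histogram lookup example (hypothetical boundaries): with boundaries
 * {10, 20} there are three buckets covering [0,10), [10,20) and [20,inf).
 * The binary search above leaves lo == 0 for duration 5, lo == 1 for
 * duration 15 and lo == 2 for duration 25, and p->histogram[lo] is then
 * incremented.
 */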
static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
			  sector_t bi_sector, sector_t end_sector,
			  bool end, unsigned long duration_jiffies,
			  struct dm_stats_aux *stats_aux)
{
	sector_t rel_sector, offset, todo, fragment_len;
	size_t entry;

	if (end_sector <= s->start || bi_sector >= s->end)
		return;
	if (unlikely(bi_sector < s->start)) {
		rel_sector = 0;
		todo = end_sector - s->start;
	} else {
		rel_sector = bi_sector - s->start;
		todo = end_sector - bi_sector;
	}
	if (unlikely(end_sector > s->end))
		todo -= (end_sector - s->end);

	offset = dm_sector_div64(rel_sector, s->step);
	entry = rel_sector;
	do {
		if (WARN_ON_ONCE(entry >= s->n_entries)) {
			DMCRIT("Invalid area access in region id %d", s->id);
			return;
		}
		fragment_len = todo;
		if (fragment_len > s->step - offset)
			fragment_len = s->step - offset;
		dm_stat_for_entry(s, entry, bi_rw, fragment_len,
				  stats_aux, end, duration_jiffies);
		todo -= fragment_len;
		entry++;
		offset = 0;
	} while (unlikely(todo != 0));
}
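
/*
 * Splitting example (hypothetical values): for a region with start 0 and
 * step 8, a bio covering sectors 6..17 is accounted as a 2-sector fragment
 * in entry 0, an 8-sector fragment in entry 1 and a 2-sector fragment in
 * entry 2.
 */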
void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
			 sector_t bi_sector, unsigned bi_sectors, bool end,
			 unsigned long duration_jiffies,
			 struct dm_stats_aux *stats_aux)
{
	struct dm_stat *s;
	sector_t end_sector;
	struct dm_stats_last_position *last;
	bool got_precise_time;

	if (unlikely(!bi_sectors))
		return;

	end_sector = bi_sector + bi_sectors;

	if (!end) {
		/*
		 * A race condition can at worst result in the merged flag being
		 * misrepresented, so we don't have to disable preemption here.
		 */
		last = raw_cpu_ptr(stats->last);
		stats_aux->merged =
			(bi_sector == ACCESS_ONCE(last->last_sector) &&
			 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
			  (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD))));
		ACCESS_ONCE(last->last_sector) = end_sector;
		ACCESS_ONCE(last->last_rw) = bi_rw;
	}

	rcu_read_lock();

	got_precise_time = false;
	list_for_each_entry_rcu(s, &stats->list, list_entry) {
		if (s->stat_flags & STAT_PRECISE_TIMESTAMPS && !got_precise_time) {
			if (!end)
				stats_aux->duration_ns = ktime_to_ns(ktime_get());
			else
				stats_aux->duration_ns = ktime_to_ns(ktime_get()) - stats_aux->duration_ns;
			got_precise_time = true;
		}
		__dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration_jiffies, stats_aux);
	}

	rcu_read_unlock();
}
static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
						   struct dm_stat *s, size_t x)
{
	int cpu;
	struct dm_stat_percpu *p;

	local_irq_disable();
	p = &s->stat_percpu[smp_processor_id()][x];
	dm_stat_round(s, shared, p);
	local_irq_enable();

	shared->tmp.sectors[READ] = 0;
	shared->tmp.sectors[WRITE] = 0;
	shared->tmp.ios[READ] = 0;
	shared->tmp.ios[WRITE] = 0;
	shared->tmp.merges[READ] = 0;
	shared->tmp.merges[WRITE] = 0;
	shared->tmp.ticks[READ] = 0;
	shared->tmp.ticks[WRITE] = 0;
	shared->tmp.io_ticks[READ] = 0;
	shared->tmp.io_ticks[WRITE] = 0;
	shared->tmp.io_ticks_total = 0;
	shared->tmp.time_in_queue = 0;

	if (s->n_histogram_entries)
		memset(shared->tmp.histogram, 0, (s->n_histogram_entries + 1) * sizeof(unsigned long long));

	for_each_possible_cpu(cpu) {
		p = &s->stat_percpu[cpu][x];
		shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
		shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
		shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
		shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
		shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
		shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
		shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
		shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
		shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
		shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
		shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
		shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++)
				shared->tmp.histogram[i] += ACCESS_ONCE(p->histogram[i]);
		}
	}
}
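
/*
 * The per-CPU counters are summed here without taking any lock; ACCESS_ONCE()
 * only keeps the compiler from re-reading the values, so the totals are a
 * best-effort snapshot, consistent with the "racy but acceptable" policy
 * described in dm_stat_for_entry().
 */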
static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
			    bool init_tmp_percpu_totals)
{
	size_t x;
	struct dm_stat_shared *shared;
	struct dm_stat_percpu *p;

	for (x = idx_start; x < idx_end; x++) {
		shared = &s->stat_shared[x];
		if (init_tmp_percpu_totals)
			__dm_stat_init_temporary_percpu_totals(shared, s, x);
		local_irq_disable();
		p = &s->stat_percpu[smp_processor_id()][x];
		p->sectors[READ] -= shared->tmp.sectors[READ];
		p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
		p->ios[READ] -= shared->tmp.ios[READ];
		p->ios[WRITE] -= shared->tmp.ios[WRITE];
		p->merges[READ] -= shared->tmp.merges[READ];
		p->merges[WRITE] -= shared->tmp.merges[WRITE];
		p->ticks[READ] -= shared->tmp.ticks[READ];
		p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
		p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
		p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
		p->io_ticks_total -= shared->tmp.io_ticks_total;
		p->time_in_queue -= shared->tmp.time_in_queue;
		local_irq_enable();
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++) {
				local_irq_disable();
				p = &s->stat_percpu[smp_processor_id()][x];
				p->histogram[i] -= shared->tmp.histogram[i];
				local_irq_enable();
			}
		}
	}
}
static int dm_stats_clear(struct dm_stats *stats, int id)
{
	struct dm_stat *s;

	mutex_lock(&stats->mutex);
	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}
	__dm_stat_clear(s, 0, s->n_entries, true);
	mutex_unlock(&stats->mutex);

	return 1;
}
/*
 * This is like jiffies_to_msec, but works for 64-bit values.
 */
static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long long j)
{
	unsigned long long result;
	unsigned mult;

	if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
		return j;

	result = 0;
	if (j)
		result = jiffies_to_msecs(j & 0x3fffff);
	if (j >= 1 << 22) {
		mult = jiffies_to_msecs(1 << 22);
		result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff);
	}
	if (j >= 1ULL << 44)
		result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44);

	return result;
}
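
/*
 * The conversion is done in 22-bit slices so that each call to
 * jiffies_to_msecs() sees a small value: the low 22 bits are converted
 * directly, the next 22 bits are scaled by jiffies_to_msecs(1 << 22), and the
 * remaining high bits by that factor squared.  Worked example (assuming
 * HZ == 1000, so one jiffy is one millisecond): j == (5 << 22) + 7 yields
 * 7 + 5 * 4194304 = 20971527 ms.
 */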
static int dm_stats_print(struct dm_stats *stats, int id,
			  size_t idx_start, size_t idx_len,
			  bool clear, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_stat *s;
	size_t x;
	sector_t start, end, step;
	size_t idx_end;
	struct dm_stat_shared *shared;

	/*
	 * Output format:
	 *   <start_sector>+<length> counters
	 */

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	idx_end = idx_start + idx_len;
	if (idx_end < idx_start ||
	    idx_end > s->n_entries)
		idx_end = s->n_entries;

	if (idx_start > idx_end)
		idx_start = idx_end;

	step = s->step;
	start = s->start + (step * idx_start);

	for (x = idx_start; x < idx_end; x++, start = end) {
		shared = &s->stat_shared[x];
		end = start + step;
		if (unlikely(end > s->end))
			end = s->end;

		__dm_stat_init_temporary_percpu_totals(shared, s, x);

		DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu",
		       (unsigned long long)start,
		       (unsigned long long)step,
		       shared->tmp.ios[READ],
		       shared->tmp.merges[READ],
		       shared->tmp.sectors[READ],
		       dm_jiffies_to_msec64(s, shared->tmp.ticks[READ]),
		       shared->tmp.ios[WRITE],
		       shared->tmp.merges[WRITE],
		       shared->tmp.sectors[WRITE],
		       dm_jiffies_to_msec64(s, shared->tmp.ticks[WRITE]),
		       dm_stat_in_flight(shared),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks_total),
		       dm_jiffies_to_msec64(s, shared->tmp.time_in_queue),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[READ]),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[WRITE]));
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++)
				DMEMIT("%s%llu", !i ? " " : ":", shared->tmp.histogram[i]);
		}
		DMEMIT("\n");

		if (unlikely(sz + 1 >= maxlen))
			goto buffer_overflow;
	}

	if (clear)
		__dm_stat_clear(s, idx_start, idx_end, false);

buffer_overflow:
	mutex_unlock(&stats->mutex);

	return 1;
}
static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data)
{
	struct dm_stat *s;
	const char *new_aux_data;

	mutex_lock(&stats->mutex);
	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}
	new_aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!new_aux_data) {
		mutex_unlock(&stats->mutex);
		return -ENOMEM;
	}
	kfree(s->aux_data);
	s->aux_data = new_aux_data;
	mutex_unlock(&stats->mutex);

	return 0;
}
static int parse_histogram(const char *h, unsigned *n_histogram_entries,
			   unsigned long long **histogram_boundaries)
{
	const char *q;
	unsigned n;
	unsigned long long last;

	*n_histogram_entries = 1;
	for (q = h; *q; q++)
		if (*q == ',')
			(*n_histogram_entries)++;

	*histogram_boundaries = kmalloc(*n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL);
	if (!*histogram_boundaries)
		return -ENOMEM;

	n = 0;
	last = 0;
	while (1) {
		unsigned long long hi;
		int s;
		char ch;

		s = sscanf(h, "%llu%c", &hi, &ch);
		if (!s || (s == 2 && ch != ','))
			return -EINVAL;
		if (hi <= last)
			return -EINVAL;
		last = hi;
		(*histogram_boundaries)[n] = hi;
		if (s == 1)
			return 0;
		h = strchr(h, ',') + 1;
		n++;
	}
}
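
/*
 * Example (hypothetical feature argument): "histogram:10,20,50" parses into
 * n_histogram_entries == 3 with boundaries {10, 20, 50}, which gives each
 * area four counters, one per interval delimited by the boundaries.
 * Boundaries must be strictly increasing, otherwise -EINVAL is returned.
 */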
static int message_stats_create(struct mapped_device *md,
				unsigned argc, char **argv,
				char *result, unsigned maxlen)
{
	int r;
	int id;
	char dummy;
	unsigned long long start, end, len, step;
	unsigned divisor;
	const char *program_id, *aux_data;
	unsigned stat_flags = 0;

	unsigned n_histogram_entries = 0;
	unsigned long long *histogram_boundaries = NULL;

	struct dm_arg_set as, as_backup;
	const char *a;
	unsigned feature_args;

	/*
	 * Input format:
	 *   <range> <step> [<extra_parameters> <parameters>] [<program_id> [<aux_data>]]
	 */

	if (argc < 3)
		goto ret_einval;

	as.argc = argc;
	as.argv = argv;
	dm_consume_args(&as, 1);

	a = dm_shift_arg(&as);
	if (!strcmp(a, "-")) {
		start = 0;
		len = dm_get_size(md);
		if (!len)
			len = 1;
	} else if (sscanf(a, "%llu+%llu%c", &start, &len, &dummy) != 2 ||
		   start != (sector_t)start || len != (sector_t)len)
		goto ret_einval;

	end = start + len;
	if (start >= end)
		goto ret_einval;

	a = dm_shift_arg(&as);
	if (sscanf(a, "/%u%c", &divisor, &dummy) == 1) {
		if (!divisor)
			return -EINVAL;
		step = end - start;
		if (do_div(step, divisor))
			step++;
		if (!step)
			step = 1;
	} else if (sscanf(a, "%llu%c", &step, &dummy) != 1 ||
		   step != (sector_t)step || !step)
		goto ret_einval;

	as_backup = as;
	a = dm_shift_arg(&as);
	if (a && sscanf(a, "%u%c", &feature_args, &dummy) == 1) {
		while (feature_args--) {
			a = dm_shift_arg(&as);
			if (!a)
				goto ret_einval;
			if (!strcasecmp(a, "precise_timestamps"))
				stat_flags |= STAT_PRECISE_TIMESTAMPS;
			else if (!strncasecmp(a, "histogram:", 10)) {
				if (n_histogram_entries)
					goto ret_einval;
				if ((r = parse_histogram(a + 10, &n_histogram_entries, &histogram_boundaries)))
					goto ret;
			} else
				goto ret_einval;
		}
	} else {
		as = as_backup;
	}

	program_id = "-";
	aux_data = "-";

	a = dm_shift_arg(&as);
	if (a)
		program_id = a;

	a = dm_shift_arg(&as);
	if (a)
		aux_data = a;

	if (as.argc)
		goto ret_einval;

	/*
	 * If a buffer overflow happens after we created the region,
	 * it's too late (the userspace would retry with a larger
	 * buffer, but the region id that caused the overflow is already
	 * leaked). So we must detect buffer overflow in advance.
	 */
	snprintf(result, maxlen, "%d", INT_MAX);
	if (dm_message_test_buffer_overflow(result, maxlen)) {
		r = 1;
		goto ret;
	}

	id = dm_stats_create(dm_get_stats(md), start, end, step, stat_flags,
			     n_histogram_entries, histogram_boundaries, program_id, aux_data,
			     dm_internal_suspend_fast, dm_internal_resume_fast, md);
	if (id < 0) {
		r = id;
		goto ret;
	}

	snprintf(result, maxlen, "%d", id);

	r = 1;
	goto ret;

ret_einval:
	r = -EINVAL;
ret:
	kfree(histogram_boundaries);
	return r;
}
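
/*
 * Usage sketch (illustrative; the device name is hypothetical):
 *
 *   dmsetup message vol 0 "@stats_create - /100"
 *
 * creates a region spanning the whole device, split into 100 areas, and
 * prints the new region id; a later "@stats_delete <id>" removes it again.
 */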
static int message_stats_delete(struct mapped_device *md,
				unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 2)
		return -EINVAL;
	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_delete(dm_get_stats(md), id);
}

static int message_stats_clear(struct mapped_device *md,
			       unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 2)
		return -EINVAL;
	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_clear(dm_get_stats(md), id);
}
static int message_stats_list(struct mapped_device *md,
			      unsigned argc, char **argv,
			      char *result, unsigned maxlen)
{
	int r;
	const char *program = NULL;

	if (argc < 1 || argc > 2)
		return -EINVAL;

	if (argc > 1) {
		program = kstrdup(argv[1], GFP_KERNEL);
		if (!program)
			return -ENOMEM;
	}

	r = dm_stats_list(dm_get_stats(md), program, result, maxlen);

	kfree(program);

	return r;
}
static int message_stats_print(struct mapped_device *md,
			       unsigned argc, char **argv, bool clear,
			       char *result, unsigned maxlen)
{
	int id;
	char dummy;
	unsigned long idx_start = 0, idx_len = ULONG_MAX;

	if (argc != 2 && argc != 4)
		return -EINVAL;
	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	if (argc > 3) {
		if (strcmp(argv[2], "-") &&
		    sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1)
			return -EINVAL;
		if (strcmp(argv[3], "-") &&
		    sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1)
			return -EINVAL;
	}

	return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear,
			      result, maxlen);
}
static int message_stats_set_aux(struct mapped_device *md,
				 unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 3)
		return -EINVAL;
	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
}
int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
		     char *result, unsigned maxlen)
{
	int r;

	/* All messages here must start with '@' */
	if (!strcasecmp(argv[0], "@stats_create"))
		r = message_stats_create(md, argc, argv, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_delete"))
		r = message_stats_delete(md, argc, argv);
	else if (!strcasecmp(argv[0], "@stats_clear"))
		r = message_stats_clear(md, argc, argv);
	else if (!strcasecmp(argv[0], "@stats_list"))
		r = message_stats_list(md, argc, argv, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_print"))
		r = message_stats_print(md, argc, argv, false, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_print_clear"))
		r = message_stats_print(md, argc, argv, true, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_set_aux"))
		r = message_stats_set_aux(md, argc, argv);
	else
		return 2; /* this wasn't a stats message */

	if (r == -EINVAL)
		DMWARN("Invalid parameters for message %s", argv[0]);

	return r;
}
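
/*
 * Message overview: @stats_create, @stats_delete, @stats_clear, @stats_list,
 * @stats_print, @stats_print_clear and @stats_set_aux are handled by the
 * dispatcher above; any other message is passed on to the target (return
 * value 2).
 */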
int __init dm_statistics_init(void)
{
	shared_memory_amount = 0;
	dm_stat_need_rcu_barrier = 0;
	return 0;
}

void dm_statistics_exit(void)
{
	if (dm_stat_need_rcu_barrier)
		rcu_barrier();
	if (WARN_ON(shared_memory_amount))
		DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
}

module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO);
MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");