/*
 * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
 * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 * May be copied or modified under the terms of the GNU General Public
 * License.  See linux/COPYING for more information.
 *
 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
 * DVD-RAM devices.
 *
 * Theory of operation:
 *
 * At the lowest level, there is the standard driver for the CD/DVD device,
 * typically ide-cd.c or sr.c. This driver can handle read and write requests,
 * but it doesn't know anything about the special restrictions that apply to
 * packet writing. One restriction is that write requests must be aligned to
 * packet boundaries on the physical media, and the size of a write request
 * must be equal to the packet size. Another restriction is that a
 * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
 * command, if the previous command was a write.
 *
 * The purpose of the packet writing driver is to hide these restrictions from
 * higher layers, such as file systems, and present a block device that can be
 * randomly read and written using 2kB-sized blocks.
 *
 * The lowest layer in the packet writing driver is the packet I/O scheduler.
 * Its data is defined by the struct packet_iosched and includes two bio
 * queues with pending read and write requests. These queues are processed
 * by the pkt_iosched_process_queue() function. The write requests in this
 * queue are already properly aligned and sized. This layer is responsible for
 * issuing the flush cache commands and scheduling the I/O in a good order.
 *
 * The next layer transforms unaligned write requests to aligned writes. This
 * transformation requires reading missing pieces of data from the underlying
 * block device, assembling the pieces to full packets and queuing them to the
 * packet I/O scheduler.
 *
 * At the top layer there is a custom make_request_fn function that forwards
 * read requests directly to the iosched queue and puts write requests in the
 * unaligned write queue. A kernel thread performs the necessary read
 * gathering to convert the unaligned writes to aligned writes and then feeds
 * them to the packet I/O scheduler.
 *
 *************************************************************************/
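/*
 * Worked example (illustrative, assuming a fixed packet size of 64
 * sectors == 32kB and a zero zone offset): a 4kB write to sector 72
 * belongs to the zone starting at sector 64.  Since CD_FRAMESIZE is
 * 2kB == 4 sectors, the write dirties frames 2 and 3 of that packet:
 *
 *        zone        = 72 & ~63      == 64
 *        first_frame = (72 - 64) / 4 ==  2
 *        num_frames  = 4096 / 2048   ==  2
 *
 * Frames 0..1 and 4..31 must be gathered by reading from the media
 * before the full 32kB packet can be written back in one request.
 */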
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>

#include <asm/uaccess.h>
#define DRIVER_NAME	"pktcdvd"

#define pkt_err(pd, fmt, ...)						\
	pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)

#define pkt_dbg(level, pd, fmt, ...)					\
do {									\
	if (level == 2 && PACKET_DEBUG >= 2)				\
		pr_notice("%s: %s():" fmt,				\
			pd->name, __func__, ##__VA_ARGS__);		\
	else if (level == 1 && PACKET_DEBUG >= 1)			\
		pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__);		\
} while (0)
#define MAX_SPEED 0xffff

static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
static int pktdev_major;
static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex;	/* Serialize open/close/setup/teardown */
static mempool_t *psd_pool;

static struct class *class_pktcdvd = NULL;    /* /sys/class/pktcdvd */
static struct dentry *pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */

/* forward declaration */
static int pkt_setup_dev(dev_t dev, dev_t *pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);
static int pkt_seq_show(struct seq_file *m, void *p);

static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
{
	return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
}
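/*
 * Example (illustrative): with pd->settings.size == 64 sectors and
 * pd->offset == 0, get_zone(70, pd) == (70 + 0) & ~63 == 64.  The mask
 * trick works because the fixed packet size is a power of two.
 */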
/*
 * create and register a pktcdvd kernel object.
 */
static struct pktcdvd_kobj *pkt_kobj_create(struct pktcdvd_device *pd,
					const char *name,
					struct kobject *parent,
					struct kobj_type *ktype)
{
	struct pktcdvd_kobj *p;
	int error;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->pd = pd;
	error = kobject_init_and_add(&p->kobj, ktype, parent, "%s", name);
	if (error) {
		kobject_put(&p->kobj);
		return NULL;
	}
	kobject_uevent(&p->kobj, KOBJ_ADD);
	return p;
}
/*
 * remove a pktcdvd kernel object.
 */
static void pkt_kobj_remove(struct pktcdvd_kobj *p)
{
	if (p)
		kobject_put(&p->kobj);
}
/*
 * default release function for pktcdvd kernel objects.
 */
static void pkt_kobj_release(struct kobject *kobj)
{
	kfree(to_pktcdvdkobj(kobj));
}
/**********************************************************
 *
 * sysfs interface for pktcdvd
 * by (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 **********************************************************/

#define DEF_ATTR(_obj,_name,_mode) \
static struct attribute _obj = { .name = _name, .mode = _mode }

/**********************************************************
  /sys/class/pktcdvd/pktcdvd[0-7]/
     stat/reset
     stat/packets_started
     stat/packets_finished
     stat/kb_written
     stat/kb_read
     stat/kb_read_gather
     write_queue/size
     write_queue/congestion_off
     write_queue/congestion_on
 **********************************************************/
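/*
 * Typical usage from userspace (illustrative; the exact device index
 * depends on the setup):
 *
 *	# cat /sys/class/pktcdvd/pktcdvd0/stat/kb_written
 *	# echo 1 > /sys/class/pktcdvd/pktcdvd0/stat/reset
 *	# echo 2000 > /sys/class/pktcdvd/pktcdvd0/write_queue/congestion_on
 */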
DEF_ATTR(kobj_pkt_attr_st1, "reset", 0200);
DEF_ATTR(kobj_pkt_attr_st2, "packets_started", 0444);
DEF_ATTR(kobj_pkt_attr_st3, "packets_finished", 0444);
DEF_ATTR(kobj_pkt_attr_st4, "kb_written", 0444);
DEF_ATTR(kobj_pkt_attr_st5, "kb_read", 0444);
DEF_ATTR(kobj_pkt_attr_st6, "kb_read_gather", 0444);

static struct attribute *kobj_pkt_attrs_stat[] = {
	&kobj_pkt_attr_st1,
	&kobj_pkt_attr_st2,
	&kobj_pkt_attr_st3,
	&kobj_pkt_attr_st4,
	&kobj_pkt_attr_st5,
	&kobj_pkt_attr_st6,
	NULL
};

DEF_ATTR(kobj_pkt_attr_wq1, "size", 0444);
DEF_ATTR(kobj_pkt_attr_wq2, "congestion_off", 0644);
DEF_ATTR(kobj_pkt_attr_wq3, "congestion_on", 0644);

static struct attribute *kobj_pkt_attrs_wqueue[] = {
	&kobj_pkt_attr_wq1,
	&kobj_pkt_attr_wq2,
	&kobj_pkt_attr_wq3,
	NULL
};
static ssize_t kobj_pkt_show(struct kobject *kobj,
			struct attribute *attr, char *data)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int n = 0;
	int v;

	if (strcmp(attr->name, "packets_started") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_started);

	} else if (strcmp(attr->name, "packets_finished") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_ended);

	} else if (strcmp(attr->name, "kb_written") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1);

	} else if (strcmp(attr->name, "kb_read") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1);

	} else if (strcmp(attr->name, "kb_read_gather") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1);

	} else if (strcmp(attr->name, "size") == 0) {
		spin_lock(&pd->lock);
		v = pd->bio_queue_size;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_off") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_off;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_on") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_on;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);
	}
	return n;
}
static void init_write_congestion_marks(int *lo, int *hi)
{
	if (*hi > 0) {
		*hi = max(*hi, 500);
		*hi = min(*hi, 1000000);
		if (*lo <= 0)
			*lo = *hi - 100;
		else {
			*lo = min(*lo, *hi - 100);
			*lo = max(*lo, 100);
		}
	} else {
		*hi = -1;
		*lo = -1;
	}
}
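/*
 * Example (illustrative): writing 5000 to congestion_on and 4990 to
 * congestion_off leaves the on mark at 5000 but pulls the off mark
 * down to 4900, preserving a gap of at least 100 queued bios between
 * the two marks.
 */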
static ssize_t kobj_pkt_store(struct kobject *kobj,
			struct attribute *attr,
			const char *data, size_t len)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int val;

	if (strcmp(attr->name, "reset") == 0 && len > 0) {
		pd->stats.pkt_started = 0;
		pd->stats.pkt_ended = 0;
		pd->stats.secs_w = 0;
		pd->stats.secs_rg = 0;
		pd->stats.secs_r = 0;

	} else if (strcmp(attr->name, "congestion_off") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_off = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);

	} else if (strcmp(attr->name, "congestion_on") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_on = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);
	}
	return len;
}
static const struct sysfs_ops kobj_pkt_ops = {
	.show = kobj_pkt_show,
	.store = kobj_pkt_store
};
static struct kobj_type kobj_pkt_type_stat = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_stat
};
static struct kobj_type kobj_pkt_type_wqueue = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_wqueue
};
static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
	if (class_pktcdvd) {
		pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
					"%s", pd->name);
		if (IS_ERR(pd->dev))
			pd->dev = NULL;
	}
	if (pd->dev) {
		pd->kobj_stat = pkt_kobj_create(pd, "stat",
					&pd->dev->kobj,
					&kobj_pkt_type_stat);
		pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
					&pd->dev->kobj,
					&kobj_pkt_type_wqueue);
	}
}

static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
{
	pkt_kobj_remove(pd->kobj_stat);
	pkt_kobj_remove(pd->kobj_wqueue);
	if (class_pktcdvd)
		device_unregister(pd->dev);
}
/********************************************************************
  /sys/class/pktcdvd/
                     add            map block device
                     remove         unmap packet dev
                     device_map     show mappings
 *******************************************************************/

static void class_pktcdvd_release(struct class *cls)
{
	kfree(cls);
}
static ssize_t class_pktcdvd_show_map(struct class *c,
					struct class_attribute *attr,
					char *data)
{
	int n = 0;
	int idx;
	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	for (idx = 0; idx < MAX_WRITERS; idx++) {
		struct pktcdvd_device *pd = pkt_devs[idx];
		if (!pd)
			continue;
		n += sprintf(data+n, "%s %u:%u %u:%u\n",
			pd->name,
			MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
			MAJOR(pd->bdev->bd_dev),
			MINOR(pd->bdev->bd_dev));
	}
	mutex_unlock(&ctl_mutex);
	return n;
}
static ssize_t class_pktcdvd_store_add(struct class *c,
					struct class_attribute *attr,
					const char *buf,
					size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		/* pkt_setup_dev() expects caller to hold reference to self */
		if (!try_module_get(THIS_MODULE))
			return -ENODEV;

		pkt_setup_dev(MKDEV(major, minor), NULL);

		module_put(THIS_MODULE);

		return count;
	}

	return -EINVAL;
}

static ssize_t class_pktcdvd_store_remove(struct class *c,
					struct class_attribute *attr,
					const char *buf,
					size_t count)
{
	unsigned int major, minor;
	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		pkt_remove_dev(MKDEV(major, minor));
		return count;
	}
	return -EINVAL;
}

static struct class_attribute class_pktcdvd_attrs[] = {
	__ATTR(add, 0200, NULL, class_pktcdvd_store_add),
	__ATTR(remove, 0200, NULL, class_pktcdvd_store_remove),
	__ATTR(device_map, 0444, class_pktcdvd_show_map, NULL),
	__ATTR_NULL
};
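/*
 * Illustrative usage (the device numbers are an assumption; 11:0 is
 * commonly /dev/sr0 and the pktcdvd major is dynamic):
 *
 *	# echo "11:0" > /sys/class/pktcdvd/add
 *	# cat /sys/class/pktcdvd/device_map
 *	pktcdvd0 253:0 11:0
 *	# echo "253:0" > /sys/class/pktcdvd/remove
 */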
static int pkt_sysfs_init(void)
{
	int ret = 0;

	/*
	 * create control files in sysfs
	 * /sys/class/pktcdvd/...
	 */
	class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL);
	if (!class_pktcdvd)
		return -ENOMEM;
	class_pktcdvd->name = DRIVER_NAME;
	class_pktcdvd->owner = THIS_MODULE;
	class_pktcdvd->class_release = class_pktcdvd_release;
	class_pktcdvd->class_attrs = class_pktcdvd_attrs;
	ret = class_register(class_pktcdvd);
	if (ret) {
		kfree(class_pktcdvd);
		class_pktcdvd = NULL;
		pr_err("failed to create class pktcdvd\n");
		return ret;
	}
	return 0;
}
static void pkt_sysfs_cleanup(void)
{
	if (class_pktcdvd)
		class_destroy(class_pktcdvd);
	class_pktcdvd = NULL;
}

/********************************************************************
  entries in debugfs

  /sys/kernel/debug/pktcdvd[0-7]/
      info

 *******************************************************************/
static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
{
	return pkt_seq_show(m, p);
}

static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_debugfs_seq_show, inode->i_private);
}

static const struct file_operations debug_fops = {
	.open		= pkt_debugfs_fops_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};
static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	pd->dfs_f_info = NULL;
	pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
	if (IS_ERR(pd->dfs_d_root)) {
		pd->dfs_d_root = NULL;
		return;
	}
	pd->dfs_f_info = debugfs_create_file("info", S_IRUGO,
				pd->dfs_d_root, pd, &debug_fops);
	if (IS_ERR(pd->dfs_f_info)) {
		pd->dfs_f_info = NULL;
		return;
	}
}
static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	if (pd->dfs_f_info)
		debugfs_remove(pd->dfs_f_info);
	pd->dfs_f_info = NULL;
	if (pd->dfs_d_root)
		debugfs_remove(pd->dfs_d_root);
	pd->dfs_d_root = NULL;
}

static void pkt_debugfs_init(void)
{
	pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
	if (IS_ERR(pkt_debugfs_root)) {
		pkt_debugfs_root = NULL;
		return;
	}
}

static void pkt_debugfs_cleanup(void)
{
	if (!pkt_debugfs_root)
		return;
	debugfs_remove(pkt_debugfs_root);
	pkt_debugfs_root = NULL;
}

/* ----------------------------------------------------------*/
static void pkt_bio_finished(struct pktcdvd_device *pd)
{
	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
		pkt_dbg(2, pd, "queue empty\n");
		atomic_set(&pd->iosched.attention, 1);
		wake_up(&pd->wqueue);
	}
}
/*
 * Allocate a packet_data struct
 */
static struct packet_data *pkt_alloc_packet_data(int frames)
{
	int i;
	struct packet_data *pkt;

	pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
	if (!pkt)
		goto no_pkt;

	pkt->frames = frames;
	pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
	if (!pkt->w_bio)
		goto no_bio;

	for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!pkt->pages[i])
			goto no_page;
	}

	spin_lock_init(&pkt->lock);
	bio_list_init(&pkt->orig_bios);

	for (i = 0; i < frames; i++) {
		struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
		if (!bio)
			goto no_rd_bio;
		pkt->r_bios[i] = bio;
	}

	return pkt;

no_rd_bio:
	for (i = 0; i < frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}

no_page:
	for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
		if (pkt->pages[i])
			__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
no_bio:
	kfree(pkt);
no_pkt:
	return NULL;
}

/*
 * Free a packet_data struct
 */
static void pkt_free_packet_data(struct packet_data *pkt)
{
	int i;

	for (i = 0; i < pkt->frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}
	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
		__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
	kfree(pkt);
}
static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));

	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
		pkt_free_packet_data(pkt);
	}
	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
}

static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
{
	struct packet_data *pkt;

	BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));

	while (nr_packets > 0) {
		pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
		if (!pkt) {
			pkt_shrink_pktlist(pd);
			return 0;
		}
		pkt->id = nr_packets;
		pkt->pd = pd;
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
		nr_packets--;
	}
	return 1;
}
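/*
 * Example (illustrative): pd->settings.size is kept in 512 byte sectors,
 * so a 32kB packet is 64 sectors and pkt_alloc_packet_data() is asked
 * for 64 >> 2 == 16 frames.  With 4kB pages, FRAMES_PER_PAGE == 2 and
 * each packet is backed by 8 pages.
 */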
static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
{
	struct rb_node *n = rb_next(&node->rb_node);
	if (!n)
		return NULL;
	return rb_entry(n, struct pkt_rb_node, rb_node);
}

static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	rb_erase(&node->rb_node, &pd->bio_queue);
	mempool_free(node, pd->rb_pool);
	pd->bio_queue_size--;
	BUG_ON(pd->bio_queue_size < 0);
}
/*
 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 */
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
{
	struct rb_node *n = pd->bio_queue.rb_node;
	struct rb_node *next;
	struct pkt_rb_node *tmp;

	if (!n) {
		BUG_ON(pd->bio_queue_size > 0);
		return NULL;
	}

	for (;;) {
		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
		if (s <= tmp->bio->bi_sector)
			next = n->rb_left;
		else
			next = n->rb_right;
		if (!next)
			break;
		n = next;
	}

	if (s > tmp->bio->bi_sector) {
		tmp = pkt_rbtree_next(tmp);
		if (!tmp)
			return NULL;
	}
	BUG_ON(s > tmp->bio->bi_sector);
	return tmp;
}
/*
 * Insert a node into the pd->bio_queue rb tree.
 */
static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	struct rb_node **p = &pd->bio_queue.rb_node;
	struct rb_node *parent = NULL;
	sector_t s = node->bio->bi_sector;
	struct pkt_rb_node *tmp;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
		if (s < tmp->bio->bi_sector)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &pd->bio_queue);
	pd->bio_queue_size++;
}
/*
 * Send a packet_command to the underlying block device and
 * wait for completion.
 */
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	struct request_queue *q = bdev_get_queue(pd->bdev);
	struct request *rq;
	int ret = 0;

	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
			     WRITE : READ, __GFP_WAIT);
	if (!rq)
		return -ENODEV;

	if (cgc->buflen) {
		if (blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, __GFP_WAIT))
			goto out;
	}

	rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
	memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);

	rq->timeout = 60*HZ;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	if (cgc->quiet)
		rq->cmd_flags |= REQ_QUIET;

	blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
	if (rq->errors)
		ret = -EIO;
out:
	blk_put_request(rq);
	return ret;
}
static const char *sense_key_string(__u8 index)
{
	static const char * const info[] = {
		"No sense", "Recovered error", "Not ready",
		"Medium error", "Hardware error", "Illegal request",
		"Unit attention", "Data protect", "Blank check",
	};

	return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
}

/*
 * A generic sense dump / resolve mechanism should be implemented across
 * all ATAPI + SCSI devices.
 */
static void pkt_dump_sense(struct packet_command *cgc)
{
	struct request_sense *sense = cgc->sense;

	if (sense)
		pr_err("%*ph - sense %02x.%02x.%02x (%s)\n",
		       CDROM_PACKET_SIZE, cgc->cmd,
		       sense->sense_key, sense->asc, sense->ascq,
		       sense_key_string(sense->sense_key));
	else
		pr_err("%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}
/*
 * flush the drive cache to media
 */
static int pkt_flush_cache(struct pktcdvd_device *pd)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
	cgc.quiet = 1;

	/*
	 * the IMMED bit -- we default to not setting it, although that
	 * would allow a much faster close, this is safer
	 */
#if 0
	cgc.cmd[1] = 1 << 1;
#endif
	return pkt_generic_packet(pd, &cgc);
}
/*
 * speed is given as the normal factor, e.g. 4 for 4x
 */
static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
				unsigned write_speed, unsigned read_speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	int ret;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_SET_SPEED;
	cgc.cmd[2] = (read_speed >> 8) & 0xff;
	cgc.cmd[3] = read_speed & 0xff;
	cgc.cmd[4] = (write_speed >> 8) & 0xff;
	cgc.cmd[5] = write_speed & 0xff;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		pkt_dump_sense(&cgc);

	return ret;
}
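/*
 * Example (illustrative): a 4x CD write speed is 4 * 176 kB/s ~= 706,
 * which is encoded big-endian in the SET SPEED command as
 * cmd[4] == 0x02, cmd[5] == 0xc2.
 */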
/*
 * Queue a bio for processing by the low-level CD device. Must be called
 * from process context.
 */
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
	spin_lock(&pd->iosched.lock);
	if (bio_data_dir(bio) == READ)
		bio_list_add(&pd->iosched.read_queue, bio);
	else
		bio_list_add(&pd->iosched.write_queue, bio);
	spin_unlock(&pd->iosched.lock);

	atomic_set(&pd->iosched.attention, 1);
	wake_up(&pd->wqueue);
}
/*
 * Process the queued read/write requests. This function handles special
 * requirements for CDRW drives:
 * - A cache flush command must be inserted before a read request if the
 *   previous request was a write.
 * - Switching between reading and writing is slow, so don't do it more often
 *   than necessary.
 * - Optimize for throughput at the expense of latency. This means that streaming
 *   writes will never be interrupted by a read, but if the drive has to seek
 *   before the next write, switch to reading instead if there are any pending
 *   read requests.
 * - Set the read speed according to current usage pattern. When only reading
 *   from the device, it's best to use the highest possible read speed, but
 *   when switching often between reading and writing, it's better to have the
 *   same read and write speeds.
 */
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{
	if (atomic_read(&pd->iosched.attention) == 0)
		return;
	atomic_set(&pd->iosched.attention, 0);

	for (;;) {
		struct bio *bio;
		int reads_queued, writes_queued;

		spin_lock(&pd->iosched.lock);
		reads_queued = !bio_list_empty(&pd->iosched.read_queue);
		writes_queued = !bio_list_empty(&pd->iosched.write_queue);
		spin_unlock(&pd->iosched.lock);

		if (!reads_queued && !writes_queued)
			break;

		if (pd->iosched.writing) {
			int need_write_seek = 1;
			spin_lock(&pd->iosched.lock);
			bio = bio_list_peek(&pd->iosched.write_queue);
			spin_unlock(&pd->iosched.lock);
			if (bio && (bio->bi_sector == pd->iosched.last_write))
				need_write_seek = 0;
			if (need_write_seek && reads_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "write, waiting\n");
					break;
				}
				pkt_flush_cache(pd);
				pd->iosched.writing = 0;
			}
		} else {
			if (!reads_queued && writes_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "read, waiting\n");
					break;
				}
				pd->iosched.writing = 1;
			}
		}

		spin_lock(&pd->iosched.lock);
		if (pd->iosched.writing)
			bio = bio_list_pop(&pd->iosched.write_queue);
		else
			bio = bio_list_pop(&pd->iosched.read_queue);
		spin_unlock(&pd->iosched.lock);

		if (!bio)
			continue;

		if (bio_data_dir(bio) == READ)
			pd->iosched.successive_reads += bio->bi_size >> 10;
		else {
			pd->iosched.successive_reads = 0;
			pd->iosched.last_write = bio_end_sector(bio);
		}
		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
			if (pd->read_speed == pd->write_speed) {
				pd->read_speed = MAX_SPEED;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		} else {
			if (pd->read_speed != pd->write_speed) {
				pd->read_speed = pd->write_speed;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		}

		atomic_inc(&pd->cdrw.pending_bios);
		generic_make_request(bio);
	}
}
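/*
 * Example (illustrative): a stream of 32kB read bios adds 32kB each to
 * successive_reads; once HI_SPEED_SWITCH kB have been read without an
 * intervening write, the read speed is raised to MAX_SPEED, and the
 * next write drops it back to the write speed so that alternating
 * traffic avoids expensive speed changes.
 */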
/*
 * Special care is needed if the underlying block device has a small
 * max_phys_segments value.
 */
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
	if ((pd->settings.size << 9) / CD_FRAMESIZE
	    <= queue_max_segments(q)) {
		/*
		 * The cdrom device can handle one segment/frame
		 */
		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else if ((pd->settings.size << 9) / PAGE_SIZE
		   <= queue_max_segments(q)) {
		/*
		 * We can handle this case at the expense of some extra memory
		 * copies during write operations
		 */
		set_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else {
		pkt_err(pd, "cdrom max_phys_segments too small\n");
		return -EIO;
	}
}
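/*
 * Example (illustrative): a 32kB packet spans 16 CD_FRAMESIZE frames,
 * so a device reporting queue_max_segments(q) >= 16 takes the write
 * bio as-is; one reporting >= 8 (32kB / 4kB pages) still works via
 * PACKET_MERGE_SEGS and local copies; anything smaller is rejected.
 */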
/*
 * Copy all data for this packet to pkt->pages[], so that
 * a) The number of required segments for the write bio is minimized, which
 *    is necessary for some scsi controllers.
 * b) The data can be used as cache to avoid read requests if we receive a
 *    new write request for the same zone.
 */
static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
{
	int f, p, offs;

	/* Copy all data to pkt->pages[] */
	p = 0;
	offs = 0;
	for (f = 0; f < pkt->frames; f++) {
		if (bvec[f].bv_page != pkt->pages[p]) {
			void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset;
			void *vto = page_address(pkt->pages[p]) + offs;
			memcpy(vto, vfrom, CD_FRAMESIZE);
			kunmap_atomic(vfrom);
			bvec[f].bv_page = pkt->pages[p];
			bvec[f].bv_offset = offs;
		} else {
			BUG_ON(bvec[f].bv_offset != offs);
		}
		offs += CD_FRAMESIZE;
		if (offs >= PAGE_SIZE) {
			offs = 0;
			p++;
		}
	}
}
static void pkt_end_io_read(struct bio *bio, int err)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;

	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
		bio, (unsigned long long)pkt->sector,
		(unsigned long long)bio->bi_sector, err);

	if (err)
		atomic_inc(&pkt->io_errors);
	if (atomic_dec_and_test(&pkt->io_wait)) {
		atomic_inc(&pkt->run_sm);
		wake_up(&pd->wqueue);
	}
	pkt_bio_finished(pd);
}

static void pkt_end_io_packet_write(struct bio *bio, int err)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;

	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, err);

	pd->stats.pkt_ended++;

	pkt_bio_finished(pd);
	atomic_dec(&pkt->io_wait);
	atomic_inc(&pkt->run_sm);
	wake_up(&pd->wqueue);
}
/*
 * Schedule reads for the holes in a packet
 */
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int frames_read = 0;
	struct bio *bio;
	int f;
	char written[PACKET_MAX_SIZE];

	BUG_ON(bio_list_empty(&pkt->orig_bios));

	atomic_set(&pkt->io_wait, 0);
	atomic_set(&pkt->io_errors, 0);

	/*
	 * Figure out which frames we need to read before we can write.
	 */
	memset(written, 0, sizeof(written));
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_size / CD_FRAMESIZE;
		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++)
			written[f] = 1;
	}
	spin_unlock(&pkt->lock);

	if (pkt->cache_valid) {
		pkt_dbg(2, pd, "zone %llx cached\n",
			(unsigned long long)pkt->sector);
		goto out_account;
	}

	/*
	 * Schedule reads for missing parts of the packet.
	 */
	for (f = 0; f < pkt->frames; f++) {
		int p, offset;

		if (written[f])
			continue;

		bio = pkt->r_bios[f];
		bio_reset(bio);
		bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
		bio->bi_bdev = pd->bdev;
		bio->bi_end_io = pkt_end_io_read;
		bio->bi_private = pkt;

		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
			f, pkt->pages[p], offset);
		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
			BUG();

		atomic_inc(&pkt->io_wait);
		bio->bi_rw = READ;
		pkt_queue_bio(pd, bio);
		frames_read++;
	}

out_account:
	pkt_dbg(2, pd, "need %d frames for zone %llx\n",
		frames_read, (unsigned long long)pkt->sector);
	pd->stats.pkt_started++;
	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
}
/*
 * Find a packet matching zone, or the least recently used packet if
 * there is no match.
 */
static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
{
	struct packet_data *pkt;

	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
			list_del_init(&pkt->list);
			if (pkt->sector != zone)
				pkt->cache_valid = 0;
			return pkt;
		}
	}
	BUG();
	return NULL;
}

static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	if (pkt->cache_valid) {
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
	} else {
		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
	}
}
/*
 * recover a failed write, query for relocation if possible
 *
 * returns 1 if recovery is possible, or 0 if not
 *
 */
static int pkt_start_recovery(struct packet_data *pkt)
{
	/*
	 * FIXME. We need help from the file system to implement
	 * recovery handling.
	 */
	return 0;
#if 0
	struct request *rq = pkt->rq;
	struct pktcdvd_device *pd = rq->rq_disk->private_data;
	struct block_device *pkt_bdev;
	struct super_block *sb = NULL;
	unsigned long old_block, new_block;
	sector_t new_sector;

	pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev));
	if (pkt_bdev) {
		sb = get_super(pkt_bdev);
		bdput(pkt_bdev);
	}

	if (!sb)
		return 0;

	if (!sb->s_op->relocate_blocks)
		goto out;

	old_block = pkt->sector / (CD_FRAMESIZE >> 9);
	if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
		goto out;

	new_sector = new_block * (CD_FRAMESIZE >> 9);
	pkt->sector = new_sector;

	bio_reset(pkt->bio);
	pkt->bio->bi_bdev = pd->bdev;
	pkt->bio->bi_rw = REQ_WRITE;
	pkt->bio->bi_sector = new_sector;
	pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
	pkt->bio->bi_vcnt = pkt->frames;

	pkt->bio->bi_end_io = pkt_end_io_packet_write;
	pkt->bio->bi_private = pkt;

	drop_super(sb);
	return 1;

out:
	drop_super(sb);
	return 0;
#endif
}
static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
{
#if PACKET_DEBUG > 1
	struct pktcdvd_device *pd = pkt->pd;
	static const char *state_name[] = {
		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
	};
	enum packet_data_state old_state = pkt->state;
	pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n",
		pkt->id, (unsigned long long)pkt->sector,
		state_name[old_state], state_name[state]);
#endif
	pkt->state = state;
}
/*
 * Scan the work queue to see if we can start a new packet.
 * returns non-zero if any work was done.
 */
static int pkt_handle_queue(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *p;
	struct bio *bio = NULL;
	sector_t zone = 0; /* Suppress gcc warning */
	struct pkt_rb_node *node, *first_node;
	struct rb_node *n;
	int wakeup;

	atomic_set(&pd->scan_queue, 0);

	if (list_empty(&pd->cdrw.pkt_free_list)) {
		pkt_dbg(2, pd, "no pkt\n");
		return 0;
	}

	/*
	 * Try to find a zone we are not already working on.
	 */
	spin_lock(&pd->lock);
	first_node = pkt_rbtree_find(pd, pd->current_sector);
	if (!first_node) {
		n = rb_first(&pd->bio_queue);
		if (n)
			first_node = rb_entry(n, struct pkt_rb_node, rb_node);
	}
	node = first_node;
	while (node) {
		bio = node->bio;
		zone = get_zone(bio->bi_sector, pd);
		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
			if (p->sector == zone) {
				bio = NULL;
				goto try_next_bio;
			}
		}
		break;
try_next_bio:
		node = pkt_rbtree_next(node);
		if (!node) {
			n = rb_first(&pd->bio_queue);
			if (n)
				node = rb_entry(n, struct pkt_rb_node, rb_node);
		}
		if (node == first_node)
			node = NULL;
	}
	spin_unlock(&pd->lock);
	if (!bio) {
		pkt_dbg(2, pd, "no bio\n");
		return 0;
	}

	pkt = pkt_get_packet_data(pd, zone);

	pd->current_sector = zone + pd->settings.size;
	pkt->sector = zone;
	BUG_ON(pkt->frames != pd->settings.size >> 2);
	pkt->write_size = 0;

	/*
	 * Scan work queue for bios in the same zone and link them
	 * to this packet.
	 */
	spin_lock(&pd->lock);
	pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
		bio = node->bio;
		pkt_dbg(2, pd, "found zone=%llx\n",
			(unsigned long long)get_zone(bio->bi_sector, pd));
		if (get_zone(bio->bi_sector, pd) != zone)
			break;
		pkt_rbtree_erase(pd, node);
		spin_lock(&pkt->lock);
		bio_list_add(&pkt->orig_bios, bio);
		pkt->write_size += bio->bi_size / CD_FRAMESIZE;
		spin_unlock(&pkt->lock);
	}
	/* check write congestion marks, and if bio_queue_size is
	   below, wake up any waiters */
	wakeup = (pd->write_congestion_on > 0
		  && pd->bio_queue_size <= pd->write_congestion_off);
	spin_unlock(&pd->lock);
	if (wakeup) {
		clear_bdi_congested(&pd->disk->queue->backing_dev_info,
					BLK_RW_ASYNC);
	}

	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
	pkt_set_state(pkt, PACKET_WAITING_STATE);
	atomic_set(&pkt->run_sm, 1);

	spin_lock(&pd->cdrw.active_list_lock);
	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
	spin_unlock(&pd->cdrw.active_list_lock);

	return 1;
}
/*
 * Assemble a bio to write one packet and queue the bio for processing
 * by the underlying block device.
 */
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int f;
	struct bio_vec *bvec = pkt->w_bio->bi_io_vec;

	bio_reset(pkt->w_bio);
	pkt->w_bio->bi_sector = pkt->sector;
	pkt->w_bio->bi_bdev = pd->bdev;
	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
	pkt->w_bio->bi_private = pkt;

	for (f = 0; f < pkt->frames; f++) {
		bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
		bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
			BUG();
	}
	pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);

	/*
	 * Fill-in bvec with data from orig_bios.
	 */
	spin_lock(&pkt->lock);
	bio_copy_data(pkt->w_bio, pkt->orig_bios.head);

	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
	spin_unlock(&pkt->lock);

	pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
		pkt->write_size, (unsigned long long)pkt->sector);

	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
		pkt_make_local_copy(pkt, bvec);
		pkt->cache_valid = 1;
	} else {
		pkt->cache_valid = 0;
	}

	/* Start the write request */
	atomic_set(&pkt->io_wait, 1);
	pkt->w_bio->bi_rw = WRITE;
	pkt_queue_bio(pd, pkt->w_bio);
}
static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
{
	struct bio *bio;

	if (!uptodate)
		pkt->cache_valid = 0;

	/* Finish all bios corresponding to this packet */
	while ((bio = bio_list_pop(&pkt->orig_bios)))
		bio_endio(bio, uptodate ? 0 : -EIO);
}
static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int uptodate;

	pkt_dbg(2, pd, "pkt %d\n", pkt->id);

	for (;;) {
		switch (pkt->state) {
		case PACKET_WAITING_STATE:
			if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
				return;

			pkt->sleep_time = 0;
			pkt_gather_data(pd, pkt);
			pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
			break;

		case PACKET_READ_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (atomic_read(&pkt->io_errors) > 0) {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			} else {
				pkt_start_write(pd, pkt);
			}
			break;

		case PACKET_WRITE_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags)) {
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			} else {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			}
			break;

		case PACKET_RECOVERY_STATE:
			if (pkt_start_recovery(pkt)) {
				pkt_start_write(pd, pkt);
			} else {
				pkt_dbg(2, pd, "No recovery possible\n");
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			}
			break;

		case PACKET_FINISHED_STATE:
			uptodate = test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags);
			pkt_finish_packet(pkt, uptodate);
			return;

		default:
			BUG();
			break;
		}
	}
}
static void pkt_handle_packets(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	/*
	 * Run state machine for active packets
	 */
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (atomic_read(&pkt->run_sm) > 0) {
			atomic_set(&pkt->run_sm, 0);
			pkt_run_state_machine(pd, pkt);
		}
	}

	/*
	 * Move no longer active packets to the free list
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
		if (pkt->state == PACKET_FINISHED_STATE) {
			list_del(&pkt->list);
			pkt_put_packet_data(pd, pkt);
			pkt_set_state(pkt, PACKET_IDLE_STATE);
			atomic_set(&pd->scan_queue, 1);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}
static void pkt_count_states(struct pktcdvd_device *pd, int *states)
{
	struct packet_data *pkt;
	int i;

	for (i = 0; i < PACKET_NUM_STATES; i++)
		states[i] = 0;

	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		states[pkt->state]++;
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}
/*
 * kcdrwd is woken up when writes have been queued for one of our
 * registered devices
 */
static int kcdrwd(void *foobar)
{
	struct pktcdvd_device *pd = foobar;
	struct packet_data *pkt;
	long min_sleep_time, residue;

	set_user_nice(current, -20);
	set_freezable();

	for (;;) {
		DECLARE_WAITQUEUE(wait, current);

		/*
		 * Wait until there is something to do
		 */
		add_wait_queue(&pd->wqueue, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);

			/* Check if we need to run pkt_handle_queue */
			if (atomic_read(&pd->scan_queue) > 0)
				goto work_to_do;

			/* Check if we need to run the state machine for some packet */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (atomic_read(&pkt->run_sm) > 0)
					goto work_to_do;
			}

			/* Check if we need to process the iosched queues */
			if (atomic_read(&pd->iosched.attention) != 0)
				goto work_to_do;

			/* Otherwise, go to sleep */
			if (PACKET_DEBUG > 1) {
				int states[PACKET_NUM_STATES];
				pkt_count_states(pd, states);
				pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
					states[0], states[1], states[2],
					states[3], states[4], states[5]);
			}

			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
					min_sleep_time = pkt->sleep_time;
			}

			pkt_dbg(2, pd, "sleeping\n");
			residue = schedule_timeout(min_sleep_time);
			pkt_dbg(2, pd, "wake up\n");

			/* make swsusp happy with our thread */
			try_to_freeze();

			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (!pkt->sleep_time)
					continue;
				pkt->sleep_time -= min_sleep_time - residue;
				if (pkt->sleep_time <= 0) {
					pkt->sleep_time = 0;
					atomic_inc(&pkt->run_sm);
				}
			}

			if (kthread_should_stop())
				break;
		}
work_to_do:
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pd->wqueue, &wait);

		if (kthread_should_stop())
			break;

		/*
		 * if pkt_handle_queue returns true, we can queue
		 * another request.
		 */
		while (pkt_handle_queue(pd))
			;

		/*
		 * Handle packet state machine
		 */
		pkt_handle_packets(pd);

		/*
		 * Handle iosched queues
		 */
		pkt_iosched_process_queue(pd);
	}

	return 0;
}
static void pkt_print_settings(struct pktcdvd_device *pd)
{
	pr_info("%s packets, %u blocks, Mode-%c disc\n",
		pd->settings.fp ? "Fixed" : "Variable",
		pd->settings.size >> 2,
		pd->settings.block_mode == 8 ? '1' : '2');
}
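/*
 * Note (illustrative; assumes the PACKET_BLOCK_MODE1 == 8 definition
 * from pktcdvd.h): block_mode == 8 prints "Mode-1", anything else
 * "Mode-2".
 */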
static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));

	cgc->cmd[0] = GPCMD_MODE_SENSE_10;
	cgc->cmd[2] = page_code | (page_control << 6);
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_READ;
	return pkt_generic_packet(pd, cgc);
}

static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));
	memset(cgc->buffer, 0, 2);
	cgc->cmd[0] = GPCMD_MODE_SELECT_10;
	cgc->cmd[1] = 0x10;		/* PF */
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_WRITE;
	return pkt_generic_packet(pd, cgc);
}
static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
{
	struct packet_command cgc;
	int ret;

	/* set up command and get the disc info */
	init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_DISC_INFO;
	cgc.cmd[8] = cgc.buflen = 2;
	cgc.quiet = 1;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	/* not all drives have the same disc_info length, so requeue
	 * packet with the length the drive tells us it can supply
	 */
	cgc.buflen = be16_to_cpu(di->disc_information_length) +
		     sizeof(di->disc_information_length);

	if (cgc.buflen > sizeof(disc_information))
		cgc.buflen = sizeof(disc_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}
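/*
 * Example (illustrative): the first READ DISC INFO asks for just 2
 * bytes, enough for disc_information_length itself.  If the drive
 * reports a length of 32, the command is reissued with
 * cgc.buflen == 32 + 2 == 34, capped at sizeof(disc_information).
 */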
static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
{
	struct packet_command cgc;
	int ret;

	init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
	cgc.cmd[1] = type & 3;
	cgc.cmd[4] = (track & 0xff00) >> 8;
	cgc.cmd[5] = track & 0xff;
	cgc.cmd[8] = 8;
	cgc.quiet = 1;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	cgc.buflen = be16_to_cpu(ti->track_information_length) +
		     sizeof(ti->track_information_length);

	if (cgc.buflen > sizeof(track_information))
		cgc.buflen = sizeof(track_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}
static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
						long *last_written)
{
	disc_information di;
	track_information ti;
	__u32 last_track;
	int ret;

	if ((ret = pkt_get_disc_info(pd, &di)))
		return ret;

	last_track = (di.last_track_msb << 8) | di.last_track_lsb;
	if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
		return ret;

	/* if this track is blank, try the previous. */
	if (ti.blank) {
		last_track--;
		if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
			return ret;
	}

	/* if last recorded field is valid, return it. */
	if (ti.lra_v) {
		*last_written = be32_to_cpu(ti.last_rec_address);
	} else {
		/* make it up instead */
		*last_written = be32_to_cpu(ti.track_start) +
				be32_to_cpu(ti.track_size);
		if (ti.free_blocks)
			*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
	}
	return 0;
}
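/*
 * Example (illustrative): for a track with track_start 0, track_size
 * 10000 and free_blocks 2048, the estimated last written address is
 * 0 + 10000 - (2048 + 7) == 7945 when the last-recorded-address field
 * is not valid.
 */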
/*
 * write mode select package based on pd->settings
 */
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct request_sense sense;
	write_param_page *wp;
	char buffer[128];
	int ret, size;

	/* doesn't apply to DVD+RW or DVD-RAM */
	if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
		return 0;

	memset(buffer, 0, sizeof(buffer));
	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
	if (size > sizeof(buffer))
		size = sizeof(buffer);

	/*
	 * now get it all
	 */
	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	/*
	 * write page is offset header + block descriptor length
	 */
	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];

	wp->fp = pd->settings.fp;
	wp->track_mode = pd->settings.track_mode;
	wp->write_type = pd->settings.write_type;
	wp->data_block_type = pd->settings.block_mode;

	wp->multi_session = 0;

#ifdef PACKET_USE_LS
	wp->link_size = 7;
	wp->ls_v = 1;
#endif

	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
		wp->session_format = 0;
		wp->subhdr2 = 0x20;
	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
		wp->session_format = 0x20;
		wp->subhdr2 = 8;
#if 0
		wp->mcn[0] = 0x80;
		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
#endif
	} else {
		/*
		 * paranoia
		 */
		pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
		return 1;
	}
	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);

	cgc.buflen = cgc.cmd[8] = size;
	if ((ret = pkt_mode_select(pd, &cgc))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	pkt_print_settings(pd);
	return 0;
}
/*
 * 1 -- we can write to this track, 0 -- we can't
 */
static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
{
	switch (pd->mmc3_profile) {
		case 0x1a: /* DVD+RW */
		case 0x12: /* DVD-RAM */
			/* The track is always writable on DVD+RW/DVD-RAM */
			return 1;
		default:
			break;
	}

	if (!ti->packet || !ti->fp)
		return 0;

	/*
	 * "good" settings as per Mt Fuji.
	 */
	if (ti->rt == 0 && ti->blank == 0)
		return 1;

	if (ti->rt == 0 && ti->blank == 1)
		return 1;

	if (ti->rt == 1 && ti->blank == 0)
		return 1;

	pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
	return 0;
}
/*
 * 1 -- we can write to this disc, 0 -- we can't
 */
static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
{
	switch (pd->mmc3_profile) {
		case 0x0a: /* CD-RW */
		case 0xffff: /* MMC3 not supported */
			break;
		case 0x1a: /* DVD+RW */
		case 0x13: /* DVD-RW */
		case 0x12: /* DVD-RAM */
			return 1;
		default:
			pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
				pd->mmc3_profile);
			return 0;
	}

	/*
	 * for disc type 0xff we should probably reserve a new track.
	 * but i'm not sure, should we leave this to user apps? probably.
	 */
	if (di->disc_type == 0xff) {
		pr_notice("unknown disc - no track?\n");
		return 0;
	}

	if (di->disc_type != 0x20 && di->disc_type != 0) {
		pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
		return 0;
	}

	if (di->erasable == 0) {
		pr_notice("disc not erasable\n");
		return 0;
	}

	if (di->border_status == PACKET_SESSION_RESERVED) {
		pkt_err(pd, "can't write to last track (reserved)\n");
		return 0;
	}

	return 1;
}
static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	unsigned char buf[12];
	disc_information di;
	track_information ti;
	int ret, track;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
	cgc.cmd[8] = 8;
	ret = pkt_generic_packet(pd, &cgc);
	pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];

	memset(&di, 0, sizeof(disc_information));
	memset(&ti, 0, sizeof(track_information));

	if ((ret = pkt_get_disc_info(pd, &di))) {
		pkt_err(pd, "failed get_disc\n");
		return ret;
	}

	if (!pkt_writable_disc(pd, &di))
		return -EROFS;

	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;

	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
	if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
		pkt_err(pd, "failed get_track\n");
		return ret;
	}

	if (!pkt_writable_track(pd, &ti)) {
		pkt_err(pd, "can't write to this track\n");
		return -EROFS;
	}

	/*
	 * we keep packet size in 512 byte units, makes it easier to
	 * deal with request calculations.
	 */
	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
	if (pd->settings.size == 0) {
		pr_notice("detected zero packet size!\n");
		return -ENXIO;
	}
	if (pd->settings.size > PACKET_MAX_SECTORS) {
		pkt_err(pd, "packet size is too big\n");
		return -EROFS;
	}
	pd->settings.fp = ti.fp;
	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);

	if (ti.nwa_v) {
		pd->nwa = be32_to_cpu(ti.next_writable);
		set_bit(PACKET_NWA_VALID, &pd->flags);
	}

	/*
	 * in theory we could use lra on -RW media as well and just zero
	 * blocks that haven't been written yet, but in practice that
	 * is just a no-go. we'll use that for -R, naturally.
	 */
	if (ti.lra_v) {
		pd->lra = be32_to_cpu(ti.last_rec_address);
		set_bit(PACKET_LRA_VALID, &pd->flags);
	} else {
		pd->lra = 0xffffffff;
		set_bit(PACKET_LRA_VALID, &pd->flags);
	}

	/*
	 * fine for now
	 */
	pd->settings.link_loss = 7;
	pd->settings.write_type = 0;	/* packet */
	pd->settings.track_mode = ti.track_mode;

	/*
	 * mode1 or mode2 disc
	 */
	switch (ti.data_mode) {
		case PACKET_MODE1:
			pd->settings.block_mode = PACKET_BLOCK_MODE1;
			break;
		case PACKET_MODE2:
			pd->settings.block_mode = PACKET_BLOCK_MODE2;
			break;
		default:
			pkt_err(pd, "unknown data mode\n");
			return -EROFS;
	}
	return 0;
}
/*
 * enable/disable write caching on drive
 */
static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
						int set)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[64];
	int ret;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.buflen = pd->mode_offset + 12;

	/*
	 * caching mode page might not be there, so quiet this command
	 */
	cgc.quiet = 1;

	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0)))
		return ret;

	buf[pd->mode_offset + 10] |= (!!set << 2);

	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
	ret = pkt_mode_select(pd, &cgc);
	if (ret) {
		pkt_err(pd, "write caching control failed\n");
		pkt_dump_sense(&cgc);
	} else if (!ret && set)
		pr_notice("enabled write caching on %s\n", pd->name);
	return ret;
}
static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cgc.cmd[4] = lockflag ? 1 : 0;
	return pkt_generic_packet(pd, &cgc);
}
/*
 * Returns drive maximum write speed
 */
static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
						unsigned *write_speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[256+18];
	unsigned char *cap_buf;
	int ret, offset;

	cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
	cgc.sense = &sense;

	ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
	if (ret) {
		cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
			     sizeof(struct mode_page_header);
		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
		if (ret) {
			pkt_dump_sense(&cgc);
			return ret;
		}
	}

	offset = 20;			    /* Obsoleted field, used by older drives */
	if (cap_buf[1] >= 28)
		offset = 28;		    /* Current write speed selected */
	if (cap_buf[1] >= 30) {
		/* If the drive reports at least one "Logical Unit Write
		 * Speed Performance Descriptor Block", use the information
		 * in the first block. (contains the highest speed)
		 */
		int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
		if (num_spdb > 0)
			offset = 34;
	}

	*write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
	return 0;
}
/* These tables from cdrecord - I don't have orange book */
/* standard speed CD-RW (1-4x) */
static char clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* high speed CD-RW (-10x) */
static char hs_clv_to_speed[16] = {
	/* 0  1  2  3   4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* ultra high speed CD-RW */
static char us_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
};
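/*
 * Example (illustrative): an ultra high speed disc (sub-type 2) whose
 * ATIP A1 field reports speed index 8 maps through us_clv_to_speed[]
 * to 24x.
 */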
/*
 * reads the maximum media speed from ATIP
 */
static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
						unsigned *speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[64];
	unsigned int size, st, sp;
	int ret;

	init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4; /* READ ATIP */
	cgc.cmd[8] = 2;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(&cgc);
		return ret;
	}
	size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
	if (size > sizeof(buf))
		size = sizeof(buf);

	init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4;
	cgc.cmd[8] = size;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	if (!(buf[6] & 0x40)) {
		pr_notice("disc type is not CD-RW\n");
		return 1;
	}
	if (!(buf[6] & 0x4)) {
		pr_notice("A1 values on media are not valid, maybe not CDRW?\n");
		return 1;
	}

	st = (buf[6] >> 3) & 0x7; /* disc sub-type */

	sp = buf[16] & 0xf; /* max speed from ATIP A1 field */

	/* Info from cdrecord */
	switch (st) {
		case 0: /* standard speed */
			*speed = clv_to_speed[sp];
			break;
		case 1: /* high speed */
			*speed = hs_clv_to_speed[sp];
			break;
		case 2: /* ultra high speed */
			*speed = us_clv_to_speed[sp];
			break;
		default:
			pr_notice("unknown disc sub-type %d\n", st);
			return 1;
	}
	if (*speed) {
		pr_info("maximum media speed: %d\n", *speed);
		return 0;
	} else {
		pr_notice("unknown speed %d for sub-type %d\n", sp, st);
		return 1;
	}
}
static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct request_sense sense;
	int ret;

	pkt_dbg(2, pd, "Performing OPC\n");

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sense = &sense;
	cgc.timeout = 60*HZ;
	cgc.cmd[0] = GPCMD_SEND_OPC;
	cgc.cmd[1] = 1;
	if ((ret = pkt_generic_packet(pd, &cgc)))
		pkt_dump_sense(&cgc);
	return ret;
}
static int pkt_open_write(struct pktcdvd_device *pd)
{
	int ret;
	unsigned int write_speed, media_write_speed, read_speed;

	if ((ret = pkt_probe_settings(pd))) {
		pkt_dbg(2, pd, "failed probe\n");
		return ret;
	}

	if ((ret = pkt_set_write_settings(pd))) {
		pkt_dbg(1, pd, "failed saving write settings\n");
		return -EIO;
	}

	pkt_write_caching(pd, USE_WCACHING);

	if ((ret = pkt_get_max_speed(pd, &write_speed)))
		write_speed = 16 * 177;
	switch (pd->mmc3_profile) {
		case 0x13: /* DVD-RW */
		case 0x1a: /* DVD+RW */
		case 0x12: /* DVD-RAM */
			pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
			break;
		default:
			if ((ret = pkt_media_speed(pd, &media_write_speed)))
				media_write_speed = 16;
			write_speed = min(write_speed, media_write_speed * 177);
			pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
			break;
	}
	read_speed = write_speed;

	if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
		pkt_dbg(1, pd, "couldn't set write speed\n");
		return -EIO;
	}
	pd->write_speed = write_speed;
	pd->read_speed = read_speed;

	if ((ret = pkt_perform_opc(pd))) {
		pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
	}

	return 0;
}
/*
 * called at open time.
 */
static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
{
	int ret;
	long lba;
	struct request_queue *q;

	/*
	 * We need to re-open the cdrom device without O_NONBLOCK to be able
	 * to read/write from/to it. It is already opened in O_NONBLOCK mode
	 * so bdget() can't fail.
	 */
	bdget(pd->bdev->bd_dev);
	if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd)))
		goto out;

	if ((ret = pkt_get_last_written(pd, &lba))) {
		pkt_err(pd, "pkt_get_last_written failed\n");
		goto out_putdev;
	}

	set_capacity(pd->disk, lba << 2);
	set_capacity(pd->bdev->bd_disk, lba << 2);
	bd_set_size(pd->bdev, (loff_t)lba << 11);

	q = bdev_get_queue(pd->bdev);
	if (write) {
		if ((ret = pkt_open_write(pd)))
			goto out_putdev;
		/*
		 * Some CDRW drives can not handle writes larger than one packet,
		 * even if the size is a multiple of the packet size.
		 */
		spin_lock_irq(q->queue_lock);
		blk_queue_max_hw_sectors(q, pd->settings.size);
		spin_unlock_irq(q->queue_lock);
		set_bit(PACKET_WRITABLE, &pd->flags);
	} else {
		pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
		clear_bit(PACKET_WRITABLE, &pd->flags);
	}

	if ((ret = pkt_set_segment_merging(pd, q)))
		goto out_putdev;

	if (write) {
		if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
			pkt_err(pd, "not enough memory for buffers\n");
			ret = -ENOMEM;
			goto out_putdev;
		}
		pr_info("%lukB available on disc\n", lba << 1);
	}

	return 0;

out_putdev:
	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
out:
	return ret;
}
/*
 * called when the device is closed. makes sure that the device flushes
 * the internal cache before we close.
 */
static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
{
	if (flush && pkt_flush_cache(pd))
		pkt_dbg(1, pd, "not flushing cache\n");

	pkt_lock_door(pd, 0);

	pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);

	pkt_shrink_pktlist(pd);
}
static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
	if (dev_minor >= MAX_WRITERS)
		return NULL;
	return pkt_devs[dev_minor];
}
static int pkt_open(struct block_device *bdev, fmode_t mode)
{
	struct pktcdvd_device *pd = NULL;
	int ret;

	mutex_lock(&pktcdvd_mutex);
	mutex_lock(&ctl_mutex);
	pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
	if (!pd) {
		ret = -ENODEV;
		goto out;
	}
	BUG_ON(pd->refcnt < 0);

	pd->refcnt++;
	if (pd->refcnt > 1) {
		if ((mode & FMODE_WRITE) &&
		    !test_bit(PACKET_WRITABLE, &pd->flags)) {
			ret = -EBUSY;
			goto out_dec;
		}
	} else {
		ret = pkt_open_dev(pd, mode & FMODE_WRITE);
		if (ret)
			goto out_dec;
		/*
		 * needed here as well, since ext2 (among others) may change
		 * the blocksize at mount time
		 */
		set_blocksize(bdev, CD_FRAMESIZE);
	}

	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return 0;

out_dec:
	pd->refcnt--;
out:
	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return ret;
}
static void pkt_close(struct gendisk *disk, fmode_t mode)
{
	struct pktcdvd_device *pd = disk->private_data;

	mutex_lock(&pktcdvd_mutex);
	mutex_lock(&ctl_mutex);
	pd->refcnt--;
	BUG_ON(pd->refcnt < 0);
	if (pd->refcnt == 0) {
		int flush = test_bit(PACKET_WRITABLE, &pd->flags);
		pkt_release_dev(pd, flush);
	}
	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
}
static void pkt_end_io_read_cloned(struct bio *bio, int err)
{
	struct packet_stacked_data *psd = bio->bi_private;
	struct pktcdvd_device *pd = psd->pd;

	bio_put(bio);
	bio_endio(psd->bio, err);
	mempool_free(psd, psd_pool);
	pkt_bio_finished(pd);
}
2347 static void pkt_make_request(struct request_queue *q, struct bio *bio)
2349 struct pktcdvd_device *pd;
2350 char b[BDEVNAME_SIZE];
2352 struct packet_data *pkt;
2353 int was_empty, blocked_bio;
2354 struct pkt_rb_node *node;
2358 pkt_err(pd, "%s incorrect request queue\n",
2359 bdevname(bio->bi_bdev, b));
2364 * Clone READ bios so we can have our own bi_end_io callback.
2366 if (bio_data_dir(bio) == READ) {
2367 struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
2368 struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
2372 cloned_bio->bi_bdev = pd->bdev;
2373 cloned_bio->bi_private = psd;
2374 cloned_bio->bi_end_io = pkt_end_io_read_cloned;
2375 pd->stats.secs_r += bio_sectors(bio);
2376 pkt_queue_bio(pd, cloned_bio);
2380 if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
2381 pr_notice("WRITE for ro device %s (%llu)\n",
2382 pd->name, (unsigned long long)bio->bi_sector);
2386 if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
2387 pkt_err(pd, "wrong bio size\n");
2391 blk_queue_bounce(q, &bio);
2393 zone = get_zone(bio->bi_sector, pd);
2394 pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
2395 (unsigned long long)bio->bi_sector,
2396 (unsigned long long)bio_end_sector(bio));
2398 /* Check if we have to split the bio */
2400 struct bio_pair *bp;
2404 last_zone = get_zone(bio_end_sector(bio) - 1, pd);
2405 if (last_zone != zone) {
2406 BUG_ON(last_zone != zone + pd->settings.size);
2407 first_sectors = last_zone - bio->bi_sector;
2408 bp = bio_split(bio, first_sectors);
2410 pkt_make_request(q, &bp->bio1);
2411 pkt_make_request(q, &bp->bio2);
2412 bio_pair_release(bp);
2418 * If we find a matching packet in state WAITING or READ_WAIT, we can
2419 * just append this bio to that packet.
2421 spin_lock(&pd->cdrw.active_list_lock);
2423 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
2424 if (pkt->sector == zone) {
2425 spin_lock(&pkt->lock);
2426 if ((pkt->state == PACKET_WAITING_STATE) ||
2427 (pkt->state == PACKET_READ_WAIT_STATE)) {
2428 bio_list_add(&pkt->orig_bios, bio);
2429 pkt->write_size += bio->bi_size / CD_FRAMESIZE;
2430 if ((pkt->write_size >= pkt->frames) &&
2431 (pkt->state == PACKET_WAITING_STATE)) {
2432 atomic_inc(&pkt->run_sm);
2433 wake_up(&pd->wqueue);
2435 spin_unlock(&pkt->lock);
2436 spin_unlock(&pd->cdrw.active_list_lock);
2441 spin_unlock(&pkt->lock);
2444 spin_unlock(&pd->cdrw.active_list_lock);
2447 * Test if there is enough room left in the bio work queue
2448 * (queue size >= congestion on mark).
2449 * If not, wait till the work queue size is below the congestion off mark.
2451 spin_lock(&pd->lock);
2452 if (pd->write_congestion_on > 0
2453 && pd->bio_queue_size >= pd->write_congestion_on) {
2454 set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
2456 spin_unlock(&pd->lock);
2457 congestion_wait(BLK_RW_ASYNC, HZ);
2458 spin_lock(&pd->lock);
2459 } while(pd->bio_queue_size > pd->write_congestion_off);
2461 spin_unlock(&pd->lock);
	/*
	 * No matching packet found. Store the bio in the work queue.
	 */
	node = mempool_alloc(pd->rb_pool, GFP_NOIO);
	node->bio = bio;
	spin_lock(&pd->lock);
	BUG_ON(pd->bio_queue_size < 0);
	was_empty = (pd->bio_queue_size == 0);
	pkt_rbtree_insert(pd, node);
	spin_unlock(&pd->lock);

	/*
	 * Wake up the worker thread.
	 */
	atomic_set(&pd->scan_queue, 1);
	if (was_empty) {
		/* This wake_up is required for correct operation */
		wake_up(&pd->wqueue);
	} else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
		/*
		 * This wake up is not required for correct operation,
		 * but improves performance in some cases.
		 */
		wake_up(&pd->wqueue);
	}
	return;
end_io:
	bio_io_error(bio);
}
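/*
 * Tell the block layer how many more bytes may be added to a bio that
 * starts at bmd->bi_sector, so that merged bios never grow past the
 * end of the packet (zone) they start in.
 */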
static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
			  struct bio_vec *bvec)
{
	struct pktcdvd_device *pd = q->queuedata;
	sector_t zone = get_zone(bmd->bi_sector, pd);
	int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
	int remaining = (pd->settings.size << 9) - used;
	int remaining2;

	/*
	 * A bio <= PAGE_SIZE must be allowed. If it crosses a packet
	 * boundary, pkt_make_request() will split the bio.
	 */
	remaining2 = PAGE_SIZE - bmd->bi_size;
	remaining = max(remaining, remaining2);

	BUG_ON(remaining < 0);
	return remaining;
}
static void pkt_init_queue(struct pktcdvd_device *pd)
{
	struct request_queue *q = pd->disk->queue;

	blk_queue_make_request(q, pkt_make_request);
	blk_queue_logical_block_size(q, CD_FRAMESIZE);
	blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
	blk_queue_merge_bvec(q, pkt_merge_bvec);
	q->queuedata = pd;
}
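/*
 * Render the per-writer status file under /proc/driver/pktcdvd/.
 * Sample output (values illustrative only):
 *
 *	Writer pktcdvd0 mapped to sr0:
 *
 *	Settings:
 *		packet size:		64kB
 *		write type:		Packet
 *		packet type:		Fixed
 *		...
 */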
static int pkt_seq_show(struct seq_file *m, void *p)
{
	struct pktcdvd_device *pd = m->private;
	char *msg;
	char bdev_buf[BDEVNAME_SIZE];
	int states[PACKET_NUM_STATES];

	seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
		   bdevname(pd->bdev, bdev_buf));

	seq_printf(m, "\nSettings:\n");
	seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);

	if (pd->settings.write_type == 0)
		msg = "Packet";
	else
		msg = "Unknown";
	seq_printf(m, "\twrite type:\t\t%s\n", msg);

	seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
	seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);

	seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);

	if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
		msg = "Mode 1";
	else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
		msg = "Mode 2";
	else
		msg = "Unknown";
	seq_printf(m, "\tblock mode:\t\t%s\n", msg);

	seq_printf(m, "\nStatistics:\n");
	seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
	seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
	seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
	seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
	seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);

	seq_printf(m, "\nMisc:\n");
	seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
	seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
	seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
	seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
	seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
	seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);

	seq_printf(m, "\nQueue state:\n");
	seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
	seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
	seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);

	pkt_count_states(pd, states);
	seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
		   states[0], states[1], states[2], states[3], states[4], states[5]);

	seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
		   pd->write_congestion_off,
		   pd->write_congestion_on);
	return 0;
}
static int pkt_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_seq_show, PDE_DATA(inode));
}

static const struct file_operations pkt_proc_fops = {
	.open		= pkt_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release
};
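/*
 * Attach a pktcdvd writer to the CD/DVD device identified by @dev:
 * take a reference on the underlying block device, start the
 * per-writer kernel thread and create the /proc status entry.
 */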
static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
	int i;
	int ret = 0;
	char b[BDEVNAME_SIZE];
	struct block_device *bdev;

	if (pd->pkt_dev == dev) {
		pkt_err(pd, "recursive setup not allowed\n");
		return -EBUSY;
	}
	for (i = 0; i < MAX_WRITERS; i++) {
		struct pktcdvd_device *pd2 = pkt_devs[i];
		if (!pd2)
			continue;
		if (pd2->bdev->bd_dev == dev) {
			pkt_err(pd, "%s already setup\n",
				bdevname(pd2->bdev, b));
			return -EBUSY;
		}
		if (pd2->pkt_dev == dev) {
			pkt_err(pd, "can't chain pktcdvd devices\n");
			return -EBUSY;
		}
	}

	bdev = bdget(dev);
	if (!bdev)
		return -ENOMEM;
	ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
	if (ret)
		return ret;

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	pd->bdev = bdev;
	set_blocksize(bdev, CD_FRAMESIZE);

	pkt_init_queue(pd);

	atomic_set(&pd->cdrw.pending_bios, 0);
	pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
	if (IS_ERR(pd->cdrw.thread)) {
		pkt_err(pd, "can't start kernel thread\n");
		ret = -ENOMEM;
		goto out_mem;
	}

	proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd);
	pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b));
	return 0;

out_mem:
	blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return ret;
}
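/*
 * Block-device ioctl handler: unlock the drive door before an eject
 * request when we hold the last open reference, and pass a small
 * whitelist of CDROM/SCSI ioctls straight to the attached drive so
 * that UDF tools keep working; everything else is rejected.
 */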
static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
	struct pktcdvd_device *pd = bdev->bd_disk->private_data;
	int ret;

	pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
		cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));

	mutex_lock(&pktcdvd_mutex);
	switch (cmd) {
	case CDROMEJECT:
		/*
		 * The door gets locked when the device is opened, so we
		 * have to unlock it or else the eject command fails.
		 */
		if (pd->refcnt == 1)
			pkt_lock_door(pd, 0);
		/* fallthru */
	/*
	 * forward selected CDROM ioctls to CD-ROM, for UDF
	 */
	case CDROMMULTISESSION:
	case CDROMREADTOCENTRY:
	case CDROM_LAST_WRITTEN:
	case CDROM_SEND_PACKET:
	case SCSI_IOCTL_SEND_COMMAND:
		ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
		break;

	default:
		pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
		ret = -ENOTTY;
	}
	mutex_unlock(&pktcdvd_mutex);

	return ret;
}
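/*
 * Media-change polling: delegate check_events to the attached CD/DVD
 * drive so that disc insertion and removal seen by the real device is
 * reported through the pktcdvd gendisk as well.
 */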
static unsigned int pkt_check_events(struct gendisk *disk,
				     unsigned int clearing)
{
	struct pktcdvd_device *pd = disk->private_data;
	struct gendisk *attached_disk;

	if (!pd)
		return 0;
	if (!pd->bdev)
		return 0;
	attached_disk = pd->bdev->bd_disk;
	if (!attached_disk || !attached_disk->fops->check_events)
		return 0;
	return attached_disk->fops->check_events(attached_disk, clearing);
}
static const struct block_device_operations pktcdvd_ops = {
	.owner		= THIS_MODULE,
	.open		= pkt_open,
	.release	= pkt_close,
	.ioctl		= pkt_ioctl,
	.check_events	= pkt_check_events,
};

static char *pktcdvd_devnode(struct gendisk *gd, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name);
}
/*
 * Set up mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
{
	int idx;
	int ret = -ENOMEM;
	struct pktcdvd_device *pd;
	struct gendisk *disk;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++)
		if (!pkt_devs[idx])
			break;
	if (idx == MAX_WRITERS) {
		pr_err("max %d writers supported\n", MAX_WRITERS);
		ret = -EBUSY;
		goto out_mutex;
	}

	pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
	if (!pd)
		goto out_mutex;

	pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE,
						  sizeof(struct pkt_rb_node));
	if (!pd->rb_pool)
		goto out_mem;

	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
	INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
	spin_lock_init(&pd->cdrw.active_list_lock);

	spin_lock_init(&pd->lock);
	spin_lock_init(&pd->iosched.lock);
	bio_list_init(&pd->iosched.read_queue);
	bio_list_init(&pd->iosched.write_queue);
	sprintf(pd->name, DRIVER_NAME"%d", idx);
	init_waitqueue_head(&pd->wqueue);
	pd->bio_queue = RB_ROOT;

	pd->write_congestion_on  = write_congestion_on;
	pd->write_congestion_off = write_congestion_off;

	disk = alloc_disk(1);
	if (!disk)
		goto out_mem;
	pd->disk = disk;
	disk->major = pktdev_major;
	disk->first_minor = idx;
	disk->fops = &pktcdvd_ops;
	disk->flags = GENHD_FL_REMOVABLE;
	strcpy(disk->disk_name, pd->name);
	disk->devnode = pktcdvd_devnode;
	disk->private_data = pd;
	disk->queue = blk_alloc_queue(GFP_KERNEL);
	if (!disk->queue)
		goto out_mem2;

	pd->pkt_dev = MKDEV(pktdev_major, idx);
	ret = pkt_new_dev(pd, dev);
	if (ret)
		goto out_new_dev;

	/* inherit events of the host device */
	disk->events = pd->bdev->bd_disk->events;
	disk->async_events = pd->bdev->bd_disk->async_events;

	add_disk(disk);

	pkt_sysfs_dev_new(pd);
	pkt_debugfs_dev_new(pd);

	pkt_devs[idx] = pd;
	if (pkt_dev)
		*pkt_dev = pd->pkt_dev;

	mutex_unlock(&ctl_mutex);
	return 0;

out_new_dev:
	blk_cleanup_queue(disk->queue);
out_mem2:
	put_disk(disk);
out_mem:
	if (pd->rb_pool)
		mempool_destroy(pd->rb_pool);
	kfree(pd);
out_mutex:
	mutex_unlock(&ctl_mutex);
	pr_err("setup of pktcdvd device failed\n");
	return ret;
}
/*
 * Tear down mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_remove_dev(dev_t pkt_dev)
{
	struct pktcdvd_device *pd;
	int idx;
	int ret = 0;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++) {
		pd = pkt_devs[idx];
		if (pd && (pd->pkt_dev == pkt_dev))
			break;
	}
	if (idx == MAX_WRITERS) {
		pkt_dbg(1, pd, "dev not setup\n");
		ret = -ENXIO;
		goto out;
	}

	if (pd->refcnt > 0) {
		ret = -EBUSY;
		goto out;
	}

	if (!IS_ERR(pd->cdrw.thread))
		kthread_stop(pd->cdrw.thread);

	pkt_devs[idx] = NULL;

	pkt_debugfs_dev_remove(pd);
	pkt_sysfs_dev_remove(pd);

	blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);

	remove_proc_entry(pd->name, pkt_proc);
	pkt_dbg(1, pd, "writer unmapped\n");

	del_gendisk(pd->disk);
	blk_cleanup_queue(pd->disk->queue);
	put_disk(pd->disk);

	mempool_destroy(pd->rb_pool);
	kfree(pd);

	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);

out:
	mutex_unlock(&ctl_mutex);
	return ret;
}
static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
{
	struct pktcdvd_device *pd;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
	if (pd) {
		ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
		ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
	} else {
		ctrl_cmd->dev = 0;
		ctrl_cmd->pkt_dev = 0;
	}
	ctrl_cmd->num_devices = MAX_WRITERS;

	mutex_unlock(&ctl_mutex);
}
static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct pkt_ctrl_command ctrl_cmd;
	int ret = 0;
	dev_t pkt_dev = 0;

	if (cmd != PACKET_CTRL_CMD)
		return -ENOTTY;

	if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;

	switch (ctrl_cmd.command) {
	case PKT_CTRL_CMD_SETUP:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
		ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
		break;
	case PKT_CTRL_CMD_TEARDOWN:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
		break;
	case PKT_CTRL_CMD_STATUS:
		pkt_get_status(&ctrl_cmd);
		break;
	default:
		return -ENOTTY;
	}

	if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;
	return ret;
}
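/*
 * Usage sketch (illustrative, not part of the original driver): a
 * userspace tool such as pktsetup drives /dev/pktcdvd/control roughly
 * like this; error handling is omitted, and ctrl_cmd.dev carries the
 * host device number in the new_encode_dev() format used above.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/pktcdvd.h>
 *
 *	struct pkt_ctrl_command c = { .command = PKT_CTRL_CMD_SETUP };
 *	int fd = open("/dev/pktcdvd/control", O_RDONLY);
 *	c.dev = dev;			// host CD device, e.g. /dev/sr0
 *	ioctl(fd, PACKET_CTRL_CMD, &c);	// on success c.pkt_dev is set
 */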
#ifdef CONFIG_COMPAT
static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations pkt_ctl_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl	= pkt_ctl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= pkt_ctl_compat_ioctl,
#endif
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
};
static struct miscdevice pkt_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= DRIVER_NAME,
	.nodename	= "pktcdvd/control",
	.fops		= &pkt_ctl_fops
};
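/*
 * Module init: allocate the stacked-data mempool, then register the
 * block major, sysfs class, debugfs root, control misc device and the
 * /proc/driver/pktcdvd directory; on failure, everything registered so
 * far is torn down in reverse order.
 */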
static int __init pkt_init(void)
{
	int ret;

	mutex_init(&ctl_mutex);

	psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE,
					sizeof(struct packet_stacked_data));
	if (!psd_pool)
		return -ENOMEM;

	ret = register_blkdev(pktdev_major, DRIVER_NAME);
	if (ret < 0) {
		pr_err("unable to register block device\n");
		goto out2;
	}
	if (!pktdev_major)
		pktdev_major = ret;

	ret = pkt_sysfs_init();
	if (ret)
		goto out;

	pkt_debugfs_init();

	ret = misc_register(&pkt_misc);
	if (ret) {
		pr_err("unable to register misc device\n");
		goto out_misc;
	}

	pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);

	return 0;

out_misc:
	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();
out:
	unregister_blkdev(pktdev_major, DRIVER_NAME);
out2:
	mempool_destroy(psd_pool);
	return ret;
}
static void __exit pkt_exit(void)
{
	remove_proc_entry("driver/"DRIVER_NAME, NULL);
	misc_deregister(&pkt_misc);

	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();

	unregister_blkdev(pktdev_major, DRIVER_NAME);
	mempool_destroy(psd_pool);
}
MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
MODULE_LICENSE("GPL");

module_init(pkt_init);
module_exit(pkt_exit);