* response queued for it, with the saved 'id' passed back.
*/
struct pending_req {
- struct blkif_st *blkif;
+ struct xen_blkif *blkif;
u64 id;
int nr_pages;
atomic_t pendcnt;
(blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])
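For context on what happens to a pending_req after this rename: every bio carved out of a request shares one pending_req, and only the last completion (pendcnt reaching zero) unmaps the grants and queues the response carrying the saved 'id'. A rough sketch of that completion step, reconstructed rather than quoted from the patched file (the status/operation fields and the unmap helper name are assumptions about the surrounding driver code):

/* Sketch of the final-completion path for a pending_req (approximate). */
static void __end_block_io_op(struct pending_req *pending_req, int error)
{
	if (error)
		pending_req->status = BLKIF_RSP_ERROR;

	/* Only the last completing bio answers the frontend. */
	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		xen_blkbk_unmap(pending_req);		/* drop grant mappings */
		make_response(pending_req->blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		xen_blkif_put(pending_req->blkif);
		free_req(pending_req);
	}
}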
-static int do_block_io_op(struct blkif_st *blkif);
-static int dispatch_rw_block_io(struct blkif_st *blkif,
+static int do_block_io_op(struct xen_blkif *blkif);
+static int dispatch_rw_block_io(struct xen_blkif *blkif,
struct blkif_request *req,
struct pending_req *pending_req);
-static void make_response(struct blkif_st *blkif, u64 id,
+static void make_response(struct xen_blkif *blkif, u64 id,
unsigned short op, int st);
/*
/*
* Routines for managing virtual block devices (vbds).
*/
-static int vbd_translate(struct phys_req *req, struct blkif_st *blkif,
+static int vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
int operation)
{
struct vbd *vbd = &blkif->vbd;
return rc;
}
-static void vbd_resize(struct blkif_st *blkif)
+static void vbd_resize(struct xen_blkif *blkif)
{
struct vbd *vbd = &blkif->vbd;
struct xenbus_transaction xbt;
/*
* Notification from the guest OS.
*/
-static void blkif_notify_work(struct blkif_st *blkif)
+static void blkif_notify_work(struct xen_blkif *blkif)
{
blkif->waiting_reqs = 1;
wake_up(&blkif->wq);
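blkif_notify_work() is the entire notification path: flag that work is pending and wake the per-device kthread. It is invoked from the event-channel interrupt handler, along the lines of the sketch below (illustrative; the handler is not part of these hunks):

/* Sketch: event-channel interrupt handler that feeds blkif_notify_work(). */
irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);	/* dev_id is the struct xen_blkif * */
	return IRQ_HANDLED;
}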
* SCHEDULER FUNCTIONS
*/
-static void print_stats(struct blkif_st *blkif)
+static void print_stats(struct xen_blkif *blkif)
{
pr_debug("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d\n",
current->comm, blkif->st_oo_req,
int xen_blkif_schedule(void *arg)
{
- struct blkif_st *blkif = arg;
+ struct xen_blkif *blkif = arg;
struct vbd *vbd = &blkif->vbd;
xen_blkif_get(blkif);
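xen_blkif_schedule() is the kthread woken by blkif_notify_work(). Its main loop sleeps until the frontend signals work (or the thread is told to stop), optionally picks up a vbd resize, and then drains the ring through do_block_io_op(). An approximate sketch of that loop, not the literal patched body:

/* Sketch: main loop of xen_blkif_schedule() (approximate). */
while (!kthread_should_stop()) {
	if (try_to_freeze())
		continue;

	if (unlikely(vbd->size != vbd_sz(vbd)))
		vbd_resize(blkif);

	/* Sleep until the frontend kicks us or we are asked to stop. */
	wait_event_interruptible(blkif->wq,
			blkif->waiting_reqs || kthread_should_stop());

	blkif->waiting_reqs = 0;
	smp_mb();	/* clear the flag before re-reading the ring */

	/* Non-zero return means the ring still has unconsumed requests. */
	if (do_block_io_op(blkif))
		blkif->waiting_reqs = 1;

	if (log_stats && time_after(jiffies, blkif->st_print))
		print_stats(blkif);
}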
* (which has the sectors we want, number of them, grant references, etc),
* and transmute it to the block API to hand it over to the proper block disk.
*/
-static int do_block_io_op(struct blkif_st *blkif)
+static int do_block_io_op(struct xen_blkif *blkif)
{
union blkif_back_rings *blk_rings = &blkif->blk_rings;
struct blkif_request req;
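The consumer side of that hand-off walks the shared ring: snapshot req_prod, copy each 'struct blkif_request' off the ring before trusting it (the frontend can still scribble on the ring memory), bump req_cons, and pass the private copy plus a free pending_req to dispatch_rw_block_io(). A hedged sketch of that loop, with the x86_32/x86_64 protocol switch collapsed to the native ring layout:

/* Sketch: ring-draining loop of do_block_io_op() (approximate). */
rc = blk_rings->common.req_cons;
rp = blk_rings->common.sring->req_prod;
rmb();	/* see requests up to rp before reading them */

while (rc != rp) {
	if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
		break;

	pending_req = alloc_req();
	if (NULL == pending_req) {
		blkif->st_oo_req++;
		more_to_do = 1;
		break;
	}

	/* Work on a private copy, never on the shared ring entry. */
	memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
	blk_rings->common.req_cons = ++rc;
	barrier();

	if (dispatch_rw_block_io(blkif, &req, pending_req))
		break;

	cond_resched();
}
return more_to_do;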
* Transmutation of the 'struct blkif_request' to a proper 'struct bio'
* and call the 'submit_bio' to pass it to the underlying storage.
*/
-static int dispatch_rw_block_io(struct blkif_st *blkif,
- struct blkif_request *req,
- struct pending_req *pending_req)
+static int dispatch_rw_block_io(struct xen_blkif *blkif,
+ struct blkif_request *req,
+ struct pending_req *pending_req)
{
struct phys_req preq;
struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
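In outline, dispatch_rw_block_io() sanity-checks the request, maps the frontend's grant references into seg[], resolves the virtual device and sector range through vbd_translate(), and then packs the mapped pages into as few struct bio as possible before submitting them. The tail of that path looks roughly like the sketch below (reconstructed; error paths, the barrier/flush case, and the exact page-lookup helper are omitted or assumed):

/* Sketch: building and submitting bios for the mapped segments (approximate). */
for (i = 0; i < nseg; i++) {
	while ((bio == NULL) ||
	       (bio_add_page(bio,
			     pending_page(pending_req, i),
			     seg[i].nsec << 9,
			     seg[i].buf & ~PAGE_MASK) == 0)) {
		/* Current bio is full (or absent): start a new one. */
		bio = bio_alloc(GFP_KERNEL, nseg - i);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		biolist[nbio++] = bio;
		bio->bi_bdev    = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
		bio->bi_sector  = preq.sector_number;
	}
	preq.sector_number += seg[i].nsec;
}

atomic_set(&pending_req->pendcnt, nbio);
blk_start_plug(&plug);
for (i = 0; i < nbio; i++)
	submit_bio(operation, biolist[i]);
blk_finish_plug(&plug);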
/*
* Put a response on the ring on how the operation fared.
*/
-static void make_response(struct blkif_st *blkif, u64 id,
+static void make_response(struct xen_blkif *blkif, u64 id,
unsigned short op, int st)
{
struct blkif_response resp;
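make_response() closes the loop opened by dispatch_rw_block_io(): it builds a 'struct blkif_response' from the saved id, the original operation and the status, copies it onto the shared ring under the ring lock, and notifies the frontend's event channel if the ring macros indicate a notification is due. Roughly (a sketch, again with the per-protocol switch collapsed to the native layout):

/* Sketch: body of make_response() (approximate). */
resp.id        = id;
resp.operation = op;
resp.status    = st;

spin_lock_irqsave(&blkif->blk_ring_lock, flags);

/* Place the response at the private producer index, then publish it. */
memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
       &resp, sizeof(resp));
blk_rings->common.rsp_prod_pvt++;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);

spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

if (notify)
	notify_remote_via_irq(blkif->irq);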
struct backend_info {
struct xenbus_device *dev;
- struct blkif_st *blkif;
+ struct xen_blkif *blkif;
struct xenbus_watch backend_watch;
unsigned major;
unsigned minor;
return be->dev;
}
-static int blkback_name(struct blkif_st *blkif, char *buf)
+static int blkback_name(struct xen_blkif *blkif, char *buf)
{
char *devpath, *devname;
struct xenbus_device *dev = blkif->be->dev;
return 0;
}
-static void xen_update_blkif_status(struct blkif_st *blkif)
+static void xen_update_blkif_status(struct xen_blkif *blkif)
{
int err;
char name[TASK_COMM_LEN];
}
}
-static struct blkif_st *xen_blkif_alloc(domid_t domid)
+static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
- struct blkif_st *blkif;
+ struct xen_blkif *blkif;
blkif = kmem_cache_alloc(xen_blkif_cachep, GFP_KERNEL);
if (!blkif)
return blkif;
}
-static int map_frontend_page(struct blkif_st *blkif, unsigned long shared_page)
+static int map_frontend_page(struct xen_blkif *blkif, unsigned long shared_page)
{
struct gnttab_map_grant_ref op;
return 0;
}
-static void unmap_frontend_page(struct blkif_st *blkif)
+static void unmap_frontend_page(struct xen_blkif *blkif)
{
struct gnttab_unmap_grant_ref op;
BUG();
}
-static int xen_blkif_map(struct blkif_st *blkif, unsigned long shared_page,
+static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
unsigned int evtchn)
{
int err;
return 0;
}
-static void xen_blkif_disconnect(struct blkif_st *blkif)
+static void xen_blkif_disconnect(struct xen_blkif *blkif)
{
if (blkif->xenblkd) {
kthread_stop(blkif->xenblkd);
}
}
-void xen_blkif_free(struct blkif_st *blkif)
+void xen_blkif_free(struct xen_blkif *blkif)
{
if (!atomic_dec_and_test(&blkif->refcnt))
BUG();
int __init xen_blkif_interface_init(void)
{
xen_blkif_cachep = kmem_cache_create("blkif_cache",
- sizeof(struct blkif_st),
+ sizeof(struct xen_blkif),
0, 0, NULL);
if (!xen_blkif_cachep)
return -ENOMEM;
vbd->bdev = NULL;
}
-static int vbd_create(struct blkif_st *blkif, blkif_vdev_t handle,
+static int vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
unsigned major, unsigned minor, int readonly,
int cdrom)
{