// bdev_inode: pairs a block_device with the inode that backs it
struct bdev_inode {
    struct block_device bdev;
    struct inode vfs_inode;
};

// block_device: block device structure
struct block_device {
    sector_t bd_start_sect;
    sector_t bd_nr_sectors;
    struct disk_stats __percpu *bd_stats;
    unsigned long bd_stamp;
    bool bd_read_only;              /* read-only policy */
    dev_t bd_dev;
    int bd_openers;
    struct inode *bd_inode;         /* will die */
    struct super_block *bd_super;
    void *bd_claiming;
    struct device bd_device;
    void *bd_holder;
    int bd_holders;
    bool bd_write_holder;
    struct kobject *bd_holder_dir;
    u8 bd_partno;
    spinlock_t bd_size_lock;        /* for bd_inode->i_size updates */
    struct gendisk *bd_disk;
    struct request_queue *bd_queue;
};
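The bdev_inode pairing is what lets the kernel hop between the two views of a device: given the VFS inode, container_of() recovers the embedded block_device. A short sketch of that lookup, modeled on the BDEV_I()/I_BDEV() helpers in block/bdev.c:

static struct bdev_inode *BDEV_I(struct inode *inode)
{
    /* vfs_inode is embedded in bdev_inode, so step back to the container */
    return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
    return &BDEV_I(inode)->bdev;
}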
// gendisk: disk descriptor structure
struct gendisk {
    /*
     * major/first_minor/minors should not be set by any new driver, the
     * block core will take care of allocating them automatically.
     */
    int major;
    int first_minor;
    int minors;

    char disk_name[DISK_NAME_LEN];  /* name of major driver */
    unsigned short events;          /* supported events */
    unsigned short event_flags;     /* flags related to event processing */
    /*
     * The queue owner gets to use this for whatever they like.
     * ll_rw_blk doesn't touch it.
     */
    void *queuedata;
};
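As the comment on major/first_minor/minors says, a modern driver does not assemble a gendisk field by field. On the kernel generation quoted here (around v5.16), a blk-mq driver obtains the gendisk, with its request_queue already attached, from blk_mq_alloc_disk(), fills in the name and capacity, and registers it with add_disk(). A minimal sketch, assuming a hypothetical driver context my_dev holding an already-initialized blk_mq_tag_set, plus a driver-defined block_device_operations table my_fops (both names are illustrative):

static int my_add_disk(struct my_dev *dev)
{
    struct gendisk *disk;
    int err;

    /* Allocates gendisk + request_queue; dev is stored as queuedata. */
    disk = blk_mq_alloc_disk(&dev->tag_set, dev);
    if (IS_ERR(disk))
        return PTR_ERR(disk);

    /* fops is a gendisk field elided from the truncated listing above. */
    disk->fops = &my_fops;
    snprintf(disk->disk_name, DISK_NAME_LEN, "myblk0");
    set_capacity(disk, dev->nr_sectors);    /* capacity in 512-byte sectors */

    /* major/first_minor/minors are left to the block core. */
    err = add_disk(disk);                   /* returns int since v5.15 */
    if (err)
        put_disk(disk);
    return err;
}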
// Driver operation callbacks (a usage sketch follows the struct definition)
struct blk_mq_ops {
    /**
     * @queue_rq: Queue a new request from block IO.
     */
    blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
                             const struct blk_mq_queue_data *);

    /**
     * @commit_rqs: If a driver uses bd->last to judge when to submit
     * requests to hardware, it must define this function. In case of errors
     * that make us stop issuing further requests, this hook serves the
     * purpose of kicking the hardware (which the last request otherwise
     * would have done).
     */
    void (*commit_rqs)(struct blk_mq_hw_ctx *);
    /**
     * @queue_rqs: Queue a list of new requests. Driver is guaranteed
     * that each request belongs to the same queue. If the driver doesn't
     * empty the @rqlist completely, then the rest will be queued
     * individually by the block layer upon return.
     */
    void (*queue_rqs)(struct request **rqlist);
    /**
     * @get_budget: Reserve budget before queue request, once .queue_rq is
     * run, it is driver's responsibility to release the
     * reserved budget. Also we have to handle failure case
     * of .get_budget for avoiding I/O deadlock.
     */
    int (*get_budget)(struct request_queue *);
    /**
     * @timeout: Called on request timeout.
     */
    enum blk_eh_timer_return (*timeout)(struct request *, bool);
    /**
     * @poll: Called to poll for completion of a specific tag.
     */
    int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);
    /**
     * @complete: Mark the request as complete.
     */
    void (*complete)(struct request *);
    /**
     * @init_hctx: Called when the block layer side of a hardware queue has
     * been set up, allowing the driver to allocate/init matching
     * structures.
     */
    int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);

    /**
     * @exit_hctx: Ditto for exit/teardown.
     */
    void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
    /**
     * @init_request: Called for every command allocated by the block layer
     * to allow the driver to set up driver specific data.
     *
     * Tag greater than or equal to queue_depth is for setting up
     * flush request.
     */
    int (*init_request)(struct blk_mq_tag_set *set, struct request *,
                        unsigned int, unsigned int);

    /**
     * @exit_request: Ditto for exit/teardown.
     */
    void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
                         unsigned int);
    /**
     * @cleanup_rq: Called before freeing one request which isn't completed
     * yet, and usually for freeing the driver private data.
     */
    void (*cleanup_rq)(struct request *);
    /**
     * @busy: If set, returns whether or not this queue currently is busy.
     */
    bool (*busy)(struct request_queue *);
    /**
     * @map_queues: This allows drivers specify their own queue mapping by
     * overriding the setup-time function that builds the mq_map.
     */
    int (*map_queues)(struct blk_mq_tag_set *set);
#ifdef CONFIG_BLK_DEBUG_FS
    /**
     * @show_rq: Used by the debugfs implementation to show driver-specific
     * information about a request.
     */
    void (*show_rq)(struct seq_file *m, struct request *rq);
#endif
};
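Of all these hooks only queue_rq is mandatory; the block core supplies defaults for the rest. A minimal sketch of a tag set wired to these callbacks, with a hypothetical my_queue_rq that completes requests synchronously (a real driver would issue the command to hardware here and call blk_mq_end_request() from its completion interrupt instead):

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
                                const struct blk_mq_queue_data *bd)
{
    struct request *rq = bd->rq;

    blk_mq_start_request(rq);   /* mark in-flight and arm the timeout */

    /* A real driver would kick the hardware here. */

    blk_mq_end_request(rq, BLK_STS_OK);
    return BLK_STS_OK;
}

static const struct blk_mq_ops my_mq_ops = {
    .queue_rq = my_queue_rq,
};

/* Tag set setup, done once at probe time before blk_mq_alloc_disk(). */
static int my_init_tag_set(struct blk_mq_tag_set *set)
{
    memset(set, 0, sizeof(*set));
    set->ops = &my_mq_ops;
    set->nr_hw_queues = 1;              /* single hardware queue */
    set->queue_depth = 128;             /* tags per hardware queue */
    set->numa_node = NUMA_NO_NODE;
    set->flags = BLK_MQ_F_SHOULD_MERGE;
    return blk_mq_alloc_tag_set(set);
}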