/*
 * The nvme_iod describes the data in an I/O, including the list of PRP
 * entries. You can't see it in this data structure because C doesn't let
 * me express that. Use nvme_init_iod to ensure there's enough space
 * allocated to store the PRP list.
 */
struct nvme_iod {
	struct nvme_request req;	// we will come back to this later
	struct nvme_queue *nvmeq;
	bool use_sgl;
	int aborted;
	int npages;		/* In the PRP list. 0 means small pool in use */
	int nents;		/* Used in scatterlist */
	int length;		/* Of data, in bytes */
	dma_addr_t first_dma;
	struct scatterlist meta_sg;	/* metadata requires single contiguous buffer */
	struct scatterlist *sg;
	struct scatterlist inline_sg[0];
};
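The trailing PRP list and inline scatterlist that the comment alludes to rely on the usual C idiom of allocating extra memory past the end of the struct and reaching it through a zero-length (or flexible) array member such as inline_sg[0]. A minimal user-space sketch of that pattern (the names here are illustrative only, not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct iod_like {
	int nents;
	size_t seg_len[];	/* trailing array, same idea as inline_sg[0] */
};

int main(void)
{
	int nseg = 4;
	/* one allocation: the header plus room for nseg trailing entries */
	struct iod_like *iod = malloc(sizeof(*iod) + nseg * sizeof(iod->seg_len[0]));

	if (!iod)
		return 1;
	iod->nents = nseg;
	printf("allocated %zu bytes for %d segments\n",
	       sizeof(*iod) + nseg * sizeof(iod->seg_len[0]), iod->nents);
	free(iod);
	return 0;
}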
/**
 * blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be casted
 *
 * Return: pointer to the PDU
 *
 * Driver command data is immediately after the request. So add request to get
 * the PDU.
 */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
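In the NVMe driver the PDU is exactly the struct nvme_iod shown above, so both the submission and completion paths start by recovering it from the request with this helper. A rough sketch of that use, loosely following the start of nvme_queue_rq (the field initializations here are illustrative):

static blk_status_t nvme_queue_rq_sketch(struct request *req)
{
	/* the per-request nvme_iod lives immediately after *req */
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	iod->aborted = 0;
	iod->npages = -1;	/* no PRP list allocated yet */
	iod->nents = 0;
	/* ... build the NVMe command and DMA-map the data ... */
	return BLK_STS_OK;
}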
static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}

/* Update dbbuf and return true if an MMIO is required */
static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
					      volatile u32 *dbbuf_ei)
{
	if (dbbuf_db) {
		u16 old_value;

		/*
		 * Ensure that the queue is written before updating
		 * the doorbell in memory
		 */
		wmb();

		old_value = *dbbuf_db;
		*dbbuf_db = value;

		/*
		 * Ensure that the doorbell is updated before reading the event
		 * index from memory. The controller needs to provide similar
		 * ordering to ensure the event index is updated before reading
		 * the doorbell.
		 */
		mb();

		if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
			return false;
	}

	return true;
}
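nvme_dbbuf_need_event() uses unsigned wrap-around to test whether the controller's event index falls in the half-open range (old, new]: with old = 3, new = 7 and event_idx = 5, (u16)(7 - 5 - 1) = 1 < (u16)(7 - 3) = 4, so the doorbell must be written; with event_idx = 9 the left side wraps to 65533 and the test fails. The caller only performs the expensive MMIO write when the check says so. The CQ-side doorbell helper in the same file uses it roughly like this (a sketch, with field names as they appear in the nvme_queue of this kernel version):

static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
{
	u16 head = nvmeq->cq_head;

	/* skip the MMIO write when updating the shadow doorbell is enough */
	if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
					      nvmeq->dbbuf_cq_ei))
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
}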
// The unlikely() branches can be skipped on a first read.
static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
{
	volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
	struct request *req;

	if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
		dev_warn(nvmeq->dev->ctrl.device,
			"invalid id %d completed on queue %d\n",
			cqe->command_id, le16_to_cpu(cqe->sq_id));
		return;
	}

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts. We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvmeq->qid == 0 &&
			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
		nvme_complete_async_event(&nvmeq->dev->ctrl,
				cqe->status, &cqe->result);
		return;
	}

	req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);	// look up the request by its tag (command_id)
	nvme_end_request(req, cqe->status, cqe->result);
}
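nvme_handle_cqe() is called once for every new entry found while scanning the completion queue. A simplified sketch of that scan (the in-tree nvme_process_cq() additionally records start/end indices and rings the CQ doorbell): the phase bit in cqe->status tells the driver whether an entry was written during the current lap around the ring.

static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
{
	/* an entry is new if its phase bit matches the expected phase */
	return (le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) ==
			nvmeq->cq_phase;
}

static void nvme_process_cq_sketch(struct nvme_queue *nvmeq)
{
	while (nvme_cqe_pending(nvmeq)) {
		nvme_handle_cqe(nvmeq, nvmeq->cq_head);
		if (++nvmeq->cq_head == nvmeq->q_depth) {
			nvmeq->cq_head = 0;
			nvmeq->cq_phase = !nvmeq->cq_phase;	/* wrapped: flip expected phase */
		}
	}
}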
blk_mq_start_request() - must be called before starting to process a request; in the NVMe driver it is called from nvme_queue_rq().
blk_mq_requeue_request() - re-sends the request to the queue.
blk_mq_end_request() - ends request processing and notifies the upper layers; in the NVMe driver it is called from nvme_complete_rq(). Where the start/end calls sit in the request lifecycle is sketched below.
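A minimal sketch of that lifecycle in a blk-mq driver (hypothetical my_* names, loosely modeled on the nvme_queue_rq / nvme_complete_rq split; error handling and the real command setup are omitted):

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;

	blk_mq_start_request(req);		/* the request is now "in flight" */

	if (my_submit_to_hw(req))		/* hypothetical hardware-submit helper */
		return BLK_STS_RESOURCE;	/* blk-mq will retry the request later */

	return BLK_STS_OK;
}

/* completion path, e.g. called from the driver's interrupt handler */
static void my_complete_rq(struct request *req)
{
	blk_mq_end_request(req, BLK_STS_OK);	/* notify the upper layers */
}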