source: src/linux/universal/linux-4.9/drivers/nvme/host/nvme.h @ 31859

Last change: r31859, checked in by brainslayer, 6 weeks ago (kernel update)

File size: 9.2 KB
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>

enum {
        /*
         * Driver internal status code for commands that were cancelled due
         * to timeouts or controller shutdown.  The value is negative so
         * that it a) doesn't overlap with the unsigned hardware error codes,
         * and b) can easily be tested for.
         */
        NVME_SC_CANCELLED               = -EINTR,
};
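
/*
 * Illustrative sketch (not from this file): because the value is negative,
 * callers can tell an internal cancellation apart from any hardware status
 * code with a simple signed test. In this kernel the PCIe timeout handler
 * stores the code in req->errors, so a check looks roughly like:
 *
 *      int status = req->errors;
 *
 *      if (status < 0)
 *              return status;  // cancelled internally, not a device error
 */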

extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ)

extern unsigned char admin_timeout;
#define ADMIN_TIMEOUT   (admin_timeout * HZ)

extern unsigned char shutdown_timeout;
#define SHUTDOWN_TIMEOUT        (shutdown_timeout * HZ)
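
/*
 * These externs are backed by module parameters defined in core.c; the
 * wiring there looks roughly like this (sketch, default value from the
 * 4.9 driver):
 *
 *      unsigned char nvme_io_timeout = 30;
 *      module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
 *      MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
 *
 * The macros above multiply by HZ to convert these second-granularity
 * knobs into the jiffies values the block layer expects.
 */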

#define NVME_DEFAULT_KATO       5
#define NVME_KATO_GRACE         10

extern unsigned int nvme_max_retries;

enum {
        NVME_NS_LBA             = 0,
        NVME_NS_LIGHTNVM        = 1,
};

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
        /*
         * Prefers I/O aligned to a stripe size specified in a vendor
         * specific Identify field.
         */
        NVME_QUIRK_STRIPE_SIZE                  = (1 << 0),

        /*
         * The controller doesn't handle Identify values other than 0 or 1
         * correctly.
         */
        NVME_QUIRK_IDENTIFY_CNS                 = (1 << 1),

        /*
         * The controller deterministically returns 0's on reads to discarded
         * logical blocks.
         */
        NVME_QUIRK_DISCARD_ZEROES               = (1 << 2),

        /*
         * The controller needs a delay before it starts checking device
         * readiness, which is done by reading the NVME_CSTS_RDY bit.
         */
        NVME_QUIRK_DELAY_BEFORE_CHK_RDY         = (1 << 3),
};

/* The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT         2000
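
/*
 * Sketch of how a quirk gets attached (the actual table lives in pci.c):
 * transports OR bits into nvme_ctrl.quirks, typically via PCI id table
 * driver_data, e.g.:
 *
 *      { PCI_DEVICE(0x1c58, 0x0003),   // HGST adapter
 *        .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 */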

enum nvme_ctrl_state {
        NVME_CTRL_NEW,
        NVME_CTRL_LIVE,
        NVME_CTRL_RESETTING,
        NVME_CTRL_RECONNECTING,
        NVME_CTRL_DELETING,
        NVME_CTRL_DEAD,
};
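
/*
 * Informational note (not from the original header): transitions between
 * these states are validated by nvme_change_ctrl_state() in core.c. The
 * usual lifecycle is roughly NEW -> LIVE, LIVE <-> RESETTING or
 * RECONNECTING, and any active state -> DELETING -> DEAD on teardown.
 */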

struct nvme_ctrl {
        enum nvme_ctrl_state state;
        spinlock_t lock;
        const struct nvme_ctrl_ops *ops;
        struct request_queue *admin_q;
        struct request_queue *connect_q;
        struct device *dev;
        struct kref kref;
        int instance;
        struct blk_mq_tag_set *tagset;
        struct list_head namespaces;
        struct mutex namespaces_mutex;
        struct device *device;  /* char device */
        struct list_head node;
        struct ida ns_ida;

        char name[12];
        char serial[20];
        char model[40];
        char firmware_rev[8];
        u16 cntlid;

        u32 ctrl_config;

        u32 page_size;
        u32 max_hw_sectors;
        u16 oncs;
        u16 vid;
        atomic_t abort_limit;
        u8 event_limit;
        u8 vwc;
        u32 vs;
        u32 sgls;
        u16 kas;
        unsigned int kato;
        bool subsystem;
        unsigned long quirks;
        struct work_struct scan_work;
        struct work_struct async_event_work;
        struct delayed_work ka_work;

        /* Fabrics only */
        u16 sqsize;
        u32 ioccsz;
        u32 iorcsz;
        u16 icdoff;
        u16 maxcmd;
        struct nvmf_ctrl_options *opts;
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
        struct list_head list;

        struct nvme_ctrl *ctrl;
        struct request_queue *queue;
        struct gendisk *disk;
        struct nvm_dev *ndev;
        struct kref kref;
        int instance;

        u8 eui[8];
        u8 uuid[16];

        unsigned ns_id;
        int lba_shift;
        u16 ms;
        bool ext;
        u8 pi_type;
        unsigned long flags;

#define NVME_NS_REMOVING 0
#define NVME_NS_DEAD     1

        u64 mode_select_num_blocks;
        u32 mode_select_block_len;
};

struct nvme_ctrl_ops {
        const char *name;
        struct module *module;
        bool is_fabrics;
        int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
        int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
        int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
        int (*reset_ctrl)(struct nvme_ctrl *ctrl);
        void (*free_ctrl)(struct nvme_ctrl *ctrl);
        void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
        int (*delete_ctrl)(struct nvme_ctrl *ctrl);
        const char *(*get_subsysnqn)(struct nvme_ctrl *ctrl);
        int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};
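
/*
 * Hedged sketch of a transport instance of these ops; the PCIe driver in
 * pci.c fills them with simple MMIO accessors, roughly:
 *
 *      static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
 *              .name                   = "pcie",
 *              .module                 = THIS_MODULE,
 *              .reg_read32             = nvme_pci_reg_read32,
 *              .reg_write32            = nvme_pci_reg_write32,
 *              .reg_read64             = nvme_pci_reg_read64,
 *              .reset_ctrl             = nvme_pci_reset_ctrl,
 *              .free_ctrl              = nvme_pci_free_ctrl,
 *              .submit_async_event     = nvme_pci_submit_async_event,
 *      };
 *
 * Fabrics transports additionally set is_fabrics and the fabrics-only
 * callbacks (delete_ctrl, get_subsysnqn, get_address).
 */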

static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
        u32 val = 0;

        if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
                return false;
        return val & NVME_CSTS_RDY;
}

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
        if (!ctrl->subsystem)
                return -ENOTTY;
        /* 0x4E564D65 is ASCII "NVMe", the NSSR value defined by the spec */
        return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
        return (sector >> (ns->lba_shift - 9));
}
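
/*
 * Worked example: for a namespace formatted with 4KiB logical blocks,
 * lba_shift is 12, so a 512-byte block-layer sector is shifted right by
 * (12 - 9) = 3 bits; sector 80 therefore maps to device LBA 10.
 */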

static inline unsigned nvme_map_len(struct request *rq)
{
        if (req_op(rq) == REQ_OP_DISCARD)
                return sizeof(struct nvme_dsm_range);
        else
                return blk_rq_bytes(rq);
}

static inline void nvme_cleanup_cmd(struct request *req)
{
        if (req_op(req) == REQ_OP_DISCARD)
                kfree(req->completion_data);
}

static inline int nvme_error_status(u16 status)
{
        switch (status & 0x7ff) {
        case NVME_SC_SUCCESS:
                return 0;
        case NVME_SC_CAP_EXCEEDED:
                return -ENOSPC;
        default:
                return -EIO;
        }
}

static inline bool nvme_req_needs_retry(struct request *req, u16 status)
{
        return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
                (jiffies - req->start_time) < req->timeout &&
                req->retries < nvme_max_retries;
}

void nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
                const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_queue_scan(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

#define NVME_NR_AERS    1
void nvme_complete_async_event(struct nvme_ctrl *ctrl,
                struct nvme_completion *cqe);
void nvme_queue_async_events(struct nvme_ctrl *ctrl);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
                struct nvme_command *cmd, unsigned int flags, int qid);
void nvme_requeue_req(struct request *req);
int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                struct nvme_completion *cqe, void *buffer, unsigned bufflen,
                unsigned timeout, int qid, int at_head, int flags);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
                void __user *ubuffer, unsigned bufflen, u32 *result,
                unsigned timeout);
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
                void __user *ubuffer, unsigned bufflen,
                void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
                u32 *result, unsigned timeout);
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
                struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
                      void *buffer, size_t buflen, u32 *result);
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
                      void *buffer, size_t buflen, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);

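/*
 * Hedged usage sketch for the synchronous command helpers above; this is
 * approximately what nvme_identify_ctrl() does in core.c:
 *
 *      struct nvme_command c = { };
 *      int error;
 *
 *      c.identify.opcode = nvme_admin_identify;
 *      c.identify.cns = cpu_to_le32(1);        // CNS 1: Identify Controller
 *
 *      *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
 *      if (!*id)
 *              return -ENOMEM;
 *
 *      error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id,
 *                      sizeof(struct nvme_id_ctrl));
 */
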
struct sg_io_hdr;

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);

#ifdef CONFIG_NVM
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node,
                      const struct attribute_group *attrs);
void nvme_nvm_unregister(struct nvme_ns *ns);

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
        if (dev->type->devnode)
                return dev_to_disk(dev)->private_data;

        return (container_of(dev, struct nvm_dev, dev))->private_data;
}
#else
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
                                    int node,
                                    const struct attribute_group *attrs)
{
        return 0;
}

static inline void nvme_nvm_unregister(struct nvme_ns *ns) {}

static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
        return 0;
}

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
        return dev_to_disk(dev)->private_data;
}
#endif /* CONFIG_NVM */

int __init nvme_core_init(void);
void nvme_core_exit(void);

#endif /* _NVME_H */