/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2. See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 */
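
/*
 * Illustrative userspace usage (a sketch, not part of this driver): a
 * process opens a /dev/bsg node and issues a synchronous SCSI INQUIRY
 * through the sg v4 interface. The device path and CDB are example
 * values; the guard/protocol fields must match what
 * bsg_validate_sgv4_hdr() below accepts.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>		// SG_IO
 *	#include <linux/bsg.h>		// struct sg_io_v4, BSG_PROTOCOL_SCSI
 *
 *	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);
 *	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	// INQUIRY
 *	unsigned char buf[96], sense[32];
 *	struct sg_io_v4 hdr;
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	hdr.guard = 'Q';
 *	hdr.protocol = BSG_PROTOCOL_SCSI;
 *	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
 *	hdr.request = (uintptr_t) cdb;
 *	hdr.request_len = sizeof(cdb);
 *	hdr.din_xferp = (uintptr_t) buf;	// data-in: device to host
 *	hdr.din_xfer_len = sizeof(buf);
 *	hdr.response = (uintptr_t) sense;	// sense buffer
 *	hdr.max_response_len = sizeof(sense);
 *	if (ioctl(fd, SG_IO, &hdr) < 0)
 *		perror("SG_IO");
 */
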
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[20];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK	= 1,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...)
#endif

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}

static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}

static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}

static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
				struct sg_io_v4 *hdr, struct bsg_device *bd,
				fmode_t has_write_perm)
{
	if (hdr->request_len > BLK_MAX_CDB) {
		rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
		if (!rq->cmd)
			return -ENOMEM;
	}

	if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;

	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
		if (blk_verify_command(rq->cmd, has_write_perm))
			return -EPERM;
	} else if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;

	rq->timeout = msecs_to_jiffies(hdr->timeout);
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;

	return 0;
}
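
/*
 * From the userspace side (illustrative): hdr.timeout is given in
 * milliseconds. A zero value falls back to the queue's sg_timeout and
 * then to BLK_DEFAULT_SG_TIMEOUT, clamped to at least
 * BLK_MIN_SG_TIMEOUT, as resolved above.
 *
 *	hdr.timeout = 30 * 1000;	// 30 second command timeout
 */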

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
	int ret = 0;

	if (hdr->guard != 'Q')
		return -EINVAL;

	switch (hdr->protocol) {
	case BSG_PROTOCOL_SCSI:
		switch (hdr->subprotocol) {
		case BSG_SUB_PROTOCOL_SCSI_CMD:
		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	*rw = hdr->dout_xfer_len ? WRITE : READ;
	return ret;
}

/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
	    u8 *sense)
{
	struct request_queue *q = bd->queue;
	struct request *rq, *next_rq = NULL;
	int ret, rw;
	unsigned int dxfer_len;
	void __user *dxferp = NULL;
	struct bsg_class_device *bcd = &q->bsg_dev;

	/* if the LLD has been removed then bsg_unregister_queue() will
	 * eventually be called and the class_dev freed, so we can no
	 * longer use this request_queue. Return "no such address".
	 */
	if (!bcd->class_dev)
		return ERR_PTR(-ENXIO);

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (IS_ERR(rq))
		return rq;
	blk_rq_set_block_pc(rq);

	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
	if (ret)
		goto out;

	if (rw == WRITE && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, READ, GFP_KERNEL);
		if (IS_ERR(next_rq)) {
			ret = PTR_ERR(next_rq);
			next_rq = NULL;
			goto out;
		}
		rq->next_rq = next_rq;
		next_rq->cmd_type = rq->cmd_type;

		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
		ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
				      hdr->din_xfer_len, GFP_KERNEL);
		if (ret)
			goto out;
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
				      GFP_KERNEL);
		if (ret)
			goto out;
	}

	rq->sense = sense;
	rq->sense_len = 0;

	return rq;
out:
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);
	if (next_rq) {
		blk_rq_unmap_user(next_rq->bio);
		blk_put_request(next_rq);
	}
	return ERR_PTR(ret);
}

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_done);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
	int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
}

static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_first_entry(&bd->done_list, struct bsg_command, list);
		list_del(&bc->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}

static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret = 0;

	dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = rq->errors & 0xff;
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (rq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
				rq->sense_len);

		ret = copy_to_user((void __user *)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	if (rq->next_rq) {
		hdr->dout_resid = rq->resid_len;
		hdr->din_resid = rq->next_rq->resid_len;
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	} else if (rq_data_dir(rq) == READ)
		hdr->din_resid = rq->resid_len;
	else
		hdr->dout_resid = rq->resid_len;

	/*
	 * If the request generated a negative error number, return it
	 * (providing we aren't already returning an error); if it's
	 * just a protocol response (i.e. non negative), that gets
	 * processed above.
	 */
	if (!ret && rq->errors < 0)
		ret = rq->errors;

	blk_rq_unmap_user(bio);
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);

	return ret;
}
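
/*
 * Illustrative userspace check of a completed sg_io_v4 (a sketch;
 * dump_sense() is a hypothetical helper, not part of this driver):
 *
 *	if (hdr.info & SG_INFO_CHECK) {
 *		// some layer reported an error; inspect the copied sense
 *		if (hdr.response_len)
 *			dump_sense((unsigned char *)(uintptr_t) hdr.response,
 *				   hdr.response_len);
 *	}
 */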

static bool bsg_complete(struct bsg_device *bd)
{
	bool ret = false;
	bool spin;

	do {
		spin_lock_irq(&bd->lock);

		BUG_ON(bd->done_cmds > bd->queued_cmds);

		/*
		 * All commands consumed.
		 */
		if (bd->done_cmds == bd->queued_cmds)
			ret = true;

		spin = !test_bit(BSG_F_BLOCK, &bd->flags);

		spin_unlock_irq(&bd->lock);
	} while (!ret && spin);

	return ret;
}

static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	/*
	 * wait for all commands to complete
	 */
	io_wait_event(bd->wq_done, bsg_complete(bd));

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}

static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request, so do it here; the async
		 * completion path cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %zu bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || err_block_err(ret))
		bytes_read = ret;

	return bytes_read;
}

static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written,
		       fmode_t has_write_perm)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		struct request_queue *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	dprintk("%s: write %zu bytes\n", bd->name, count);

	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
		return -EINVAL;

	bsg_set_block(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written,
			  file->f_mode & FMODE_WRITE);

	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || err_block_err(ret))
		bytes_written = ret;

	dprintk("%s: returning %zd\n", bd->name, bytes_written);
	return bytes_written;
}

static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}

static void bsg_kref_release_function(struct kref *kref)
{
	struct bsg_class_device *bcd =
		container_of(kref, struct bsg_class_device, ref);
	struct device *parent = bcd->parent;

	if (bcd->release)
		bcd->release(bcd->parent);

	put_device(parent);
}

static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0, do_free;
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	do_free = atomic_dec_and_test(&bd->ref_count);
	if (!do_free) {
		mutex_unlock(&bsg_mutex);
		goto out;
	}

	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * error detection is best-effort here: it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	kfree(bd);
out:
	kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
	if (do_free)
		blk_put_queue(q);
	return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif
	if (!blk_get_queue(rq))
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;

	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;

	mutex_lock(&bsg_mutex);

	hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			atomic_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	if (bcd)
		kref_get(&bcd->ref);
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (bd)
		return bd;

	bd = bsg_add_device(inode, bcd->queue, file);
	if (IS_ERR(bd))
		kref_put(&bcd->ref, bsg_kref_release_function);

	return bd;
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;

	bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds < bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}
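
/*
 * The read/write/poll methods above implement the async model: an
 * application queues commands by write()ing sg_io_v4 headers, waits
 * with poll(), and reaps completions by read()ing headers back.
 * Illustrative userspace loop (a sketch, error handling omitted):
 *
 *	struct sg_io_v4 hdr;	// filled in as for SG_IO above
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	write(fd, &hdr, sizeof(hdr));	// queue one command
 *	poll(&pfd, 1, -1);		// wait for a completion
 *	read(fd, &hdr, sizeof(hdr));	// reap the completed header
 */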

static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
		/*
		 * our own ioctls
		 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;
		int at_head;
		u8 sense[SCSI_SENSE_BUFFERSIZE];

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;

		at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
		blk_execute_rq(bd->queue, NULL, rq, at_head);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}
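
/*
 * Illustrative use of the queue-depth ioctls above (userspace sketch):
 *
 *	int depth = 128;
 *
 *	ioctl(fd, SG_SET_COMMAND_Q, &depth);	// raise from the default 64
 *	ioctl(fd, SG_GET_COMMAND_Q, &depth);	// read the value back
 */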

static const struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.unlocked_ioctl	=	bsg_ioctl,
	.owner		=	THIS_MODULE,
	.llseek		=	default_llseek,
};

void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	if (q->kobj.sd)
		sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	kref_put(&bcd->ref, bsg_kref_release_function);
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

int bsg_register_queue(struct request_queue *q, struct device *parent,
		       const char *name, void (*release)(struct device *))
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret;
	struct device *class_dev = NULL;
	const char *devname;

	if (name)
		devname = name;
	else
		devname = dev_name(parent);

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!queue_is_rq_based(q))
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
	if (ret < 0) {
		if (ret == -ENOSPC) {
			printk(KERN_ERR "bsg: too many bsg devices\n");
			ret = -EINVAL;
		}
		goto unlock;
	}

	bcd->minor = ret;
	bcd->queue = q;
	bcd->parent = get_device(parent);
	bcd->release = release;
	kref_init(&bcd->ref);
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto put_dev;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	device_unregister(class_dev);
put_dev:
	put_device(parent);
	idr_remove(&bsg_minor_idr, bcd->minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);

static struct cdev bsg_cdev;

static char *bsg_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}
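
/*
 * With the devnode callback above, a device registered as e.g.
 * "0:0:0:0" shows up as /dev/bsg/0:0:0:0 (example name; actual names
 * come from whatever transport calls bsg_register_queue()).
 */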

static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}
	bsg_class->devnode = bsg_devnode;

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);