virtio: allow finalize_features to fail
drivers/s390/kvm/virtio_ccw.c

/*
 * ccw based virtio transport
 *
 * Copyright IBM Corp. 2012, 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/virtio_ring.h>
#include <linux/pfn.h>
#include <linux/async.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/kvm_para.h>
#include <linux/notifier.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/virtio-ccw.h>
#include <asm/isc.h>
#include <asm/airq.h>

/*
 * virtio related functions
 */

struct vq_config_block {
        __u16 index;
        __u16 num;
} __packed;

#define VIRTIO_CCW_CONFIG_SIZE 0x100
/* same as PCI config space size, should be enough for all drivers */

struct virtio_ccw_device {
        struct virtio_device vdev;
        __u8 *status;
        __u8 config[VIRTIO_CCW_CONFIG_SIZE];
        struct ccw_device *cdev;
        __u32 curr_io;
        int err;
        unsigned int revision; /* Transport revision */
        wait_queue_head_t wait_q;
        spinlock_t lock;
        struct list_head virtqueues;
        unsigned long indicators;
        unsigned long indicators2;
        struct vq_config_block *config_block;
        bool is_thinint;
        bool going_away;
        bool device_lost;
        void *airq_info;
};

struct vq_info_block_legacy {
        __u64 queue;
        __u32 align;
        __u16 index;
        __u16 num;
} __packed;

struct vq_info_block {
        __u64 desc;
        __u32 res0;
        __u16 index;
        __u16 num;
        __u64 avail;
        __u64 used;
} __packed;

struct virtio_feature_desc {
        __u32 features;
        __u8 index;
} __packed;

struct virtio_thinint_area {
        unsigned long summary_indicator;
        unsigned long indicator;
        u64 bit_nr;
        u8 isc;
} __packed;

struct virtio_rev_info {
        __u16 revision;
        __u16 length;
        __u8 data[];
};

/* the highest virtio-ccw revision we support */
#define VIRTIO_CCW_REV_MAX 1

struct virtio_ccw_vq_info {
        struct virtqueue *vq;
        int num;
        void *queue;
        union {
                struct vq_info_block s;
                struct vq_info_block_legacy l;
        } *info_block;
        int bit_nr;
        struct list_head node;
        long cookie;
};

#define VIRTIO_AIRQ_ISC IO_SCH_ISC /* inherit from subchannel */

#define VIRTIO_IV_BITS (L1_CACHE_BYTES * 8)
#define MAX_AIRQ_AREAS 20

static int virtio_ccw_use_airq = 1;

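/*
 * Adapter ("thin") interrupts: each airq_info provides one summary
 * indicator byte plus an indicator bit vector.  Every virtqueue that uses
 * adapter interrupts gets one bit in the vector, and the virtqueue pointer
 * is stored alongside that bit so the handler can kick the right vring
 * directly.
 */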
struct airq_info {
        rwlock_t lock;
        u8 summary_indicator;
        struct airq_struct airq;
        struct airq_iv *aiv;
};
static struct airq_info *airq_areas[MAX_AIRQ_AREAS];

#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
#define CCW_CMD_SET_CONF_IND 0x53
#define CCW_CMD_READ_FEAT 0x12
#define CCW_CMD_WRITE_FEAT 0x11
#define CCW_CMD_READ_CONF 0x22
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32
#define CCW_CMD_SET_IND_ADAPTER 0x73
#define CCW_CMD_SET_VIRTIO_REV 0x83

#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
#define VIRTIO_CCW_DOING_RESET 0x00040000
#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
#define VIRTIO_CCW_DOING_SET_IND 0x01000000
#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000

static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
{
        return container_of(vdev, struct virtio_ccw_device, vdev);
}

static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
{
        unsigned long i, flags;

        write_lock_irqsave(&info->lock, flags);
        for (i = 0; i < airq_iv_end(info->aiv); i++) {
                if (vq == (void *)airq_iv_get_ptr(info->aiv, i)) {
                        airq_iv_free_bit(info->aiv, i);
                        airq_iv_set_ptr(info->aiv, i, 0);
                        break;
                }
        }
        write_unlock_irqrestore(&info->lock, flags);
}

static void virtio_airq_handler(struct airq_struct *airq)
{
        struct airq_info *info = container_of(airq, struct airq_info, airq);
        unsigned long ai;

        inc_irq_stat(IRQIO_VAI);
        read_lock(&info->lock);
        /* Walk through indicators field, summary indicator active. */
        for (ai = 0;;) {
                ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
                if (ai == -1UL)
                        break;
                vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
        }
        info->summary_indicator = 0;
        smp_wmb();
        /* Walk through indicators field, summary indicator not active. */
        for (ai = 0;;) {
                ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
                if (ai == -1UL)
                        break;
                vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
        }
        read_unlock(&info->lock);
}

static struct airq_info *new_airq_info(void)
{
        struct airq_info *info;
        int rc;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return NULL;
        rwlock_init(&info->lock);
        info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR);
        if (!info->aiv) {
                kfree(info);
                return NULL;
        }
        info->airq.handler = virtio_airq_handler;
        info->airq.lsi_ptr = &info->summary_indicator;
        info->airq.lsi_mask = 0xff;
        info->airq.isc = VIRTIO_AIRQ_ISC;
        rc = register_adapter_interrupt(&info->airq);
        if (rc) {
                airq_iv_release(info->aiv);
                kfree(info);
                return NULL;
        }
        return info;
}

static void destroy_airq_info(struct airq_info *info)
{
        if (!info)
                return;

        unregister_adapter_interrupt(&info->airq);
        airq_iv_release(info->aiv);
        kfree(info);
}

static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
                                        u64 *first, void **airq_info)
{
        int i, j;
        struct airq_info *info;
        unsigned long indicator_addr = 0;
        unsigned long bit, flags;

        for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
                if (!airq_areas[i])
                        airq_areas[i] = new_airq_info();
                info = airq_areas[i];
                if (!info)
                        return 0;
                write_lock_irqsave(&info->lock, flags);
                bit = airq_iv_alloc(info->aiv, nvqs);
                if (bit == -1UL) {
                        /* Not enough vacancies. */
                        write_unlock_irqrestore(&info->lock, flags);
                        continue;
                }
                *first = bit;
                *airq_info = info;
                indicator_addr = (unsigned long)info->aiv->vector;
                for (j = 0; j < nvqs; j++) {
                        airq_iv_set_ptr(info->aiv, bit + j,
                                        (unsigned long)vqs[j]);
                }
                write_unlock_irqrestore(&info->lock, flags);
        }
        return indicator_addr;
}

static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
{
        struct virtio_ccw_vq_info *info;

        list_for_each_entry(info, &vcdev->virtqueues, node)
                drop_airq_indicator(info->vq, vcdev->airq_info);
}

static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
{
        unsigned long flags;
        __u32 ret;

        spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
        if (vcdev->err)
                ret = 0;
        else
                ret = vcdev->curr_io & flag;
        spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
        return ret;
}

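/*
 * All host commands in this driver are driven through ccw_io_helper():
 * the caller prepares a single channel command word (with data below 2G)
 * and passes an intparm with one VIRTIO_CCW_DOING_* bit set so the
 * interrupt handler can tell which operation completed.  A typical caller
 * looks roughly like this (sketch, mirroring virtio_ccw_read_vq_conf()):
 *
 *        ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
 *        ccw->flags = 0;
 *        ccw->count = sizeof(struct vq_config_block);
 *        ccw->cda = (__u32)(unsigned long)vcdev->config_block;
 *        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
 *
 * ccw_io_helper() retries ccw_device_start() while the device is busy and
 * then sleeps until the matching flag is cleared by the interrupt handler.
 */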
static int ccw_io_helper(struct virtio_ccw_device *vcdev,
                         struct ccw1 *ccw, __u32 intparm)
{
        int ret;
        unsigned long flags;
        int flag = intparm & VIRTIO_CCW_INTPARM_MASK;

        do {
                spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
                ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
                if (!ret) {
                        if (!vcdev->curr_io)
                                vcdev->err = 0;
                        vcdev->curr_io |= flag;
                }
                spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
                cpu_relax();
        } while (ret == -EBUSY);
        wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
        return ret ? ret : vcdev->err;
}

static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
                                      struct ccw1 *ccw)
{
        int ret;
        unsigned long *indicatorp = NULL;
        struct virtio_thinint_area *thinint_area = NULL;
        struct airq_info *airq_info = vcdev->airq_info;

        if (vcdev->is_thinint) {
                thinint_area = kzalloc(sizeof(*thinint_area),
                                       GFP_DMA | GFP_KERNEL);
                if (!thinint_area)
                        return;
                thinint_area->summary_indicator =
                        (unsigned long) &airq_info->summary_indicator;
                thinint_area->isc = VIRTIO_AIRQ_ISC;
                ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
                ccw->count = sizeof(*thinint_area);
                ccw->cda = (__u32)(unsigned long) thinint_area;
        } else {
                indicatorp = kmalloc(sizeof(&vcdev->indicators),
                                     GFP_DMA | GFP_KERNEL);
                if (!indicatorp)
                        return;
                *indicatorp = 0;
                ccw->cmd_code = CCW_CMD_SET_IND;
                ccw->count = sizeof(vcdev->indicators);
                ccw->cda = (__u32)(unsigned long) indicatorp;
        }
        /* Deregister indicators from host. */
        vcdev->indicators = 0;
        ccw->flags = 0;
        ret = ccw_io_helper(vcdev, ccw,
                            vcdev->is_thinint ?
                            VIRTIO_CCW_DOING_SET_IND_ADAPTER :
                            VIRTIO_CCW_DOING_SET_IND);
        if (ret && (ret != -ENODEV))
                dev_info(&vcdev->cdev->dev,
                         "Failed to deregister indicators (%d)\n", ret);
        else if (vcdev->is_thinint)
                virtio_ccw_drop_indicators(vcdev);
        kfree(indicatorp);
        kfree(thinint_area);
}

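/*
 * Queue notifications ("kicks") use the diagnose 0x500 hypercall with
 * function code KVM_S390_VIRTIO_CCW_NOTIFY: the subchannel id goes in
 * register 2, the queue index in register 3 and the host cookie in
 * register 4; the return value (the new cookie) comes back in register 2.
 */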
static inline long do_kvm_notify(struct subchannel_id schid,
                                 unsigned long queue_index,
                                 long cookie)
{
        register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
        register struct subchannel_id __schid asm("2") = schid;
        register unsigned long __index asm("3") = queue_index;
        register long __rc asm("2");
        register long __cookie asm("4") = cookie;

        asm volatile ("diag 2,4,0x500\n"
                      : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index),
                      "d"(__cookie)
                      : "memory", "cc");
        return __rc;
}

static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
{
        struct virtio_ccw_vq_info *info = vq->priv;
        struct virtio_ccw_device *vcdev;
        struct subchannel_id schid;

        vcdev = to_vc_device(info->vq->vdev);
        ccw_device_get_schid(vcdev->cdev, &schid);
        info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
        if (info->cookie < 0)
                return false;
        return true;
}

static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
                                   struct ccw1 *ccw, int index)
{
        vcdev->config_block->index = index;
        ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
        ccw->flags = 0;
        ccw->count = sizeof(struct vq_config_block);
        ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
        ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
        return vcdev->config_block->num;
}

static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
        struct virtio_ccw_vq_info *info = vq->priv;
        unsigned long flags;
        unsigned long size;
        int ret;
        unsigned int index = vq->index;

        /* Remove from our list. */
        spin_lock_irqsave(&vcdev->lock, flags);
        list_del(&info->node);
        spin_unlock_irqrestore(&vcdev->lock, flags);

        /* Release from host. */
        if (vcdev->revision == 0) {
                info->info_block->l.queue = 0;
                info->info_block->l.align = 0;
                info->info_block->l.index = index;
                info->info_block->l.num = 0;
                ccw->count = sizeof(info->info_block->l);
        } else {
                info->info_block->s.desc = 0;
                info->info_block->s.index = index;
                info->info_block->s.num = 0;
                info->info_block->s.avail = 0;
                info->info_block->s.used = 0;
                ccw->count = sizeof(info->info_block->s);
        }
        ccw->cmd_code = CCW_CMD_SET_VQ;
        ccw->flags = 0;
        ccw->cda = (__u32)(unsigned long)(info->info_block);
        ret = ccw_io_helper(vcdev, ccw,
                            VIRTIO_CCW_DOING_SET_VQ | index);
        /*
         * -ENODEV isn't considered an error: The device is gone anyway.
         * This may happen on device detach.
         */
        if (ret && (ret != -ENODEV))
                dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d",
                         ret, index);

        vring_del_virtqueue(vq);
        size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
        free_pages_exact(info->queue, size);
        kfree(info->info_block);
        kfree(info);
}

static void virtio_ccw_del_vqs(struct virtio_device *vdev)
{
        struct virtqueue *vq, *n;
        struct ccw1 *ccw;
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        virtio_ccw_drop_indicator(vcdev, ccw);

        list_for_each_entry_safe(vq, n, &vdev->vqs, list)
                virtio_ccw_del_vq(vq, ccw);

        kfree(ccw);
}

static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
                                             int i, vq_callback_t *callback,
                                             const char *name,
                                             struct ccw1 *ccw)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        int err;
        struct virtqueue *vq = NULL;
        struct virtio_ccw_vq_info *info;
        unsigned long size = 0; /* silence the compiler */
        unsigned long flags;

        /* Allocate queue. */
        info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
        if (!info) {
                dev_warn(&vcdev->cdev->dev, "no info\n");
                err = -ENOMEM;
                goto out_err;
        }
        info->info_block = kzalloc(sizeof(*info->info_block),
                                   GFP_DMA | GFP_KERNEL);
        if (!info->info_block) {
                dev_warn(&vcdev->cdev->dev, "no info block\n");
                err = -ENOMEM;
                goto out_err;
        }
        info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
        size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
        info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
        if (info->queue == NULL) {
                dev_warn(&vcdev->cdev->dev, "no queue\n");
                err = -ENOMEM;
                goto out_err;
        }

        vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
                                 true, info->queue, virtio_ccw_kvm_notify,
                                 callback, name);
        if (!vq) {
                /* For now, we fail if we can't get the requested size. */
                dev_warn(&vcdev->cdev->dev, "no vq\n");
                err = -ENOMEM;
                goto out_err;
        }

        /* Register it with the host. */
        if (vcdev->revision == 0) {
                info->info_block->l.queue = (__u64)info->queue;
                info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
                info->info_block->l.index = i;
                info->info_block->l.num = info->num;
                ccw->count = sizeof(info->info_block->l);
        } else {
                info->info_block->s.desc = (__u64)info->queue;
                info->info_block->s.index = i;
                info->info_block->s.num = info->num;
                info->info_block->s.avail = (__u64)virtqueue_get_avail(vq);
                info->info_block->s.used = (__u64)virtqueue_get_used(vq);
                ccw->count = sizeof(info->info_block->s);
        }
        ccw->cmd_code = CCW_CMD_SET_VQ;
        ccw->flags = 0;
        ccw->cda = (__u32)(unsigned long)(info->info_block);
        err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
        if (err) {
                dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
                goto out_err;
        }

        info->vq = vq;
        vq->priv = info;

        /* Save it to our list. */
        spin_lock_irqsave(&vcdev->lock, flags);
        list_add(&info->node, &vcdev->virtqueues);
        spin_unlock_irqrestore(&vcdev->lock, flags);

        return vq;

out_err:
        if (vq)
                vring_del_virtqueue(vq);
        if (info) {
                if (info->queue)
                        free_pages_exact(info->queue, size);
                kfree(info->info_block);
        }
        kfree(info);
        return ERR_PTR(err);
}

static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
                                           struct virtqueue *vqs[], int nvqs,
                                           struct ccw1 *ccw)
{
        int ret;
        struct virtio_thinint_area *thinint_area = NULL;
        struct airq_info *info;

        thinint_area = kzalloc(sizeof(*thinint_area), GFP_DMA | GFP_KERNEL);
        if (!thinint_area) {
                ret = -ENOMEM;
                goto out;
        }
        /* Try to get an indicator. */
        thinint_area->indicator = get_airq_indicator(vqs, nvqs,
                                                     &thinint_area->bit_nr,
                                                     &vcdev->airq_info);
        if (!thinint_area->indicator) {
                ret = -ENOSPC;
                goto out;
        }
        info = vcdev->airq_info;
        thinint_area->summary_indicator =
                (unsigned long) &info->summary_indicator;
        thinint_area->isc = VIRTIO_AIRQ_ISC;
        ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
        ccw->flags = CCW_FLAG_SLI;
        ccw->count = sizeof(*thinint_area);
        ccw->cda = (__u32)(unsigned long)thinint_area;
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
        if (ret) {
                if (ret == -EOPNOTSUPP) {
                        /*
                         * The host does not support adapter interrupts
                         * for virtio-ccw, stop trying.
                         */
                        virtio_ccw_use_airq = 0;
                        pr_info("Adapter interrupts unsupported on host\n");
                } else
                        dev_warn(&vcdev->cdev->dev,
                                 "enabling adapter interrupts = %d\n", ret);
                virtio_ccw_drop_indicators(vcdev);
        }
out:
        kfree(thinint_area);
        return ret;
}

static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                               struct virtqueue *vqs[],
                               vq_callback_t *callbacks[],
                               const char *names[])
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        unsigned long *indicatorp = NULL;
        int ret, i;
        struct ccw1 *ccw;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return -ENOMEM;

        for (i = 0; i < nvqs; ++i) {
                vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
                                             ccw);
                if (IS_ERR(vqs[i])) {
                        ret = PTR_ERR(vqs[i]);
                        vqs[i] = NULL;
                        goto out;
                }
        }
        ret = -ENOMEM;
        /* We need a data area under 2G to communicate. */
        indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL);
        if (!indicatorp)
                goto out;
        *indicatorp = (unsigned long) &vcdev->indicators;
        if (vcdev->is_thinint) {
                ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
                if (ret)
                        /* no error, just fall back to legacy interrupts */
                        vcdev->is_thinint = 0;
        }
        if (!vcdev->is_thinint) {
                /* Register queue indicators with host. */
                vcdev->indicators = 0;
                ccw->cmd_code = CCW_CMD_SET_IND;
                ccw->flags = 0;
                ccw->count = sizeof(vcdev->indicators);
                ccw->cda = (__u32)(unsigned long) indicatorp;
                ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
                if (ret)
                        goto out;
        }
        /* Register indicators2 with host for config changes */
        *indicatorp = (unsigned long) &vcdev->indicators2;
        vcdev->indicators2 = 0;
        ccw->cmd_code = CCW_CMD_SET_CONF_IND;
        ccw->flags = 0;
        ccw->count = sizeof(vcdev->indicators2);
        ccw->cda = (__u32)(unsigned long) indicatorp;
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
        if (ret)
                goto out;

        kfree(indicatorp);
        kfree(ccw);
        return 0;
out:
        kfree(indicatorp);
        kfree(ccw);
        virtio_ccw_del_vqs(vdev);
        return ret;
}

static void virtio_ccw_reset(struct virtio_device *vdev)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct ccw1 *ccw;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        /* Zero status bits. */
        *vcdev->status = 0;

        /* Send a reset ccw on device. */
        ccw->cmd_code = CCW_CMD_VDEV_RESET;
        ccw->flags = 0;
        ccw->count = 0;
        ccw->cda = 0;
        ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
        kfree(ccw);
}

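/*
 * Feature bits are exchanged with the host in 32-bit chunks via a
 * virtio_feature_desc: index 0 carries bits 0-31, index 1 carries bits
 * 32-63.  Legacy (revision 0) devices only have the first 32 bits.
 */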
static u64 virtio_ccw_get_features(struct virtio_device *vdev)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct virtio_feature_desc *features;
        int ret;
        u64 rc;
        struct ccw1 *ccw;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return 0;

        features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
        if (!features) {
                rc = 0;
                goto out_free;
        }
        /* Read the feature bits from the host. */
        features->index = 0;
        ccw->cmd_code = CCW_CMD_READ_FEAT;
        ccw->flags = 0;
        ccw->count = sizeof(*features);
        ccw->cda = (__u32)(unsigned long)features;
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
        if (ret) {
                rc = 0;
                goto out_free;
        }

        rc = le32_to_cpu(features->features);

        if (vcdev->revision == 0)
                goto out_free;

        /* Read second half of the feature bits from the host. */
        features->index = 1;
        ccw->cmd_code = CCW_CMD_READ_FEAT;
        ccw->flags = 0;
        ccw->count = sizeof(*features);
        ccw->cda = (__u32)(unsigned long)features;
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
        if (ret == 0)
                rc |= (u64)le32_to_cpu(features->features) << 32;

out_free:
        kfree(features);
        kfree(ccw);
        return rc;
}

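/*
 * Write the negotiated feature bits back to the host.  As above, the
 * second 32-bit half is only written for non-legacy (revision >= 1)
 * devices.  Allocation or I/O failures are not propagated here; the
 * function currently always returns 0.
 */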
static int virtio_ccw_finalize_features(struct virtio_device *vdev)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct virtio_feature_desc *features;
        struct ccw1 *ccw;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return 0;

        features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
        if (!features)
                goto out_free;

        /* Give virtio_ring a chance to accept features. */
        vring_transport_features(vdev);

        features->index = 0;
        features->features = cpu_to_le32((u32)vdev->features);
        /* Write the first half of the feature bits to the host. */
        ccw->cmd_code = CCW_CMD_WRITE_FEAT;
        ccw->flags = 0;
        ccw->count = sizeof(*features);
        ccw->cda = (__u32)(unsigned long)features;
        ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);

        if (vcdev->revision == 0)
                goto out_free;

        features->index = 1;
        features->features = cpu_to_le32(vdev->features >> 32);
        /* Write the second half of the feature bits to the host. */
        ccw->cmd_code = CCW_CMD_WRITE_FEAT;
        ccw->flags = 0;
        ccw->count = sizeof(*features);
        ccw->cda = (__u32)(unsigned long)features;
        ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);

out_free:
        kfree(features);
        kfree(ccw);

        return 0;
}

static void virtio_ccw_get_config(struct virtio_device *vdev,
                                  unsigned int offset, void *buf, unsigned len)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        int ret;
        struct ccw1 *ccw;
        void *config_area;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
        if (!config_area)
                goto out_free;

        /* Read the config area from the host. */
        ccw->cmd_code = CCW_CMD_READ_CONF;
        ccw->flags = 0;
        ccw->count = offset + len;
        ccw->cda = (__u32)(unsigned long)config_area;
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
        if (ret)
                goto out_free;

        memcpy(vcdev->config, config_area, sizeof(vcdev->config));
        memcpy(buf, &vcdev->config[offset], len);

out_free:
        kfree(config_area);
        kfree(ccw);
}

static void virtio_ccw_set_config(struct virtio_device *vdev,
                                  unsigned int offset, const void *buf,
                                  unsigned len)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct ccw1 *ccw;
        void *config_area;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
        if (!config_area)
                goto out_free;

        memcpy(&vcdev->config[offset], buf, len);
        /* Write the config area to the host. */
        memcpy(config_area, vcdev->config, sizeof(vcdev->config));
        ccw->cmd_code = CCW_CMD_WRITE_CONF;
        ccw->flags = 0;
        ccw->count = offset + len;
        ccw->cda = (__u32)(unsigned long)config_area;
        ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);

out_free:
        kfree(config_area);
        kfree(ccw);
}

static u8 virtio_ccw_get_status(struct virtio_device *vdev)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);

        return *vcdev->status;
}

static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        u8 old_status = *vcdev->status;
        struct ccw1 *ccw;
        int ret;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        /* Write the status to the host. */
        *vcdev->status = status;
        ccw->cmd_code = CCW_CMD_WRITE_STATUS;
        ccw->flags = 0;
        ccw->count = sizeof(status);
        ccw->cda = (__u32)(unsigned long)vcdev->status;
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
        /* Write failed? We assume status is unchanged. */
        if (ret)
                *vcdev->status = old_status;
        kfree(ccw);
}

static struct virtio_config_ops virtio_ccw_config_ops = {
        .get_features = virtio_ccw_get_features,
        .finalize_features = virtio_ccw_finalize_features,
        .get = virtio_ccw_get_config,
        .set = virtio_ccw_set_config,
        .get_status = virtio_ccw_get_status,
        .set_status = virtio_ccw_set_status,
        .reset = virtio_ccw_reset,
        .find_vqs = virtio_ccw_find_vqs,
        .del_vqs = virtio_ccw_del_vqs,
};


/*
 * ccw bus driver related functions
 */

static void virtio_ccw_release_dev(struct device *_d)
{
        struct virtio_device *dev = container_of(_d, struct virtio_device,
                                                 dev);
        struct virtio_ccw_device *vcdev = to_vc_device(dev);

        kfree(vcdev->status);
        kfree(vcdev->config_block);
        kfree(vcdev);
}

static int irb_is_error(struct irb *irb)
{
        if (scsw_cstat(&irb->scsw) != 0)
                return 1;
        if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
                return 1;
        if (scsw_cc(&irb->scsw) != 0)
                return 1;
        return 0;
}

static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
                                              int index)
{
        struct virtio_ccw_vq_info *info;
        unsigned long flags;
        struct virtqueue *vq;

        vq = NULL;
        spin_lock_irqsave(&vcdev->lock, flags);
        list_for_each_entry(info, &vcdev->virtqueues, node) {
                if (info->vq->index == index) {
                        vq = info->vq;
                        break;
                }
        }
        spin_unlock_irqrestore(&vcdev->lock, flags);
        return vq;
}

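/*
 * Interrupt handler: completes the channel program whose VIRTIO_CCW_DOING_*
 * bit is encoded in intparm, translates unit checks into an error code for
 * the waiter in ccw_io_helper(), kicks every vring whose classic indicator
 * bit is set, and forwards config-change notifications signalled via
 * indicators2.
 */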
static void virtio_ccw_int_handler(struct ccw_device *cdev,
                                   unsigned long intparm,
                                   struct irb *irb)
{
        __u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
        struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
        int i;
        struct virtqueue *vq;

        if (!vcdev)
                return;
        /* Check if it's a notification from the host. */
        if ((intparm == 0) &&
            (scsw_stctl(&irb->scsw) ==
             (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
                /* OK */
        }
        if (irb_is_error(irb)) {
                /* Command reject? */
                if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
                    (irb->ecw[0] & SNS0_CMD_REJECT))
                        vcdev->err = -EOPNOTSUPP;
                else
                        /* Map everything else to -EIO. */
                        vcdev->err = -EIO;
        }
        if (vcdev->curr_io & activity) {
                switch (activity) {
                case VIRTIO_CCW_DOING_READ_FEAT:
                case VIRTIO_CCW_DOING_WRITE_FEAT:
                case VIRTIO_CCW_DOING_READ_CONFIG:
                case VIRTIO_CCW_DOING_WRITE_CONFIG:
                case VIRTIO_CCW_DOING_WRITE_STATUS:
                case VIRTIO_CCW_DOING_SET_VQ:
                case VIRTIO_CCW_DOING_SET_IND:
                case VIRTIO_CCW_DOING_SET_CONF_IND:
                case VIRTIO_CCW_DOING_RESET:
                case VIRTIO_CCW_DOING_READ_VQ_CONF:
                case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
                case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
                        vcdev->curr_io &= ~activity;
                        wake_up(&vcdev->wait_q);
                        break;
                default:
                        /* don't know what to do... */
                        dev_warn(&cdev->dev, "Suspicious activity '%08x'\n",
                                 activity);
                        WARN_ON(1);
                        break;
                }
        }
        for_each_set_bit(i, &vcdev->indicators,
                         sizeof(vcdev->indicators) * BITS_PER_BYTE) {
                /* The bit clear must happen before the vring kick. */
                clear_bit(i, &vcdev->indicators);
                barrier();
                vq = virtio_ccw_vq_by_ind(vcdev, i);
                vring_interrupt(0, vq);
        }
        if (test_bit(0, &vcdev->indicators2)) {
                virtio_config_changed(&vcdev->vdev);
                clear_bit(0, &vcdev->indicators2);
        }
}

/*
 * We usually want to autoonline all devices, but give the admin
 * a way to exempt devices from this.
 */
#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
                     (8*sizeof(long)))
static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];

static char *no_auto = "";

module_param(no_auto, charp, 0444);
MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");

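/*
 * no_auto takes a comma-separated list of bus ids or bus id ranges in
 * cssid.ssid.devno notation (see parse_busid() below), for example
 * (made-up values): no_auto=0.0.1234,0.1.0000-0.1.00ff
 * Matching devices are exempted from automatic onlining.
 */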
static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
{
        struct ccw_dev_id id;

        ccw_device_get_id(cdev, &id);
        if (test_bit(id.devno, devs_no_auto[id.ssid]))
                return 0;
        return 1;
}

static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
{
        struct ccw_device *cdev = data;
        int ret;

        ret = ccw_device_set_online(cdev);
        if (ret)
                dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
}

static int virtio_ccw_probe(struct ccw_device *cdev)
{
        cdev->handler = virtio_ccw_int_handler;

        if (virtio_ccw_check_autoonline(cdev))
                async_schedule(virtio_ccw_auto_online, cdev);
        return 0;
}

static struct virtio_ccw_device *virtio_grab_drvdata(struct ccw_device *cdev)
{
        unsigned long flags;
        struct virtio_ccw_device *vcdev;

        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
        vcdev = dev_get_drvdata(&cdev->dev);
        if (!vcdev || vcdev->going_away) {
                spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
                return NULL;
        }
        vcdev->going_away = true;
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
        return vcdev;
}

static void virtio_ccw_remove(struct ccw_device *cdev)
{
        unsigned long flags;
        struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);

        if (vcdev && cdev->online) {
                if (vcdev->device_lost)
                        virtio_break_device(&vcdev->vdev);
                unregister_virtio_device(&vcdev->vdev);
                spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
                dev_set_drvdata(&cdev->dev, NULL);
                spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
        }
        cdev->handler = NULL;
}

static int virtio_ccw_offline(struct ccw_device *cdev)
{
        unsigned long flags;
        struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);

        if (!vcdev)
                return 0;
        if (vcdev->device_lost)
                virtio_break_device(&vcdev->vdev);
        unregister_virtio_device(&vcdev->vdev);
        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
        dev_set_drvdata(&cdev->dev, NULL);
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
        return 0;
}

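/*
 * Transport revision negotiation: start at VIRTIO_CCW_REV_MAX and step
 * down on each command reject (-EOPNOTSUPP).  A host that rejects even
 * revision 0 does not know the SET_VIRTIO_REV command at all and is
 * treated as a legacy host.
 */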
static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
{
        struct virtio_rev_info *rev;
        struct ccw1 *ccw;
        int ret;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return -ENOMEM;
        rev = kzalloc(sizeof(*rev), GFP_DMA | GFP_KERNEL);
        if (!rev) {
                kfree(ccw);
                return -ENOMEM;
        }

        /* Set transport revision */
        ccw->cmd_code = CCW_CMD_SET_VIRTIO_REV;
        ccw->flags = 0;
        ccw->count = sizeof(*rev);
        ccw->cda = (__u32)(unsigned long)rev;

        vcdev->revision = VIRTIO_CCW_REV_MAX;
        do {
                rev->revision = vcdev->revision;
                /* none of our supported revisions carry payload */
                rev->length = 0;
                ret = ccw_io_helper(vcdev, ccw,
                                    VIRTIO_CCW_DOING_SET_VIRTIO_REV);
                if (ret == -EOPNOTSUPP) {
                        if (vcdev->revision == 0)
                                /*
                                 * The host device does not support setting
                                 * the revision: let's operate it in legacy
                                 * mode.
                                 */
                                ret = 0;
                        else
                                vcdev->revision--;
                }
        } while (ret == -EOPNOTSUPP);

        kfree(ccw);
        kfree(rev);
        return ret;
}

static int virtio_ccw_online(struct ccw_device *cdev)
{
        int ret;
        struct virtio_ccw_device *vcdev;
        unsigned long flags;

        vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
        if (!vcdev) {
                dev_warn(&cdev->dev, "Could not get memory for virtio\n");
                ret = -ENOMEM;
                goto out_free;
        }
        vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
                                      GFP_DMA | GFP_KERNEL);
        if (!vcdev->config_block) {
                ret = -ENOMEM;
                goto out_free;
        }
        vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
        if (!vcdev->status) {
                ret = -ENOMEM;
                goto out_free;
        }

        vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */

        vcdev->vdev.dev.parent = &cdev->dev;
        vcdev->vdev.dev.release = virtio_ccw_release_dev;
        vcdev->vdev.config = &virtio_ccw_config_ops;
        vcdev->cdev = cdev;
        init_waitqueue_head(&vcdev->wait_q);
        INIT_LIST_HEAD(&vcdev->virtqueues);
        spin_lock_init(&vcdev->lock);

        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
        dev_set_drvdata(&cdev->dev, vcdev);
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
        vcdev->vdev.id.vendor = cdev->id.cu_type;
        vcdev->vdev.id.device = cdev->id.cu_model;

        if (virtio_device_is_legacy_only(vcdev->vdev.id)) {
                vcdev->revision = 0;
        } else {
                ret = virtio_ccw_set_transport_rev(vcdev);
                if (ret)
                        goto out_free;
        }

        ret = register_virtio_device(&vcdev->vdev);
        if (ret) {
                dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
                         ret);
                goto out_put;
        }
        return 0;
out_put:
        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
        dev_set_drvdata(&cdev->dev, NULL);
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
        put_device(&vcdev->vdev.dev);
        return ret;
out_free:
        if (vcdev) {
                kfree(vcdev->status);
                kfree(vcdev->config_block);
        }
        kfree(vcdev);
        return ret;
}

static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
{
        int rc;
        struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);

        /*
         * Make sure vcdev is set
         * i.e. set_offline/remove callback not already running
         */
        if (!vcdev)
                return NOTIFY_DONE;

        switch (event) {
        case CIO_GONE:
                vcdev->device_lost = true;
                rc = NOTIFY_DONE;
                break;
        default:
                rc = NOTIFY_DONE;
                break;
        }
        return rc;
}

static struct ccw_device_id virtio_ids[] = {
        { CCW_DEVICE(0x3832, 0) },
        {},
};
MODULE_DEVICE_TABLE(ccw, virtio_ids);

static struct ccw_driver virtio_ccw_driver = {
        .driver = {
                .owner = THIS_MODULE,
                .name = "virtio_ccw",
        },
        .ids = virtio_ids,
        .probe = virtio_ccw_probe,
        .remove = virtio_ccw_remove,
        .set_offline = virtio_ccw_offline,
        .set_online = virtio_ccw_online,
        .notify = virtio_ccw_cio_notify,
        .int_class = IRQIO_VIR,
};

static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
                           int max_digit, int max_val)
{
        int diff;

        diff = 0;
        *val = 0;

        while (diff <= max_digit) {
                int value = hex_to_bin(**cp);

                if (value < 0)
                        break;
                *val = *val * 16 + value;
                (*cp)++;
                diff++;
        }

        if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
                return 1;

        return 0;
}

static int __init parse_busid(char *str, unsigned int *cssid,
                              unsigned int *ssid, unsigned int *devno)
{
        char *str_work;
        int rc, ret;

        rc = 1;

        if (*str == '\0')
                goto out;

        str_work = str;
        ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
        if (ret || (str_work[0] != '.'))
                goto out;
        str_work++;
        ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
        if (ret || (str_work[0] != '.'))
                goto out;
        str_work++;
        ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
        if (ret || (str_work[0] != '\0'))
                goto out;

        rc = 0;
out:
        return rc;
}

static void __init no_auto_parse(void)
{
        unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
        char *parm, *str;
        int rc;

        str = no_auto;
        while ((parm = strsep(&str, ","))) {
                rc = parse_busid(strsep(&parm, "-"), &from_cssid,
                                 &from_ssid, &from);
                if (rc)
                        continue;
                if (parm != NULL) {
                        rc = parse_busid(parm, &to_cssid,
                                         &to_ssid, &to);
                        if ((from_ssid > to_ssid) ||
                            ((from_ssid == to_ssid) && (from > to)))
                                rc = -EINVAL;
                } else {
                        to_cssid = from_cssid;
                        to_ssid = from_ssid;
                        to = from;
                }
                if (rc)
                        continue;
                while ((from_ssid < to_ssid) ||
                       ((from_ssid == to_ssid) && (from <= to))) {
                        set_bit(from, devs_no_auto[from_ssid]);
                        from++;
                        if (from > __MAX_SUBCHANNEL) {
                                from_ssid++;
                                from = 0;
                        }
                }
        }
}

static int __init virtio_ccw_init(void)
{
        /* parse no_auto string before we do anything further */
        no_auto_parse();
        return ccw_driver_register(&virtio_ccw_driver);
}
module_init(virtio_ccw_init);

static void __exit virtio_ccw_exit(void)
{
        int i;

        ccw_driver_unregister(&virtio_ccw_driver);
        for (i = 0; i < MAX_AIRQ_AREAS; i++)
                destroy_airq_info(airq_areas[i]);
}
module_exit(virtio_ccw_exit);