[S390] zfcpdump: Do not initialize zfcpdump in kdump mode
[linux-2.6-block.git] drivers/s390/char/zcore.c
/*
 * zcore module to export memory content and register sets for creating system
 * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
 * dump format as s390 standalone dumps.
 *
 * For more information please refer to Documentation/s390/zfcpdump.txt
 *
 * Copyright IBM Corp. 2003,2008
 * Author(s): Michael Holzheu
 */

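/*
 * Typical usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	mount -t debugfs none /sys/kernel/debug
 *	dd if=/sys/kernel/debug/zcore/mem of=/mnt/dump.s390
 *
 * The copied file has the same layout as an s390 standalone dump: a 4 KB
 * dump header followed by the memory image (see zcore_read() below).
 */
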
#define KMSG_COMPONENT "zdump"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <asm/asm-offsets.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/uaccess.h>
#include <asm/debug.h>
#include <asm/processor.h>
#include <asm/irqflags.h>
#include <asm/checksum.h>
#include "sclp.h"

#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)

#define TO_USER		0
#define TO_KERNEL	1
#define CHUNK_INFO_SIZE	34 /* two 16-char hex values, each followed by a blank */

enum arch_id {
	ARCH_S390	= 0,
	ARCH_S390X	= 1,
};

/* dump system info */

struct sys_info {
	enum arch_id	 arch;
	unsigned long	 sa_base;
	u32		 sa_size;
	int		 cpu_map[NR_CPUS];
	unsigned long	 mem_size;
	struct save_area lc_mask;
};

struct ipib_info {
	unsigned long	ipib;
	u32		checksum;
} __attribute__((packed));

static struct sys_info sys_info;
static struct debug_info *zcore_dbf;
static int hsa_available;
static struct dentry *zcore_dir;
static struct dentry *zcore_file;
static struct dentry *zcore_memmap_file;
static struct dentry *zcore_reipl_file;
static struct ipl_parameter_block *ipl_block;

/*
 * Copy memory from HSA to kernel or user memory (not reentrant):
 *
 * @dest:  Kernel or user buffer where memory should be copied to
 * @src:   Start address within HSA where data should be copied
 * @count: Number of bytes to copy
 * @mode:  Either TO_KERNEL or TO_USER
 */
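/*
 * Worked example (a sketch of the logic below): with PAGE_SIZE = 0x1000,
 * copying count = 0x1800 bytes starting at src = 0x2800 first reads SDIAS
 * block 0x2800 / 0x1000 + 2 = 4 and takes offs = 0x800 bytes from offset
 * 0x800 within it, then copies one full page from block 5; nothing is left
 * for the final partial block in this case. The "+ 2" simply follows the
 * block numbering used by the sclp_sdias_copy() calls.
 */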
static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
{
	int offs, blk_num;
	static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

	if (count == 0)
		return 0;

	/* copy first block */
	offs = 0;
	if ((src % PAGE_SIZE) != 0) {
		blk_num = src / PAGE_SIZE + 2;
		if (sclp_sdias_copy(buf, blk_num, 1)) {
			TRACE("sclp_sdias_copy() failed\n");
			return -EIO;
		}
		offs = min((PAGE_SIZE - (src % PAGE_SIZE)), count);
		if (mode == TO_USER) {
			if (copy_to_user((__force __user void*) dest,
					 buf + (src % PAGE_SIZE), offs))
				return -EFAULT;
		} else
			memcpy(dest, buf + (src % PAGE_SIZE), offs);
	}
	if (offs == count)
		goto out;

	/* copy middle */
	for (; (offs + PAGE_SIZE) <= count; offs += PAGE_SIZE) {
		blk_num = (src + offs) / PAGE_SIZE + 2;
		if (sclp_sdias_copy(buf, blk_num, 1)) {
			TRACE("sclp_sdias_copy() failed\n");
			return -EIO;
		}
		if (mode == TO_USER) {
			if (copy_to_user((__force __user void*) dest + offs,
					 buf, PAGE_SIZE))
				return -EFAULT;
		} else
			memcpy(dest + offs, buf, PAGE_SIZE);
	}
	if (offs == count)
		goto out;

	/* copy last block */
	blk_num = (src + offs) / PAGE_SIZE + 2;
	if (sclp_sdias_copy(buf, blk_num, 1)) {
		TRACE("sclp_sdias_copy() failed\n");
		return -EIO;
	}
	if (mode == TO_USER) {
		if (copy_to_user((__force __user void*) dest + offs, buf,
				 count - offs))
			return -EFAULT;
	} else
		memcpy(dest + offs, buf, count - offs);
out:
	return 0;
}

static int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
{
	return memcpy_hsa((void __force *) dest, src, count, TO_USER);
}

static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
{
	return memcpy_hsa(dest, src, count, TO_KERNEL);
}

static int __init init_cpu_info(enum arch_id arch)
{
	struct save_area *sa;

	/* get info for boot cpu from lowcore, stored in the HSA */

	sa = kmalloc(sizeof(*sa), GFP_KERNEL);
	if (!sa)
		return -ENOMEM;
	if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
		TRACE("could not copy from HSA\n");
		kfree(sa);
		return -EIO;
	}
	zfcpdump_save_areas[0] = sa;
	return 0;
}

static DEFINE_MUTEX(zcore_mutex);

#define DUMP_VERSION	0x5
#define DUMP_MAGIC	0xa8190173618f23fdULL
#define DUMP_ARCH_S390X	2
#define DUMP_ARCH_S390	1
#define HEADER_SIZE	4096

/* dump header dumped according to s390 crash dump format */

struct zcore_header {
	u64 magic;
	u32 version;
	u32 header_size;
	u32 dump_level;
	u32 page_size;
	u64 mem_size;
	u64 mem_start;
	u64 mem_end;
	u32 num_pages;
	u32 pad1;
	u64 tod;
	struct cpuid cpu_id;
	u32 arch_id;
	u32 volnr;
	u32 build_arch;
	u64 rmem_size;
	u8 mvdump;
	u16 cpu_cnt;
	u16 real_cpu_cnt;
	u8 end_pad1[0x200-0x061];
	u64 mvdump_sign;
	u64 mvdump_zipl_time;
	u8 end_pad2[0x800-0x210];
	u32 lc_vec[512];
} __attribute__((packed,__aligned__(16)));

static struct zcore_header zcore_header = {
	.magic		= DUMP_MAGIC,
	.version	= DUMP_VERSION,
	.header_size	= 4096,
	.dump_level	= 0,
	.page_size	= PAGE_SIZE,
	.mem_start	= 0,
#ifdef CONFIG_64BIT
	.build_arch	= DUMP_ARCH_S390X,
#else
	.build_arch	= DUMP_ARCH_S390,
#endif
};

/*
 * Copy lowcore info to buffer. Use map in order to copy only register parts.
 *
 * @buf:    User buffer
 * @sa:     Pointer to save area
 * @sa_off: Offset in save area to copy
 * @len:    Number of bytes to copy
 */
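/*
 * Note (derived from set_lc_mask() further down): sys_info.lc_mask has 0xff
 * in every byte of struct save_area that carries register contents, so the
 * loop below copies only those bytes and leaves the rest of the caller's
 * buffer untouched.
 */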
static int copy_lc(void __user *buf, void *sa, int sa_off, int len)
{
	int i;
	char *lc_mask = (char*)&sys_info.lc_mask;

	for (i = 0; i < len; i++) {
		if (!lc_mask[i + sa_off])
			continue;
		if (copy_to_user(buf + i, sa + sa_off + i, 1))
			return -EFAULT;
	}
	return 0;
}

/*
 * Copy lowcore register save areas into the user buffer, if necessary
 *
 * @buf:   User buffer
 * @start: Start address of buffer in dump memory
 * @count: Size of buffer
 */
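/*
 * Example (a sketch of the range check below): if a CPU's prefix register is
 * 0x12000, its save area occupies [0x12000 + sa_base, 0x12000 + sa_base +
 * sa_size). Any read range [start, start + count) that overlaps this window
 * gets the overlapping bytes rewritten via copy_lc(), so analysis tools find
 * the register sets in the prefix pages as expected.
 */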
static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
{
	unsigned long end;
	int i = 0;

	if (count == 0)
		return 0;

	end = start + count;
	while (zfcpdump_save_areas[i]) {
		unsigned long cp_start, cp_end; /* copy range */
		unsigned long sa_start, sa_end; /* save area range */
		unsigned long prefix;
		unsigned long sa_off, len, buf_off;

		prefix = zfcpdump_save_areas[i]->pref_reg;
		sa_start = prefix + sys_info.sa_base;
		sa_end = prefix + sys_info.sa_base + sys_info.sa_size;

		if ((end < sa_start) || (start > sa_end))
			goto next;
		cp_start = max(start, sa_start);
		cp_end = min(end, sa_end);

		buf_off = cp_start - start;
		sa_off = cp_start - sa_start;
		len = cp_end - cp_start;

		TRACE("copy_lc for: %lx\n", start);
		if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len))
			return -EFAULT;
next:
		i++;
	}
	return 0;
}

/*
 * Read routine for the zcore character device:
 * First 4K are the dump header
 * Next 32MB are HSA memory
 * Rest is read from absolute memory
 */
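/*
 * Resulting layout of "zcore/mem" (sketch): offsets [0x0, 0x1000) hold
 * zcore_header, [0x1000, 0x1000 + ZFCPDUMP_HSA_SIZE) is served from the HSA
 * via memcpy_hsa_user(), and everything above that is fetched from real
 * memory with copy_to_user_real(); register save areas are patched into the
 * prefix pages by zcore_add_lc() on the way out.
 */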
static ssize_t zcore_read(struct file *file, char __user *buf, size_t count,
			  loff_t *ppos)
{
	unsigned long mem_start; /* Start address in memory */
	size_t mem_offs;	 /* Offset in dump memory */
	size_t hdr_count;	 /* Size of header part of output buffer */
	size_t size;
	int rc;

	mutex_lock(&zcore_mutex);

	if (*ppos > (sys_info.mem_size + HEADER_SIZE)) {
		rc = -EINVAL;
		goto fail;
	}

	count = min(count, (size_t) (sys_info.mem_size + HEADER_SIZE - *ppos));

	/* Copy dump header */
	if (*ppos < HEADER_SIZE) {
		size = min(count, (size_t) (HEADER_SIZE - *ppos));
		if (copy_to_user(buf, (char *) &zcore_header + *ppos, size)) {
			rc = -EFAULT;
			goto fail;
		}
		hdr_count = size;
		mem_start = 0;
	} else {
		hdr_count = 0;
		mem_start = *ppos - HEADER_SIZE;
	}

	mem_offs = 0;

	/* Copy from HSA data */
	if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) {
		size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE
			   - mem_start));
		rc = memcpy_hsa_user(buf + hdr_count, mem_start, size);
		if (rc)
			goto fail;

		mem_offs += size;
	}

	/* Copy from real mem */
	size = count - mem_offs - hdr_count;
	rc = copy_to_user_real(buf + hdr_count + mem_offs,
			       (void *) mem_start + mem_offs, size);
	if (rc)
		goto fail;

	/*
	 * Since s390 dump analysis tools like lcrash or crash
	 * expect register sets in the prefix pages of the cpus,
	 * we copy them into the read buffer, if necessary.
	 * buf + hdr_count: Start of memory part of output buffer
	 * mem_start: Start memory address to copy from
	 * count - hdr_count: Size of memory area to copy
	 */
	if (zcore_add_lc(buf + hdr_count, mem_start, count - hdr_count)) {
		rc = -EFAULT;
		goto fail;
	}
	*ppos += count;
fail:
	mutex_unlock(&zcore_mutex);
	return (rc < 0) ? rc : count;
}

static int zcore_open(struct inode *inode, struct file *filp)
{
	if (!hsa_available)
		return -ENODATA;
	else
		return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

static int zcore_release(struct inode *inode, struct file *filep)
{
	diag308(DIAG308_REL_HSA, NULL);
	hsa_available = 0;
	return 0;
}

static loff_t zcore_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t rc;

	mutex_lock(&zcore_mutex);
	switch (orig) {
	case 0:
		file->f_pos = offset;
		rc = file->f_pos;
		break;
	case 1:
		file->f_pos += offset;
		rc = file->f_pos;
		break;
	default:
		rc = -EINVAL;
	}
	mutex_unlock(&zcore_mutex);
	return rc;
}

static const struct file_operations zcore_fops = {
	.owner		= THIS_MODULE,
	.llseek		= zcore_lseek,
	.read		= zcore_read,
	.open		= zcore_open,
	.release	= zcore_release,
};

static ssize_t zcore_memmap_read(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	return simple_read_from_buffer(buf, count, ppos, filp->private_data,
				       MEMORY_CHUNKS * CHUNK_INFO_SIZE);
}

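/*
 * The "zcore/memmap" buffer built below contains one fixed-width record per
 * detected memory chunk: "<start> <size> " as two 16-digit hex numbers, e.g.
 * "0000000000000000 0000000010000000 " for a 256 MB chunk at address 0
 * (34 characters per entry, see CHUNK_INFO_SIZE).
 */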
static int zcore_memmap_open(struct inode *inode, struct file *filp)
{
	int i;
	char *buf;
	struct mem_chunk *chunk_array;

	chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk),
			      GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;
	detect_memory_layout(chunk_array);
	buf = kzalloc(MEMORY_CHUNKS * CHUNK_INFO_SIZE, GFP_KERNEL);
	if (!buf) {
		kfree(chunk_array);
		return -ENOMEM;
	}
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		sprintf(buf + (i * CHUNK_INFO_SIZE), "%016llx %016llx ",
			(unsigned long long) chunk_array[i].addr,
			(unsigned long long) chunk_array[i].size);
		if (chunk_array[i].size == 0)
			break;
	}
	kfree(chunk_array);
	filp->private_data = buf;
	return nonseekable_open(inode, filp);
}

static int zcore_memmap_release(struct inode *inode, struct file *filp)
{
	kfree(filp->private_data);
	return 0;
}

static const struct file_operations zcore_memmap_fops = {
	.owner		= THIS_MODULE,
	.read		= zcore_memmap_read,
	.open		= zcore_memmap_open,
	.release	= zcore_memmap_release,
	.llseek		= no_llseek,
};

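/*
 * Writing anything to "zcore/reipl" re-IPLs the dumped system: the IPL
 * information block saved by zcore_reipl_init() (if its checksum was valid)
 * is activated via diag308 DIAG308_SET and then booted with DIAG308_IPL.
 */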
static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	if (ipl_block) {
		diag308(DIAG308_SET, ipl_block);
		diag308(DIAG308_IPL, NULL);
	}
	return count;
}

static int zcore_reipl_open(struct inode *inode, struct file *filp)
{
	return nonseekable_open(inode, filp);
}

static int zcore_reipl_release(struct inode *inode, struct file *filp)
{
	return 0;
}

static const struct file_operations zcore_reipl_fops = {
	.owner		= THIS_MODULE,
	.write		= zcore_reipl_write,
	.open		= zcore_reipl_open,
	.release	= zcore_reipl_release,
	.llseek		= no_llseek,
};

#ifdef CONFIG_32BIT

static void __init set_lc_mask(struct save_area *map)
{
	memset(&map->ext_save, 0xff, sizeof(map->ext_save));
	memset(&map->timer, 0xff, sizeof(map->timer));
	memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
	memset(&map->psw, 0xff, sizeof(map->psw));
	memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
	memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
	memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
	memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
	memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
}

#else /* CONFIG_32BIT */

static void __init set_lc_mask(struct save_area *map)
{
	memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
	memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
	memset(&map->psw, 0xff, sizeof(map->psw));
	memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
	memset(&map->fp_ctrl_reg, 0xff, sizeof(map->fp_ctrl_reg));
	memset(&map->tod_reg, 0xff, sizeof(map->tod_reg));
	memset(&map->timer, 0xff, sizeof(map->timer));
	memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
	memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
	memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
}

#endif /* CONFIG_32BIT */

/*
 * Initialize dump globals for a given architecture
 */
static int __init sys_info_init(enum arch_id arch)
{
	int rc;

	switch (arch) {
	case ARCH_S390X:
		pr_alert("DETECTED 'S390X (64 bit) OS'\n");
		break;
	case ARCH_S390:
		pr_alert("DETECTED 'S390 (32 bit) OS'\n");
		break;
	default:
		pr_alert("0x%x is an unknown architecture.\n", arch);
		return -EINVAL;
	}
	sys_info.sa_base = SAVE_AREA_BASE;
	sys_info.sa_size = sizeof(struct save_area);
	sys_info.arch = arch;
	set_lc_mask(&sys_info.lc_mask);
	rc = init_cpu_info(arch);
	if (rc)
		return rc;
	sys_info.mem_size = real_memory_size;

	return 0;
}

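/*
 * Check that the HSA holds at least ZFCPDUMP_HSA_SIZE (the 32 MB served by
 * zcore_read() above) of dump data; the usable size is derived from the
 * 4K block count reported by sclp_sdias_blk_count(), minus one block.
 */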
static int __init check_sdias(void)
{
	int rc, act_hsa_size;

	rc = sclp_sdias_blk_count();
	if (rc < 0) {
		TRACE("Could not determine HSA size\n");
		return rc;
	}
	act_hsa_size = (rc - 1) * PAGE_SIZE;
	if (act_hsa_size < ZFCPDUMP_HSA_SIZE) {
		TRACE("HSA size too small: %i\n", act_hsa_size);
		return -EINVAL;
	}
	return 0;
}

static int __init get_mem_size(unsigned long *mem)
{
	int i;
	struct mem_chunk *chunk_array;

	chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk),
			      GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;
	detect_memory_layout(chunk_array);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (chunk_array[i].size == 0)
			break;
		*mem += chunk_array[i].size;
	}
	kfree(chunk_array);
	return 0;
}

static int __init zcore_header_init(int arch, struct zcore_header *hdr)
{
	int rc, i;
	unsigned long memory = 0;
	u32 prefix;

	if (arch == ARCH_S390X)
		hdr->arch_id = DUMP_ARCH_S390X;
	else
		hdr->arch_id = DUMP_ARCH_S390;
	rc = get_mem_size(&memory);
	if (rc)
		return rc;
	hdr->mem_size = memory;
	hdr->rmem_size = memory;
	hdr->mem_end = sys_info.mem_size;
	hdr->num_pages = memory / PAGE_SIZE;
	hdr->tod = get_clock();
	get_cpu_id(&hdr->cpu_id);
	for (i = 0; zfcpdump_save_areas[i]; i++) {
		prefix = zfcpdump_save_areas[i]->pref_reg;
		hdr->real_cpu_cnt++;
		if (!prefix)
			continue;
		hdr->lc_vec[hdr->cpu_cnt] = prefix;
		hdr->cpu_cnt++;
	}
	return 0;
}

/*
 * Provide IPL parameter information block from either HSA or memory
 * for future reipl
 */
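/*
 * Sketch of the flow below: the ipib_info pair (pointer + checksum) is read
 * from the dumped system's lowcore at __LC_DUMP_REIPL via the HSA; the
 * referenced IPL parameter block is then fetched from the HSA or from real
 * memory, depending on its address, and kept only if its checksum matches.
 */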
static int __init zcore_reipl_init(void)
{
	struct ipib_info ipib_info;
	int rc;

	rc = memcpy_hsa_kernel(&ipib_info, __LC_DUMP_REIPL, sizeof(ipib_info));
	if (rc)
		return rc;
	if (ipib_info.ipib == 0)
		return 0;
	ipl_block = (void *) __get_free_page(GFP_KERNEL);
	if (!ipl_block)
		return -ENOMEM;
	if (ipib_info.ipib < ZFCPDUMP_HSA_SIZE)
		rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
	else
		rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE);
	if (rc || csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
	    ipib_info.checksum) {
		TRACE("Checksum does not match\n");
		free_page((unsigned long) ipl_block);
		ipl_block = NULL;
	}
	return 0;
}

static int __init zcore_init(void)
{
	unsigned char arch;
	int rc;

	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return -ENODATA;
	/* Do not initialize zfcpdump in kdump mode (indicated by OLDMEM_BASE) */
	if (OLDMEM_BASE)
		return -ENODATA;

	zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
	debug_register_view(zcore_dbf, &debug_sprintf_view);
	debug_set_level(zcore_dbf, 6);

	TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno);
	TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
	TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun);

	rc = sclp_sdias_init();
	if (rc)
		goto fail;

	rc = check_sdias();
	if (rc)
		goto fail;

	rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
	if (rc)
		goto fail;

#ifdef CONFIG_64BIT
	if (arch == ARCH_S390) {
		pr_alert("The 64-bit dump tool cannot be used for a "
			 "32-bit system\n");
		rc = -EINVAL;
		goto fail;
	}
#else /* CONFIG_64BIT */
	if (arch == ARCH_S390X) {
		pr_alert("The 32-bit dump tool cannot be used for a "
			 "64-bit system\n");
		rc = -EINVAL;
		goto fail;
	}
#endif /* CONFIG_64BIT */

	rc = sys_info_init(arch);
	if (rc)
		goto fail;

	rc = zcore_header_init(arch, &zcore_header);
	if (rc)
		goto fail;

	rc = zcore_reipl_init();
	if (rc)
		goto fail;

	zcore_dir = debugfs_create_dir("zcore", NULL);
	if (!zcore_dir) {
		rc = -ENOMEM;
		goto fail;
	}
	zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL,
					 &zcore_fops);
	if (!zcore_file) {
		rc = -ENOMEM;
		goto fail_dir;
	}
	zcore_memmap_file = debugfs_create_file("memmap", S_IRUSR, zcore_dir,
						NULL, &zcore_memmap_fops);
	if (!zcore_memmap_file) {
		rc = -ENOMEM;
		goto fail_file;
	}
	zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir,
					       NULL, &zcore_reipl_fops);
	if (!zcore_reipl_file) {
		rc = -ENOMEM;
		goto fail_memmap_file;
	}
	hsa_available = 1;
	return 0;

fail_memmap_file:
	debugfs_remove(zcore_memmap_file);
fail_file:
	debugfs_remove(zcore_file);
fail_dir:
	debugfs_remove(zcore_dir);
fail:
	diag308(DIAG308_REL_HSA, NULL);
	return rc;
}

static void __exit zcore_exit(void)
{
	debug_unregister(zcore_dbf);
	sclp_sdias_exit();
	free_page((unsigned long) ipl_block);
	debugfs_remove(zcore_reipl_file);
	debugfs_remove(zcore_memmap_file);
	debugfs_remove(zcore_file);
	debugfs_remove(zcore_dir);
	diag308(DIAG308_REL_HSA, NULL);
}

MODULE_AUTHOR("Copyright IBM Corp. 2003,2008");
MODULE_DESCRIPTION("zcore module for zfcpdump support");
MODULE_LICENSE("GPL");

subsys_initcall(zcore_init);
module_exit(zcore_exit);