Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
author    Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 29 Jul 2016 20:23:18 +0000 (13:23 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 29 Jul 2016 20:23:18 +0000 (13:23 -0700)
Pull sparc updates from David Miller:

 1) Fix a double spin lock bug in the sunhv serial driver, from Dan
    Carpenter.

 2) Use correct RSS estimate when determining whether to grow the huge
    TSB or not, from Mike Kravetz.

 3) Don't use full three-level page tables for hugepages; the PMD level
    is sufficient.  From Nitin Gupta.

 4) Mask out extraneous bits from the TSB_TAG_ACCESS register; we only
    want the address bits.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc64: Trim page tables for 8M hugepages
  sparc64 mm: Fix base TSB sizing when hugetlb pages are used
  sparc: serial: sunhv: fix a double lock bug
  sparc32: off by ones in BUG_ON()
  sparc: Don't leak context bits into thread->fault_address

14 files changed:
arch/sparc/include/asm/hugetlb.h
arch/sparc/include/asm/mmu_64.h
arch/sparc/include/asm/pgtable_64.h
arch/sparc/include/asm/tsb.h
arch/sparc/kernel/dtlb_prot.S
arch/sparc/kernel/irq_32.c
arch/sparc/kernel/ktlb.S
arch/sparc/kernel/tsb.S
arch/sparc/mm/fault_64.c
arch/sparc/mm/hugetlbpage.c
arch/sparc/mm/init_64.c
arch/sparc/mm/tlb.c
arch/sparc/mm/tsb.c
drivers/tty/serial/sunhv.c

diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
index 139e711ff80cddffa1a67b930b8cee00b744b6d9..dcbf985ab243201250222a824fc1146320522e65 100644
@@ -31,14 +31,6 @@ static inline int prepare_hugepage_range(struct file *file,
        return 0;
 }
 
-static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
-                                         unsigned long addr, unsigned long end,
-                                         unsigned long floor,
-                                         unsigned long ceiling)
-{
-       free_pgd_range(tlb, addr, end, floor, ceiling);
-}
-
 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                         unsigned long addr, pte_t *ptep)
 {
@@ -82,4 +74,8 @@ static inline void arch_clear_hugepage_flags(struct page *page)
 {
 }
 
+void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
+                           unsigned long end, unsigned long floor,
+                           unsigned long ceiling);
+
 #endif /* _ASM_SPARC64_HUGETLB_H */
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index 70067ce184b16a9d91de2737bf804dbb89e94307..f7de0dbc38af2dd36c9f34df53e6e951f6729825 100644
@@ -92,7 +92,8 @@ struct tsb_config {
 typedef struct {
        spinlock_t              lock;
        unsigned long           sparc64_ctx_val;
-       unsigned long           huge_pte_count;
+       unsigned long           hugetlb_pte_count;
+       unsigned long           thp_pte_count;
        struct tsb_config       tsb_block[MM_NUM_TSBS];
        struct hv_tsb_descr     tsb_descr[MM_NUM_TSBS];
 } mm_context_t;
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index e7d82803a48fcc5a14e1f9cb283d71c58a002ce6..1fb317fbc0b3419123c99f5c4b2807d3162ffc5f 100644
@@ -395,7 +395,7 @@ static inline unsigned long __pte_huge_mask(void)
 
 static inline pte_t pte_mkhuge(pte_t pte)
 {
-       return __pte(pte_val(pte) | __pte_huge_mask());
+       return __pte(pte_val(pte) | _PAGE_PMD_HUGE | __pte_huge_mask());
 }
 
 static inline bool is_hugetlb_pte(pte_t pte)
@@ -403,6 +403,11 @@ static inline bool is_hugetlb_pte(pte_t pte)
        return !!(pte_val(pte) & __pte_huge_mask());
 }
 
+static inline bool is_hugetlb_pmd(pmd_t pmd)
+{
+       return !!(pmd_val(pmd) & _PAGE_PMD_HUGE);
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline pmd_t pmd_mkhuge(pmd_t pmd)
 {
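
The new is_hugetlb_pmd() helper lets a page table walk stop at the PMD
level when the entry is itself a huge mapping rather than a pointer to a
PTE page.  A minimal sketch of that use (walk_to_pte() is a hypothetical
helper, not part of this series):

    static inline pte_t *walk_to_pte(pmd_t *pmdp, unsigned long addr)
    {
            if (is_hugetlb_pmd(*pmdp))
                    return (pte_t *)pmdp;   /* the PMD is the final mapping */
            return pte_offset_map(pmdp, addr);  /* descend to the PTE page */
    }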
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index c6a155c3904ece984d3075094e3ee8b55845a68f..32258e08da035f018df2915bf9935556556628bb 100644
@@ -203,7 +203,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
         * We have to propagate the 4MB bit of the virtual address
         * because we are fabricating 8MB pages using 4MB hw pages.
         */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 #define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \
        brz,pn          REG1, FAIL_LABEL;               \
         sethi          %uhi(_PAGE_PMD_HUGE), REG2;     \
diff --git a/arch/sparc/kernel/dtlb_prot.S b/arch/sparc/kernel/dtlb_prot.S
index d668ca149e647ac832f88b3602134768d712ef80..4087a62f96b099f3b2d9e46b1b73ff49220da718 100644
 
 /* PROT ** ICACHE line 2: More real fault processing */
        ldxa            [%g4] ASI_DMMU, %g5             ! Put tagaccess in %g5
+       srlx            %g5, PAGE_SHIFT, %g5
+       sllx            %g5, PAGE_SHIFT, %g5            ! Clear context ID bits
        bgu,pn          %xcc, winfix_trampoline         ! Yes, perform winfixup
         mov            FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
        ba,pt           %xcc, sparc64_realfault_common  ! Nope, normal fault
         nop
        nop
-       nop
-       nop
 
 /* PROT ** ICACHE line 3: Unused...    */
        nop
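
The srlx/sllx pair above is a mask in disguise: shifting right and then
back left by PAGE_SHIFT clears the low bits of the TSB_TAG_ACCESS value,
where the MMU stores the context ID, leaving only the page-aligned
virtual address.  A C equivalent, assuming the context ID occupies the
low PAGE_SHIFT bits (tag_access_va() is illustrative only):

    static inline unsigned long tag_access_va(unsigned long tag)
    {
            /* Drop the low PAGE_SHIFT bits (context ID); keep the VA. */
            return (tag >> PAGE_SHIFT) << PAGE_SHIFT;
    }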
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index a979e99f8751cd60393c1ee18c0a958ceddb34cf..cac4a5554c0ea0533247b79d64d93612171e93ac 100644
@@ -165,7 +165,7 @@ void irq_link(unsigned int irq)
 
        p = &irq_table[irq];
        pil = p->pil;
-       BUG_ON(pil > SUN4D_MAX_IRQ);
+       BUG_ON(pil >= SUN4D_MAX_IRQ);
        p->next = irq_map[pil];
        irq_map[pil] = p;
 
@@ -182,7 +182,7 @@ void irq_unlink(unsigned int irq)
        spin_lock_irqsave(&irq_map_lock, flags);
 
        p = &irq_table[irq];
-       BUG_ON(p->pil > SUN4D_MAX_IRQ);
+       BUG_ON(p->pil >= SUN4D_MAX_IRQ);
        pnext = &irq_map[p->pil];
        while (*pnext != p)
                pnext = &(*pnext)->next;
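
The off-by-one matters because the arrays indexed by pil hold
SUN4D_MAX_IRQ entries, so valid indices run 0 .. SUN4D_MAX_IRQ - 1 and
the old "pil > SUN4D_MAX_IRQ" check still admitted pil == SUN4D_MAX_IRQ,
one slot past the end.  Illustrative sketch, assuming that declaration:

    struct irq_bucket *irq_map[SUN4D_MAX_IRQ];  /* assumed declaration */

    /* pil == SUN4D_MAX_IRQ would touch irq_map[SUN4D_MAX_IRQ], one
     * element beyond the array, so ">=" is the correct bound check.
     */
    BUG_ON(pil >= SUN4D_MAX_IRQ);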
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index ef0d8e9e1210e6dffbc184a8ce426c708a968062..f22bec0db64549ace3b68d645b8d23db87ec1b5b 100644
@@ -20,6 +20,10 @@ kvmap_itlb:
        mov             TLB_TAG_ACCESS, %g4
        ldxa            [%g4] ASI_IMMU, %g4
 
+       /* The kernel executes in context zero, therefore we do not
+        * need to clear the context ID bits out of %g4 here.
+        */
+
        /* sun4v_itlb_miss branches here with the missing virtual
         * address already loaded into %g4
         */
@@ -128,6 +132,10 @@ kvmap_dtlb:
        mov             TLB_TAG_ACCESS, %g4
        ldxa            [%g4] ASI_DMMU, %g4
 
+       /* The kernel executes in context zero, therefore we do not
+        * need to clear the context ID bits out of %g4 here.
+        */
+
        /* sun4v_dtlb_miss branches here with the missing virtual
         * address already loaded into %g4
         */
@@ -251,6 +259,10 @@ kvmap_dtlb_longpath:
        nop
        .previous
 
+       /* The kernel executes in context zero, therefore we do not
+        * need to clear the context ID bits out of %g5 here.
+        */
+
        be,pt   %xcc, sparc64_realfault_common
         mov    FAULT_CODE_DTLB, %g4
        ba,pt   %xcc, winfix_trampoline
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index be98685c14c62301250db79791269fb411ea9e0a..d568c8207af72ffbd15aae8e5f41f77401ba5397 100644
         */
 tsb_miss_dtlb:
        mov             TLB_TAG_ACCESS, %g4
+       ldxa            [%g4] ASI_DMMU, %g4
+       srlx            %g4, PAGE_SHIFT, %g4
        ba,pt           %xcc, tsb_miss_page_table_walk
-        ldxa           [%g4] ASI_DMMU, %g4
+        sllx           %g4, PAGE_SHIFT, %g4
 
 tsb_miss_itlb:
        mov             TLB_TAG_ACCESS, %g4
+       ldxa            [%g4] ASI_IMMU, %g4
+       srlx            %g4, PAGE_SHIFT, %g4
        ba,pt           %xcc, tsb_miss_page_table_walk
-        ldxa           [%g4] ASI_IMMU, %g4
+        sllx           %g4, PAGE_SHIFT, %g4
 
        /* At this point we have:
         * %g1 --       PAGE_SIZE TSB entry address
@@ -284,6 +288,10 @@ tsb_do_dtlb_fault:
        nop
        .previous
 
+       /* Clear context ID bits.  */
+       srlx            %g5, PAGE_SHIFT, %g5
+       sllx            %g5, PAGE_SHIFT, %g5
+
        be,pt   %xcc, sparc64_realfault_common
         mov    FAULT_CODE_DTLB, %g4
        ba,pt   %xcc, winfix_trampoline
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 6c43b924a7a2bae17cfd98b4b7de0be64b16ad13..e16fdd28a93159ccd5ade26584e9cfc212fedb94 100644
@@ -111,8 +111,8 @@ static unsigned int get_user_insn(unsigned long tpc)
        if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
                goto out_irq_enable;
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       if (pmd_trans_huge(*pmdp)) {
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+       if (is_hugetlb_pmd(*pmdp)) {
                pa  = pmd_pfn(*pmdp) << PAGE_SHIFT;
                pa += tpc & ~HPAGE_MASK;
 
@@ -476,14 +476,14 @@ good_area:
        up_read(&mm->mmap_sem);
 
        mm_rss = get_mm_rss(mm);
-#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-       mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
+       mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
 #endif
        if (unlikely(mm_rss >
                     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
                tsb_grow(mm, MM_TSB_BASE, mm_rss);
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-       mm_rss = mm->context.huge_pte_count;
+       mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
        if (unlikely(mm_rss >
                     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
                if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
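
The sizing logic now distinguishes the two counters: THP pages are part
of get_mm_rss(), so only the THP contribution is subtracted when sizing
the base TSB, while hugetlb pages never appear in get_mm_rss() at all
(that was the bug).  The huge TSB services both kinds of huge mapping,
so it is sized from the sum.  Worked example, assuming 8K base pages and
8M huge pages (HPAGE_SIZE / PAGE_SIZE == 1024): a process with 2000
small pages, 3 THP pages and 5 hugetlb pages has
get_mm_rss() == 2000 + 3 * 1024 == 5072, a base TSB estimate of
5072 - 3 * 1024 == 2000, and a huge TSB estimate of 3 + 5 == 8 entries.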
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index ba52e6466a8252659d0f19e49abb6cba1a7762b2..988acc8b1b80a387d9119782f53f1d41dbe53c4e 100644
@@ -12,6 +12,7 @@
 
 #include <asm/mman.h>
 #include <asm/pgalloc.h>
+#include <asm/pgtable.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
@@ -131,23 +132,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 {
        pgd_t *pgd;
        pud_t *pud;
-       pmd_t *pmd;
        pte_t *pte = NULL;
 
-       /* We must align the address, because our caller will run
-        * set_huge_pte_at() on whatever we return, which writes out
-        * all of the sub-ptes for the hugepage range.  So we have
-        * to give it the first such sub-pte.
-        */
-       addr &= HPAGE_MASK;
-
        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
-       if (pud) {
-               pmd = pmd_alloc(mm, pud, addr);
-               if (pmd)
-                       pte = pte_alloc_map(mm, pmd, addr);
-       }
+       if (pud)
+               pte = (pte_t *)pmd_alloc(mm, pud, addr);
+
        return pte;
 }
 
@@ -155,19 +146,13 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
        pgd_t *pgd;
        pud_t *pud;
-       pmd_t *pmd;
        pte_t *pte = NULL;
 
-       addr &= HPAGE_MASK;
-
        pgd = pgd_offset(mm, addr);
        if (!pgd_none(*pgd)) {
                pud = pud_offset(pgd, addr);
-               if (!pud_none(*pud)) {
-                       pmd = pmd_offset(pud, addr);
-                       if (!pmd_none(*pmd))
-                               pte = pte_offset_map(pmd, addr);
-               }
+               if (!pud_none(*pud))
+                       pte = (pte_t *)pmd_offset(pud, addr);
        }
        return pte;
 }
@@ -175,70 +160,143 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
 {
-       int i;
-       pte_t orig[2];
-       unsigned long nptes;
+       pte_t orig;
 
        if (!pte_present(*ptep) && pte_present(entry))
-               mm->context.huge_pte_count++;
+               mm->context.hugetlb_pte_count++;
 
        addr &= HPAGE_MASK;
-
-       nptes = 1 << HUGETLB_PAGE_ORDER;
-       orig[0] = *ptep;
-       orig[1] = *(ptep + nptes / 2);
-       for (i = 0; i < nptes; i++) {
-               *ptep = entry;
-               ptep++;
-               addr += PAGE_SIZE;
-               pte_val(entry) += PAGE_SIZE;
-       }
+       orig = *ptep;
+       *ptep = entry;
 
        /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
-       addr -= REAL_HPAGE_SIZE;
-       ptep -= nptes / 2;
-       maybe_tlb_batch_add(mm, addr, ptep, orig[1], 0);
-       addr -= REAL_HPAGE_SIZE;
-       ptep -= nptes / 2;
-       maybe_tlb_batch_add(mm, addr, ptep, orig[0], 0);
+       maybe_tlb_batch_add(mm, addr, ptep, orig, 0);
+       maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0);
 }
 
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
 {
        pte_t entry;
-       int i;
-       unsigned long nptes;
 
        entry = *ptep;
        if (pte_present(entry))
-               mm->context.huge_pte_count--;
+               mm->context.hugetlb_pte_count--;
 
        addr &= HPAGE_MASK;
-       nptes = 1 << HUGETLB_PAGE_ORDER;
-       for (i = 0; i < nptes; i++) {
-               *ptep = __pte(0UL);
-               addr += PAGE_SIZE;
-               ptep++;
-       }
+       *ptep = __pte(0UL);
 
        /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
-       addr -= REAL_HPAGE_SIZE;
-       ptep -= nptes / 2;
-       maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
-       addr -= REAL_HPAGE_SIZE;
-       ptep -= nptes / 2;
        maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
+       maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0);
 
        return entry;
 }
 
 int pmd_huge(pmd_t pmd)
 {
-       return 0;
+       return !pmd_none(pmd) &&
+               (pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
 }
 
 int pud_huge(pud_t pud)
 {
        return 0;
 }
+
+static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
+                          unsigned long addr)
+{
+       pgtable_t token = pmd_pgtable(*pmd);
+
+       pmd_clear(pmd);
+       pte_free_tlb(tlb, token, addr);
+       atomic_long_dec(&tlb->mm->nr_ptes);
+}
+
+static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+                                  unsigned long addr, unsigned long end,
+                                  unsigned long floor, unsigned long ceiling)
+{
+       pmd_t *pmd;
+       unsigned long next;
+       unsigned long start;
+
+       start = addr;
+       pmd = pmd_offset(pud, addr);
+       do {
+               next = pmd_addr_end(addr, end);
+               if (pmd_none(*pmd))
+                       continue;
+               if (is_hugetlb_pmd(*pmd))
+                       pmd_clear(pmd);
+               else
+                       hugetlb_free_pte_range(tlb, pmd, addr);
+       } while (pmd++, addr = next, addr != end);
+
+       start &= PUD_MASK;
+       if (start < floor)
+               return;
+       if (ceiling) {
+               ceiling &= PUD_MASK;
+               if (!ceiling)
+                       return;
+       }
+       if (end - 1 > ceiling - 1)
+               return;
+
+       pmd = pmd_offset(pud, start);
+       pud_clear(pud);
+       pmd_free_tlb(tlb, pmd, start);
+       mm_dec_nr_pmds(tlb->mm);
+}
+
+static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+                                  unsigned long addr, unsigned long end,
+                                  unsigned long floor, unsigned long ceiling)
+{
+       pud_t *pud;
+       unsigned long next;
+       unsigned long start;
+
+       start = addr;
+       pud = pud_offset(pgd, addr);
+       do {
+               next = pud_addr_end(addr, end);
+               if (pud_none_or_clear_bad(pud))
+                       continue;
+               hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
+                                      ceiling);
+       } while (pud++, addr = next, addr != end);
+
+       start &= PGDIR_MASK;
+       if (start < floor)
+               return;
+       if (ceiling) {
+               ceiling &= PGDIR_MASK;
+               if (!ceiling)
+                       return;
+       }
+       if (end - 1 > ceiling - 1)
+               return;
+
+       pud = pud_offset(pgd, start);
+       pgd_clear(pgd);
+       pud_free_tlb(tlb, pud, start);
+}
+
+void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+                           unsigned long addr, unsigned long end,
+                           unsigned long floor, unsigned long ceiling)
+{
+       pgd_t *pgd;
+       unsigned long next;
+
+       pgd = pgd_offset(tlb->mm, addr);
+       do {
+               next = pgd_addr_end(addr, end);
+               if (pgd_none_or_clear_bad(pgd))
+                       continue;
+               hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+       } while (pgd++, addr = next, addr != end);
+}
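
The payoff of mapping hugepages at the PMD level is the removal of the
PTE page itself.  Worked example, assuming 8K pages and 8-byte page
table entries: a PTE page holds 8192 / 8 == 1024 entries, which at 8K
each spans exactly 8M, so every 8M hugepage previously burned a full 8K
PTE page on 1024 near-identical entries.  After this change the same 8M
page costs one 8-byte PMD entry, and hugetlb_free_pgd_range() above
clears huge PMDs directly instead of descending into a PTE page that no
longer exists.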
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index aec508e374906ba0b1860f3669a609407c080125..65457c9f1365f07b0b9d9337c897983234d85652 100644
@@ -346,10 +346,13 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
        spin_lock_irqsave(&mm->context.lock, flags);
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-       if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
+       if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
+           is_hugetlb_pte(pte)) {
+               /* We are fabricating 8MB pages using 4MB real hw pages.  */
+               pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
                __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
                                        address, pte_val(pte));
-       else
+       } else
 #endif
                __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
                                        address, pte_val(pte));
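
The OR folds the "which 4M half?" bit of the faulting address into the
TSB entry because one 8M software page is backed by two 4M hardware TLB
entries.  Worked example, assuming REAL_HPAGE_SHIFT == 22 (the 4M bit)
and an 8M page with physical base P: a fault in the lower 4M half leaves
bit 22 clear and the hardware entry translates to P, while a fault in
the upper half sets bit 22 so the entry translates to P + 4M.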
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index f81cd973670079132681d69d0b9800bb12bfbb55..3659d37b4d818e30c614f46cf4b2aca8bf700aa0 100644
@@ -175,9 +175,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 
        if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
                if (pmd_val(pmd) & _PAGE_PMD_HUGE)
-                       mm->context.huge_pte_count++;
+                       mm->context.thp_pte_count++;
                else
-                       mm->context.huge_pte_count--;
+                       mm->context.thp_pte_count--;
 
                /* Do not try to allocate the TSB hash table if we
                 * don't have one already.  We have various locks held
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index a0604a493a361e51c055e825e65295a302aeec4c..6725ed45580e525cf5567b1b9ccbeb2c723738bf 100644
@@ -470,7 +470,7 @@ retry_tsb_alloc:
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-       unsigned long huge_pte_count;
+       unsigned long total_huge_pte_count;
 #endif
        unsigned int i;
 
@@ -479,12 +479,14 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
        mm->context.sparc64_ctx_val = 0UL;
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-       /* We reset it to zero because the fork() page copying
+       /* We reset them to zero because the fork() page copying
         * will re-increment the counters as the parent PTEs are
         * copied into the child address space.
         */
-       huge_pte_count = mm->context.huge_pte_count;
-       mm->context.huge_pte_count = 0;
+       total_huge_pte_count = mm->context.hugetlb_pte_count +
+                        mm->context.thp_pte_count;
+       mm->context.hugetlb_pte_count = 0;
+       mm->context.thp_pte_count = 0;
 #endif
 
        /* copy_mm() copies over the parent's mm_struct before calling
@@ -500,8 +502,8 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
        tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-       if (unlikely(huge_pte_count))
-               tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
+       if (unlikely(total_huge_pte_count))
+               tsb_grow(mm, MM_TSB_HUGE, total_huge_pte_count);
 #endif
 
        if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index ca0d3802f2af45fb8be9cad11d9763fb615e516a..4e603d060e80c91b36aabb3961dc22c3596d37ec 100644
@@ -490,12 +490,6 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
                locked = spin_trylock_irqsave(&port->lock, flags);
        else
                spin_lock_irqsave(&port->lock, flags);
-       if (port->sysrq) {
-               locked = 0;
-       } else if (oops_in_progress) {
-               locked = spin_trylock(&port->lock);
-       } else
-               spin_lock(&port->lock);
 
        for (i = 0; i < n; i++) {
                if (*s == '\n')
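
The deleted lines were a duplicated locking sequence: the function had
already taken port->lock just above (kept as context), so the second
acquisition self-deadlocks on a non-recursive spinlock.  A minimal
sketch of the removed bug pattern:

    spin_lock_irqsave(&port->lock, flags);  /* first acquire (kept) */
    /* ... */
    spin_lock(&port->lock);                 /* second acquire: deadlock */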