Staging: rar/sep: Remove C++isms from the headers
[linux-2.6-block.git] drivers/staging/sep/sep_main_mod.c
1/*
2 *
3 * sep_main_mod.c - Security Processor Driver main group of functions
4 *
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * CONTACTS:
23 *
24 * Mark Allyn mark.a.allyn@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 *
30 */
31
32#include <linux/init.h>
33#include <linux/module.h>
34#include <linux/fs.h>
35#include <linux/cdev.h>
36#include <linux/kdev_t.h>
37#include <linux/mutex.h>
38#include <linux/mm.h>
39#include <linux/poll.h>
40#include <linux/wait.h>
41#include <asm/ioctl.h>
42#include <linux/ioport.h>
43#include <asm/io.h>
44#include <linux/interrupt.h>
45#include <linux/pagemap.h>
46#include <asm/cacheflush.h>
47#include "sep_driver_hw_defs.h"
48#include "sep_driver_config.h"
49#include "sep_driver_api.h"
50#include "sep_driver_ext_api.h"
51
52/*----------------------------------------
53 DEFINES
54-----------------------------------------*/
55
56
57#define INT_MODULE_PARM(n, v) int n = v; module_param(n, int, 0)
58
59/*--------------------------------------
60 TYPEDEFS
61 -----------------------------------------*/
62
63
64
65/*--------------------------------------------
66 GLOBAL variables
67--------------------------------------------*/
68
69/* debug messages level */
70INT_MODULE_PARM(sepDebug, 0x0);
71MODULE_PARM_DESC(sepDebug, "Flag to enable SEP debug messages");
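/*
 * For reference, the INT_MODULE_PARM() helper above makes the line
 * INT_MODULE_PARM(sepDebug, 0x0) expand to:
 *
 *	int sepDebug = 0x0; module_param(sepDebug, int, 0)
 *
 * i.e. an int with a compile-time default that can be overridden when the
 * module is loaded (e.g. "insmod sep_driver.ko sepDebug=1" - the module
 * file name here is only an illustration), with permissions 0 so the
 * parameter is not exposed through sysfs.
 */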
72
73/* address of the shared memory allocated during init for SEP driver */
74static unsigned long g_sep_shared_area_addr;
75
76/* the physical address of the shared area */
77static unsigned long g_sep_phys_shared_area_addr;
78
79/* Message Shared Area start address - will be allocated during init */
80static unsigned long g_message_shared_area_addr;
81
82/* major and minor device numbers */
83static dev_t g_sep_device_number;
84
85/* the file operations structure of the driver */
86static struct file_operations g_sep_fops;
87
88/* cdev struct of the driver */
89static struct cdev g_sep_cdev;
90
91/*
92 mutex for the access to the internals of the sep driver
93*/
94static DEFINE_MUTEX(sep_mutex);
95
96
97/* wait queue head (event) of the driver */
98DECLARE_WAIT_QUEUE_HEAD(g_sep_event);
99
100
101/* start address of the access to the SEP registers from driver */
102unsigned long g_sep_reg_base_address;
103
104/* transaction counter that coordinates the transactions between SEP and HOST */
105static unsigned long sep_host_to_sep_send_counter;
106
107/* counter for the messages from sep */
108static unsigned long sep_sep_to_host_reply_counter;
109
110/* counter for the number of bytes allocated in the pool for the current
111transaction */
112static unsigned long sep_data_pool_bytes_allocated;
113
114/* array of pointers to the pages that represent input data for the synchronic
115DMA action */
116struct page **sep_in_page_array;
117
118/* array of pointers to the pages that represent out data for the synchronic
119DMA action */
120struct page **sep_out_page_array;
121
122/* number of pages in the sep_in_page_array */
123unsigned long sep_in_num_pages;
124
125/* number of pages in the sep_out_page_array */
126unsigned long sep_out_num_pages;
127
128/* global data for every flow */
129static struct sep_flow_context_t g_sep_flows_data_array[SEP_DRIVER_NUM_FLOWS];
130
131/* flag for API mode - 1 is blocking, 0 is non-blocking */
132static unsigned long g_sep_block_mode_flag;
133
134/* pointer to the workqueue that handles the flow done interrupts */
135static struct workqueue_struct *g_sep_flow_wq_ptr;
136
137/*------------------------------------------------
138 PROTOTYPES
139---------------------------------------------------*/
140
141/*
142 interrupt handler function
143*/
144irqreturn_t sep_inthandler(int irq, void *dev_id);
145
146/*
147 this function registers the driver to the file system
148*/
149static int sep_register_driver_to_fs(void);
150
151/*
152 this function unregisters driver from fs
153*/
154static void sep_unregister_driver_from_fs(void);
155
156/*
157 this function calculates the size of data that can be inserted into the lli
 158 table from this array; the condition is that either the table is full
 159 (all entries are entered) or there are no more entries in the lli array
160*/
161static unsigned long sep_calculate_lli_table_max_size(
162 struct sep_lli_entry_t *lli_in_array_ptr,
163 unsigned long num_array_entries);
164/*
 165 this function builds one lli table from the lli_array according to the
166 given size of data
167*/
168static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr,
169 struct sep_lli_entry_t *lli_table_ptr,
170 unsigned long *num_processed_entries_ptr,
171 unsigned long *num_table_entries_ptr,
172 unsigned long table_data_size);
173
174/*
 175 this function goes over the list of the created tables and prints
 176 all their data
177*/
178static void sep_debug_print_lli_tables(struct sep_lli_entry_t *lli_table_ptr,
179 unsigned long num_table_entries,
180 unsigned long table_data_size);
181
182
183
184/*
 185 This function raises an interrupt to SEP that signals that it has a new
 186 command from the HOST
187*/
188static void sep_send_command_handler(void);
189
190
191/*
 192 This function raises an interrupt to SEP that signals that it has a
 193 new reply from the HOST
194*/
195static void sep_send_reply_command_handler(void);
196
197/*
198 This function handles the allocate data pool memory request
 199 It also calculates the physical address of the allocated memory
200 and the offset of this area from the mapped address. Therefore, the FVOs in
201 user space can calculate the exact virtual address of this allocated memory
202*/
203static int sep_allocate_data_pool_memory_handler(unsigned long arg);
204
205
206/*
207 This function handles write into allocated data pool command
208*/
209static int sep_write_into_data_pool_handler(unsigned long arg);
210
211/*
212 this function handles the read from data pool command
213*/
214static int sep_read_from_data_pool_handler(unsigned long arg);
215
216/*
 217 this function handles the request for creation of the DMA table
218 for the synchronic symmetric operations (AES,DES)
219*/
220static int sep_create_sync_dma_tables_handler(unsigned long arg);
221
222/*
223 this function handles the request to create the DMA tables for flow
224*/
225static int sep_create_flow_dma_tables_handler(unsigned long arg);
226
227/*
228 This API handles the end transaction request
229*/
230static int sep_end_transaction_handler(unsigned long arg);
231
232
233/*
234 this function handles add tables to flow
235*/
236static int sep_add_flow_tables_handler(unsigned long arg);
237
238/*
 239 this function adds the flow add message to the specific flow
240*/
241static int sep_add_flow_tables_message_handler(unsigned long arg);
242
243/*
244 this function handles the request for SEP start
245*/
246static int sep_start_handler(void);
247
248/*
249 this function handles the request for SEP initialization
250*/
251static int sep_init_handler(unsigned long arg);
252
253/*
 254 this function handles the request for cache and resident reallocation
255*/
256static int sep_realloc_cache_resident_handler(unsigned long arg);
257
258
259/*
 260 This API handles the setting of the API mode to blocking or non-blocking
261*/
262static int sep_set_api_mode_handler(unsigned long arg);
263
264/* handler for flow done interrupt */
265static void sep_flow_done_handler(struct work_struct *work);
266
267/*
268 This function locks all the physical pages of the kernel virtual buffer
 269 and constructs a basic lli array, where each entry holds the physical
 270 page address and the size of the application data held in that physical page
271*/
272static int sep_lock_kernel_pages(unsigned long kernel_virt_addr,
273 unsigned long data_size,
274 unsigned long *num_pages_ptr,
275 struct sep_lli_entry_t **lli_array_ptr,
276 struct page ***page_array_ptr);
277
278/*
279 This function creates one DMA table for flow and returns its data,
280 and pointer to its info entry
281*/
282static int sep_prepare_one_flow_dma_table(unsigned long virt_buff_addr,
283 unsigned long virt_buff_size,
284 struct sep_lli_entry_t *table_data,
285 struct sep_lli_entry_t **info_entry_ptr,
286 struct sep_flow_context_t *flow_data_ptr,
287 bool isKernelVirtualAddress);
288
289/*
290 This function creates a list of tables for flow and returns the data for the
291 first and last tables of the list
292*/
293static int sep_prepare_flow_dma_tables(unsigned long num_virtual_buffers,
294 unsigned long first_buff_addr,
295 struct sep_flow_context_t *flow_data_ptr,
296 struct sep_lli_entry_t *first_table_data_ptr,
297 struct sep_lli_entry_t *last_table_data_ptr,
298 bool isKernelVirtualAddress);
299
300/*
 301 this function finds a space for the new flow dma table
302*/
303static int sep_find_free_flow_dma_table_space(
304 unsigned long **table_address_ptr);
305
306/*
307 this function goes over all the flow tables connected to the given table and
 308 deallocates them
309*/
310static void sep_deallocated_flow_tables(
311 struct sep_lli_entry_t *first_table_ptr);
312
313/*
 314 This function handles the set flow id command
315*/
316static int sep_set_flow_id_handler(unsigned long arg);
317
318/*
 319 This function returns a pointer to the flow data structure
 320 that contains the given id
321*/
322static int sep_find_flow_context(unsigned long flow_id,
323 struct sep_flow_context_t **flow_data_ptr);
324
325
326/*
327 this function returns the physical and virtual addresses of the static pool
328*/
329static int sep_get_static_pool_addr_handler(unsigned long arg);
330
331/*
 332 this function gets the offset of the physical address from the start of
333 the mapped area
334*/
335static int sep_get_physical_mapped_offset_handler(unsigned long arg);
336
337
338/*
339 this function handles the request for get time
340*/
341static int sep_get_time_handler(unsigned long arg);
342
343/*
344 calculates time and sets it at the predefined address
345*/
346static int sep_set_time(unsigned long *address_ptr,
347 unsigned long *time_in_sec_ptr);
348
349/*
350 PATCH for configuring the DMA to single burst instead of multi-burst
351*/
352static void sep_configure_dma_burst(void);
353
354/*
355 This function locks all the physical pages of the
 356 application virtual buffer and constructs a basic lli
 357 array, where each entry holds the physical page address
 358 and the size of the application data held in that physical page
359*/
360static int sep_lock_user_pages(unsigned long app_virt_addr,
361 unsigned long data_size,
362 unsigned long *num_pages_ptr,
363 struct sep_lli_entry_t **lli_array_ptr,
364 struct page ***page_array_ptr);
365
366/*---------------------------------------------
367 FUNCTIONS
368-----------------------------------------------*/
369
370/*
 371 this function locks SEP by locking the mutex
372*/
373int sep_lock()
374{
375 mutex_lock(&sep_mutex);
376
377 return 0;
378}
379
380/*
381 this function unlocks SEP
382*/
383void sep_unlock()
384{
385 /* release mutex */
386 mutex_unlock(&sep_mutex);
387}
388
389/*
390 this function returns the address of the message shared area
391*/
392void sep_map_shared_area(unsigned long *mappedAddr_ptr)
393{
394 *mappedAddr_ptr = g_sep_shared_area_addr;
395}
396
397/*
 398 this function notifies SEP that a new command message is ready
399*/
400void sep_send_msg_rdy_cmd()
401{
402 sep_send_command_handler();
403}
404
 405/* this function frees all the resources that were allocated for the building
406of the LLI DMA tables */
407void sep_free_dma_resources()
408{
409 sep_free_dma_table_data_handler();
410}
411
412/* poll(suspend), until reply from sep */
413void sep_driver_poll()
414{
415 unsigned long retVal = 0;
416
417#ifdef SEP_DRIVER_POLLING_MODE
418
419 while (sep_host_to_sep_send_counter != (retVal & 0x7FFFFFFF))
420 SEP_READ_REGISTER(g_sep_reg_base_address +
421 HW_HOST_SEP_HOST_GPR2_REG_ADDR,
422 retVal);
423
424 sep_sep_to_host_reply_counter++;
425#else
 426 /* wait until reply from sep */
427 wait_event(g_sep_event,
428 (sep_host_to_sep_send_counter == sep_sep_to_host_reply_counter));
429
430#endif
431}
432
433/*----------------------------------------------------------------------
 434 open function of the character driver - it must only lock the mutex
 435 and must also release the memory data pool allocations
436------------------------------------------------------------------------*/
437static int sep_open(struct inode *inode_ptr, struct file *file_ptr)
438{
439 /* return value */
440 int error;
441
442 /*-----------------
443 CODE
444 ---------------------*/
445
446 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC, "SEP Driver:--------> open start\n");
447
448 error = 0;
449
450 /* check the blocking mode */
451 if (g_sep_block_mode_flag)
452 /* lock mutex */
453 mutex_lock(&sep_mutex);
454 else
 455 error = mutex_trylock(&sep_mutex) ? 0 : -EBUSY;
456
457 /* check the error */
458 if (error) {
459 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_EXTENDED,
 460 "SEP Driver: failed to lock the sep mutex\n");
461
462 goto end_function;
463 }
464
465 /* release data pool allocations */
466 sep_data_pool_bytes_allocated = 0;
467
468end_function:
469
470 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC, "SEP Driver:<-------- open end\n");
471
472 return error;
473}
474
475
476
477
478/*------------------------------------------------------------
479 release function
480-------------------------------------------------------------*/
481static int sep_release(struct inode *inode_ptr, struct file *file_ptr)
482{
483 /*-----------------
484 CODE
485 ---------------------*/
486
487 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
488 "----------->SEP Driver: sep_release start\n");
489
490#if 0/*!SEP_DRIVER_POLLING_MODE*/
491 /* close IMR */
492 SEP_WRITE_REGISTER(g_sep_reg_base_address + HW_HOST_IMR_REG_ADDR, 0x7FFF);
493
494 /* release IRQ line */
495 free_irq(SEP_DIRVER_IRQ_NUM, &g_sep_reg_base_address);
496
497#endif
498
499 /* unlock the sep mutex */
500 mutex_unlock(&sep_mutex);
501
502 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
503 "SEP Driver:<-------- sep_release end\n");
504
505 return 0;
506}
507
508
509
510
511/*---------------------------------------------------------------
 512 map function - this function maps the message shared area
513-----------------------------------------------------------------*/
514static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
515{
516 /* physical addr */
517 unsigned long phys_addr;
518
519 /*-----------------------
520 CODE
521 -------------------------*/
522
523 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC, "-------->SEP Driver: mmap start\n");
524
 525 /* check that the size of the mapped range does not exceed the size of the
 526 message shared area */
527 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
528 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_EXTENDED,
529 "SEP Driver mmap requested size is more than allowed\n");
530 printk(KERN_WARNING "SEP Driver mmap requested size is more \
531 than allowed\n");
532 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n",
533 vma->vm_end);
 534 printk(KERN_WARNING "SEP Driver vma->vm_start is %08lx\n",
535 vma->vm_start);
536 return -EAGAIN;
537 }
538
539 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
540 "SEP Driver:g_message_shared_area_addr is %08lx\n",
541 g_message_shared_area_addr);
542
543 /* get physical address */
544 phys_addr = g_sep_phys_shared_area_addr;
545
546 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED, "SEP Driver: phys_addr is %08lx\n",
547phys_addr);
548
549 if (remap_pfn_range(vma,
550 vma->vm_start,
551 phys_addr >> PAGE_SHIFT,
552 vma->vm_end - vma->vm_start,
553 vma->vm_page_prot)) {
554 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_EXTENDED,
 555 "SEP Driver remap_pfn_range failed\n");
 556 printk(KERN_WARNING "SEP Driver remap_pfn_range failed\n");
557 return -EAGAIN;
558 }
559
560 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC, "SEP Driver:<-------- mmap end\n");
561
562 return 0;
563}
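/*
 * A minimal user-space sketch of how this mapping is meant to be used
 * (the device node path is an assumption based on the "sep_sec_driver"
 * name registered in sep_register_driver_to_fs() below; the size macro
 * value comes from the driver headers):
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	int fd = open("/dev/sep_sec_driver", O_RDWR);
 *	void *msg_area = mmap(NULL, SEP_DRIVER_MMMAP_AREA_SIZE,
 *			      PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * On success msg_area is backed by g_sep_phys_shared_area_addr, so the
 * caller reads and writes the same message shared area the driver and
 * SEP use.
 */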
564
565
566/*-----------------------------------------------
567 poll function
568*----------------------------------------------*/
569static unsigned int sep_poll(struct file *filp, poll_table *wait)
570{
571 unsigned long count;
572
573 unsigned int mask = 0;
574
 575 /* value read from the SEP GPR2 register */
576 unsigned long retVal = 0;
577
578 /*----------------------------------------------
579 CODE
580 -------------------------------------------------*/
581
582 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC, "---------->SEP Driver poll: start\n");
583
584
585#if SEP_DRIVER_POLLING_MODE
586
587 while (sep_host_to_sep_send_counter != (retVal & 0x7FFFFFFF)) {
588 SEP_READ_REGISTER(g_sep_reg_base_address +
589 HW_HOST_SEP_HOST_GPR2_REG_ADDR,
590 retVal);
591
592 for (count = 0; count < 10 * 4; count += 4)
593 DEBUG_PRINT_2(SEP_DEBUG_LEVEL_EXTENDED,
594 "Poll Debug Word %lu of the message is %lu\n",
595 count,
596 *((unsigned long *)(g_sep_shared_area_addr +
597 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
598 }
599
600 sep_sep_to_host_reply_counter++;
601#else
602 /* add the event to the polling wait table */
603 poll_wait(filp, &g_sep_event, wait);
604
605#endif
606
607 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
608 "sep_host_to_sep_send_counter is %lu\n",
609 sep_host_to_sep_send_counter);
610 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
611 "sep_sep_to_host_reply_counter is %lu\n",
612 sep_sep_to_host_reply_counter);
613
614 /* check if the data is ready */
615 if (sep_host_to_sep_send_counter == sep_sep_to_host_reply_counter) {
616 for (count = 0; count < 12 * 4; count += 4)
617 DEBUG_PRINT_2(SEP_DEBUG_LEVEL_EXTENDED,
618 "Sep Mesg Word %lu of the message is %lu\n",
619 count, *((unsigned long *)(g_sep_shared_area_addr + count)));
620
621 for (count = 0; count < 10 * 4; count += 4)
622 DEBUG_PRINT_2(SEP_DEBUG_LEVEL_EXTENDED,
623 "Debug Data Word %lu of the message is %lu\n",
624 count,
625 *((unsigned long *)(g_sep_shared_area_addr + 0x1800 + count)));
626
627 SEP_READ_REGISTER(g_sep_reg_base_address +
628 HW_HOST_SEP_HOST_GPR2_REG_ADDR,
629 retVal);
630 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED, "retVal is %lu\n", retVal);
 631 /* check if this is a sep reply or a request */
632 if (retVal >> 31) {
633 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_EXTENDED,
634 "SEP Driver: sep request in\n");
635 /* request */
636 mask |= POLLOUT | POLLWRNORM;
637 } else {
638 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_EXTENDED, "SEP Driver: sep reply in\n");
639 mask |= POLLIN | POLLRDNORM;
640 }
641 }
642
643 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC, "SEP Driver:<-------- poll exit\n");
644 return mask;
645}
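/*
 * A worked decode of the GPR2 value tested above (numbers chosen only for
 * illustration):
 *
 *	retVal = 0x80000007: retVal >> 31 is 1, so SEP has posted a request
 *	                     and the mask becomes POLLOUT | POLLWRNORM; the
 *	                     low 31 bits (0x7FFFFFFF mask) carry message
 *	                     counter 7, which the polling-mode loop compares
 *	                     against sep_host_to_sep_send_counter.
 *	retVal = 0x00000007: retVal >> 31 is 0, so this is a SEP reply and
 *	                     the mask becomes POLLIN | POLLRDNORM.
 */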
646
647
648static int sep_ioctl(struct inode *inode,
649 struct file *filp,
650 unsigned int cmd,
651 unsigned long arg)
652{
653
654 /* error */
655 int error;
656
657 /*------------------------
658 CODE
659 ------------------------*/
660 error = 0;
661
662 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
663 "------------>SEP Driver: ioctl start\n");
664
665 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED, "SEP Driver: cmd is %x\n", cmd);
666
667 /* check that the command is for sep device */
668 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
669 error = -ENOTTY;
670
671 switch (cmd) {
672 case SEP_IOCSENDSEPCOMMAND:
673
674 /* send command to SEP */
675 sep_send_command_handler();
676
677 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_EXTENDED,
678 "SEP Driver: after sep_send_command_handler\n");
679
680 break;
681
682 case SEP_IOCSENDSEPRPLYCOMMAND:
683
684 /* send reply command to SEP */
685 sep_send_reply_command_handler();
686
687 break;
688
689 case SEP_IOCALLOCDATAPOLL:
690
691 /* allocate data pool */
692 error = sep_allocate_data_pool_memory_handler(arg);
693
694 break;
695
696 case SEP_IOCWRITEDATAPOLL:
697
698 /* write data into memory pool */
699 error = sep_write_into_data_pool_handler(arg);
700
701 break;
702
703 case SEP_IOCREADDATAPOLL:
704
705 /* read data from data pool into application memory */
706 error = sep_read_from_data_pool_handler(arg);
707
708 break;
709
710 case SEP_IOCCREATESYMDMATABLE:
711
 712 /* create dma table for synchronic operation */
713 error = sep_create_sync_dma_tables_handler(arg);
714
715 break;
716
717 case SEP_IOCCREATEFLOWDMATABLE:
718
719 /* create flow dma tables */
720 error = sep_create_flow_dma_tables_handler(arg);
721
722 break;
723
724 case SEP_IOCFREEDMATABLEDATA:
725
726 /* free the pages */
727 error = sep_free_dma_table_data_handler();
728
729 break;
730
731 case SEP_IOCSETFLOWID:
732
733 /* set flow id */
734 error = sep_set_flow_id_handler(arg);
735
736 break;
737
738 case SEP_IOCADDFLOWTABLE:
739
740 /* add tables to the dynamic flow */
741 error = sep_add_flow_tables_handler(arg);
742
743 break;
744
745 case SEP_IOCADDFLOWMESSAGE:
746
747 /* add message of add tables to flow */
748 error = sep_add_flow_tables_message_handler(arg);
749
750 break;
751
752 case SEP_IOCSEPSTART:
753
754 /* start command to sep */
755 error = sep_start_handler();
756 break;
757
758 case SEP_IOCSEPINIT:
759
760 /* init command to sep */
761 error = sep_init_handler(arg);
762 break;
763
764 case SEP_IOCSETAPIMODE:
765
 766 /* set blocking or non-blocking mode */
767 error = sep_set_api_mode_handler(arg);
768
769 break;
770
771 case SEP_IOCGETSTATICPOOLADDR:
772
773 /* get the physical and virtual addresses of the static pool */
774 error = sep_get_static_pool_addr_handler(arg);
775
776 break;
777
778 case SEP_IOCENDTRANSACTION:
779
780 error = sep_end_transaction_handler(arg);
781
782 break;
783
784 case SEP_IOCREALLOCCACHERES:
785
786 error = sep_realloc_cache_resident_handler(arg);
787
788 break;
789
790 case SEP_IOCGETMAPPEDADDROFFSET:
791
792 error = sep_get_physical_mapped_offset_handler(arg);
793
794 break;
795 case SEP_IOCGETIME:
796
797 error = sep_get_time_handler(arg);
798
799 break;
800
801 default:
802 error = -ENOTTY;
803 break;
804 }
805
806 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
807 "SEP Driver:<-------- ioctl end\n");
808
809 return error;
810}
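/*
 * A minimal user-space sketch of one transaction through this interface
 * (the device node path is an assumption, as in the mmap sketch above;
 * the SEP_IOC* request numbers are presumably declared in
 * sep_driver_api.h):
 *
 *	#include <poll.h>
 *	#include <sys/ioctl.h>
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	... write the command message into the mmap()ed shared area ...
 *	ioctl(fd, SEP_IOCSENDSEPCOMMAND);   signal "message ready" to SEP
 *	poll(&pfd, 1, -1);                  sleep until the SEP reply arrives
 *	... read the reply from the shared area ...
 *	ioctl(fd, SEP_IOCENDTRANSACTION);   end the transaction
 */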
811
812
813/*
814 this function registers the driver to the file system
815*/
816static int sep_register_driver_to_fs(void)
817{
818 /* return value */
819 int ret_val;
820
821 /*---------------------
822 CODE
823 -----------------------*/
824
825 ret_val = alloc_chrdev_region(&g_sep_device_number, 0, 1, "sep_sec_driver");
826 if (ret_val) {
827 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
828 "sep_driver:major number allocation failed, retval is %d\n", ret_val);
829 goto end_function;
830 }
831
 832 /* set the file operations structure */
833 g_sep_fops.owner = THIS_MODULE;
834 g_sep_fops.ioctl = sep_ioctl;
835 g_sep_fops.poll = sep_poll;
836 g_sep_fops.open = sep_open;
837 g_sep_fops.release = sep_release;
838 g_sep_fops.mmap = sep_mmap;
839
840 /* init cdev */
841 cdev_init(&g_sep_cdev, &g_sep_fops);
842 g_sep_cdev.owner = THIS_MODULE;
843
844 /* register the driver with the kernel */
845 ret_val = cdev_add(&g_sep_cdev, g_sep_device_number, 1);
846
847 if (ret_val) {
848 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
849 "sep_driver:cdev_add failed, retval is %d\n",
850 ret_val);
851 goto end_function_unregister_devnum;
852 }
853
854 goto end_function;
855
856end_function_unregister_devnum:
857
858 /* unregister dev numbers */
859 unregister_chrdev_region(g_sep_device_number, 1);
860
861end_function:
862
863 return ret_val;
864}
865
866/*
867 this function unregisters driver from fs
868*/
869static void sep_unregister_driver_from_fs(void)
870{
871 /*-------------------
872 CODE
873 ---------------------*/
874
875 cdev_del(&g_sep_cdev);
876
877 /* unregister dev numbers */
878 unregister_chrdev_region(g_sep_device_number, 1);
879}
880
881/*--------------------------------------------------------------
882 init function
883----------------------------------------------------------------*/
884static int __init sep_init(void)
885{
886 /* return value */
887 int ret_val;
888
889 /* counter */
890 int counter;
891
 892 /* size of memory for allocation */
893 int size;
894
895 /*------------------------
896 CODE
897 ------------------------*/
898
899 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
900 "SEP Driver:-------->Init start\n");
901 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_BASIC,
902 "g_sep_shared_area_addr = %lx\n",
903 (unsigned long)&g_sep_shared_area_addr);
904
905 ret_val = 0;
906
907/* transaction counter that coordinates the transactions between SEP
908 and HOST */
909 sep_host_to_sep_send_counter = 0;
910
911/* counter for the messages from sep */
912 sep_sep_to_host_reply_counter = 0;
913
914/* counter for the number of bytes allocated in the pool
915for the current transaction */
916 sep_data_pool_bytes_allocated = 0;
917
918 /* set the starting mode to blocking */
919 g_sep_block_mode_flag = 1;
920
921
922 ret_val = sep_register_driver_to_device();
923 if (ret_val) {
924 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
 925 "sep_driver:sep_register_driver_to_device failed, ret_val is %d\n",
926 ret_val);
927 goto end_function_unregister_from_fs;
928 }
929
930 /* calculate the total size for allocation */
931 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
932 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES +
933 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
934 SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES +
935 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
936 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
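	/*
	 * A sketch of how this single allocation is carved up (the ordering
	 * of the regions past the first two is inferred from the offset
	 * macro names in sep_driver_config.h and is an assumption, not
	 * spelled out in this file):
	 *
	 *	base + 0x0 ........................ message area
	 *	base + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES
	 *	     .............................. synchronic DMA tables
	 *	then the data pool, flow DMA tables, static area and system
	 *	data regions, whose sizes are the remaining terms summed above.
	 */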
937
938
939
940 /* allocate the shared area */
941 if (sep_map_and_alloc_shared_area(size,
942 &g_sep_shared_area_addr,
943 &g_sep_phys_shared_area_addr)) {
944 ret_val = -ENOMEM;
945 /* allocation failed */
946 goto end_function_unmap_io_memory;
947 }
948
949 /* now set the memory regions */
950 g_message_shared_area_addr = g_sep_shared_area_addr;
951
952 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
953 "SEP Driver: g_message_shared_area_addr is %08lx\n",
954 g_message_shared_area_addr);
955
956#if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
957
958 /* send the new SHARED MESSAGE AREA to the SEP */
959 SEP_WRITE_REGISTER(g_sep_reg_base_address + HW_HOST_HOST_SEP_GPR1_REG_ADDR,
960 g_sep_phys_shared_area_addr);
961
962 /* poll for SEP response */
963 SEP_READ_REGISTER(g_sep_reg_base_address + HW_HOST_SEP_HOST_GPR1_REG_ADDR,
964 retVal);
965 while (retVal != 0xffffffff && retVal != g_sep_phys_shared_area_addr)
966 SEP_READ_REGISTER(g_sep_reg_base_address +
967 HW_HOST_SEP_HOST_GPR1_REG_ADDR,
968 retVal);
969
970 /* check the return value (register) */
971 if (retVal != g_sep_phys_shared_area_addr) {
972 ret_val = -ENOMEM;
973 goto end_function_deallocate_message_area;
974 }
975
976#endif
977
 978 /* init the flow contexts */
979 for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
980 g_sep_flows_data_array[counter].flow_id = SEP_FREE_FLOW_ID;
981
982 g_sep_flow_wq_ptr = create_singlethread_workqueue("sepflowwq");
983 if (g_sep_flow_wq_ptr == 0) {
984 ret_val = -ENOMEM;
985 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_EXTENDED,
986 "sep_driver:flow queue creation failed\n");
987 goto end_function_deallocate_sep_shared_area;
988 }
989
990 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_EXTENDED,
991 "SEP Driver: create flow workqueue \n");
992
993 /* register driver to fs */
994 ret_val = sep_register_driver_to_fs();
995 if (ret_val)
996 goto end_function_deallocate_sep_shared_area;
997
998 /* load the rom code */
999 sep_load_rom_code();
1000
1001 goto end_function;
1002
1003end_function_unregister_from_fs:
1004
1005 /* unregister from fs */
1006 sep_unregister_driver_from_fs();
1007
1008end_function_deallocate_sep_shared_area:
1009
1010 /* de-allocate shared area */
1011 sep_unmap_and_free_shared_area(size,
1012 g_sep_shared_area_addr,
1013 g_sep_phys_shared_area_addr);
1014
1015end_function_unmap_io_memory:
1016
1017 iounmap((void *)g_sep_reg_base_address);
1018
1019 /* release io memory region */
1020 release_mem_region(SEP_IO_MEM_REGION_START_ADDRESS, SEP_IO_MEM_REGION_SIZE);
1021
1022end_function:
1023
1024 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC, "SEP Driver:<-------- Init end\n");
1025
1026 return ret_val;
1027}
1028
1029
1030
1031
1032/*-------------------------------------------------------------
1033 exit function
1034--------------------------------------------------------------*/
1035static void __exit sep_exit(void)
1036{
1037 /* size */
1038 int size;
1039
1040 /*-----------------------------
1041 CODE
1042 --------------------------------*/
1043
1044 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC, "SEP Driver:--------> Exit start\n");
1045
1046 /* unregister from fs */
1047 sep_unregister_driver_from_fs();
1048
1049 /* calculate the total size for de-allocation */
1050 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
1051 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES +
1052 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
1053 SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES +
1054 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
1055 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
1056
1057
1058 /* free shared area */
1059 sep_unmap_and_free_shared_area(size,
1060 g_sep_shared_area_addr,
1061 g_sep_phys_shared_area_addr);
1062
1063 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_EXTENDED,
1064 "SEP Driver: free pages SEP SHARED AREA \n");
1065
1066 iounmap((void *)g_sep_reg_base_address);
1067
1068 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_EXTENDED, "SEP Driver: iounmap \n");
1069
1070 /* release io memory region */
1071 release_mem_region(SEP_IO_MEM_REGION_START_ADDRESS, SEP_IO_MEM_REGION_SIZE);
1072
1073 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_EXTENDED, "SEP Driver: release_mem_region \n");
1074
1075 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC, "SEP Driver:<-------- Exit end\n");
1076}
1077
1078
1079/*
1080 interrupt handler function
1081*/
1082irqreturn_t sep_inthandler(int irq, void *dev_id)
1083{
1084 /* int error */
1085 irqreturn_t int_error;
1086
1087 /* error */
1088 unsigned long error;
1089
1090 /* reg value */
1091 unsigned long reg_val;
1092
1093 /* flow id */
1094 unsigned long flow_id;
1095
1096 /* flow context */
1097 struct sep_flow_context_t *flow_context_ptr;
1098
1099 /*-----------------------------
1100 CODE
1101 -----------------------------*/
1102
1103 int_error = IRQ_HANDLED;
1104
1105 /* read the IRR register to check if this is SEP interrupt */
1106 SEP_READ_REGISTER(g_sep_reg_base_address + HW_HOST_IRR_REG_ADDR, reg_val);
1107 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED, "SEP Interrupt - reg is %08lx\n",
1108 reg_val);
1109
1110 /* check if this is the flow interrupt */
1111 if (0/*reg_val & (0x1 << 11)*/) {
 1112 /* read GPR0 to find out which flow is done */
1113 SEP_READ_REGISTER(g_sep_reg_base_address + HW_HOST_IRR_REG_ADDR,
1114 flow_id);
1115
 1116 /* find the context of the flow */
1117 error = sep_find_flow_context(flow_id >> 28, &flow_context_ptr);
1118 if (error)
1119 goto end_function_with_error;
1120
1121 INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
1122
1123 /* queue the work */
1124 queue_work(g_sep_flow_wq_ptr, &flow_context_ptr->flow_wq);
1125
1126 } else {
1127 /* check if this is reply interrupt from SEP */
1128 if (reg_val & (0x1 << 13)) {
1129 /* update the counter of reply messages */
1130 sep_sep_to_host_reply_counter++;
1131
1132 /* wake up the waiting process */
1133 wake_up(&g_sep_event);
1134 } else {
1135 int_error = IRQ_NONE;
1136 goto end_function;
1137 }
1138 }
1139
1140end_function_with_error:
1141
1142 /* clear the interrupt */
1143 SEP_WRITE_REGISTER(g_sep_reg_base_address + HW_HOST_ICR_REG_ADDR, reg_val);
1144
1145end_function:
1146
1147 return int_error;
1148}
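/*
 * A concrete decode of the IRR checks above (the bit positions are taken
 * from the masks in the code, not from a datasheet): reg_val = 0x2000 has
 * bit 13 set, so it is treated as a SEP reply - the reply counter is
 * incremented and wake_up(&g_sep_event) releases the wait_event() in
 * sep_driver_poll().  Bit 11 would select the flow-done path, but that
 * branch is currently compiled out by the "if (0 ...)" above.
 */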
1149
1150
1151/*
 1152 This function prepares only the input DMA table for synchronic symmetric
1153 operations (HASH)
1154*/
1155int sep_prepare_input_dma_table(unsigned long app_virt_addr,
1156 unsigned long data_size,
1157 unsigned long block_size,
1158 unsigned long *lli_table_ptr,
1159 unsigned long *num_entries_ptr,
1160 unsigned long *table_data_size_ptr,
1161 bool isKernelVirtualAddress)
1162
1163{
1164 /* pointer to the info entry of the table - the last entry */
1165 struct sep_lli_entry_t *info_entry_ptr;
1166
 1167 /* array of lli entries built from the locked pages */
1168 struct sep_lli_entry_t *lli_array_ptr;
1169
1170 /* points to the first entry to be processed in the lli_in_array */
1171 unsigned long current_entry;
1172
1173 /* num entries in the virtual buffer */
1174 unsigned long sep_lli_entries;
1175
1176 /* lli table pointer */
1177 struct sep_lli_entry_t *in_lli_table_ptr;
1178
1179 /* the total data in one table */
1180 unsigned long table_data_size;
1181
1182 /* number of entries in lli table */
1183 unsigned long num_entries_in_table;
1184
1185 /* next table address */
1186 unsigned long lli_table_alloc_addr;
1187
1188 /* result */
1189 unsigned long result;
1190
1191 /*------------------------
1192 CODE
1193 --------------------------*/
1194
1195 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
1196 "SEP Driver:--------> sep_prepare_input_dma_table start\n");
1197
1198 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED, "SEP Driver:data_size is %lu\n",
1199 data_size);
1200 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED, "SEP Driver:block_size is %lu\n",
1201 block_size);
1202
1203 /* initialize the pages pointers */
1204 sep_in_page_array = 0;
1205 sep_in_num_pages = 0;
1206
1207 if (data_size == 0) {
 1208 /* special case - create a 2-entry table with zero data */
1209 in_lli_table_ptr = (struct sep_lli_entry_t *)(g_sep_shared_area_addr +
1210 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
1211 in_lli_table_ptr->physical_address = g_sep_shared_area_addr +
1212 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1213 in_lli_table_ptr->block_size = 0;
1214
1215 in_lli_table_ptr++;
1216 in_lli_table_ptr->physical_address = 0xFFFFFFFF;
1217 in_lli_table_ptr->block_size = 0;
1218
1219 *lli_table_ptr = g_sep_phys_shared_area_addr +
1220 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1221 *num_entries_ptr = 2;
1222 *table_data_size_ptr = 0;
1223
1224 goto end_function;
1225 }
1226
1227 /* check if the pages are in Kernel Virtual Address layout */
1228 if (isKernelVirtualAddress == true)
1229 /* lock the pages of the kernel buffer and translate them to pages */
1230 result = sep_lock_kernel_pages(app_virt_addr,
1231 data_size,
1232 &sep_in_num_pages,
1233 &lli_array_ptr,
1234 &sep_in_page_array);
1235 else
1236 /* lock the pages of the user buffer and translate them to pages */
1237 result = sep_lock_user_pages(app_virt_addr,
1238 data_size,
1239 &sep_in_num_pages,
1240 &lli_array_ptr,
1241 &sep_in_page_array);
1242
1243 if (result)
1244 return result;
1245
1246 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1247 "SEP Driver:output sep_in_num_pages is %lu\n",
1248 sep_in_num_pages);
1249
1250 current_entry = 0;
1251 info_entry_ptr = 0;
1252 sep_lli_entries = sep_in_num_pages;
1253
 1254 /* initialize to point after the message area */
1255 lli_table_alloc_addr = g_sep_shared_area_addr +
1256 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1257
 1258 /* loop until all the entries in the in array are processed */
1259 while (current_entry < sep_lli_entries) {
 1260 /* set the new input table */
1261 in_lli_table_ptr = (struct sep_lli_entry_t *)lli_table_alloc_addr;
1262
1263 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) *
1264 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1265
1266 /* calculate the maximum size of data for input table */
1267 table_data_size = sep_calculate_lli_table_max_size(
1268 &lli_array_ptr[current_entry],
1269 (sep_lli_entries - current_entry));
1270
 1271 /* now round the table size down to a multiple of the block size */
1272 table_data_size = (table_data_size / block_size) * block_size;
1273
1274 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1275 "SEP Driver:output table_data_size is %lu\n",
1276 table_data_size);
1277
1278 /* construct input lli table */
1279 sep_build_lli_table(&lli_array_ptr[current_entry],
1280 in_lli_table_ptr,
1281 &current_entry,
1282 &num_entries_in_table,
1283 table_data_size);
1284
1285 if (info_entry_ptr == 0) {
1286 /* set the output parameters to physical addresses */
1287 *lli_table_ptr = sep_shared_area_virt_to_phys(
1288 (unsigned long)in_lli_table_ptr);
1289 *num_entries_ptr = num_entries_in_table;
1290 *table_data_size_ptr = table_data_size;
1291
1292 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1293 "SEP Driver:output lli_table_in_ptr is %08lx\n",
1294 *lli_table_ptr);
1295 } else {
1296 /* update the info entry of the previous in table */
1297 info_entry_ptr->physical_address = sep_shared_area_virt_to_phys(
1298 (unsigned long)in_lli_table_ptr);
1299 info_entry_ptr->block_size = ((num_entries_in_table) << 24) |
1300 (table_data_size);
1301 }
1302
1303 /* save the pointer to the info entry of the current tables */
1304 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1305 }
1306
1307 /* print input tables */
1308 sep_debug_print_lli_tables((struct sep_lli_entry_t *)
1309 sep_shared_area_phys_to_virt(*lli_table_ptr),
1310 *num_entries_ptr,
1311 *table_data_size_ptr);
1312
 1313 /* free the lli array that described the pages */
1314 kfree(lli_array_ptr);
1315
1316end_function:
1317
1318 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
1319 "SEP Driver:<-------- sep_prepare_input_dma_table end\n");
1320
1321 return 0;
1322
1323}
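/*
 * How the tables built above chain together, with concrete numbers as an
 * illustration: the last ("info") entry of each table points at the next
 * table and packs its geometry as
 *
 *	block_size = (num_entries_in_table << 24) | table_data_size
 *
 * so a next table holding 5 entries that cover 0x3000 bytes is recorded
 * as block_size = 0x05003000.  sep_debug_print_lli_tables() walks the
 * chain by undoing exactly this: entries = (block_size >> 24) & 0xff and
 * data size = block_size & 0xffffff.
 */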
1324
1325/*
 1326 This function builds input and output DMA tables for synchronic
 1327 symmetric operations (AES, DES). It also checks that each table's
 1328 data size is a multiple of the block size
1329*/
1330int sep_prepare_input_output_dma_table(unsigned long app_virt_in_addr,
1331 unsigned long app_virt_out_addr,
1332 unsigned long data_size,
1333 unsigned long block_size,
1334 unsigned long *lli_table_in_ptr,
1335 unsigned long *lli_table_out_ptr,
1336 unsigned long *in_num_entries_ptr,
1337 unsigned long *out_num_entries_ptr,
1338 unsigned long *table_data_size_ptr,
1339 bool isKernelVirtualAddress)
1340
1341{
 1342 /* lli array for the input pages */
1343 struct sep_lli_entry_t *lli_in_array;
1344
 1345 /* lli array for the output pages */
1346 struct sep_lli_entry_t *lli_out_array;
1347
1348 /* result */
1349 int result;
1350
1351
1352 /*------------------------
1353 CODE
1354 --------------------------*/
1355
1356 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
1357 "SEP Driver:--------> sep_prepare_input_output_dma_table start\n");
1358
1359 result = 0;
1360
1361 /* initialize the pages pointers */
1362 sep_in_page_array = 0;
1363 sep_out_page_array = 0;
1364
1365 /* check if the pages are in Kernel Virtual Address layout */
1366 if (isKernelVirtualAddress == true) {
1367 /* lock the pages of the kernel buffer and translate them to pages */
1368 result = sep_lock_kernel_pages(app_virt_in_addr,
1369 data_size,
1370 &sep_in_num_pages,
1371 &lli_in_array,
1372 &sep_in_page_array);
1373 if (result) {
1374 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_EXTENDED,
1375 "SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
1376 goto end_function;
1377 }
1378 } else {
1379 /* lock the pages of the user buffer and translate them to pages */
1380 result = sep_lock_user_pages(app_virt_in_addr,
1381 data_size,
1382 &sep_in_num_pages,
1383 &lli_in_array,
1384 &sep_in_page_array);
1385 if (result) {
1386 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_EXTENDED,
1387 "SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
1388 goto end_function;
1389 }
1390 }
1391
1392 if (isKernelVirtualAddress == true) {
1393 result = sep_lock_kernel_pages(app_virt_out_addr,
1394 data_size,
1395 &sep_out_num_pages,
1396 &lli_out_array,
1397 &sep_out_page_array);
1398 if (result) {
1399 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_EXTENDED,
1400 "SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
1401 goto end_function_with_error1;
1402 }
1403 } else {
1404 result = sep_lock_user_pages(app_virt_out_addr,
1405 data_size,
1406 &sep_out_num_pages,
1407 &lli_out_array,
1408 &sep_out_page_array);
1409 if (result) {
1410 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_EXTENDED,
1411 "SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
1412 goto end_function_with_error1;
1413 }
1414 }
1415
1416
1417 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1418 "sep_in_num_pages is %lu\n", sep_in_num_pages);
1419 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1420 "sep_out_num_pages is %lu\n", sep_out_num_pages);
1421 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1422 "SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n",
1423 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1424
1425
 1426 /* call the function that creates the tables from the lli arrays */
1427 result = sep_construct_dma_tables_from_lli(lli_in_array,
1428 sep_in_num_pages,
1429 lli_out_array,
1430 sep_out_num_pages,
1431 block_size,
1432 lli_table_in_ptr,
1433 lli_table_out_ptr,
1434 in_num_entries_ptr,
1435 out_num_entries_ptr,
1436 table_data_size_ptr);
1437 if (result) {
1438 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_EXTENDED,
1439 "SEP Driver: sep_construct_dma_tables_from_lli failed\n");
1440 goto end_function_with_error2;
1441 }
1442
1443 /* fall through - free the lli entry arrays */
1444
1445 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_BASIC, "in_num_entries_ptr is %08lx\n",
1446 *in_num_entries_ptr);
1447 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_BASIC, "out_num_entries_ptr is %08lx\n",
1448 *out_num_entries_ptr);
1449 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_BASIC, "table_data_size_ptr is %08lx\n",
1450 *table_data_size_ptr);
1451
1452
1453end_function_with_error2:
1454
1455 kfree(lli_out_array);
1456
1457end_function_with_error1:
1458
1459 kfree(lli_in_array);
1460
1461end_function:
1462
1463 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_BASIC,
1464"SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n",
1465(int)result);
1466
1467 return result;
1468
1469}
1470
1471
1472/*
1473 This function creates the input and output dma tables for
 1474 symmetric operations (AES/DES) according to the block size from LLI arrays
1475*/
1476int sep_construct_dma_tables_from_lli(struct sep_lli_entry_t *lli_in_array,
1477 unsigned long sep_in_lli_entries,
1478 struct sep_lli_entry_t *lli_out_array,
1479 unsigned long sep_out_lli_entries,
1480 unsigned long block_size,
1481 unsigned long *lli_table_in_ptr,
1482 unsigned long *lli_table_out_ptr,
1483 unsigned long *in_num_entries_ptr,
1484 unsigned long *out_num_entries_ptr,
1485 unsigned long *table_data_size_ptr)
1486{
1487 /* points to the area where next lli table can be allocated */
1488 unsigned long lli_table_alloc_addr;
1489
1490 /* input lli table */
1491 struct sep_lli_entry_t *in_lli_table_ptr;
1492
1493 /* output lli table */
1494 struct sep_lli_entry_t *out_lli_table_ptr;
1495
1496 /* pointer to the info entry of the table - the last entry */
1497 struct sep_lli_entry_t *info_in_entry_ptr;
1498
1499 /* pointer to the info entry of the table - the last entry */
1500 struct sep_lli_entry_t *info_out_entry_ptr;
1501
1502 /* points to the first entry to be processed in the lli_in_array */
1503 unsigned long current_in_entry;
1504
1505 /* points to the first entry to be processed in the lli_out_array */
1506 unsigned long current_out_entry;
1507
1508 /* max size of the input table */
1509 unsigned long in_table_data_size;
1510
1511 /* max size of the output table */
1512 unsigned long out_table_data_size;
1513
 1514 /* flag that signifies whether this is the first table built from the arrays */
1515 unsigned long first_table_flag;
1516
1517 /* the data size that should be in table */
1518 unsigned long table_data_size;
1519
 1520 /* number of entries in the input table */
1521 unsigned long num_entries_in_table;
1522
 1523 /* number of entries in the output table */
1524 unsigned long num_entries_out_table;
1525
1526 /*---------------------
1527 CODE
1528 ------------------------*/
1529
1530 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
1531 "SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");
1532
 1533 /* initialize to point after the message area */
1534 lli_table_alloc_addr = g_sep_shared_area_addr +
1535 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1536
1537 current_in_entry = 0;
1538 current_out_entry = 0;
1539 first_table_flag = 1;
1540 info_in_entry_ptr = 0;
1541 info_out_entry_ptr = 0;
1542
 1543 /* loop until all the entries in the in array are processed */
1544 while (current_in_entry < sep_in_lli_entries) {
1545 /* set the new input and output tables */
1546 in_lli_table_ptr = (struct sep_lli_entry_t *)lli_table_alloc_addr;
1547
1548 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) *
1549 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1550
 1551 /* set the new output table */
1552 out_lli_table_ptr = (struct sep_lli_entry_t *)lli_table_alloc_addr;
1553
1554 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) *
1555 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1556
1557 /* calculate the maximum size of data for input table */
1558 in_table_data_size =
1559 sep_calculate_lli_table_max_size(
1560 &lli_in_array[current_in_entry],
1561 (sep_in_lli_entries - current_in_entry));
1562
1563 /* calculate the maximum size of data for output table */
1564 out_table_data_size =
1565 sep_calculate_lli_table_max_size(
1566 &lli_out_array[current_out_entry],
1567 (sep_out_lli_entries - current_out_entry));
1568
1569 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1570 "SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
1571 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1572 "SEP Driver:out_table_data_size is %lu\n", out_table_data_size);
1573
1574 /* check where the data is smallest */
1575 table_data_size = in_table_data_size;
1576 if (table_data_size > out_table_data_size)
1577 table_data_size = out_table_data_size;
1578
 1579 /* now round the table size down to a multiple of the block size */
1580 table_data_size = (table_data_size / block_size) * block_size;
1581
1582 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_BASIC,
1583 "SEP Driver:table_data_size is %lu\n",
1584 table_data_size);
1585
1586 /* construct input lli table */
1587 sep_build_lli_table(&lli_in_array[current_in_entry],
1588 in_lli_table_ptr,
1589 &current_in_entry,
1590 &num_entries_in_table,
1591 table_data_size);
1592
1593 /* construct output lli table */
1594 sep_build_lli_table(&lli_out_array[current_out_entry],
1595 out_lli_table_ptr,
1596 &current_out_entry,
1597 &num_entries_out_table,
1598 table_data_size);
1599
1600 /* if info entry is null - this is the first table built */
1601 if (info_in_entry_ptr == 0) {
1602 /* set the output parameters to physical addresses */
1603 *lli_table_in_ptr =
1604 sep_shared_area_virt_to_phys((unsigned long)in_lli_table_ptr);
1605 *in_num_entries_ptr = num_entries_in_table;
1606 *lli_table_out_ptr =
1607 sep_shared_area_virt_to_phys((unsigned long)out_lli_table_ptr);
1608 *out_num_entries_ptr = num_entries_out_table;
1609 *table_data_size_ptr = table_data_size;
1610
1611 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1612 "SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
1613 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1614 "SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
1615 } else {
1616 /* update the info entry of the previous in table */
1617 info_in_entry_ptr->physical_address =
1618 sep_shared_area_virt_to_phys((unsigned long)in_lli_table_ptr);
1619 info_in_entry_ptr->block_size =
1620 ((num_entries_in_table) << 24) | (table_data_size);
1621
 1622 /* update the info entry of the previous out table */
1623 info_out_entry_ptr->physical_address =
1624 sep_shared_area_virt_to_phys((unsigned long)out_lli_table_ptr);
1625 info_out_entry_ptr->block_size =
1626 ((num_entries_out_table) << 24) | (table_data_size);
1627 }
1628
1629 /* save the pointer to the info entry of the current tables */
1630 info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1631 info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;
1632
1633 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1634 "SEP Driver:output num_entries_out_table is %lu\n",
1635 (unsigned long)num_entries_out_table);
1636 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1637 "SEP Driver:output info_in_entry_ptr is %lu\n",
1638 (unsigned long)info_in_entry_ptr);
1639 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1640 "SEP Driver:output info_out_entry_ptr is %lu\n",
1641 (unsigned long)info_out_entry_ptr);
1642 }
1643
1644 /* print input tables */
1645 sep_debug_print_lli_tables(
1646 (struct sep_lli_entry_t *)
1647 sep_shared_area_phys_to_virt(*lli_table_in_ptr),
1648 *in_num_entries_ptr,
1649 *table_data_size_ptr);
1650
1651 /* print output tables */
1652 sep_debug_print_lli_tables(
1653 (struct sep_lli_entry_t *)
1654 sep_shared_area_phys_to_virt(*lli_table_out_ptr),
1655 *out_num_entries_ptr,
1656 *table_data_size_ptr);
1657
1658 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
1659 "SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
1660
1661 return 0;
1662}
1663
1664/*
1665 this function calculates the size of data that can be inserted into the lli
 1666 table from this array; the condition is that either the table is full
 1667 (all entries are entered) or there are no more entries in the lli array
1668*/
1669unsigned long sep_calculate_lli_table_max_size(
1670 struct sep_lli_entry_t *lli_in_array_ptr,
1671 unsigned long num_array_entries)
1672{
1673 /* table data size */
1674 unsigned long table_data_size;
1675
1676 /* counter */
1677 unsigned long counter;
1678
1679 /*---------------------
1680 CODE
1681 ----------------------*/
1682
1683 table_data_size = 0;
1684
 1685 /* sum the data that can go into the lli table, until either the whole
 1686 table is filled or the array entries have ended */
1687 for (counter = 0;
1688 (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
1689 (counter < num_array_entries); counter++)
1690 table_data_size += lli_in_array_ptr[counter].block_size;
1691
1692 return table_data_size;
1693}
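/*
 * Note that the loop above stops one short of
 * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP, presumably because the last slot
 * of every table is reserved for the info entry set up by
 * sep_build_lli_table().  As a worked example (values invented for
 * illustration): if SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP were 3 and the
 * remaining array entries spanned 4096, 4096 and 2048 bytes, only two
 * data entries fit, so the function returns 8192 and the third entry is
 * deferred to the next table.
 */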
1694
1695/*
 1696 this function builds one lli table from the lli_array according to
1697 the given size of data
1698*/
1699static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr,
1700 struct sep_lli_entry_t *lli_table_ptr,
1701 unsigned long *num_processed_entries_ptr,
1702 unsigned long *num_table_entries_ptr,
1703 unsigned long table_data_size)
1704{
1705 /* current table data size */
1706 unsigned long curr_table_data_size;
1707
1708 /* counter of lli array entry */
1709 unsigned long array_counter;
1710
1711 /*-----------------------
1712 CODE
1713 ---------------------------*/
1714
1715 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
1716 "SEP Driver:--------> sep_build_lli_table start\n");
1717
 1718 /* init current table data size and lli array entry counter */
1719 curr_table_data_size = 0;
1720 array_counter = 0;
1721 *num_table_entries_ptr = 1;
1722
1723 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1724 "SEP Driver:table_data_size is %lu\n",
1725 table_data_size);
1726
1727 /* fill the table till table size reaches the needed amount */
1728 while (curr_table_data_size < table_data_size) {
1729 /* update the number of entries in table */
1730 (*num_table_entries_ptr)++;
1731
1732 lli_table_ptr->physical_address =
1733 lli_array_ptr[array_counter].physical_address;
1734 lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
1735 curr_table_data_size += lli_table_ptr->block_size;
1736
1737 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1738 "SEP Driver:lli_table_ptr is %08lx\n",
1739 (unsigned long)lli_table_ptr);
1740 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1741 "SEP Driver:lli_table_ptr->physical_address is %08lx\n",
1742 lli_table_ptr->physical_address);
1743 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1744 "SEP Driver:lli_table_ptr->block_size is %lu\n",
1745 lli_table_ptr->block_size);
1746
1747 /* check for overflow of the table data */
1748 if (curr_table_data_size > table_data_size) {
1749 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_EXTENDED,
1750 "SEP Driver:curr_table_data_size > table_data_size\n");
1751
1752 /* update the size of block in the table */
1753 lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);
1754
1755 /* update the physical address in the lli array */
1756 lli_array_ptr[array_counter].physical_address +=
1757 lli_table_ptr->block_size;
1758
1759 /* update the block size left in the lli array */
1760 lli_array_ptr[array_counter].block_size =
1761 (curr_table_data_size - table_data_size);
1762 } else
1763 /* advance to the next entry in the lli_array */
1764 array_counter++;
1765
1766 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1767 "SEP Driver:lli_table_ptr->physical_address is %08lx\n",
1768 lli_table_ptr->physical_address);
1769 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1770 "SEP Driver:lli_table_ptr->block_size is %lu\n",
1771 lli_table_ptr->block_size);
1772
1773 /* move to the next entry in table */
1774 lli_table_ptr++;
1775 }
1776
1777 /* set the info entry to default */
1778 lli_table_ptr->physical_address = 0xffffffff;
1779 lli_table_ptr->block_size = 0;
1780
1781 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1782 "SEP Driver:lli_table_ptr is %08lx\n",
1783 (unsigned long)lli_table_ptr);
1784 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1785 "SEP Driver:lli_table_ptr->physical_address is %08lx\n",
1786 lli_table_ptr->physical_address);
1787 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1788 "SEP Driver:lli_table_ptr->block_size is %lu\n",
1789 lli_table_ptr->block_size);
1790
1791
1792 /* set the output parameter */
1793 *num_processed_entries_ptr += array_counter;
1794
1795 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1796 "SEP Driver:*num_processed_entries_ptr is %lu\n",
1797 *num_processed_entries_ptr);
1798
1799
1800 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
1801 "SEP Driver:<-------- sep_build_lli_table end\n");
1802
1803 return;
1804}
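/*
 * A worked example of the overflow handling above (numbers are for
 * illustration only): suppose table_data_size is 0x2000 and the current
 * lli_array entry spans 0x1800 bytes while 0x1000 bytes have already been
 * placed in the table.  curr_table_data_size becomes 0x2800, so the table
 * entry is trimmed to 0x1800 - 0x800 = 0x1000 bytes, the array entry's
 * physical_address is advanced by 0x1000, its block_size is set to the
 * remaining 0x800, and array_counter is left pointing at it so the
 * remainder starts the next table.
 */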
1805
1806/*
 1807 this function goes over the list of the created tables and
 1808 prints all their data
1809*/
1810static void sep_debug_print_lli_tables(struct sep_lli_entry_t *lli_table_ptr,
1811 unsigned long num_table_entries,
1812 unsigned long table_data_size)
1813{
1814 unsigned long table_count;
1815
1816 unsigned long entries_count;
1817 /*-----------------------------
1818 CODE
1819 -------------------------------*/
1820
1821 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
1822 "SEP Driver:--------> sep_debug_print_lli_tables start\n");
1823
1824 table_count = 1;
1825 while ((unsigned long)lli_table_ptr != 0xffffffff) {
1826 DEBUG_PRINT_2(SEP_DEBUG_LEVEL_EXTENDED,
1827 "SEP Driver: lli table %08lx, table_data_size is %lu\n",
1828 table_count,
1829 table_data_size);
1830 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1831 "SEP Driver: num_table_entries is %lu\n", num_table_entries);
1832
1833 /* print entries of the table (without info entry) */
1834 for (entries_count = 0;
1835 entries_count < num_table_entries;
1836 entries_count++, lli_table_ptr++) {
1837 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1838 "SEP Driver:lli_table_ptr address is %08lx\n",
1839 (unsigned long)lli_table_ptr);
1840 DEBUG_PRINT_2(SEP_DEBUG_LEVEL_EXTENDED,
1841 "SEP Driver:phys address is %08lx block size is %lu\n",
1842 lli_table_ptr->physical_address, lli_table_ptr->block_size);
1843 }
1844
1845 /* point to the info entry */
1846 lli_table_ptr--;
1847
1848 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1849 "SEP Driver:phys lli_table_ptr->block_size is %lu\n",
1850 lli_table_ptr->block_size);
1851 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1852 "SEP Driver:phys lli_table_ptr->physical_address is %08lx\n",
1853 lli_table_ptr->physical_address);
1854
1855
1856 table_data_size = lli_table_ptr->block_size & 0xffffff;
1857 num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
1858 lli_table_ptr = (struct sep_lli_entry_t *)
1859 (lli_table_ptr->physical_address);
1860
1861 DEBUG_PRINT_3(SEP_DEBUG_LEVEL_EXTENDED,
1862 "SEP Driver:phys table_data_size is %lu num_table_entries is \
1863 %lu lli_table_ptr is%lu\n",
1864 table_data_size, num_table_entries, (unsigned long)lli_table_ptr);
1865
1866 if ((unsigned long)lli_table_ptr != 0xffffffff)
1867 lli_table_ptr = (struct sep_lli_entry_t *)sep_shared_area_phys_to_virt(
1868 (unsigned long)lli_table_ptr);
1869
1870 table_count++;
1871 }
1872
1873 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
1874 "SEP Driver:<-------- sep_debug_print_lli_tables end\n");
1875}
1876
1877
1878/*
1879 This function locks all the physical pages of the application virtual buffer
 1880 and constructs a basic lli array, where each entry holds the physical page
 1881 address and the size of the application data held in that physical page
1882*/
1883int sep_lock_user_pages(unsigned long app_virt_addr,
1884 unsigned long data_size,
1885 unsigned long *num_pages_ptr,
1886 struct sep_lli_entry_t **lli_array_ptr,
1887 struct page ***page_array_ptr)
1888
1889{
1890 /* error */
1891 int error;
1892
1893 /* the page of the end address of the user space buffer */
1894 unsigned long end_page;
1895
1896 /* the page of the start address of the user space buffer */
1897 unsigned long start_page;
1898
1899 /* the range in pages */
1900 unsigned long num_pages;
1901
1902 /* array of pointers to page */
1903 struct page **page_array;
1904
1905 /* array of lli */
1906 struct sep_lli_entry_t *lli_array;
1907
1908 /* count */
1909 unsigned long count;
1910
1911 /* result */
1912 int result;
1913
1914 /*------------------------
1915 CODE
1916 --------------------------*/
1917
1918 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
1919 "SEP Driver:--------> sep_lock_user_pages start\n");
1920
1921 error = 0;
1922
1923 /* set start and end pages and num pages */
1924 end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1925 start_page = app_virt_addr >> PAGE_SHIFT;
1926 num_pages = end_page - start_page + 1;
1927
1928 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1929 "SEP Driver: app_virt_addr is %08lx\n",
1930 app_virt_addr);
1931 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1932 "SEP Driver: data_size is %lu\n",
1933 data_size);
1934 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1935 "SEP Driver: start_page is %lu\n",
1936 start_page);
1937 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1938 "SEP Driver: end_page is %lu\n",
1939 end_page);
1940 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
1941 "SEP Driver: num_pages is %lu\n",
1942 num_pages);
1943
1944 /* allocate array of pages structure pointers */
1945 page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
1946 if (!page_array) {
1947 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_EXTENDED,
1948 "SEP Driver: kmalloc for page_array failed\n");
1949
1950 error = -ENOMEM;
1951 goto end_function;
1952 }
1953
1954 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
1955 if (!lli_array) {
1956 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_EXTENDED,
1957 "SEP Driver: kmalloc for lli_array failed\n");
1958
1959 error = -ENOMEM;
1960 goto end_function_with_error1;
1961 }
1962
1963 /* convert the application virtual address into a set of physical */
1964 down_read(&current->mm->mmap_sem);
1965 result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0,
1966 page_array,
1967 0);
1968 up_read(&current->mm->mmap_sem);
1969
1970 /* check the number of pages locked - if not all then exit with error */
1971 if (result != num_pages) {
1972 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
1973 "SEP Driver: not all pages locked by get_user_pages\n");
1974
1975 error = -ENOMEM;
1976 goto end_function_with_error2;
1977 }
1978
1979 /* flush the cache */
1980 for (count = 0; count < num_pages; count++)
1981 flush_dcache_page(page_array[count]);
1982
1983 /* set the start address of the first page - app data may not start at
1984 the beginning of the page */
1985 lli_array[0].physical_address = (
1986 (unsigned long)page_to_phys(page_array[0])) +
1987 (app_virt_addr & (~PAGE_MASK)) ;
1988
1989 /* check whether all the data fits in the first page */
1990 if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1991 lli_array[0].block_size = data_size;
1992 else
1993 lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1994
1995 /* debug print */
1996 DEBUG_PRINT_2(SEP_DEBUG_LEVEL_EXTENDED,
1997 "lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n",
1998 lli_array[0].physical_address,
1999 lli_array[0].block_size);
2000
2001 /* go from the second page to the one before the last */
2002 for (count = 1; count < (num_pages - 1); count++) {
2003 lli_array[count].physical_address =
2004 (unsigned long)page_to_phys(page_array[count]);
2005 lli_array[count].block_size = PAGE_SIZE;
2006
2007 DEBUG_PRINT_4(SEP_DEBUG_LEVEL_EXTENDED,
2008 "lli_array[%lu].physical_address is %08lx, \
2009 lli_array[%lu].block_size is %lu\n",
2010 count, lli_array[count].physical_address,
2011 count,
2012 lli_array[count].block_size);
2013 }
2014
2015 /* if more than 1 page is locked - then update the size needed for the last page */
2016 if (num_pages > 1) {
2017 /* update the address of the last page */
2018 lli_array[count].physical_address =
2019 (unsigned long)page_to_phys(page_array[count]);
2020
2021 /* set the size of the last page */
2022 lli_array[count].block_size = (app_virt_addr + data_size) &
2023 (~PAGE_MASK);
2024
2025 if (lli_array[count].block_size == 0) {
2026 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_BASIC,
2027 "app_virt_addr is %08lx\n",
2028 app_virt_addr);
2029 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_BASIC, "data_size is %lu\n", data_size);
2030 while (1);
2031 }
2032 DEBUG_PRINT_4(SEP_DEBUG_LEVEL_EXTENDED,
2033 "lli_array[%lu].physical_address is %08lx, \
2034 lli_array[%lu].block_size is %lu\n",
2035 count, lli_array[count].physical_address,
2036 count,
2037 lli_array[count].block_size);
2038 }
2039
2040 /* set output params */
2041 *lli_array_ptr = lli_array;
2042 *num_pages_ptr = num_pages;
2043 *page_array_ptr = page_array;
2044
2045 goto end_function;
2046
2047end_function_with_error2:
2048
2049 /* release the pages */
2050 for (count = 0; count < num_pages; count++)
2051 page_cache_release(page_array[count]);
2052
2053 /* free lli array */
2054 kfree(lli_array);
2055
2056end_function_with_error1:
2057
2058 /* free page array */
2059 kfree(page_array);
2060
2061end_function:
2062
2063 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2064 "SEP Driver:<-------- sep_lock_user_pages end\n");
2065
2066 return error;
2067}
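
/*
 * Worked example (illustrative, assuming PAGE_SIZE is 4096): locking a
 * buffer with app_virt_addr 0x10000f00 and data_size 10000 gives
 * num_pages = 4, and the lli array built above describes it as:
 *
 *	lli_array[0].block_size = 4096 - 0xf00                      =  256
 *	lli_array[1].block_size = PAGE_SIZE                         = 4096
 *	lli_array[2].block_size = PAGE_SIZE                         = 4096
 *	lli_array[3].block_size = (0x10000f00 + 10000) & ~PAGE_MASK = 1552
 *
 * 256 + 4096 + 4096 + 1552 = 10000, so the entries cover the buffer
 * exactly even though it neither starts nor ends on a page boundary.
 */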
2068
2069/*
2070 This function locks all the physical pages of the kernel virtual buffer
2071 and constructs a basic lli array, where each entry holds the physical
2072 page address and the size of the data held in that physical page
2073*/
2074int sep_lock_kernel_pages(unsigned long kernel_virt_addr,
2075 unsigned long data_size,
2076 unsigned long *num_pages_ptr,
2077 struct sep_lli_entry_t **lli_array_ptr,
2078 struct page ***page_array_ptr)
2079
2080{
2081 /* error */
2082 int error;
2083
2084 /* the page of the end address of the kernel virtual buffer */
2085 unsigned long end_page;
2086
2087 /* the page of the start address of the kernel virtual buffer */
2088 unsigned long start_page;
2089
2090 /* the range in pages */
2091 unsigned long num_pages;
2092
2093 /* array of lli */
2094 struct sep_lli_entry_t *lli_array;
2095
2096 /* next kernel address to map */
2097 unsigned long next_kernel_address;
2098
2099 /* count */
2100 unsigned long count;
2101
2102
2103 /*------------------------
2104 CODE
2105 --------------------------*/
2106
2107 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2108 "SEP Driver:--------> sep_lock_kernel_pages start\n");
2109
2110 error = 0;
2111
2112 /* set start and end pages and num pages */
2113 end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
2114 start_page = kernel_virt_addr >> PAGE_SHIFT;
2115 num_pages = end_page - start_page + 1;
2116
2117 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
2118 "SEP Driver: kernel_virt_addr is %08lx\n",
2119 kernel_virt_addr);
2120 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
2121 "SEP Driver: data_size is %lu\n",
2122 data_size);
2123 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
2124 "SEP Driver: start_page is %lx\n",
2125 start_page);
2126 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
2127 "SEP Driver: end_page is %lx\n",
2128 end_page);
2129 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
2130 "SEP Driver: num_pages is %lu\n",
2131 num_pages);
2132
2133 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
2134 if (!lli_array) {
2135 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_EXTENDED,
2136 "SEP Driver: kmalloc for lli_array failed\n");
2137
2138 error = -ENOMEM;
2139 goto end_function;
2140 }
2141
2142 /* set the start address of the first page - app data may not start at
2143 the beginning of the page */
2144 lli_array[0].physical_address =
2145 (unsigned long)virt_to_phys((unsigned long *)kernel_virt_addr);
2146
2147 /* check whether all the data fits in the first page */
2148 if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
2149 lli_array[0].block_size = data_size;
2150 else
2151 lli_array[0].block_size =
2152 PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));
2153
2154 /* debug print */
2155 DEBUG_PRINT_2(SEP_DEBUG_LEVEL_EXTENDED,
2156 "lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n",
2157 lli_array[0].physical_address,
2158 lli_array[0].block_size);
2159
2160 /* advance the address to the start of the next page */
2161 next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;
2162
2163 /* go from the second page to the one before the last */
2164 for (count = 1; count < (num_pages - 1); count++) {
2165 lli_array[count].physical_address =
2166 (unsigned long)virt_to_phys((unsigned long *)next_kernel_address);
2167 lli_array[count].block_size = PAGE_SIZE;
2168
2169 DEBUG_PRINT_4(SEP_DEBUG_LEVEL_EXTENDED,
2170 "lli_array[%lu].physical_address is %08lx, \
2171 lli_array[%lu].block_size is %lu\n",
2172 count, lli_array[count].physical_address, count,
2173 lli_array[count].block_size);
2174
2175 next_kernel_address += PAGE_SIZE;
2176 }
2177
2178 /* if more than 1 page is locked - then update the size needed for the last page */
2179 if (num_pages > 1) {
2180 /* update the address of the last page */
2181 lli_array[count].physical_address =
2182 (unsigned long)virt_to_phys((unsigned long *)next_kernel_address);
2183
2184 /* set the size of the last page */
2185 lli_array[count].block_size =
2186 (kernel_virt_addr + data_size) & (~PAGE_MASK);
2187
2188 if (lli_array[count].block_size == 0) {
2189 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_BASIC,
2190 "app_virt_addr is %08lx\n",
2191 kernel_virt_addr);
2192 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_BASIC, "data_size is %lu\n", data_size);
2193 while (1);
2194 }
2195
2196 DEBUG_PRINT_4(SEP_DEBUG_LEVEL_EXTENDED,
2197 "lli_array[%lu].physical_address is %08lx, \
2198 lli_array[%lu].block_size is %lu\n",
2199 count, lli_array[count].physical_address,
2200 count,
2201 lli_array[count].block_size);
2202 }
2203
2204 /* set output params */
2205 *lli_array_ptr = lli_array;
2206 *num_pages_ptr = num_pages;
2207 *page_array_ptr = 0;
2208
2209
2210end_function:
2211
2212 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2213 "SEP Driver:<-------- sep_lock_kernel_pages end\n");
2214
2215 return error;
2216}
2217
2218/*
2219 This function releases all the application virtual buffer physical pages
2220 that were previously locked
2221*/
2222int sep_free_dma_pages(struct page **page_array_ptr,
2223 unsigned long num_pages,
2224 unsigned long dirtyFlag)
2225{
2226 /* count */
2227 unsigned long count;
2228
2229 /*-------------------
2230 CODE
2231 ---------------------*/
2232
2233 if (dirtyFlag) {
2234 for (count = 0; count < num_pages; count++) {
2235 /* the out array was written, therefore the data was changed */
2236 if (!PageReserved(page_array_ptr[count]))
2237 SetPageDirty(page_array_ptr[count]);
2238 page_cache_release(page_array_ptr[count]);
2239 }
2240 } else {
2241 /* free in pages - the data was only read, therefore no update was done
2242 on those pages */
2243 for (count = 0; count < num_pages; count++)
2244 page_cache_release(page_array_ptr[count]);
2245 }
2246
2247 if (page_array_ptr)
2248 /* free the array */
2249 kfree(page_array_ptr);
2250
2251 return 0;
2252}
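
/*
 * Usage note (illustrative): the dirtyFlag argument follows the DMA
 * direction - pages SEP only read from are released as-is, pages SEP
 * wrote into are marked dirty first:
 *
 *	sep_free_dma_pages(sep_in_page_array, sep_in_num_pages, 0);
 *	sep_free_dma_pages(sep_out_page_array, sep_out_num_pages, 1);
 *
 * sep_free_dma_table_data_handler below follows exactly this pattern.
 */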
2253
2254/*
2255 This function raises an interrupt to SEP signaling that it has a new
2256 command from the HOST
2257*/
2258static void sep_send_command_handler(void)
2259{
2260
2261 unsigned long count;
2262
2263 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2264 "SEP Driver:--------> sep_send_command_handler start\n");
2265
2266 sep_set_time(0, 0);
2267
2268 /* flush cache */
2269 flush_cache_all();
2270
2271 for (count = 0; count < 12 * 4; count += 4)
2272 DEBUG_PRINT_2(SEP_DEBUG_LEVEL_EXTENDED,
2273 "Word %lu of the message is %lu\n",
2274 count,
2275 *((unsigned long *)(g_sep_shared_area_addr + count)));
2276
2277 /* update counter */
2278 sep_host_to_sep_send_counter++;
2279
2280 /* send interrupt to SEP */
2281 SEP_WRITE_REGISTER(g_sep_reg_base_address +
2282 HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
2283
2284 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2285 "SEP Driver:<-------- sep_send_command_handler end\n");
2286
2287 return;
2288}
2289
2290/*
2291 This function raises an interrupt to SEP signaling that it has a
2292 new command from the HOST
2293*/
2294static void sep_send_reply_command_handler(void)
2295{
2296 unsigned long count;
2297
2298 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2299 "SEP Driver:--------> sep_send_reply_command_handler start\n");
2300
2301 /* flush cache */
2302 flush_cache_all();
2303
2304 for (count = 0; count < 12 * 4; count += 4)
2305 DEBUG_PRINT_2(SEP_DEBUG_LEVEL_EXTENDED,
2306 "Word %lu of the message is %lu\n",
2307 count,
2308 *((unsigned long *)(g_sep_shared_area_addr + count)));
2309
2310
2311 /* update counter */
2312 sep_host_to_sep_send_counter++;
2313
2314 /* send the interrupt to SEP */
2315 SEP_WRITE_REGISTER(g_sep_reg_base_address + HW_HOST_HOST_SEP_GPR2_REG_ADDR,
2316 sep_host_to_sep_send_counter);
2317
2318 /* update both counters */
2319 sep_host_to_sep_send_counter++;
2320
2321 sep_sep_to_host_reply_counter++;
2322
2323 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2324 "SEP Driver:<-------- sep_send_reply_command_handler end\n");
2325
2326 return;
2327}
2328
2329
2330
2331/*
2332 This function handles the allocate data pool memory request
2333 This function calculates the physical address of the
2334 allocated memory, and the offset of this area from the mapped address.
2335 Therefore, the FVOs in user space can calculate the exact virtual
2336 address of this allocated memory
2337*/
2338static int sep_allocate_data_pool_memory_handler(unsigned long arg)
2339{
2340 /* error */
2341 int error;
2342
2343 /* command parameters */
2344 struct sep_driver_alloc_t command_args;
2345
2346 /*-------------------------
2347 CODE
2348 ----------------------------*/
2349
2350 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2351 "SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
2352
2353
2354 error = copy_from_user(&command_args,
2355 (void *)arg,
2356 sizeof(struct sep_driver_alloc_t));
2357 if (error)
2358 goto end_function;
2359
2360 /* allocate memory */
2361 if ((sep_data_pool_bytes_allocated + command_args.num_bytes) >
2362 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
2364 error = -ENOTTY;
2365 goto end_function;
2366 }
2367
2368 /* set the virtual and physical address */
2369 command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
2370 sep_data_pool_bytes_allocated;
2371 command_args.phys_address = g_sep_phys_shared_area_addr +
2372 SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
2373 sep_data_pool_bytes_allocated;
2374
2375 /* write the memory back to the user space */
2376 error = copy_to_user((void *)arg,
2377 (void *)&command_args,
2378 sizeof(struct sep_driver_alloc_t));
2379 if (error)
2380 goto end_function;
2381
2382 /* set the allocation */
2383 sep_data_pool_bytes_allocated += command_args.num_bytes;
2384
2385end_function:
2386
2387 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2388 "SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
2389
2390 return error;
2391}
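
/*
 * Illustrative user space sketch (not compiled here; SEP_IOCALLOCDATAPOLL
 * stands for the allocate ioctl declared in sep_driver_api.h - check that
 * header for the exact name). The handler returns an offset relative to
 * the mmap()ed shared area, so the caller derives the virtual address of
 * the allocation itself:
 *
 *	struct sep_driver_alloc_t alloc;
 *
 *	alloc.num_bytes = 256;
 *	if (ioctl(sep_fd, SEP_IOCALLOCDATAPOLL, &alloc) == 0)
 *		buf = (unsigned char *)mapped_base + alloc.offset;
 */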
2392
2393/*
2394 This function handles write into allocated data pool command
2395*/
2396static int sep_write_into_data_pool_handler(unsigned long arg)
2397{
2398 /* error */
2399 int error;
2400
2401 /* virtual address */
2402 unsigned long virt_address;
2403
2404 /* application in address */
2405 unsigned long app_in_address;
2406
2407 /* number of bytes */
2408 unsigned long num_bytes;
2409
2410 /* address of the data pool */
2411 unsigned long data_pool_area_addr;
2412
2413 /*--------------------------
2414 CODE
2415 -----------------------------*/
2416
2417 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2418 "SEP Driver:--------> sep_write_into_data_pool_handler start\n");
2419
2420 /* get the application address */
2421 error = get_user(app_in_address,
2422 &(((struct sep_driver_write_t *)arg)->app_address));
2423 if (error)
2424 goto end_function;
2425
2426 /* get the virtual kernel address */
2427 error = get_user(virt_address,
2428 &(((struct sep_driver_write_t *)arg)->datapool_address));
2429 if (error)
2430 goto end_function;
2431
2432 /* get the number of bytes */
2433 error = get_user(num_bytes, &(((struct sep_driver_write_t *)arg)->num_bytes));
2434 if (error)
2435 goto end_function;
2436
2437 /* calculate the start of the data pool */
2438 data_pool_area_addr = g_sep_shared_area_addr +
2439 SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
2440
2441
2442 /* check that the range of the virtual kernel address is correct */
2443 if ((virt_address < data_pool_area_addr) ||
2444 (virt_address > (data_pool_area_addr +
2445 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES))) {
2446 error = -ENOTTY;
2447 goto end_function;
2448 }
2449
2450 /* copy the application data */
2451 error = copy_from_user((void *)virt_address,
2452 (void *)app_in_address,
2453 num_bytes);
2454
2455end_function:
2456
2457 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2458 "SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
2459
2460 return error;
2461}
2462
2463/*
2464 this function handles the read from data pool command
2465*/
2466static int sep_read_from_data_pool_handler(unsigned long arg)
2467{
2468 /* error */
2469 int error;
2470
2471 /* virtual address of dest application buffer */
2472 unsigned long app_out_address;
2473
2474 /* virtual address of the data pool */
2475 unsigned long virt_address;
2476
2477 /* number bytes */
2478 unsigned long num_bytes;
2479
2480 /* address of the data pool */
2481 unsigned long data_pool_area_addr;
2482
2483 /*------------------------
2484 CODE
2485 -----------------------------*/
2486
2487 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2488 "SEP Driver:--------> sep_read_from_data_pool_handler start\n");
2489
2490 /* get the application address */
2491 error = get_user(app_out_address,
2492 &(((struct sep_driver_write_t *)arg)->app_address));
2493 if (error)
2494 goto end_function;
2495
2496 /* get the virtual kernel address */
2497 error = get_user(virt_address,
2498 &(((struct sep_driver_write_t *)arg)->datapool_address));
2499 if (error)
2500 goto end_function;
2501
2502 /* get the number of bytes */
2503 error = get_user(num_bytes, &(((struct sep_driver_write_t *)arg)->num_bytes));
2504 if (error)
2505 goto end_function;
2506
2507 /* calculate the start of the data pool */
2508 data_pool_area_addr = g_sep_shared_area_addr +
2509 SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
2510
2511 /* check that the range of the virtual kernel address is correct */
2512 if ((virt_address < data_pool_area_addr) ||
2513 (virt_address > (data_pool_area_addr +
2514 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES))) {
2515 error = -ENOTTY;
2516 goto end_function;
2517 }
2518
2519 /* copy the application data */
2520 error = copy_to_user((void *)app_out_address, (void *)virt_address,
2521 num_bytes);
2522
2523end_function:
2524
2525 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2526 "SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
2527
2528 return error;
2529}
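
/*
 * Illustrative user space sketch (not compiled here; the ioctl names are
 * placeholders for the write/read data pool ioctls in sep_driver_api.h):
 * a round trip through the data pool uses the same sep_driver_write_t
 * layout in both directions, with datapool_address taken from a previous
 * allocation:
 *
 *	struct sep_driver_write_t args;
 *
 *	args.app_address = (unsigned long)user_buf;
 *	args.datapool_address = pool_addr;
 *	args.num_bytes = 256;
 *	ioctl(sep_fd, SEP_IOCWRITEDATAPOLL, &args);
 *	ioctl(sep_fd, SEP_IOCREADDATAPOLL, &args);
 */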
2530
2531
2532/*
2533 this function handles the request for creation of the DMA tables
2534 for the synchronous symmetric operations (AES, DES)
2535*/
2536static int sep_create_sync_dma_tables_handler(unsigned long arg)
2537{
2538 /* error */
2539 int error;
2540
2541 /* command arguments */
2542 struct sep_driver_build_sync_table_t command_args;
2543
2544 /*------------------------
2545 CODE
2546 --------------------------*/
2547
2548 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2549 "SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
2550
2551 error = copy_from_user(&command_args,
2552 (void *)arg,
2553 sizeof(struct sep_driver_build_sync_table_t));
2554 if (error)
2555 goto end_function;
2556
2557 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
2558 "app_in_address is %08lx\n",
2559 command_args.app_in_address);
2560 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
2561 "app_out_address is %08lx\n",
2562 command_args.app_out_address);
2563 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
2564 "data_size is %lu\n",
2565 command_args.data_in_size);
2566 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
2567 "block_size is %lu\n",
2568 command_args.block_size);
2569
2570
2571 /* check if we need to build only input table or input/output */
2572 if (command_args.app_out_address)
2573 /* prepare input and output tables */
2574 error = sep_prepare_input_output_dma_table(command_args.app_in_address,
2575 command_args.app_out_address,
2576 command_args.data_in_size,
2577 command_args.block_size,
2578 &command_args.in_table_address,
2579 &command_args.out_table_address,
2580 &command_args.in_table_num_entries,
2581 &command_args.out_table_num_entries,
2582 &command_args.table_data_size,
2583 command_args.isKernelVirtualAddress);
2584 else
2585 /* prepare input tables */
2586 error = sep_prepare_input_dma_table(command_args.app_in_address,
2587 command_args.data_in_size,
2588 command_args.block_size,
2589 &command_args.in_table_address,
2590 &command_args.in_table_num_entries,
2591 &command_args.table_data_size,
2592 command_args.isKernelVirtualAddress);
2593
2594 if (error)
2595 goto end_function;
2596
2597 /* copy to user */
2598 error = copy_to_user((void *)arg,
2599 (void *)&command_args,
2600 sizeof(struct sep_driver_build_sync_table_t));
2601
2602end_function:
2603
2604 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2605 "SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
2606
2607 return error;
2608}
2609
2610/*
2611 this function handles the request for freeing the dma tables for synchronous actions
2612*/
2613int sep_free_dma_table_data_handler(void)
2614{
2615 /*-------------------------
2616 CODE
2617 -----------------------------*/
2618
2619 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2620 "SEP Driver:--------> sep_free_dma_table_data_handler start\n");
2621
2622 /* free input pages array */
2623 sep_free_dma_pages(sep_in_page_array,
2624 sep_in_num_pages,
2625 0);
2626
2627 /* free output pages array if needed */
2628 if (sep_out_page_array)
2629 sep_free_dma_pages(sep_out_page_array,
2630 sep_out_num_pages,
2631 1);
2632
2633 /* reset all the values */
2634 sep_in_page_array = 0;
2635 sep_out_page_array = 0;
2636 sep_in_num_pages = 0;
2637 sep_out_num_pages = 0;
2638
2639
2640 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2641 "SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
2642
2643 return 0;
2644}
2645
2646/*
2647 this function handles the request to create the DMA tables for flow
2648*/
2649static int sep_create_flow_dma_tables_handler(unsigned long arg)
2650{
2651 /* error */
2652 int error;
2653
2654 /* command arguments */
2655 struct sep_driver_build_flow_table_t command_args;
2656
2657 /* first table - output */
2658 struct sep_lli_entry_t first_table_data;
2659
2660 /* dma table data */
2661 struct sep_lli_entry_t last_table_data;
2662
2663 /* pointer to the info entry of the previous DMA table */
2664 struct sep_lli_entry_t *prev_info_entry_ptr;
2665
2666 /* pointer to the flow data structure */
2667 struct sep_flow_context_t *flow_context_ptr;
2668
2669 /*------------------------
2670 CODE
2671 --------------------------*/
2672
2673 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2674 "SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
2675
2676 /* init variables */
2677 prev_info_entry_ptr = 0;
2678 first_table_data.physical_address = 0xffffffff;
2679
2680 /* find the free structure for flow data */
2681 error = sep_find_flow_context(SEP_FREE_FLOW_ID, &flow_context_ptr);
2682 if (error)
2683 goto end_function;
2684
2685 error = copy_from_user(&command_args,
2686 (void *)arg,
2687 sizeof(struct sep_driver_build_flow_table_t));
2688 if (error)
2689 goto end_function;
2690
2691 /* create flow tables */
2692 error = sep_prepare_flow_dma_tables(command_args.num_virtual_buffers,
2693 command_args.virt_buff_data_addr,
2694 flow_context_ptr,
2695 &first_table_data,
2696 &last_table_data,
2697 command_args.isKernelVirtualAddress);
2698 if (error)
2699 goto end_function_with_error;
2700
2701 /* check if flow is static */
2702 if (!command_args.flow_type)
2703 /* point the info entry of the last to the info entry of the first */
2704 last_table_data = first_table_data;
2705
2706 /* set output params */
2707 command_args.first_table_addr = first_table_data.physical_address;
2708 command_args.first_table_num_entries =
2709 ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) &
2710 SEP_NUM_ENTRIES_MASK);
2711 command_args.first_table_data_size =
2712 (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
2713
2714 /* send the parameters to user application */
2715 error = copy_to_user((void *)arg,
2716 &command_args,
2717 sizeof(struct sep_driver_build_flow_table_t));
2718 if (error)
2719 goto end_function_with_error;
2720
2721 /* all the flow created - update the flow entry with temp id */
2722 flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
2723
2724 /* set the processing tables data in the context */
2725 if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
2726 flow_context_ptr->input_tables_in_process = first_table_data;
2727 else
2728 flow_context_ptr->output_tables_in_process = first_table_data;
2729
2730 goto end_function;
2731
2732end_function_with_error:
2733
2734 /* free the allocated tables */
2735 sep_deallocated_flow_tables(&first_table_data);
2736
2737end_function:
2738
2739 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2740 "SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
2741
2742 return error;
2743
2744}
2745
2746/*
2747 this function handles adding tables to a flow
2748*/
2749static int sep_add_flow_tables_handler(unsigned long arg)
2750{
2751 /* error */
2752 int error;
2753
2754 /* number of entries */
2755 unsigned long num_entries;
2756
2757 /* command arguments */
2758 struct sep_driver_add_flow_table_t command_args;
2759
2760 /* pointer to the flow data structure */
2761 struct sep_flow_context_t *flow_context_ptr;
2762
2763 /* first dma table data */
2764 struct sep_lli_entry_t first_table_data;
2765
2766 /* last dma table data */
2767 struct sep_lli_entry_t last_table_data;
2768
2769 /* pointer to the info entry of the current DMA table */
2770 struct sep_lli_entry_t *info_entry_ptr;
2771
2772 /*--------------------------
2773 CODE
2774 ----------------------------*/
2775
2776 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2777 "SEP Driver:--------> sep_add_flow_tables_handler start\n");
2778
2779 /* get input parameters */
2780 error = copy_from_user(&command_args,
2781 (void *)arg,
2782 sizeof(struct sep_driver_add_flow_table_t));
2783 if (error)
2784 goto end_function;
2785
2786 /* find the flow structure for the flow id */
2787 error = sep_find_flow_context(command_args.flow_id, &flow_context_ptr);
2788 if (error)
2789 goto end_function;
2790
2791 /* prepare the flow dma tables */
2792 error = sep_prepare_flow_dma_tables(command_args.num_virtual_buffers,
2793 command_args.virt_buff_data_addr,
2794 flow_context_ptr,
2795 &first_table_data,
2796 &last_table_data,
2797 command_args.isKernelVirtualAddress);
2798 if (error)
2799 goto end_function_with_error;
2800
2801 /* now check if there is already an existing add table for this flow */
2802 if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
2803 /* this buffer was for input buffers */
2804 if (flow_context_ptr->input_tables_flag) {
2805 /* add table already exists - add the new tables to the end
2806 of the previous */
2807 num_entries = (flow_context_ptr->last_input_table.block_size >>
2808 SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
2809
2810 info_entry_ptr =
2811 (struct sep_lli_entry_t *)
2812 (flow_context_ptr->last_input_table.physical_address +
2813 (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
2814
2815 /* connect to list of tables */
2816 *info_entry_ptr = first_table_data;
2817
2818 /* set the first table data */
2819 first_table_data = flow_context_ptr->first_input_table;
2820 } else {
2821 /* set the input flag */
2822 flow_context_ptr->input_tables_flag = 1;
2823
2824 /* set the first table data */
2825 flow_context_ptr->first_input_table = first_table_data;
2826 }
2827 /* set the last table data */
2828 flow_context_ptr->last_input_table = last_table_data;
2829 } else /* this is output tables */ {
2830 /* this buffer was for output buffers */
2831 if (flow_context_ptr->output_tables_flag) {
2832 /* add table already exists - add the new tables to
2833 the end of the previous */
2834 num_entries = (flow_context_ptr->last_output_table.block_size >>
2835 SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
2836
2837 info_entry_ptr =
2838 (struct sep_lli_entry_t *)
2839 (flow_context_ptr->last_output_table.physical_address +
2840 (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
2841
2842 /* connect to list of tables */
2843 *info_entry_ptr = first_table_data;
2844
2845 /* set the first table data */
2846 first_table_data = flow_context_ptr->first_output_table;
2847 } else {
2848 /* set the output flag */
2849 flow_context_ptr->output_tables_flag = 1;
2850
2851 /* set the first table data */
2852 flow_context_ptr->first_output_table = first_table_data;
2853 }
2854 /* set the last table data */
2855 flow_context_ptr->last_output_table = last_table_data;
2856 }
2857
2858 /* set output params */
2859 command_args.first_table_addr = first_table_data.physical_address;
2860 command_args.first_table_num_entries = ((first_table_data.block_size >>
2861 SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
2862 command_args.first_table_data_size =
2863 (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
2864
2865 /* send the parameters to user application */
2866 error = copy_to_user((void *)arg,
2867 &command_args,
2868 sizeof(struct sep_driver_add_flow_table_t));
2869 if (error)
2870 goto end_function_with_error;
2871
	/* on success skip the flow table deallocation below */
	goto end_function;

2872end_function_with_error:
2873
2874 /* free the allocated tables */
2875 sep_deallocated_flow_tables(&first_table_data);
2876
2877end_function:
2878
2879 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2880 "SEP Driver:<-------- sep_add_flow_tables_handler end\n");
2881
2882 return error;
2883}
2884
2885/*
2886 this function adds the flow add message to the specified flow
2887*/
2888static int sep_add_flow_tables_message_handler(unsigned long arg)
2889{
2890 /* error */
2891 int error;
2892
2893 /* arguments */
2894 struct sep_driver_add_message_t command_args;
2895
2896 /* flow context */
2897 struct sep_flow_context_t *flow_context_ptr;
2898
2899 /*----------------------------
2900 CODE
2901 ------------------------------*/
2902
2903 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2904 "SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
2905
2906 error = copy_from_user(&command_args,
2907 (void *)arg,
2908 sizeof(struct sep_driver_add_message_t));
2909 if (error)
2910 goto end_function;
2911
2912 /* check input */
2913 if (command_args.message_size_in_bytes >
2914 SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
2915 error = -ENOMEM;
2916 goto end_function;
2917 }
2918
2919 /* find the flow context */
2920 error = sep_find_flow_context(command_args.flow_id, &flow_context_ptr);
2921 if (error)
2922 goto end_function;
2923
2924 /* copy the message into context */
2925 flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
2926
2927 error = copy_from_user(flow_context_ptr->message,
2928 (void *)command_args.message_address,
2929 command_args.message_size_in_bytes);
2930
2931
2932end_function:
2933
2934 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2935 "SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
2936
2937 return error;
2938}
2939
2940
2941/*
2942 this function returns the physical and virtual addresses of the static pool
2943*/
2944static int sep_get_static_pool_addr_handler(unsigned long arg)
2945{
2946 /* error */
2947 int error;
2948
2949 /* command arguments */
2950 struct sep_driver_static_pool_addr_t command_args;
2951
2952 /*-----------------------------
2953 CODE
2954 ------------------------------*/
2955
2956 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2957 "SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
2958
2959 /*prepare the output parameters in the struct */
2960 command_args.physical_static_address = g_sep_phys_shared_area_addr +
2961 SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2962 command_args.virtual_static_address = g_sep_shared_area_addr +
2963 SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2964
2965 DEBUG_PRINT_2(SEP_DEBUG_LEVEL_EXTENDED,
2966 "SEP Driver:physical_static_address is %08lx, virtual_static_address %08lx\n",
2967 command_args.physical_static_address,
2968 command_args.virtual_static_address);
2969
2970 /* send the parameters to user application */
2971 error = copy_to_user((void *)arg,
2972 &command_args,
2973 sizeof(struct sep_driver_static_pool_addr_t));
2974 if (error)
2975 goto end_function;
2976
2977end_function:
2978
2979 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
2980 "SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
2981
2982 return error;
2983}
2984
2985/*
2986 this function gets the offset of the physical address from the start
2987 of the mapped area
2988*/
2989static int sep_get_physical_mapped_offset_handler(unsigned long arg)
2990{
2991 /* error */
2992 int error;
2993
2994 /* command arguments */
2995 struct sep_driver_get_mapped_offset_t command_args;
2996
2997 /*-----------------------------
2998 CODE
2999 ------------------------------*/
3000
3001 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
3002 "SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
3003
3004 error = copy_from_user(&command_args,
3005 (void *)arg,
3006 sizeof(struct sep_driver_get_mapped_offset_t));
3007 if (error)
3008 goto end_function;
3009
3010 if (command_args.physical_address < g_sep_phys_shared_area_addr) {
3011 error = -ENOTTY;
3012 goto end_function;
3013 }
3014
3015 /*prepare the output parameters in the struct */
3016 command_args.offset = command_args.physical_address -
3017 g_sep_phys_shared_area_addr;
3018
3019 DEBUG_PRINT_2(SEP_DEBUG_LEVEL_EXTENDED,
3020 "SEP Driver:physical_address is %08lx, offset is %lu\n",
3021 command_args.physical_address,
3022 command_args.offset);
3023
3024 /* send the parameters to user application */
3025 error = copy_to_user((void *)arg,
3026 &command_args,
3027 sizeof(struct sep_driver_get_mapped_offset_t));
3028 if (error)
3029 goto end_function;
3030
3031end_function:
3032
3033 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
3034 "SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
3035
3036 return error;
3037}
3038
3039
3040/*
3041 this function handles the SEP start request - it polls until SEP posts its first status message and, on failure, returns the error status read from GPR0
3042*/
3043static int sep_start_handler(void)
3044{
3045 /* reg val */
3046 unsigned long reg_val;
3047
3048 /* error */
3049 unsigned long error;
3050
3051 /*-----------------------------
3052 CODE
3053 ------------------------------*/
3054
3055 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
3056 "SEP Driver:--------> sep_start_handler start\n");
3057
3058 error = 0;
3059
3060 /* wait in polling for message from SEP */
3061 do {
3062 SEP_READ_REGISTER(g_sep_reg_base_address +
3063 HW_HOST_SEP_HOST_GPR3_REG_ADDR, reg_val);
3064 } while (!reg_val);
3065
3066 /* check the value */
3067 if (reg_val == 0x1) {
3068 /* fatal error - read error status from GPR0 */
3069 SEP_READ_REGISTER(g_sep_reg_base_address +
3070 HW_HOST_SEP_HOST_GPR0_REG_ADDR, error);
3071 goto end_function;
3072 }
3073
3074end_function:
3075
3076 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
3077 "SEP Driver:<-------- sep_start_handler end\n");
3078
3079 return error;
3080}
3081
3082/*
3083 this function handles the request for SEP initialization
3084*/
3085static int sep_init_handler(unsigned long arg)
3086{
3087 /* word from message */
3088 unsigned long message_word;
3089
3090 /* message ptr */
3091 unsigned long *message_ptr;
3092
3093 /* command arguments */
3094 struct sep_driver_init_t command_args;
3095
3096 /* counter */
3097 unsigned long counter;
3098
3099 /* error */
3100 unsigned long error;
3101
3102 /* reg val */
3103 unsigned long reg_val;
3104
3105 /*-------------------
3106 CODE
3107 ---------------------*/
3108
3109 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
3110 "SEP Driver:--------> sep_init_handler start\n");
3111
3112 error = 0;
3113
3114 error = copy_from_user(&command_args, (void *)arg,
3115 sizeof(struct sep_driver_init_t));
3116
3117 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
3118 "SEP Driver:--------> sep_init_handler - finished copy_from_user \n");
3119
3120 if (error)
3121 goto end_function;
3122
3123 /* PATCH - configure the DMA to single-burst instead of multi-burst */
3124 /*sep_configure_dma_burst();*/
3125
3126 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
3127 "SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
3128
3129 message_ptr = (unsigned long *)command_args.message_addr;
3130
3131 /* set the base address of the SRAM */
3132 SEP_WRITE_REGISTER(g_sep_reg_base_address +
3133 HW_SRAM_ADDR_REG_ADDR,
3134 HW_CC_SRAM_BASE_ADDRESS);
3135
3136 for (counter = 0 ;
3137 counter < command_args.message_size_in_words;
3138 counter++, message_ptr++) {
3139 get_user(message_word, message_ptr);
3140
3141 /* write data to SRAM */
3142 SEP_WRITE_REGISTER(g_sep_reg_base_address + HW_SRAM_DATA_REG_ADDR,
3143 message_word);
3144
3145 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
3146 "SEP Driver:message_word is %lu\n",
3147 message_word);
3148
3149 /* wait for write complete */
3150 SEP_WAIT_SRAM_WRITE_COMPLETE();
3151 }
3152
3153 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
3154 "SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
3155
3156 /* signal SEP */
3157 SEP_WRITE_REGISTER(g_sep_reg_base_address + HW_HOST_HOST_SEP_GPR0_REG_ADDR,
3158 0x1);
3159
3160 do {
3161 SEP_READ_REGISTER(g_sep_reg_base_address +
3162 HW_HOST_SEP_HOST_GPR3_REG_ADDR, reg_val);
3163 } while (!(reg_val & 0xFFFFFFFD));
3164
3165 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
3166 "SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
3167
3168 /* check the value */
3169 if (reg_val == 0x1) {
3170 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_EXTENDED,
3171 "SEP Driver:init failed\n");
3172
3173 SEP_READ_REGISTER(g_sep_reg_base_address + 0x8060, error);
3174 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
3175 "SEP Driver:sw monitor is %lu\n",
3176 error);
3177
3178 /* fatal error - read error status from GPR0 */
3179 SEP_READ_REGISTER(g_sep_reg_base_address +
3180 HW_HOST_SEP_HOST_GPR0_REG_ADDR,
3181 error);
3182 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
3183 "SEP Driver:error is %lu\n", error);
3184 goto end_function;
3185 }
3186
3187end_function:
3188
3189 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
3190 "SEP Driver:<-------- sep_init_handler end\n");
3191
3192 return error;
3193
3194}
3195
3196/*
3197 this function handles the request for cache and resident reallocation
3198*/
3199static int sep_realloc_cache_resident_handler(unsigned long arg)
3200{
3201 /* error */
3202 int error;
3203
3204 /* physical cache addr */
3205 unsigned long phys_cache_address;
3206
3207 /* physical resident addr */
3208 unsigned long phys_resident_address;
3209
3210 /* command arguments */
3211 struct sep_driver_realloc_cache_resident_t command_args;
3212
3213 /*------------------
3214 CODE
3215 ---------------------*/
3216
3217 /* copy the data */
3218 error = copy_from_user(&command_args,
3219 (void *)arg,
3220 sizeof(struct sep_driver_realloc_cache_resident_t));
3221 if (error)
3222 goto end_function;
3223
3224 /* copy cache and resident to their intended locations */
3225 error = sep_copy_cache_resident_to_area(command_args.cache_addr,
3226 command_args.cache_size_in_bytes,
3227 command_args.resident_addr,
3228 command_args.resident_size_in_bytes,
3229 &phys_cache_address,
3230 &phys_resident_address);
3231 if (error)
3232 goto end_function;
3233
3234 /* lock the area (if needed) */
3235 sep_lock_cache_resident_area();
3236
3237 command_args.new_base_addr = g_sep_phys_shared_area_addr;
3238
3239 /* find the new base address according to the lowest address between
3240 cache, resident and shared area */
3241 if (phys_resident_address < command_args.new_base_addr)
3242 command_args.new_base_addr = phys_resident_address;
3243 if (phys_cache_address < command_args.new_base_addr)
3244 command_args.new_base_addr = phys_cache_address;
3245
3246 /* set the return parameters */
3247 command_args.new_cache_addr = phys_cache_address;
3248 command_args.new_resident_addr = phys_resident_address;
3249
3250
3251 /* set the new shared area */
3252 command_args.new_shared_area_addr = g_sep_phys_shared_area_addr;
3253
3254 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
3255 "SEP Driver:command_args.new_shared_area_addr is %08lx\n",
3256 command_args.new_shared_area_addr);
3257 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
3258 "SEP Driver:command_args.new_base_addr is %08lx\n",
3259 command_args.new_base_addr);
3260 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
3261 "SEP Driver:command_args.new_resident_addr is %08lx\n",
3262 command_args.new_resident_addr);
3263 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
3264 "SEP Driver:command_args.new_cache_addr is %08lx\n",
3265 command_args.new_cache_addr);
3266
3267 /* return to user */
3268 error = copy_to_user((void *)arg,
3269 (void *)&command_args,
3270 sizeof(struct sep_driver_realloc_cache_resident_t));
3271
3272end_function:
3273
3274 return error;
3275}
3276
3277/*
3278 this function handles the request for get time
3279*/
3280static int sep_get_time_handler(unsigned long arg)
3281{
3282 /* error */
3283 int error;
3284
3285 /* command arguments */
3286 struct sep_driver_get_time_t command_args;
3287
3288 /*------------------------
3289 CODE
3290 --------------------------*/
3291
3292 error = sep_set_time(&command_args.time_physical_address,
3293 &command_args.time_value);
3294
3295 /* return to user */
3296 error = copy_to_user((void *)arg,
3297 (void *)&command_args,
3298 sizeof(struct sep_driver_get_time_t));
3299
3300 return error;
3301
3302}
3303
3304/*
3305 This api handles the setting of API mode to blocking or non-blocking
3306*/
3307static int sep_set_api_mode_handler(unsigned long arg)
3308{
3309 /* error */
3310 int error;
3311
3312 /* flag */
3313 unsigned long mode_flag;
3314
3315 /*----------------------------
3316 CODE
3317 -----------------------------*/
3318
3319 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
3320 "SEP Driver:--------> sep_set_api_mode_handler start\n");
3321
3322 error = get_user(
3323 mode_flag, &(((struct sep_driver_set_api_mode_t *)arg)->mode));
3324 if (error)
3325 goto end_function;
3326
3327 /* set the global flag */
3328 g_sep_block_mode_flag = mode_flag;
3329
3330
3331end_function:
3332
3333 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
3334 "SEP Driver:<-------- sep_set_api_mode_handler end\n");
3335
3336 return error;
3337}
3338
3339/*
3340 This API handles the end transaction request
3341*/
3342static int sep_end_transaction_handler(unsigned long arg)
3343{
3344 /*----------------------------
3345 CODE
3346 -----------------------------*/
3347
3348 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
3349 "SEP Driver:--------> sep_end_transaction_handler start\n");
3350
3351#if 0/*!SEP_DRIVER_POLLING_MODE*/
3352 /* close IMR */
3353 SEP_WRITE_REGISTER(g_sep_reg_base_address + HW_HOST_IMR_REG_ADDR, 0x7FFF);
3354
3355 /* release IRQ line */
3356 free_irq(SEP_DIRVER_IRQ_NUM, &g_sep_reg_base_address);
3357
3358 /* unlock the sep mutex */
3359 mutex_unlock(&sep_mutex);
3360#endif
3361
3362 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
3363 "SEP Driver:<-------- sep_end_transaction_handler end\n");
3364
3365 return 0;
3366}
3367
3368/* handler for flow done interrupt */
3369static void sep_flow_done_handler(struct work_struct *work)
3370{
3371 /* flow context_ptr */
3372 struct sep_flow_context_t *flow_data_ptr;
3373 /*-------------------------
3374 CODE
3375 ---------------------------*/
3376
3377 /* obtain the mutex */
3378 mutex_lock(&sep_mutex);
3379
3380 /* get the pointer to context */
3381 flow_data_ptr = (struct sep_flow_context_t *)work;
3382
3383 /* free all the current input tables in sep */
3384 sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
3385
3386 /* free all the current tables output tables in SEP (if needed) */
3387 if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
3388 sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
3389
3390 /* check if we have additional tables to be sent to SEP only input
3391 flag may be checked */
3392 if (flow_data_ptr->input_tables_flag) {
3393 /* copy the message to the shared RAM and signal SEP */
3394 memcpy((void *)flow_data_ptr->message,
3395 (void *)g_sep_shared_area_addr,
3396 flow_data_ptr->message_size_in_bytes);
3397
3398 SEP_WRITE_REGISTER(g_sep_reg_base_address +
3399 HW_HOST_HOST_SEP_GPR2_REG_ADDR,
3400 0x2);
3401 }
3402 mutex_unlock(&sep_mutex);
3403}
3404
3405
3406/*
3407 This function creates a list of tables for flow and returns the data for
3408 the first and last tables of the list
3409*/
3410static int sep_prepare_flow_dma_tables(unsigned long num_virtual_buffers,
3411 unsigned long first_buff_addr,
3412 struct sep_flow_context_t *flow_data_ptr,
3413 struct sep_lli_entry_t *first_table_data_ptr,
3414 struct sep_lli_entry_t *last_table_data_ptr,
3415 bool isKernelVirtualAddress)
3416{
3417 /* error */
3418 int error;
3419
3420 /* virtual address of one buffer */
3421 unsigned long virt_buff_addr;
3422
3423 /* virtual size of one buffer */
3424 unsigned long virt_buff_size;
3425
3426 /* table data for each created table */
3427 struct sep_lli_entry_t table_data;
3428
3429 /* info entry */
3430 struct sep_lli_entry_t *info_entry_ptr;
3431
3432 /* previous info entry */
3433 struct sep_lli_entry_t *prev_info_entry_ptr;
3434
3435 /* counter */
3436 unsigned long i;
3437
3438 /*-------------------------------
3439 CODE
3440 ----------------------------------*/
3441
3442 /* init vars */
3443 error = 0;
3444 prev_info_entry_ptr = 0;
3445
3446 /* init the first table to default */
3447 table_data.physical_address = 0xffffffff;
3448 first_table_data_ptr->physical_address = 0xffffffff;
3449 table_data.block_size = 0;
3450
3451 for (i = 0; i < num_virtual_buffers; i++) {
3452 /* get the virtual buffer address */
3453 error = get_user(virt_buff_addr, &first_buff_addr);
3454 if (error)
3455 goto end_function;
3456
3457 /* get the virtual buffer size */
3458 first_buff_addr++;
3459 error = get_user(virt_buff_size, &first_buff_addr);
3460 if (error)
3461 goto end_function;
3462
3463 /* advance the address to point to the next pair of address|size */
3464 first_buff_addr++;
3465
3466 /* now prepare the one flow LLI table from the data */
3467 error = sep_prepare_one_flow_dma_table(virt_buff_addr,
3468 virt_buff_size,
3469 &table_data,
3470 &info_entry_ptr,
3471 flow_data_ptr,
3472 isKernelVirtualAddress);
3473 if (error)
3474 goto end_function;
3475
3476 if (i == 0) {
3477 /* if this is the first table - save it to return to the user
3478 application */
3479 *first_table_data_ptr = table_data;
3480
3481 /* set the pointer to info entry */
3482 prev_info_entry_ptr = info_entry_ptr;
3483 } else {
3484 /* not first table - the previous table info entry should
3485 be updated */
3486 prev_info_entry_ptr->block_size =
3487 (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) |
3488 (table_data.block_size);
3489
3490 /* set the pointer to info entry */
3491 prev_info_entry_ptr = info_entry_ptr;
3492 }
3493 }
3494
3495 /* set the last table data */
3496 *last_table_data_ptr = table_data;
3497
3498end_function:
3499
3500 return error;
3501}
3502
3503
3504/*
3505 This function creates one DMA table for flow and returns its data,
3506 and pointer to its info entry
3507*/
3508static int sep_prepare_one_flow_dma_table(
3509 unsigned long virt_buff_addr,
3510 unsigned long virt_buff_size,
3511 struct sep_lli_entry_t *table_data,
3512 struct sep_lli_entry_t **info_entry_ptr,
3513 struct sep_flow_context_t *flow_data_ptr,
3514 bool isKernelVirtualAddress)
3515{
3516 /* error */
3517 int error;
3518
3519 /* the range in pages */
3520 unsigned long lli_array_size;
3521
3522 /* array of pointers ot page */
3523 struct sep_lli_entry_t *lli_array;
3524
3525 /* pointer to the entry in the dma table */
3526 struct sep_lli_entry_t *flow_dma_table_entry_ptr;
3527
3528 /* address of the dma table */
3529 unsigned long *start_dma_table_ptr;
3530
3531 /* total table data counter */
3532 unsigned long dma_table_data_count;
3533
3534 /* pointer that will keep the pointer to the pages of the virtual buffer */
3535 struct page **page_array_ptr;
3536
3537 /* counter */
3538 unsigned long entry_count;
3539
3540 /*-------------------------------
3541 CODE
3542 ----------------------------------*/
3543
3544 /* find the space for the new table */
3545 error = sep_find_free_flow_dma_table_space(&start_dma_table_ptr);
3546 if (error)
3547 goto end_function;
3548
3549 /* check if the pages are in Kernel Virtual Address layout */
3550 if (isKernelVirtualAddress == true)
3551 /* lock kernel buffer in the memory */
3552 error = sep_lock_kernel_pages(virt_buff_addr,
3553 virt_buff_size,
3554 &lli_array_size,
3555 &lli_array,
3556 &page_array_ptr);
3557 else
3558 /* lock user buffer in the memory */
3559 error = sep_lock_user_pages(virt_buff_addr,
3560 virt_buff_size,
3561 &lli_array_size,
3562 &lli_array,
3563 &page_array_ptr);
3564
3565 if (error)
3566 goto end_function;
3567
3568 /* set the number of lli entries at the beginning of the table - this table is
3569 now considered taken */
3570 *start_dma_table_ptr = lli_array_size;
3571
3572 /* point to the place of the pages pointers of the table */
3573 start_dma_table_ptr++;
3574
3575 /* set the pages pointer */
3576 *start_dma_table_ptr = (unsigned long)page_array_ptr;
3577
3578 /* set the pointer to the first entry */
3579 flow_dma_table_entry_ptr = (struct sep_lli_entry_t *)(++start_dma_table_ptr);
3580
3581 /* now create the entries for table */
3582 for (dma_table_data_count = entry_count = 0;
3583 entry_count < lli_array_size;
3584 entry_count++) {
3585 flow_dma_table_entry_ptr->physical_address =
3586 lli_array[entry_count].physical_address;
3587
3588 flow_dma_table_entry_ptr->block_size =
3589 lli_array[entry_count].block_size;
3590
3591 /* set the total data of a table */
3592 dma_table_data_count += lli_array[entry_count].block_size;
3593
3594 flow_dma_table_entry_ptr++;
3595 }
3596
3597 /* set the physical address */
3598 table_data->physical_address = virt_to_phys(start_dma_table_ptr);
3599
3600 /* set the num_entries and total data size */
3601 table_data->block_size = ((lli_array_size + 1) <<
3602 SEP_NUM_ENTRIES_OFFSET_IN_BITS) |
3603 (dma_table_data_count);
3604
3605 /* set the info entry */
3606 flow_dma_table_entry_ptr->physical_address = 0xffffffff;
3607 flow_dma_table_entry_ptr->block_size = 0;
3608
3609 /* set the pointer to info entry */
3610 *info_entry_ptr = flow_dma_table_entry_ptr;
3611
3612 /* free the array of the lli entries */
3613 kfree(lli_array);
3614
3615end_function:
3616
3617 return error;
3618}
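
/*
 * Illustrative layout sketch: a flow DMA table slot as filled in above
 * occupies consecutive words in the flow tables area:
 *
 *	word 0   : number of lli entries (non-zero marks the slot as taken)
 *	word 1   : pointer to the struct page array of the locked buffer
 *	word 2.. : the lli entries themselves (physical_address, block_size)
 *	last     : info entry (physical_address 0xffffffff, block_size 0),
 *	           overwritten later when another table is chained to the flow
 *
 * table_data->physical_address points at word 2, so SEP sees only the
 * entries, while the driver steps back to words 0 and 1 when it frees the
 * table (see sep_deallocated_flow_tables).
 */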
3619
3620
3621/*
3622 This function returns a pointer to the flow data structure
3623 that contains the given id
3624*/
3625static int sep_find_flow_context(
3626 unsigned long flow_id,
3627 struct sep_flow_context_t **flow_data_ptr)
3628{
3629 /* count */
3630 unsigned long count;
3631
3632 /* error */
3633 int error;
3634
3635 /*-----------------------
3636 CODE
3637 ---------------------------*/
3638
3639 error = 0;
3640
3641 /*
3642 always search for the flow with the default id first - once we
3643 have started working on a flow there can be no situation
3644 where 2 flows have the default flag
3645 */
3646 for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
3647 if (g_sep_flows_data_array[count].flow_id == flow_id) {
3648 *flow_data_ptr = &g_sep_flows_data_array[count];
3649 break;
3650 }
3651 }
3652
3653 if (count == SEP_DRIVER_NUM_FLOWS)
3654 /* no flow found */
3655 error = -ENOMEM;
3656
3657 return error;
3658}
3659
3660/*
3661 this function finds a space for the new flow dma table
3662*/
3663static int sep_find_free_flow_dma_table_space(
3664 unsigned long **table_address_ptr)
3665{
3666 /* error */
3667 int error;
3668
3669 /* pointer to the id field of the flow dma table */
3670 unsigned long *start_table_ptr;
3671
3672 /* start address of the flow dma area */
3673 unsigned long flow_dma_area_start_addr;
3674
3675 /* end address of the flow dma area */
3676 unsigned long flow_dma_area_end_addr;
3677
3678 /* maximum table size in words */
3679 unsigned long table_size_in_words;
3680
3681 /*---------------------
3682 CODE
3683 -----------------------*/
3684
3685 error = 0;
3686
3687 /* find the start address of the flow DMA table area */
3688 flow_dma_area_start_addr = g_sep_shared_area_addr +
3689 SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
3690
3691 /* set end address of the flow table area */
3692 flow_dma_area_end_addr = flow_dma_area_start_addr +
3693 SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
3694
3695 /* set table size in words */
3696 table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE *
3697 (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
3698
3699 /* set the pointer to the start address of DMA area */
3700 start_table_ptr = (unsigned long *)flow_dma_area_start_addr;
3701
3702 /* find the space for the next table */
3703 while (((*start_table_ptr & 0x7FFFFFFF) != 0) &&
3704 ((unsigned long)start_table_ptr <
3705 flow_dma_area_end_addr))
3706 start_table_ptr += table_size_in_words;
3707
3708 /* check if we reached the end of the flow tables area */
3709 if ((unsigned long)start_table_ptr >= flow_dma_area_end_addr)
3710 error = -1;
3711 else
3712 *table_address_ptr = start_table_ptr;
3713
3714 return error;
3715}
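
/*
 * Sizing note (illustrative): each slot scanned above is
 * table_size_in_words long - two header words (entry count and page array
 * pointer) plus two words per lli entry. If, for example,
 * SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE were 25 and longs were 32 bit,
 * that would be 25 * (8 / 4) + 2 = 52 words per slot.
 */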
3716
3717/*
3718 this function goes over all the flow tables connected to the given
3719 table and deallocates them
3720*/
3721static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
3722{
3723 /* id pointer */
3724 unsigned long *table_ptr;
3725
3726 /* number of entries in the table */
3727 unsigned long num_entries;
3728
3729 unsigned long num_pages;
3730
3731 /* pages ptr */
3732 struct page **pages_ptr;
3733
3734 /* pointer to the info entry of the table */
3735 struct sep_lli_entry_t *info_entry_ptr;
3736
3737 /*-------------------------------
3738 CODE
3739 ---------------------------------*/
3740
3741 /* set the pointer to the first table */
3742 table_ptr = (unsigned long *)first_table_ptr->physical_address;
3743
3744 /* set the num of entries */
3745 num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
3746 & SEP_NUM_ENTRIES_MASK;
3747
3748 /* go over all the connected tables */
3749 while (*table_ptr != 0xffffffff) {
3750 /* get number of pages */
3751 num_pages = *(table_ptr - 2);
3752
3753 /* get the pointer to the pages */
3754 pages_ptr = (struct page **)(*(table_ptr - 1));
3755
3756 /* free the pages */
3757 sep_free_dma_pages(pages_ptr, num_pages, 1);
3758
3759 /* go to the info entry */
3760 info_entry_ptr = ((struct sep_lli_entry_t *)table_ptr) +
3761 (num_entries - 1);
3762
3763 table_ptr = (unsigned long *)info_entry_ptr->physical_address;
3764 num_entries = (info_entry_ptr->block_size >>
3765 SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
3766 }
3767
3768 return;
3769}
3770
3771/*
3772 This function handles the set flow id command
3773*/
3774static int sep_set_flow_id_handler(unsigned long arg)
3775{
3776 /* error */
3777 int error;
3778
3779 /* flow id */
3780 unsigned long flow_id;
3781
3782 /* pointer to flow data structure */
3783 struct sep_flow_context_t *flow_data_ptr;
3784
3785 /*----------------------
3786 CODE
3787 -----------------------*/
3788
3789 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
3790 "------------>SEP Driver: sep_set_flow_id_handler start\n");
3791
3792 error = get_user(flow_id,
3793 &(((struct sep_driver_set_flow_id_t *)arg)->flow_id));
3794 if (error)
3795 goto end_function;
3796
3797 /* find the flow data structure that was just used for creating new flow
3798 - its id should be default */
3799 error = sep_find_flow_context(SEP_TEMP_FLOW_ID, &flow_data_ptr);
3800 if (error)
3801 goto end_function;
3802
3803 /* set flow id */
3804 flow_data_ptr->flow_id = flow_id;
3805
3806end_function:
3807
3808 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
3809 "SEP Driver:<-------- sep_set_flow_id_handler end\n");
3810
3811
3812 return error;
3813}
3814
3815
3816/*
3817 calculates time and sets it at the predefined address
3818*/
3819static int sep_set_time(unsigned long *address_ptr,
3820 unsigned long *time_in_sec_ptr)
3821{
3822 /* time struct */
3823 struct timeval time;
3824
3825 /* address of time in the kernel */
3826 unsigned long time_addr;
3827
3828
3829 /*------------------------
3830 CODE
3831 --------------------------*/
3832
3833 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
3834 "SEP Driver:--------> sep_set_time start\n");
3835
3836
3837 do_gettimeofday(&time);
3838
3839 /* set value in the SYSTEM MEMORY offset */
3840 time_addr = g_message_shared_area_addr +
3841 SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
3842
3843 *(unsigned long *)time_addr = SEP_TIME_VAL_TOKEN;
3844 *(unsigned long *)(time_addr + 4) = time.tv_sec;
3845
3846 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
3847 "SEP Driver:time.tv_sec is %lu\n",
3848 time.tv_sec);
3849 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
3850 "SEP Driver:time_addr is %lu\n",
3851 time_addr);
3852 DEBUG_PRINT_1(SEP_DEBUG_LEVEL_EXTENDED,
3853 "SEP Driver:g_message_shared_area_addr is %lu\n",
3854 g_message_shared_area_addr);
3855
3856 /* set the output parameters if needed */
3857 if (address_ptr)
3858 *address_ptr = sep_shared_area_virt_to_phys(time_addr);
3859
3860 if (time_in_sec_ptr)
3861 *time_in_sec_ptr = time.tv_sec;
3862
3863 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
3864 "SEP Driver:<-------- sep_set_time end\n");
3865
3866 return 0;
3867}
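
/*
 * Layout note (illustrative): the two words written above live at
 * SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES inside the message shared
 * area and form a token/value pair:
 *
 *	offset + 0 : SEP_TIME_VAL_TOKEN
 *	offset + 4 : current time in seconds (time.tv_sec)
 *
 * sep_send_command_handler calls sep_set_time(0, 0) so every command sent
 * to SEP carries a fresh timestamp; the get time ioctl additionally
 * returns the physical address of this word pair and the value to user
 * space.
 */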
3868
3869/*
3870 PATCH for configuring the DMA to single burst instead of multi-burst
3871*/
3872static void sep_configure_dma_burst(void)
3873{
3874
3875#define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL
3876
3877 unsigned long regVal;
3878
3879 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
3880 "SEP Driver:<-------- sep_configure_dma_burst start \n");
3881
3882 /* request access to registers from SEP */
3883 SEP_WRITE_REGISTER(g_sep_reg_base_address +
3884 HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2UL);
3885
3886 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
3887 "SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");
3888
3889 SEP_READ_REGISTER(g_sep_reg_base_address +
3890 HW_HOST_SEP_BUSY_REG_ADDR, regVal);
3891 while (regVal)
3892 SEP_READ_REGISTER(g_sep_reg_base_address +
3893 HW_HOST_SEP_BUSY_REG_ADDR, regVal);
3894
3895 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
3896 "SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");
3897
3898 /* set the DMA burst register to single burst*/
3899 SEP_WRITE_REGISTER(g_sep_reg_base_address +
3900 HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
3901
3902 /* release the sep busy */
3903 SEP_WRITE_REGISTER(g_sep_reg_base_address +
3904 HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
3905 SEP_READ_REGISTER(g_sep_reg_base_address +
3906 HW_HOST_SEP_BUSY_REG_ADDR, regVal);
3907 while (regVal != 0x0)
3908 SEP_READ_REGISTER(g_sep_reg_base_address +
3909 HW_HOST_SEP_BUSY_REG_ADDR, regVal);
3910
3911 DEBUG_PRINT_0(SEP_DEBUG_LEVEL_BASIC,
3912 "SEP Driver:<-------- sep_configure_dma_burst done \n");
3913
3914}
3915
3916module_init(sep_init);
3917module_exit(sep_exit);
3918
3919MODULE_LICENSE("GPL");