/**
2 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions, and the following disclaimer,
9 * without modification.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. The names of the above-listed copyright holders may not be used
14 * to endorse or promote products derived from this software without
15 * specific prior written permission.
16 *
17 * ALTERNATIVELY, this software may be distributed under the terms of the
18 * GNU General Public License ("GPL") version 2, as published by the Free
19 * Software Foundation.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
22 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
23 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
25 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
26 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
28 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include <sys/cdefs.h>
35 #include <sys/systm.h>
36 #include <sys/device.h>
37 #include <sys/file.h>
38 #include <sys/filedesc.h>
39 #include <sys/kmem.h>
40
41 #include "vchiq_core.h"
42 #include "vchiq_ioctl.h"
43 #include "vchiq_arm.h"
44
45 #define DEVICE_NAME "vchiq"
46
47 /* Override the default prefix, which would be vchiq_arm (from the filename) */
48 #undef MODULE_PARAM_PREFIX
49 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
50
51 #define VCHIQ_MINOR 0
52
53 /* Some per-instance constants */
54 #define MAX_COMPLETIONS 16
55 #define MAX_SERVICES 64
56 #define MAX_ELEMENTS 8
57 #define MSG_QUEUE_SIZE 64
58
59 #define KEEPALIVE_VER 1
60 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
61
62 MALLOC_DEFINE(M_VCHIQ, "vchiq_cdev", "VideoCore cdev memory");
63
64 /* Run time control of log level, based on KERN_XXX level. */
65 int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
66 int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
67
68 #define SUSPEND_TIMER_TIMEOUT_MS 100
69 #define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
70
71 #define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
72 static const char *const suspend_state_names[] = {
73 "VC_SUSPEND_FORCE_CANCELED",
74 "VC_SUSPEND_REJECTED",
75 "VC_SUSPEND_FAILED",
76 "VC_SUSPEND_IDLE",
77 "VC_SUSPEND_REQUESTED",
78 "VC_SUSPEND_IN_PROGRESS",
79 "VC_SUSPEND_SUSPENDED"
80 };
81 #define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
82 static const char *const resume_state_names[] = {
83 "VC_RESUME_FAILED",
84 "VC_RESUME_IDLE",
85 "VC_RESUME_REQUESTED",
86 "VC_RESUME_IN_PROGRESS",
87 "VC_RESUME_RESUMED"
88 };
89 /* The number of times we allow force suspend to timeout before actually
90 ** _forcing_ suspend. This is to cater for SW which fails to release vchiq
91 ** correctly - we don't want to prevent ARM suspend indefinitely in this case.
92 */
93 #define FORCE_SUSPEND_FAIL_MAX 8
94
95 /* The time in ms allowed for videocore to go idle when force suspend has been
96 * requested */
97 #define FORCE_SUSPEND_TIMEOUT_MS 200
98
99
100 static void suspend_timer_callback(unsigned long context);
101 #ifdef notyet
102 static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance);
103 static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance);
104 #endif
105
106
/* Per-service state for a service created through this char device;
 * stored as the core service's base.userdata. */
typedef struct user_service_struct {
	VCHIQ_SERVICE_T *service;	/* underlying core service */
	void *userdata;		/* caller-supplied cookie, restored into completions */
	VCHIQ_INSTANCE_T instance;	/* owning open instance of the device */
	int is_vchi;		/* non-zero: buffer messages in msg_queue (vchi style) */
	int dequeue_pending;	/* a DEQUEUE_MESSAGE ioctl is waiting for a message */
	int message_available_pos;	/* completion index of most recent MESSAGE_AVAILABLE */
	int msg_insert;		/* free-running producer index into msg_queue */
	int msg_remove;		/* free-running consumer index into msg_queue */
	struct semaphore insert_event;	/* signalled when a message is queued */
	struct semaphore remove_event;	/* signalled when a message is dequeued */
	VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];	/* circular message buffer */
} USER_SERVICE_T;
120
/* Bookkeeping for a blocking-mode bulk transfer; parked on the
 * instance's bulk_waiter_list (keyed by pid) when the transfer is
 * interrupted, so a later MODE_WAITING request can reclaim it. */
struct bulk_waiter_node {
	struct bulk_waiter bulk_waiter;
	int pid;		/* pid of the process that queued the transfer */
	struct list_head list;	/* linkage on instance->bulk_waiter_list */
};
126
/* State for one open file handle on the vchiq device. */
struct vchiq_instance_struct {
	VCHIQ_STATE_T *state;		/* shared core state (g_state) */
	/* Circular completion queue; insert/remove are free-running
	 * counters masked with (MAX_COMPLETIONS - 1) to index slots. */
	VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
	int completion_insert;
	int completion_remove;
	struct semaphore insert_event;	/* signalled when a completion is added */
	struct semaphore remove_event;	/* signalled when completions are consumed */
	struct mutex completion_mutex;	/* serialises AWAIT_COMPLETION consumers */

	int connected;		/* CONNECT ioctl has succeeded */
	int closing;		/* instance is being torn down; waiters must exit */
	int pid;
	int mark;

	/* Outstanding blocking-mode bulk transfers (see QUEUE_BULK ioctls). */
	struct list_head bulk_waiter_list;
	struct mutex bulk_waiter_list_mutex;

	/* NOTE(review): Linux procfs remnant; not referenced in this
	 * portion of the file — confirm before removing. */
	struct proc_dir_entry *proc_entry;
};
146
/* Cursor threaded through the state-dump helpers (defined elsewhere):
 * tracks the user buffer and how much of it has been consumed. */
typedef struct dump_context_struct {
	char __user *buf;	/* destination buffer in user space */
	size_t actual;		/* bytes produced so far */
	size_t space;		/* space remaining in buf */
	loff_t offset;		/* offset of the next write */
} DUMP_CONTEXT_T;
153
154 VCHIQ_STATE_T g_state;
155 static DEFINE_SPINLOCK(msg_queue_spinlock);
156
/* Human-readable ioctl names, indexed by _IOC_NR(cmd); used only for
 * trace/log output.  Order must track the VCHIQ_IOC_* definitions. */
static const char *const ioctl_names[] = {
	"CONNECT",
	"SHUTDOWN",
	"CREATE_SERVICE",
	"REMOVE_SERVICE",
	"QUEUE_MESSAGE",
	"QUEUE_BULK_TRANSMIT",
	"QUEUE_BULK_RECEIVE",
	"AWAIT_COMPLETION",
	"DEQUEUE_MESSAGE",
	"GET_CLIENT_ID",
	"GET_CONFIG",
	"CLOSE_SERVICE",
	"USE_SERVICE",
	"RELEASE_SERVICE",
	"SET_SERVICE_OPTION",
	"DUMP_PHYS_MEM"
};

/* Compile-time check that every ioctl number has a name above. */
vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
	(VCHIQ_IOC_MAX + 1));
178
179 static dev_type_open(vchiq_open);
180
/* Character-device switch: only open is serviced here.  vchiq_open()
 * clones a descriptor with vchiq_fileops attached, so read/ioctl/close
 * arrive via the fileops, not this cdevsw. */
struct cdevsw vchiq_cdevsw = {
	.d_open = vchiq_open,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};
195
196 extern struct cfdriver vchiq_cd;
197
198 static int vchiq_ioctl(struct file *, u_long, void *);
199 static int vchiq_close(struct file *);
200 static int vchiq_read(struct file *, off_t *, struct uio *, kauth_cred_t, int);
201
/* File operations installed on the cloned descriptor by vchiq_open()
 * (via fd_clone); these carry the per-open instance in fp->f_data. */
static const struct fileops vchiq_fileops = {
	.fo_read = vchiq_read,
	.fo_write = fbadop_write,
	.fo_ioctl = vchiq_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = fnullop_poll,
	.fo_stat = fbadop_stat,
	.fo_close = vchiq_close,
	.fo_kqfilter = fnullop_kqfilter,
};
212
213 #if 0
214 static void
215 dump_phys_mem(void *virt_addr, uint32_t num_bytes);
216 #endif
217
218 /****************************************************************************
219 *
220 * add_completion
221 *
222 ***************************************************************************/
223
/*
 * Append a completion record for 'user_service' to the owning
 * instance's circular completion queue, sleeping (interruptibly)
 * while the queue is full.
 *
 * Returns VCHIQ_RETRY if the wait was interrupted by a signal,
 * VCHIQ_ERROR if the instance is closing, VCHIQ_SUCCESS otherwise.
 */
static VCHIQ_STATUS_T
add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
	void *bulk_userdata)
{
	VCHIQ_COMPLETION_DATA_T *completion;
	DEBUG_INITIALISE(g_state.local)

	/* insert/remove are free-running counters, so the queue is full
	** when they differ by exactly MAX_COMPLETIONS. */
	while (instance->completion_insert ==
		(instance->completion_remove + MAX_COMPLETIONS)) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level,
			"add_completion - completion queue full");
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (down_interruptible(&instance->remove_event) != 0) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback interrupted");
			return VCHIQ_RETRY;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback closing");
			return VCHIQ_ERROR;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	/* MAX_COMPLETIONS is a power of two, so masking the free-running
	** index yields the slot number. */
	completion =
		&instance->completions[instance->completion_insert &
		(MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED)
		/* Take an extra reference, to be held until
		   this CLOSED notification is delivered. */
		lock_service(user_service->service);

	/* A write barrier is needed here to ensure that the entire completion
		record is written out before the insert point. */
	wmb();

	/* Record where the latest MESSAGE_AVAILABLE sits so that
	** service_callback can decide whether one is still pending. */
	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos =
			instance->completion_insert;
	instance->completion_insert++;

	up(&instance->insert_event);

	return VCHIQ_SUCCESS;
}
279
280 /****************************************************************************
281 *
282 * service_callback
283 *
284 ***************************************************************************/
285
/*
 * Callback invoked by the vchiq core for every event on a service
 * created through this device.  For vchi-style services, message
 * headers are buffered in the per-service msg_queue; everything else
 * (and a MESSAGE_AVAILABLE marker when needed) goes through
 * add_completion() into the instance's completion queue.
 */
static VCHIQ_STATUS_T
service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
	VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
{
	/* How do we ensure the callback goes to the right client?
	** The service_user data points to a USER_SERVICE_T record containing
	** the original callback and the user state structure, which contains a
	** circular buffer for completion records.
	*/
	USER_SERVICE_T *user_service;
	VCHIQ_SERVICE_T *service;
	VCHIQ_INSTANCE_T instance;
	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	service = handle_to_service(handle);
	BUG_ON(!service);
	user_service = (USER_SERVICE_T *)service->base.userdata;
	instance = user_service->instance;

	/* Events arriving while the instance tears down are dropped. */
	if (!instance || instance->closing)
		return VCHIQ_SUCCESS;

	vchiq_log_trace(vchiq_arm_log_level,
		"service_callback - service %lx(%d), handle %x, reason %d, header %lx, "
		"instance %lx, bulk_userdata %lx",
		(unsigned long)user_service,
		service->localport, service->handle,
		reason, (unsigned long)header,
		(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		/* msg_insert/msg_remove are free-running counters; the
		** queue is full when they differ by MSG_QUEUE_SIZE.  The
		** spinlock is dropped around every sleep below and
		** re-taken before re-testing. */
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level,
				"service_callback - msg queue full");
			/* If there is no MESSAGE_AVAILABLE in the completion
			** queue, add one
			*/
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				VCHIQ_STATUS_T status;
				vchiq_log_info(vchiq_arm_log_level,
					"Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason,
					NULL, user_service, bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					return status;
				}
			}

			/* Wait for the consumer to make room. */
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (down_interruptible(&user_service->remove_event)
				!= 0) {
				vchiq_log_info(vchiq_arm_log_level,
					"service_callback interrupted");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level,
					"service_callback closing");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;
		spin_unlock(&msg_queue_spinlock);

		up(&user_service->insert_event);

		/* If there is a thread waiting in DEQUEUE_MESSAGE, or if
		** there is a MESSAGE_AVAILABLE in the completion queue then
		** bypass the completion queue.
		*/
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			user_service->dequeue_pending = 0;
			return VCHIQ_SUCCESS;
		}

		/* Header now lives in msg_queue; the completion record
		** added below carries only the MESSAGE_AVAILABLE marker. */
		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}
387
388 /****************************************************************************
389 *
390 * user_service_free
391 *
392 ***************************************************************************/
393 static void
394 user_service_free(void *userdata)
395 {
396 USER_SERVICE_T *user_service = userdata;
397
398 _sema_destroy(&user_service->insert_event);
399 _sema_destroy(&user_service->remove_event);
400
401 kfree(user_service);
402 }
403
404 /****************************************************************************
405 *
406 * vchiq_ioctl
407 *
408 ***************************************************************************/
409
/*
 * ioctl handler for the vchiq device (installed via vchiq_fileops).
 * Dispatches the VCHIQ_IOC_* commands; 'arg' points at the
 * kernel-copied ioctl argument.  Internally errors accumulate
 * Linux-style (negative); they are negated to the BSD positive-errno
 * convention just before returning.
 *
 * 'service' holds a locked service reference for most cases and is
 * unlocked once at the end; cases that must not trigger that unlock
 * reset it to NULL.
 */
static int
vchiq_ioctl(struct file *fp, u_long cmd, void *arg)
{
	VCHIQ_INSTANCE_T instance;
	VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
	VCHIQ_SERVICE_T *service = NULL;
	int ret = 0;
	int i, rc;
	DEBUG_INITIALISE(g_state.local)

	instance = fp->f_data;

	/* XXXBSD: HACK! Linux-style ioctl decoding on NetBSD cmds. */
#define _IOC_NR(x) ((x) & 0xff)
#define _IOC_TYPE(x) IOCGROUP(x)

	vchiq_log_trace(vchiq_arm_log_level,
		"vchiq_ioctl - instance %x, cmd %s, arg %p",
		(unsigned int)instance,
		((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
		(_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
		ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);

	switch (cmd) {
	case VCHIQ_IOC_SHUTDOWN:
		/* Shutting down an unconnected instance is a no-op. */
		if (!instance->connected)
			break;

		/* Remove all services */
		i = 0;
		while ((service = next_service_by_instance(instance->state,
			instance, &i)) != NULL) {
			status = vchiq_remove_service(service->handle);
			unlock_service(service);
			if (status != VCHIQ_SUCCESS)
				break;
		}
		/* Prevent the common exit path from unlocking again. */
		service = NULL;

		if (status == VCHIQ_SUCCESS) {
			/* Wake the completion thread and ask it to exit */
			instance->closing = 1;
			up(&instance->insert_event);
		}

		break;

	case VCHIQ_IOC_CONNECT:
		if (instance->connected) {
			ret = -EINVAL;
			break;
		}
		rc = lmutex_lock_interruptible(&instance->state->mutex);
		if (rc != 0) {
			vchiq_log_error(vchiq_arm_log_level,
				"vchiq: connect: could not lock mutex for "
				"state %d: %d",
				instance->state->id, rc);
			ret = -EINTR;
			break;
		}
		status = vchiq_connect_internal(instance->state, instance);
		lmutex_unlock(&instance->state->mutex);

		if (status == VCHIQ_SUCCESS)
			instance->connected = 1;
		else
			vchiq_log_error(vchiq_arm_log_level,
				"vchiq: could not connect: %d", status);
		break;

	case VCHIQ_IOC_CREATE_SERVICE: {
		VCHIQ_CREATE_SERVICE_T *pargs = arg;
		USER_SERVICE_T *user_service = NULL;
		void *userdata;
		int srvstate;

		user_service = kzalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
		if (!user_service) {
			ret = -ENOMEM;
			break;
		}

		if (pargs->is_open) {
			/* Opening a remote service requires a connection. */
			if (!instance->connected) {
				ret = -ENOTCONN;
				kfree(user_service);
				break;
			}
			srvstate = VCHIQ_SRVSTATE_OPENING;
		} else {
			srvstate =
				instance->connected ?
				VCHIQ_SRVSTATE_LISTENING :
				VCHIQ_SRVSTATE_HIDDEN;
		}

		/* Swap the caller's userdata for the kernel-side record;
		** the original cookie is restored when completions are
		** delivered (see AWAIT_COMPLETION). */
		userdata = pargs->params.userdata;
		pargs->params.callback = service_callback;
		pargs->params.userdata = user_service;
		service = vchiq_add_service_internal(
				instance->state,
				&pargs->params, srvstate,
				instance, user_service_free);

		if (service != NULL) {
			user_service->service = service;
			user_service->userdata = userdata;
			user_service->instance = instance;
			user_service->is_vchi = pargs->is_vchi;
			user_service->dequeue_pending = 0;
			user_service->message_available_pos =
				instance->completion_remove - 1;
			user_service->msg_insert = 0;
			user_service->msg_remove = 0;
			_sema_init(&user_service->insert_event, 0);
			_sema_init(&user_service->remove_event, 0);

			if (pargs->is_open) {
				status = vchiq_open_service_internal
					(service, instance->pid);
				if (status != VCHIQ_SUCCESS) {
					/* user_service is released via
					** user_service_free (the service's
					** userdata destructor). */
					vchiq_remove_service(service->handle);
					service = NULL;
					ret = (status == VCHIQ_RETRY) ?
						-EINTR : -EIO;
					break;
				}
			}

#ifdef VCHIQ_IOCTL_DEBUG
		printf("%s: [CREATE SERVICE] handle = %08x\n", __func__, service->handle);
#endif
			pargs->handle = service->handle;

			/* Ownership transferred to the instance; avoid the
			** common-exit unlock. */
			service = NULL;
		} else {
			ret = -EEXIST;
			kfree(user_service);
		}
	} break;

	case VCHIQ_IOC_CLOSE_SERVICE: {
		VCHIQ_SERVICE_HANDLE_T handle = *(VCHIQ_SERVICE_HANDLE_T *)arg;

#ifdef VCHIQ_IOCTL_DEBUG
		printf("%s: [CLOSE SERVICE] handle = %08x\n", __func__, handle);
#endif

		service = find_service_for_instance(instance, handle);
		if (service != NULL)
			status = vchiq_close_service(service->handle);
		else
			ret = -EINVAL;
	} break;

	case VCHIQ_IOC_REMOVE_SERVICE: {
		VCHIQ_SERVICE_HANDLE_T handle = *(VCHIQ_SERVICE_HANDLE_T *)arg;

#ifdef VCHIQ_IOCTL_DEBUG
		printf("%s: [REMOVE SERVICE] handle = %08x\n", __func__, handle);
#endif

		service = find_service_for_instance(instance, handle);
		if (service != NULL)
			status = vchiq_remove_service(service->handle);
		else
			ret = -EINVAL;
	} break;

	case VCHIQ_IOC_USE_SERVICE:
	case VCHIQ_IOC_RELEASE_SERVICE: {
		VCHIQ_SERVICE_HANDLE_T handle = *(VCHIQ_SERVICE_HANDLE_T *)arg;

#ifdef VCHIQ_IOCTL_DEBUG
		printf("%s: [%s SERVICE] handle = %08x\n", __func__,
		    cmd == VCHIQ_IOC_USE_SERVICE ? "USE" : "RELEASE", handle);
#endif

		service = find_service_for_instance(instance, handle);
		if (service != NULL) {
			/* Adjust the service's use count (suspend/resume
			** bookkeeping). */
			status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
				vchiq_use_service_internal(service) :
				vchiq_release_service_internal(service);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s: cmd %s returned error %d for "
					"service %c%c%c%c:%8x",
					__func__,
					(cmd == VCHIQ_IOC_USE_SERVICE) ?
						"VCHIQ_IOC_USE_SERVICE" :
						"VCHIQ_IOC_RELEASE_SERVICE",
					status,
					VCHIQ_FOURCC_AS_4CHARS(
						service->base.fourcc),
					service->client_id);
				ret = -EINVAL;
			}
		} else
			ret = -EINVAL;
	} break;

	case VCHIQ_IOC_QUEUE_MESSAGE: {
		VCHIQ_QUEUE_MESSAGE_T *pargs = arg;

#ifdef VCHIQ_IOCTL_DEBUG
		printf("%s: [QUEUE MESSAGE] handle = %08x\n", __func__, pargs->handle);
#endif

		service = find_service_for_instance(instance, pargs->handle);

		if ((service != NULL) && (pargs->count <= MAX_ELEMENTS)) {
			/* Copy elements into kernel space */
			VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
			if (copy_from_user(elements, pargs->elements,
				pargs->count * sizeof(VCHIQ_ELEMENT_T)) == 0)
				status = vchiq_queue_message
					(pargs->handle,
					elements, pargs->count);
			else
				ret = -EFAULT;
		} else {
			ret = -EINVAL;
		}
	} break;

	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
	case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
		VCHIQ_QUEUE_BULK_TRANSFER_T *pargs = arg;
		struct bulk_waiter_node *waiter = NULL;
		VCHIQ_BULK_DIR_T dir =
			(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
			VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;

		service = find_service_for_instance(instance, pargs->handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		if (pargs->mode == VCHIQ_BULK_MODE_BLOCKING) {
			/* Fresh waiter; freed below unless the wait is
			** interrupted, in which case it is parked on
			** bulk_waiter_list for a later MODE_WAITING call. */
			waiter = kzalloc(sizeof(struct bulk_waiter_node),
				GFP_KERNEL);
			if (!waiter) {
				ret = -ENOMEM;
				break;
			}
			pargs->userdata = &waiter->bulk_waiter;
		} else if (pargs->mode == VCHIQ_BULK_MODE_WAITING) {
			/* Reclaim the waiter parked by an earlier
			** interrupted BLOCKING transfer from this pid. */
			struct list_head *pos;
			lmutex_lock(&instance->bulk_waiter_list_mutex);
			list_for_each(pos, &instance->bulk_waiter_list) {
				if (list_entry(pos, struct bulk_waiter_node,
					list)->pid == current->l_proc->p_pid) {
					waiter = list_entry(pos,
						struct bulk_waiter_node,
						list);
					list_del(pos);
					break;
				}

			}
			lmutex_unlock(&instance->bulk_waiter_list_mutex);
			if (!waiter) {
				vchiq_log_error(vchiq_arm_log_level,
					"no bulk_waiter found for pid %d",
					current->l_proc->p_pid);
				ret = -ESRCH;
				break;
			}
			vchiq_log_info(vchiq_arm_log_level,
				"found bulk_waiter %x for pid %d",
				(unsigned int)waiter, current->l_proc->p_pid);
			pargs->userdata = &waiter->bulk_waiter;
		}
		status = vchiq_bulk_transfer
			(pargs->handle,
			 VCHI_MEM_HANDLE_INVALID,
			 pargs->data, pargs->size,
			 pargs->userdata, pargs->mode,
			 dir);
		if (!waiter)
			break;
		if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
			!waiter->bulk_waiter.bulk) {
			if (waiter->bulk_waiter.bulk) {
				/* Cancel the signal when the transfer
				** completes. */
				spin_lock(&bulk_waiter_spinlock);
				waiter->bulk_waiter.bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
			_sema_destroy(&waiter->bulk_waiter.event);
			kfree(waiter);
		} else {
			/* Transfer still in flight after an interrupt:
			** park the waiter and tell userland to retry
			** with MODE_WAITING. */
			const VCHIQ_BULK_MODE_T mode_waiting =
				VCHIQ_BULK_MODE_WAITING;
			waiter->pid = current->l_proc->p_pid;
			lmutex_lock(&instance->bulk_waiter_list_mutex);
			list_add(&waiter->list, &instance->bulk_waiter_list);
			lmutex_unlock(&instance->bulk_waiter_list_mutex);
			vchiq_log_info(vchiq_arm_log_level,
				"saved bulk_waiter %x for pid %d",
				(unsigned int)waiter, current->l_proc->p_pid);

			pargs->mode = mode_waiting;
		}
	} break;

	case VCHIQ_IOC_AWAIT_COMPLETION: {
		VCHIQ_AWAIT_COMPLETION_T *pargs = arg;
		int count = 0;

		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
		if (!instance->connected) {
			ret = -ENOTCONN;
			break;
		}

		lmutex_lock(&instance->completion_mutex);

		/* Sleep until a completion arrives; the mutex is dropped
		** around the sleep so service_callback can make progress. */
		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
		while ((instance->completion_remove ==
			instance->completion_insert)
			&& !instance->closing) {
			DEBUG_TRACE(AWAIT_COMPLETION_LINE);
			lmutex_unlock(&instance->completion_mutex);
			rc = down_interruptible(&instance->insert_event);
			lmutex_lock(&instance->completion_mutex);
			if (rc != 0) {
				DEBUG_TRACE(AWAIT_COMPLETION_LINE);
				vchiq_log_info(vchiq_arm_log_level,
					"AWAIT_COMPLETION interrupted");
				ret = -EINTR;
				break;
			}
		}
		DEBUG_TRACE(AWAIT_COMPLETION_LINE);

		/* A read memory barrier is needed to stop prefetch of a stale
		** completion record
		*/
		rmb();

		if (ret == 0) {
			int msgbufcount = pargs->msgbufcount;

			for (; count < pargs->count; count++) {
				VCHIQ_COMPLETION_DATA_T *completion;
				VCHIQ_SERVICE_T *service1;
				USER_SERVICE_T *user_service;
				VCHIQ_HEADER_T *header;
				if (instance->completion_remove ==
					instance->completion_insert)
					break;
				completion = &instance->completions[
					instance->completion_remove &
					(MAX_COMPLETIONS - 1)];

				/* Restore the caller's original userdata
				** cookie (swapped in CREATE_SERVICE). */
				service1 = completion->service_userdata;
				user_service = service1->base.userdata;
				completion->service_userdata =
					user_service->userdata;

				header = completion->header;
				if (header) {
					void __user *msgbuf;
					int msglen;

					msglen = header->size +
						sizeof(VCHIQ_HEADER_T);
					/* This must be a VCHIQ-style service */
					if (pargs->msgbufsize < msglen) {
						vchiq_log_error(
							vchiq_arm_log_level,
							"header %x: msgbufsize"
							" %x < msglen %x",
							(unsigned int)header,
							pargs->msgbufsize,
							msglen);
						WARN(1, "invalid message "
							"size\n");
						if (count == 0)
							ret = -EMSGSIZE;
						break;
					}
					if (msgbufcount <= 0)
						/* Stall here for lack of a
						** buffer for the message. */
						break;
					/* Get the pointer from user space */
					msgbufcount--;
					if (copy_from_user(&msgbuf,
						(const void __user *)
						&pargs->msgbufs[msgbufcount],
						sizeof(msgbuf)) != 0) {
						if (count == 0)
							ret = -EFAULT;
						break;
					}

					/* Copy the message to user space */
					if (copy_to_user(msgbuf, header,
						msglen) != 0) {
						if (count == 0)
							ret = -EFAULT;
						break;
					}

					/* Now it has been copied, the message
					** can be released. */
					vchiq_release_message(service1->handle,
						header);

					/* The completion must point to the
					** msgbuf. */
					completion->header = msgbuf;
				}

				/* Drop the extra reference taken by
				** add_completion for CLOSED events. */
				if (completion->reason ==
					VCHIQ_SERVICE_CLOSED)
					unlock_service(service1);

				if (copy_to_user((void __user *)(
					(size_t)pargs->buf +
					count * sizeof(VCHIQ_COMPLETION_DATA_T)),
					completion,
					sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
						if (count == 0)
							ret = -EFAULT;
					break;
				}

				instance->completion_remove++;
			}

			pargs->msgbufcount = msgbufcount;
			pargs->count = count;
		}

		/* Wake any producer blocked on a full completion queue. */
		if (count != 0)
			up(&instance->remove_event);
		lmutex_unlock(&instance->completion_mutex);
		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
	} break;

	case VCHIQ_IOC_DEQUEUE_MESSAGE: {
		VCHIQ_DEQUEUE_MESSAGE_T *pargs = arg;
		USER_SERVICE_T *user_service;
		VCHIQ_HEADER_T *header;

		DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
		service = find_service_for_instance(instance, pargs->handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}
		/* Only vchi-style services buffer messages in msg_queue. */
		user_service = (USER_SERVICE_T *)service->base.userdata;
		if (user_service->is_vchi == 0) {
			ret = -EINVAL;
			break;
		}

		spin_lock(&msg_queue_spinlock);
		if (user_service->msg_remove == user_service->msg_insert) {
			if (!pargs->blocking) {
				spin_unlock(&msg_queue_spinlock);
				DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
				ret = -EWOULDBLOCK;
				break;
			}
			/* Tell service_callback a dequeuer is waiting so it
			** can skip the completion queue. */
			user_service->dequeue_pending = 1;
			do {
				spin_unlock(&msg_queue_spinlock);
				DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
				if (down_interruptible(
					&user_service->insert_event) != 0) {
					vchiq_log_info(vchiq_arm_log_level,
						"DEQUEUE_MESSAGE interrupted");
					ret = -EINTR;
					break;
				}
				spin_lock(&msg_queue_spinlock);
			} while (user_service->msg_remove ==
				user_service->msg_insert);

			if (ret)
				break;
		}

		BUG_ON((int)(user_service->msg_insert -
			user_service->msg_remove) < 0);

		header = user_service->msg_queue[user_service->msg_remove &
			(MSG_QUEUE_SIZE - 1)];
		user_service->msg_remove++;
		spin_unlock(&msg_queue_spinlock);

		/* Wake a callback blocked on a full msg_queue. */
		up(&user_service->remove_event);
		if (header == NULL)
			ret = -ENOTCONN;
		else if (header->size <= pargs->bufsize) {
			/* Copy to user space if msgbuf is not NULL */
			if ((pargs->buf == NULL) ||
				(copy_to_user((void __user *)pargs->buf,
				header->data,
				header->size) == 0)) {
				pargs->bufsize = header->size;
				vchiq_release_message(
					service->handle,
					header);
			} else
				ret = -EFAULT;
		} else {
			vchiq_log_error(vchiq_arm_log_level,
				"header %x: bufsize %x < size %x",
				(unsigned int)header, pargs->bufsize,
				header->size);
			WARN(1, "invalid size\n");
			ret = -EMSGSIZE;
		}
		DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
	} break;

	case VCHIQ_IOC_GET_CLIENT_ID: {
		/* Handle is passed by value, not via a struct. */
		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;

		ret = vchiq_get_client_id(handle);
	} break;

	case VCHIQ_IOC_GET_CONFIG: {
		VCHIQ_GET_CONFIG_T *pargs = arg;
		VCHIQ_CONFIG_T config;

		if (pargs->config_size > sizeof(config)) {
			ret = -EINVAL;
			break;
		}
		status = vchiq_get_config(instance, pargs->config_size, &config);
		if (status == VCHIQ_SUCCESS) {
			if (copy_to_user((void __user *)pargs->pconfig,
				    &config, pargs->config_size) != 0) {
				ret = -EFAULT;
				break;
			}
		}
	} break;

	case VCHIQ_IOC_SET_SERVICE_OPTION: {
		VCHIQ_SET_SERVICE_OPTION_T *pargs = arg;

		service = find_service_for_instance(instance, pargs->handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		status = vchiq_set_service_option(
				pargs->handle, pargs->option, pargs->value);
	} break;

	case VCHIQ_IOC_DUMP_PHYS_MEM: {
#if 0
		VCHIQ_DUMP_MEM_T *pargs = arg;
#endif

		/* Not implemented in this port. */
		printf("IMPLEMENT ME: %s:%d\n", __FILE__, __LINE__);
#if 0
		dump_phys_mem(pargs->virt_addr, pargs->num_bytes);
#endif
	} break;

	default:
		ret = -ENOTTY;
		break;
	}

	/* Drop the service reference still held by most cases. */
	if (service)
		unlock_service(service);

	/* Fold a VCHIQ status into an errno if no errno was set above. */
	if (ret == 0) {
		if (status == VCHIQ_ERROR)
			ret = -EIO;
		else if (status == VCHIQ_RETRY)
			ret = -EINTR;
	}

	if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
		(ret != -EWOULDBLOCK))
		vchiq_log_info(vchiq_arm_log_level,
			"  ioctl instance %lx, cmd %s -> status %d, %d",
			(unsigned long)instance,
			(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
				ioctl_names[_IOC_NR(cmd)] :
				"<invalid>",
			status, ret);
	else
		vchiq_log_trace(vchiq_arm_log_level,
			"  ioctl instance %lx, cmd %s -> status %d, %d",
			(unsigned long)instance,
			(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
				ioctl_names[_IOC_NR(cmd)] :
				"<invalid>",
			status, ret);

	/* XXXBSD: report BSD-style error to userland */
	if (ret < 0)
		ret = -ret;

	return ret;
}
1021
/* Consistency fix: the rest of this file guards unfinished code with
 * `#ifdef notyet` (see the prototypes above); `#if notyet` relied on an
 * undefined macro evaluating to 0 and trips -Wundef.  Behavior is
 * unchanged — the block remains compiled out. */
#ifdef notyet
/* Destructor for an instance allocated from M_VCHIQ (procfs path). */
static void
instance_dtr(void *data)
{

	free(data, M_VCHIQ);
}
#endif
1030
1031 /****************************************************************************
1032 *
1033 * vchiq_open
1034 *
1035 ***************************************************************************/
1036
/*
 * open(2) handler for the vchiq device.  Allocates a per-open
 * instance, then hands it to a cloned file descriptor with
 * vchiq_fileops attached — all later read/ioctl/close calls arrive
 * through those fileops with the instance in fp->f_data.
 *
 * NOTE(review): error paths return Linux-style negative errnos while
 * NetBSD d_open conventionally returns positive ones — confirm the
 * consumers of this return value before changing.
 */
static int
vchiq_open(dev_t dev, int flags, int mode, lwp_t *l)
{
	VCHIQ_INSTANCE_T instance = NULL;
	struct file *fp;
	int err, fd;

	vchiq_log_info(vchiq_arm_log_level, "vchiq_open");

	/* XXXBSD: do we really need this check? */
	if (device_lookup_private(&vchiq_cd, minor(dev)) != NULL) {
		VCHIQ_STATE_T *state = vchiq_get_state();

		if (!state) {
			vchiq_log_error(vchiq_arm_log_level,
				"vchiq has no connection to VideoCore");
			return -ENOTCONN;
		}

		instance = kzalloc(sizeof(*instance), GFP_KERNEL);
		if (!instance)
			return -ENOMEM;

		err = fd_allocfile(&fp, &fd);
		if (err) {
			kfree(instance);
			return -err;
		}

		instance->state = state;
		instance->pid = current->l_proc->p_pid;

#ifdef notyet
		/* NOTE(review): 'ret' is not declared in this function;
		** this guarded block would not compile if enabled. */
		ret = vchiq_proc_add_instance(instance);
		if (ret != 0) {
			kfree(instance);
			return ret;
		}
#endif

		_sema_init(&instance->insert_event, 0);
		_sema_init(&instance->remove_event, 0);
		lmutex_init(&instance->completion_mutex);
		lmutex_init(&instance->bulk_waiter_list_mutex);
		INIT_LIST_HEAD(&instance->bulk_waiter_list);

	}
	else {
		vchiq_log_error(vchiq_arm_log_level,
			"Unknown minor device");
		return -ENXIO;
	}

	/* Attach the instance to the new descriptor and return it. */
	return fd_clone(fp, fd, flags, &vchiq_fileops, instance);
}
1092
1093 /****************************************************************************
1094 *
1095 * vchiq_release
1096 *
1097 ***************************************************************************/
1098
/*
 * vchiq_close - close handler for the vchiq character device.
 *
 * Tears down the per-open VCHIQ_INSTANCE_T attached to the file: wakes and
 * stops the completion machinery, terminates every service created through
 * this instance, drains its message and completion queues (releasing any
 * held messages/service refs), drops the videocore use count taken for the
 * teardown, and frees any outstanding bulk waiters.
 *
 * Returns 0 on success or a negative errno (-EPERM if vchiq has no
 * connection to VideoCore).
 *
 * NOTE(review): the instance structure itself does not appear to be freed
 * here — confirm its lifetime is handled elsewhere.
 */
static int
vchiq_close(struct file *fp)
{
	int ret = 0;
	/* "if (1)" preserves the shape of the Linux original, which
	 * dispatched on the minor device number at this point. */
	if (1) {
		VCHIQ_INSTANCE_T instance;
		VCHIQ_STATE_T *state = vchiq_get_state();
		VCHIQ_SERVICE_T *service;
		int i;

		instance = fp->f_data;

		vchiq_log_info(vchiq_arm_log_level,
			"vchiq_release: instance=%lx",
			(unsigned long)instance);

		/* No connection to VideoCore - nothing to tear down. */
		if (!state) {
			ret = -EPERM;
			goto out;
		}

		/* Ensure videocore is awake to allow termination. */
		vchiq_use_internal(instance->state, NULL,
			USE_TYPE_VCHIQ);

		lmutex_lock(&instance->completion_mutex);

		/* Wake the completion thread and ask it to exit */
		instance->closing = 1;
		up(&instance->insert_event);

		lmutex_unlock(&instance->completion_mutex);

		/* Wake the slot handler if the completion queue is full. */
		up(&instance->remove_event);

		/* Mark all services for termination... */
		i = 0;
		while ((service = next_service_by_instance(state, instance,
			&i)) !=	NULL) {
			USER_SERVICE_T *user_service = service->base.userdata;

			/* Wake the slot handler if the msg queue is full. */
			up(&user_service->remove_event);

			vchiq_terminate_service_internal(service);
			unlock_service(service);
		}

		/* ...and wait for them to die */
		i = 0;
		while ((service = next_service_by_instance(state, instance, &i))
			!= NULL) {
			USER_SERVICE_T *user_service = service->base.userdata;

			/* Blocks until the service reaches the FREE state. */
			down(&service->remove_event);

			BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);

			spin_lock(&msg_queue_spinlock);

			/* Drain the per-service message queue, releasing
			 * each held message back to the core.  The spinlock
			 * is dropped around vchiq_release_message since it
			 * may sleep. */
			while (user_service->msg_remove !=
				user_service->msg_insert) {
				VCHIQ_HEADER_T *header = user_service->
					msg_queue[user_service->msg_remove &
					(MSG_QUEUE_SIZE - 1)];
				user_service->msg_remove++;
				spin_unlock(&msg_queue_spinlock);

				if (header)
					vchiq_release_message(
						service->handle,
						header);
				spin_lock(&msg_queue_spinlock);
			}

			spin_unlock(&msg_queue_spinlock);

			unlock_service(service);
		}

		/* Release any closed services */
		while (instance->completion_remove !=
			instance->completion_insert) {
			VCHIQ_COMPLETION_DATA_T *completion;
			VCHIQ_SERVICE_T *service1;
			completion = &instance->completions[
				instance->completion_remove &
				(MAX_COMPLETIONS - 1)];
			service1 = completion->service_userdata;
			/* SERVICE_CLOSED completions hold a service ref
			 * that would otherwise leak. */
			if (completion->reason == VCHIQ_SERVICE_CLOSED)
				unlock_service(service1);
			instance->completion_remove++;
		}

		/* Release the PEER service count. */
		vchiq_release_internal(instance->state, NULL);

		/* Free any bulk waiters left behind by exited clients. */
		{
			struct list_head *pos, *next;
			list_for_each_safe(pos, next,
				&instance->bulk_waiter_list) {
				struct bulk_waiter_node *waiter;
				waiter = list_entry(pos,
					struct bulk_waiter_node,
					list);
				list_del(pos);
				vchiq_log_info(vchiq_arm_log_level,
					"bulk_waiter - cleaned up %x "
					"for pid %d",
					(unsigned int)waiter, waiter->pid);
				_sema_destroy(&waiter->bulk_waiter.event);
				kfree(waiter);
			}
		}

	}
	else {
		vchiq_log_error(vchiq_arm_log_level,
			"Unknown minor device");
		ret = -ENXIO;
	}

out:
	return ret;
}
1225
1226 /****************************************************************************
1227 *
1228 * vchiq_dump
1229 *
1230 ***************************************************************************/
1231
1232 void
1233 vchiq_dump(void *dump_context, const char *str, int len)
1234 {
1235 DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
1236
1237 if (context->actual < context->space) {
1238 int copy_bytes;
1239 if (context->offset > 0) {
1240 int skip_bytes = min(len, (int)context->offset);
1241 str += skip_bytes;
1242 len -= skip_bytes;
1243 context->offset -= skip_bytes;
1244 if (context->offset > 0)
1245 return;
1246 }
1247 copy_bytes = min(len, (int)(context->space - context->actual));
1248 if (copy_bytes == 0)
1249 return;
1250 memcpy(context->buf + context->actual, str, copy_bytes);
1251 context->actual += copy_bytes;
1252 len -= copy_bytes;
1253
1254 /* If tne terminating NUL is included in the length, then it
1255 ** marks the end of a line and should be replaced with a
1256 ** carriage return. */
1257 if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
1258 char cr = '\n';
1259 memcpy(context->buf + context->actual - 1, &cr, 1);
1260 }
1261 }
1262 }
1263
1264 /****************************************************************************
1265 *
1266 * vchiq_dump_platform_instance_state
1267 *
1268 ***************************************************************************/
1269
1270 void
1271 vchiq_dump_platform_instances(void *dump_context)
1272 {
1273 VCHIQ_STATE_T *state = vchiq_get_state();
1274 char buf[80];
1275 int len;
1276 int i;
1277
1278 /* There is no list of instances, so instead scan all services,
1279 marking those that have been dumped. */
1280
1281 for (i = 0; i < state->unused_service; i++) {
1282 VCHIQ_SERVICE_T *service = state->services[i];
1283 VCHIQ_INSTANCE_T instance;
1284
1285 if (service && (service->base.callback == service_callback)) {
1286 instance = service->instance;
1287 if (instance)
1288 instance->mark = 0;
1289 }
1290 }
1291
1292 for (i = 0; i < state->unused_service; i++) {
1293 VCHIQ_SERVICE_T *service = state->services[i];
1294 VCHIQ_INSTANCE_T instance;
1295
1296 if (service && (service->base.callback == service_callback)) {
1297 instance = service->instance;
1298 if (instance && !instance->mark) {
1299 len = snprintf(buf, sizeof(buf),
1300 "Instance %x: pid %d,%s completions "
1301 "%d/%d",
1302 (unsigned int)instance, instance->pid,
1303 instance->connected ? " connected, " :
1304 "",
1305 instance->completion_insert -
1306 instance->completion_remove,
1307 MAX_COMPLETIONS);
1308
1309 vchiq_dump(dump_context, buf, len + 1);
1310
1311 instance->mark = 1;
1312 }
1313 }
1314 }
1315 }
1316
1317 /****************************************************************************
1318 *
1319 * vchiq_dump_platform_service_state
1320 *
1321 ***************************************************************************/
1322
1323 void
1324 vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
1325 {
1326 USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
1327 char buf[80];
1328 int len;
1329
1330 len = snprintf(buf, sizeof(buf), " instance %x",
1331 (unsigned int)service->instance);
1332
1333 if ((service->base.callback == service_callback) &&
1334 user_service->is_vchi) {
1335 len += snprintf(buf + len, sizeof(buf) - len,
1336 ", %d/%d messages",
1337 user_service->msg_insert - user_service->msg_remove,
1338 MSG_QUEUE_SIZE);
1339
1340 if (user_service->dequeue_pending)
1341 len += snprintf(buf + len, sizeof(buf) - len,
1342 " (dequeue pending)");
1343 }
1344
1345 vchiq_dump(dump_context, buf, len + 1);
1346 }
1347
1348 #ifdef notyet
1349 /****************************************************************************
1350 *
 * dump_phys_mem
1352 *
1353 ***************************************************************************/
1354
1355 static void
1356 dump_phys_mem(void *virt_addr, uint32_t num_bytes)
1357 {
1358 int rc;
1359 uint8_t *end_virt_addr = virt_addr + num_bytes;
1360 int num_pages;
1361 int offset;
1362 int end_offset;
1363 int page_idx;
1364 int prev_idx;
1365 struct page *page;
1366 struct page **pages;
1367 uint8_t *kmapped_virt_ptr;
1368
1369 /* Align virtAddr and endVirtAddr to 16 byte boundaries. */
1370
1371 virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
1372 end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
1373 ~0x0fuL);
1374
1375 offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
1376 end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
1377
1378 num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
1379
1380 pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
1381 if (pages == NULL) {
1382 vchiq_log_error(vchiq_arm_log_level,
1383 "Unable to allocation memory for %d pages\n",
1384 num_pages);
1385 return;
1386 }
1387
1388 down_read(¤t->mm->mmap_sem);
1389 rc = get_user_pages(current, /* task */
1390 current->mm, /* mm */
1391 (unsigned long)virt_addr, /* start */
1392 num_pages, /* len */
1393 0, /* write */
1394 0, /* force */
1395 pages, /* pages (array of page pointers) */
1396 NULL); /* vmas */
1397 up_read(¤t->mm->mmap_sem);
1398
1399 prev_idx = -1;
1400 page = NULL;
1401
1402 while (offset < end_offset) {
1403
1404 int page_offset = offset % PAGE_SIZE;
1405 page_idx = offset / PAGE_SIZE;
1406
1407 if (page_idx != prev_idx) {
1408
1409 if (page != NULL)
1410 kunmap(page);
1411 page = pages[page_idx];
1412 kmapped_virt_ptr = kmap(page);
1413
1414 prev_idx = page_idx;
1415 }
1416
1417 if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
1418 vchiq_log_dump_mem("ph",
1419 (uint32_t)(unsigned long)&kmapped_virt_ptr[
1420 page_offset],
1421 &kmapped_virt_ptr[page_offset], 16);
1422
1423 offset += 16;
1424 }
1425 if (page != NULL)
1426 kunmap(page);
1427
1428 for (page_idx = 0; page_idx < num_pages; page_idx++)
1429 page_cache_release(pages[page_idx]);
1430
1431 kfree(pages);
1432 }
1433 #endif
1434
1435 /****************************************************************************
1436 *
1437 * vchiq_read
1438 *
1439 ***************************************************************************/
1440
1441 static int
1442 vchiq_read(struct file *file, off_t *ppos, struct uio *uio, kauth_cred_t cred,
1443 int flags)
1444 {
1445 int result;
1446
1447 char *buf = kmem_zalloc(PAGE_SIZE, KM_SLEEP);
1448
1449 DUMP_CONTEXT_T context;
1450 context.buf = buf;
1451 context.actual = 0;
1452 context.space = PAGE_SIZE;
1453 context.offset = *ppos;
1454
1455 vchiq_dump_state(&context, &g_state);
1456
1457 *ppos += context.actual;
1458
1459 result = uiomove(buf, context.actual, uio);
1460 kmem_free(buf, PAGE_SIZE);
1461
1462 return result;
1463 }
1464
1465 VCHIQ_STATE_T *
1466 vchiq_get_state(void)
1467 {
1468
1469 if (g_state.remote == NULL)
1470 printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
1471 else if (g_state.remote->initialised != 1)
1472 printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
1473 __func__, g_state.remote->initialised);
1474
1475 return ((g_state.remote != NULL) &&
1476 (g_state.remote->initialised == 1)) ? &g_state : NULL;
1477 }
1478
1479 /*
1480 * Autosuspend related functionality
1481 */
1482
1483 int
1484 vchiq_videocore_wanted(VCHIQ_STATE_T *state)
1485 {
1486 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
1487 if (!arm_state)
1488 /* autosuspend not supported - always return wanted */
1489 return 1;
1490 else if (arm_state->blocked_count)
1491 return 1;
1492 else if (!arm_state->videocore_use_count)
1493 /* usage count zero - check for override unless we're forcing */
1494 if (arm_state->resume_blocked)
1495 return 0;
1496 else
1497 return vchiq_platform_videocore_wanted(state);
1498 else
1499 /* non-zero usage count - videocore still required */
1500 return 1;
1501 }
1502
/*
 * Service callback for the keep-alive "KEEP" service.  No callbacks are
 * expected on this service, so any invocation is logged as an error and
 * otherwise ignored.
 */
static VCHIQ_STATUS_T
vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
	VCHIQ_HEADER_T *header,
	VCHIQ_SERVICE_HANDLE_T service_user,
	void *bulk_user)
{
	vchiq_log_error(vchiq_susp_log_level,
		"%s callback reason %d", __func__, reason);
	return 0;
}
1513
1514 static int
1515 vchiq_keepalive_thread_func(void *v)
1516 {
1517 VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
1518 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
1519
1520 VCHIQ_STATUS_T status;
1521 VCHIQ_INSTANCE_T instance;
1522 VCHIQ_SERVICE_HANDLE_T ka_handle;
1523
1524 VCHIQ_SERVICE_PARAMS_T params = {
1525 .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
1526 .callback = vchiq_keepalive_vchiq_callback,
1527 .version = KEEPALIVE_VER,
1528 .version_min = KEEPALIVE_VER_MIN
1529 };
1530
1531 status = vchiq_initialise(&instance);
1532 if (status != VCHIQ_SUCCESS) {
1533 vchiq_log_error(vchiq_susp_log_level,
1534 "%s vchiq_initialise failed %d", __func__, status);
1535 goto exit;
1536 }
1537
1538 status = vchiq_connect(instance);
1539 if (status != VCHIQ_SUCCESS) {
1540 vchiq_log_error(vchiq_susp_log_level,
1541 "%s vchiq_connect failed %d", __func__, status);
1542 goto shutdown;
1543 }
1544
1545 status = vchiq_add_service(instance, ¶ms, &ka_handle);
1546 if (status != VCHIQ_SUCCESS) {
1547 vchiq_log_error(vchiq_susp_log_level,
1548 "%s vchiq_open_service failed %d", __func__, status);
1549 goto shutdown;
1550 }
1551
1552 while (1) {
1553 long rc = 0, uc = 0;
1554 if (wait_for_completion_interruptible(&arm_state->ka_evt)
1555 != 0) {
1556 vchiq_log_error(vchiq_susp_log_level,
1557 "%s interrupted", __func__);
1558 flush_signals(current);
1559 continue;
1560 }
1561
1562 /* read and clear counters. Do release_count then use_count to
1563 * prevent getting more releases than uses */
1564 rc = atomic_xchg(&arm_state->ka_release_count, 0);
1565 uc = atomic_xchg(&arm_state->ka_use_count, 0);
1566
1567 /* Call use/release service the requisite number of times.
1568 * Process use before release so use counts don't go negative */
1569 while (uc--) {
1570 atomic_inc(&arm_state->ka_use_ack_count);
1571 status = vchiq_use_service(ka_handle);
1572 if (status != VCHIQ_SUCCESS) {
1573 vchiq_log_error(vchiq_susp_log_level,
1574 "%s vchiq_use_service error %d",
1575 __func__, status);
1576 }
1577 }
1578 while (rc--) {
1579 status = vchiq_release_service(ka_handle);
1580 if (status != VCHIQ_SUCCESS) {
1581 vchiq_log_error(vchiq_susp_log_level,
1582 "%s vchiq_release_service error %d",
1583 __func__, status);
1584 }
1585 }
1586 }
1587
1588 shutdown:
1589 vchiq_shutdown(instance);
1590 exit:
1591 return 0;
1592 }
1593
1594 VCHIQ_STATUS_T
1595 vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
1596 {
1597 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
1598
1599 if (arm_state) {
1600 rwlock_init(&arm_state->susp_res_lock);
1601
1602 init_completion(&arm_state->ka_evt);
1603 atomic_set(&arm_state->ka_use_count, 0);
1604 atomic_set(&arm_state->ka_use_ack_count, 0);
1605 atomic_set(&arm_state->ka_release_count, 0);
1606
1607 init_completion(&arm_state->vc_suspend_complete);
1608
1609 init_completion(&arm_state->vc_resume_complete);
1610 /* Initialise to 'done' state. We only want to block on resume
1611 * completion while videocore is suspended. */
1612 set_resume_state(arm_state, VC_RESUME_RESUMED);
1613
1614 init_completion(&arm_state->resume_blocker);
1615 /* Initialise to 'done' state. We only want to block on this
1616 * completion while resume is blocked */
1617 complete_all(&arm_state->resume_blocker);
1618
1619 init_completion(&arm_state->blocked_blocker);
1620 /* Initialise to 'done' state. We only want to block on this
1621 * completion while things are waiting on the resume blocker */
1622 complete_all(&arm_state->blocked_blocker);
1623
1624 arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
1625 arm_state->suspend_timer_running = 0;
1626 init_timer(&arm_state->suspend_timer);
1627 arm_state->suspend_timer.data = (unsigned long)(state);
1628 arm_state->suspend_timer.function = suspend_timer_callback;
1629
1630 arm_state->first_connect = 0;
1631
1632 }
1633 return status;
1634 }
1635
1636 /*
1637 ** Functions to modify the state variables;
1638 ** set_suspend_state
1639 ** set_resume_state
1640 **
1641 ** There are more state variables than we might like, so ensure they remain in
1642 ** step. Suspend and resume state are maintained separately, since most of
1643 ** these state machines can operate independently. However, there are a few
1644 ** states where state transitions in one state machine cause a reset to the
1645 ** other state machine. In addition, there are some completion events which
1646 ** need to occur on state machine reset and end-state(s), so these are also
1647 ** dealt with in these functions.
1648 **
1649 ** In all states we set the state variable according to the input, but in some
1650 ** cases we perform additional steps outlined below;
1651 **
1652 ** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
1653 ** The suspend completion is completed after any suspend
1654 ** attempt. When we reset the state machine we also reset
1655 ** the completion. This reset occurs when videocore is
1656 ** resumed, and also if we initiate suspend after a suspend
1657 ** failure.
1658 **
1659 ** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
1660 ** suspend - ie from this point on we must try to suspend
1661 ** before resuming can occur. We therefore also reset the
1662 ** resume state machine to VC_RESUME_IDLE in this state.
1663 **
1664 ** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
1665 ** complete_all on the suspend completion to notify
1666 ** anything waiting for suspend to happen.
1667 **
1668 ** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
1669 ** initiate resume, so no need to alter resume state.
1670 ** We call complete_all on the suspend completion to notify
1671 ** of suspend rejection.
1672 **
1673 ** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the
1674 ** suspend completion and reset the resume state machine.
1675 **
** VC_RESUME_IDLE - Initialise the resume completion at the same time.  The
**			resume completion is in its 'done' state whenever
**			videocore is running.  Therefore, the VC_RESUME_IDLE
**			state implies that videocore is suspended.
**			Hence, any thread which needs to wait until videocore is
**			running can wait on this completion - it will only block
**			if videocore is suspended.
1683 **
** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
**			Call complete_all on the resume completion to unblock
**			any threads waiting for resume.	Also reset the suspend
**			state machine to its idle state.
1688 **
1689 ** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
1690 */
1691
1692 inline void
1693 set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
1694 enum vc_suspend_status new_state)
1695 {
1696 /* set the state in all cases */
1697 arm_state->vc_suspend_state = new_state;
1698
1699 /* state specific additional actions */
1700 switch (new_state) {
1701 case VC_SUSPEND_FORCE_CANCELED:
1702 complete_all(&arm_state->vc_suspend_complete);
1703 break;
1704 case VC_SUSPEND_REJECTED:
1705 complete_all(&arm_state->vc_suspend_complete);
1706 break;
1707 case VC_SUSPEND_FAILED:
1708 complete_all(&arm_state->vc_suspend_complete);
1709 arm_state->vc_resume_state = VC_RESUME_RESUMED;
1710 complete_all(&arm_state->vc_resume_complete);
1711 break;
1712 case VC_SUSPEND_IDLE:
1713 INIT_COMPLETION(arm_state->vc_suspend_complete);
1714 break;
1715 case VC_SUSPEND_REQUESTED:
1716 break;
1717 case VC_SUSPEND_IN_PROGRESS:
1718 set_resume_state(arm_state, VC_RESUME_IDLE);
1719 break;
1720 case VC_SUSPEND_SUSPENDED:
1721 complete_all(&arm_state->vc_suspend_complete);
1722 break;
1723 default:
1724 BUG();
1725 break;
1726 }
1727 }
1728
1729 inline void
1730 set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
1731 enum vc_resume_status new_state)
1732 {
1733 /* set the state in all cases */
1734 arm_state->vc_resume_state = new_state;
1735
1736 /* state specific additional actions */
1737 switch (new_state) {
1738 case VC_RESUME_FAILED:
1739 break;
1740 case VC_RESUME_IDLE:
1741 INIT_COMPLETION(arm_state->vc_resume_complete);
1742 break;
1743 case VC_RESUME_REQUESTED:
1744 break;
1745 case VC_RESUME_IN_PROGRESS:
1746 break;
1747 case VC_RESUME_RESUMED:
1748 complete_all(&arm_state->vc_resume_complete);
1749 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
1750 break;
1751 default:
1752 BUG();
1753 break;
1754 }
1755 }
1756
1757
1758 /* should be called with the write lock held */
1759 inline void
1760 start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
1761 {
1762 del_timer(&arm_state->suspend_timer);
1763 arm_state->suspend_timer.expires = jiffies +
1764 msecs_to_jiffies(arm_state->
1765 suspend_timer_timeout);
1766 add_timer(&arm_state->suspend_timer);
1767 arm_state->suspend_timer_running = 1;
1768 }
1769
1770 /* should be called with the write lock held */
1771 static inline void
1772 stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
1773 {
1774 if (arm_state->suspend_timer_running) {
1775 del_timer(&arm_state->suspend_timer);
1776 arm_state->suspend_timer_running = 0;
1777 }
1778 }
1779
1780 static inline int
1781 need_resume(VCHIQ_STATE_T *state)
1782 {
1783 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
1784 return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
1785 (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
1786 vchiq_videocore_wanted(state);
1787 }
1788
/*
 * Prevent new resumes from starting, first waiting out any clients left
 * blocked by a previous force suspend and any resume already in flight.
 *
 * Must be called with the susp_res write lock held; the lock is dropped
 * and re-taken around each wait.  On success, resume_blocked is set and
 * VCHIQ_SUCCESS returned; on timeout/interruption, VCHIQ_ERROR (with
 * resume NOT blocked).
 */
static int
block_resume(VCHIQ_ARM_STATE_T *arm_state)
{
	int status = VCHIQ_SUCCESS;
	const unsigned long timeout_val =
				msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
	int resume_count = 0;

	/* Allow any threads which were blocked by the last force suspend to
	 * complete if they haven't already.  Only give this one shot; if
	 * blocked_count is incremented after blocked_blocker is completed
	 * (which only happens when blocked_count hits 0) then those threads
	 * will have to wait until next time around */
	if (arm_state->blocked_count) {
		INIT_COMPLETION(arm_state->blocked_blocker);
		write_unlock_bh(&arm_state->susp_res_lock);
		vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
			"blocked clients", __func__);
		if (wait_for_completion_interruptible_timeout(
				&arm_state->blocked_blocker, timeout_val)
					<= 0) {
			vchiq_log_error(vchiq_susp_log_level, "%s wait for "
				"previously blocked clients failed" , __func__);
			status = VCHIQ_ERROR;
			/* Re-take the lock before returning to the caller. */
			write_lock_bh(&arm_state->susp_res_lock);
			goto out;
		}
		vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
			"clients resumed", __func__);
		write_lock_bh(&arm_state->susp_res_lock);
	}

	/* We need to wait for resume to complete if it's in process.
	 * Give up after two waits to avoid spinning forever. */
	while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
			arm_state->vc_resume_state > VC_RESUME_IDLE) {
		if (resume_count > 1) {
			status = VCHIQ_ERROR;
			vchiq_log_error(vchiq_susp_log_level, "%s waited too "
				"many times for resume" , __func__);
			goto out;
		}
		write_unlock_bh(&arm_state->susp_res_lock);
		vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
			__func__);
		if (wait_for_completion_interruptible_timeout(
				&arm_state->vc_resume_complete, timeout_val)
					<= 0) {
			vchiq_log_error(vchiq_susp_log_level, "%s wait for "
				"resume failed (%s)", __func__,
				resume_state_names[arm_state->vc_resume_state +
							VC_RESUME_NUM_OFFSET]);
			status = VCHIQ_ERROR;
			write_lock_bh(&arm_state->susp_res_lock);
			goto out;
		}
		vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
		/* Re-check the resume state under the lock. */
		write_lock_bh(&arm_state->susp_res_lock);
		resume_count++;
	}
	/* Arm the resume blocker so subsequent resumes wait on it. */
	INIT_COMPLETION(arm_state->resume_blocker);
	arm_state->resume_blocked = 1;

out:
	return status;
}
1854
/*
 * Allow resume to proceed again: wake anything waiting on the resume
 * blocker, then clear the blocked flag.  Call with the write lock held.
 */
static inline void
unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
{
	complete_all(&arm_state->resume_blocker);
	arm_state->resume_blocked = 0;
}
1861
/* Initiate suspend via slot handler. Should be called with the write lock
 * held.
 *
 * Moves the suspend state machine to VC_SUSPEND_REQUESTED and kicks the
 * slot handler thread to carry out the suspend.  A suspend already
 * requested or in progress is a no-op; REJECTED/FAILED (and unexpected)
 * states are first reset to IDLE so idle-state actions run.  Returns
 * VCHIQ_SUCCESS, or VCHIQ_ERROR if there is no arm_state. */
VCHIQ_STATUS_T
vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
{
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
	status = VCHIQ_SUCCESS;


	switch (arm_state->vc_suspend_state) {
	case VC_SUSPEND_REQUESTED:
		vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
			"requested", __func__);
		break;
	case VC_SUSPEND_IN_PROGRESS:
		vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
			"progress", __func__);
		break;

	default:
		/* We don't expect to be in other states, so log but continue
		 * anyway */
		vchiq_log_error(vchiq_susp_log_level,
			"%s unexpected suspend state %s", __func__,
			suspend_state_names[arm_state->vc_suspend_state +
						VC_SUSPEND_NUM_OFFSET]);
		/* fall through */
	case VC_SUSPEND_REJECTED:
	case VC_SUSPEND_FAILED:
		/* Ensure any idle state actions have been run */
		set_suspend_state(arm_state, VC_SUSPEND_IDLE);
		/* fall through */
	case VC_SUSPEND_IDLE:
		vchiq_log_info(vchiq_susp_log_level,
			"%s: suspending", __func__);
		set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
		/* kick the slot handler thread to initiate suspend */
		request_poll(state, NULL, 0);
		break;
	}

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
	return status;
}
1913
1914 void
1915 vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
1916 {
1917 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
1918 int susp = 0;
1919
1920 if (!arm_state)
1921 goto out;
1922
1923 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
1924
1925 write_lock_bh(&arm_state->susp_res_lock);
1926 if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
1927 arm_state->vc_resume_state == VC_RESUME_RESUMED) {
1928 set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
1929 susp = 1;
1930 }
1931 write_unlock_bh(&arm_state->susp_res_lock);
1932
1933 if (susp)
1934 vchiq_platform_suspend(state);
1935
1936 out:
1937 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
1938 return;
1939 }
1940
1941
1942 static void
1943 output_timeout_error(VCHIQ_STATE_T *state)
1944 {
1945 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
1946 char service_err[50] = "";
1947 int vc_use_count = arm_state->videocore_use_count;
1948 int active_services = state->unused_service;
1949 int i;
1950
1951 if (!arm_state->videocore_use_count) {
1952 snprintf(service_err, 50, " Videocore usecount is 0");
1953 goto output_msg;
1954 }
1955 for (i = 0; i < active_services; i++) {
1956 VCHIQ_SERVICE_T *service_ptr = state->services[i];
1957 if (service_ptr && service_ptr->service_use_count &&
1958 (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
1959 snprintf(service_err, 50, " %c%c%c%c(%8x) service has "
1960 "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
1961 service_ptr->base.fourcc),
1962 service_ptr->client_id,
1963 service_ptr->service_use_count,
1964 service_ptr->service_use_count ==
1965 vc_use_count ? "" : " (+ more)");
1966 break;
1967 }
1968 }
1969
1970 output_msg:
1971 vchiq_log_error(vchiq_susp_log_level,
1972 "timed out waiting for vc suspend (%d).%s",
1973 arm_state->autosuspend_override, service_err);
1974
1975 }
1976
1977 /* Try to get videocore into suspended state, regardless of autosuspend state.
1978 ** We don't actually force suspend, since videocore may get into a bad state
1979 ** if we force suspend at a bad time. Instead, we wait for autosuspend to
1980 ** determine a good point to suspend. If this doesn't happen within 100ms we
1981 ** report failure.
1982 **
1983 ** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
1984 ** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
1985 */
/*
 * See the block comment above: try to get videocore suspended by blocking
 * resume and waiting for autosuspend to pick a good moment, rather than
 * forcing it.  Returns VCHIQ_SUCCESS once suspended (resume stays
 * blocked), VCHIQ_RETRY on timeout/failed suspend, VCHIQ_ERROR if
 * interrupted or resume could not be blocked.
 */
VCHIQ_STATUS_T
vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	long rc = 0;
	int repeat = -1;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);

	/* Stop any new resume from starting while we try to suspend. */
	status = block_resume(arm_state);
	if (status != VCHIQ_SUCCESS)
		goto unlock;
	if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
		/* Already suspended - just block resume and exit */
		vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
			__func__);
		status = VCHIQ_SUCCESS;
		goto unlock;
	} else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
		/* initiate suspend immediately in the case that we're waiting
		 * for the timeout */
		stop_suspend_timer(arm_state);
		if (!vchiq_videocore_wanted(state)) {
			vchiq_log_info(vchiq_susp_log_level, "%s videocore "
				"idle, initiating suspend", __func__);
			status = vchiq_arm_vcsuspend(state);
		} else if (arm_state->autosuspend_override <
						FORCE_SUSPEND_FAIL_MAX) {
			vchiq_log_info(vchiq_susp_log_level, "%s letting "
				"videocore go idle", __func__);
			status = VCHIQ_SUCCESS;
		} else {
			/* Too many previous failures - give up on waiting
			 * for idle and request suspend directly. */
			vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
				"many times - attempting suspend", __func__);
			status = vchiq_arm_vcsuspend(state);
		}
	} else {
		vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
			"in progress - wait for completion", __func__);
		status = VCHIQ_SUCCESS;
	}

	/* Wait for suspend to happen due to system idle (not forced..) */
	if (status != VCHIQ_SUCCESS)
		goto unblock_resume;

	/* The lock is dropped across each wait and re-taken to inspect
	 * the state; the timeout is repeated once if a suspend had at
	 * least started when it expired. */
	do {
		write_unlock_bh(&arm_state->susp_res_lock);

		rc = wait_for_completion_interruptible_timeout(
				&arm_state->vc_suspend_complete,
				msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));

		write_lock_bh(&arm_state->susp_res_lock);
		if (rc < 0) {
			vchiq_log_warning(vchiq_susp_log_level, "%s "
				"interrupted waiting for suspend", __func__);
			status = VCHIQ_ERROR;
			goto unblock_resume;
		} else if (rc == 0) {
			if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
				/* Repeat timeout once if in progress */
				if (repeat < 0) {
					repeat = 1;
					continue;
				}
			}
			arm_state->autosuspend_override++;
			output_timeout_error(state);

			status = VCHIQ_RETRY;
			goto unblock_resume;
		}
	} while (0 < (repeat--));

	/* Check and report state in case we need to abort ARM suspend */
	if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
		status = VCHIQ_RETRY;
		vchiq_log_error(vchiq_susp_log_level,
			"%s videocore suspend failed (state %s)", __func__,
			suspend_state_names[arm_state->vc_suspend_state +
						VC_SUSPEND_NUM_OFFSET]);
		/* Reset the state only if it's still in an error state.
		 * Something could have already initiated another suspend. */
		if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
			set_suspend_state(arm_state, VC_SUSPEND_IDLE);

		goto unblock_resume;
	}

	/* successfully suspended - unlock and exit, leaving resume blocked */
	goto unlock;

unblock_resume:
	/* all error states need to unblock resume before exit */
	unblock_resume(arm_state);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
	return status;
}
2096
2097 void
2098 vchiq_check_suspend(VCHIQ_STATE_T *state)
2099 {
2100 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2101
2102 if (!arm_state)
2103 goto out;
2104
2105 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2106
2107 write_lock_bh(&arm_state->susp_res_lock);
2108 if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
2109 arm_state->first_connect &&
2110 !vchiq_videocore_wanted(state)) {
2111 vchiq_arm_vcsuspend(state);
2112 }
2113 write_unlock_bh(&arm_state->susp_res_lock);
2114
2115 out:
2116 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2117 return;
2118 }
2119
2120
/*
 * Undo a force suspend: unblock resume, request one if needed, and wait
 * for it to complete.  Returns 0 if videocore ends up resumed, -1 if it
 * remains suspended or the wait was interrupted.
 */
int
vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	int resume = 0;
	int ret = -1;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);
	unblock_resume(arm_state);
	/* Request a resume (via the slot handler) if one is needed. */
	resume = vchiq_check_resume(state);
	write_unlock_bh(&arm_state->susp_res_lock);

	if (resume) {
		/* Wait (interruptibly) for the requested resume. */
		if (wait_for_completion_interruptible(
			&arm_state->vc_resume_complete) < 0) {
			vchiq_log_error(vchiq_susp_log_level,
				"%s interrupted", __func__);
			/* failed, cannot accurately derive suspend
			 * state, so exit early. */
			goto out;
		}
	}

	/* Report the final state under the read lock. */
	read_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
		vchiq_log_info(vchiq_susp_log_level,
			"%s: Videocore remains suspended", __func__);
	} else {
		vchiq_log_info(vchiq_susp_log_level,
			"%s: Videocore resumed", __func__);
		ret = 0;
	}
	read_unlock_bh(&arm_state->susp_res_lock);
out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}
2163
2164 /* This function should be called with the write lock held */
2165 int
2166 vchiq_check_resume(VCHIQ_STATE_T *state)
2167 {
2168 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2169 int resume = 0;
2170
2171 if (!arm_state)
2172 goto out;
2173
2174 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2175
2176 if (need_resume(state)) {
2177 set_resume_state(arm_state, VC_RESUME_REQUESTED);
2178 request_poll(state, NULL, 0);
2179 resume = 1;
2180 }
2181
2182 out:
2183 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2184 return resume;
2185 }
2186
#ifdef notyet
/*
 * Platform hook (currently compiled out): if the videocore is asleep
 * (wake_address set) and a resume has been requested, mark the resume
 * as in progress and call vchiq_platform_resume() outside the lock.
 */
void
vchiq_platform_check_resume(VCHIQ_STATE_T *state)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	int res = 0;	/* set to 1 when we should actually resume */

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	write_lock_bh(&arm_state->susp_res_lock);
	/* wake_address == 0 means the videocore never went to sleep. */
	if (arm_state->wake_address == 0) {
		vchiq_log_info(vchiq_susp_log_level,
					"%s: already awake", __func__);
		goto unlock;
	}
	if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
		vchiq_log_info(vchiq_susp_log_level,
					"%s: already resuming", __func__);
		goto unlock;
	}

	if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
		set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
		res = 1;
	} else
		vchiq_log_trace(vchiq_susp_log_level,
				"%s: not resuming (resume state %s)", __func__,
				resume_state_names[arm_state->vc_resume_state +
							VC_RESUME_NUM_OFFSET]);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

	/* Resume outside the lock - vchiq_platform_resume may sleep. */
	if (res)
		vchiq_platform_resume(state);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
	return;

}
#endif
2232
2233
2234
/*
 * Take a "use" reference on the videocore on behalf of either a service
 * (use_type USE_TYPE_SERVICE / USE_TYPE_SERVICE_NO_RESUME) or the VCHIQ
 * core itself (USE_TYPE_VCHIQ).  Blocks while a forced suspend holds
 * resume blocked, cancels any pending suspend request, requests a resume
 * if needed (unless USE_TYPE_SERVICE_NO_RESUME), and waits until the
 * videocore is running.  Returns VCHIQ_SUCCESS, or VCHIQ_ERROR on a
 * NULL service pointer or an interrupted wait.
 */
VCHIQ_STATUS_T
vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
		enum USE_TYPE_E use_type)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
	char entity[16];	/* name used only in log messages */
	int *entity_uc;		/* use count to increment for this entity */
	int local_uc, local_entity_uc;

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	/* Select the entity label and the matching use counter: the peer
	 * count for VCHIQ itself, otherwise the service's own count. */
	if (use_type == USE_TYPE_VCHIQ) {
		snprintf(entity, sizeof(entity), "VCHIQ: ");
		entity_uc = &arm_state->peer_use_count;
	} else if (service) {
		snprintf(entity, sizeof(entity), "%c%c%c%c:%8x",
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		vchiq_log_error(vchiq_susp_log_level, "%s null service "
				"ptr", __func__);
		ret = VCHIQ_ERROR;
		goto out;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	while (arm_state->resume_blocked) {
		/* If we call 'use' while force suspend is waiting for suspend,
		 * then we're about to block the thread which the force is
		 * waiting to complete, so we're bound to just time out. In this
		 * case, set the suspend state such that the wait will be
		 * canceled, so we can complete as quickly as possible. */
		if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
				VC_SUSPEND_IDLE) {
			set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
			break;
		}
		/* If suspend is already in progress then we need to block */
		if (!try_wait_for_completion(&arm_state->resume_blocker)) {
			/* Indicate that there are threads waiting on the resume
			 * blocker. These need to be allowed to complete before
			 * a _second_ call to force suspend can complete,
			 * otherwise low priority threads might never actually
			 * continue */
			arm_state->blocked_count++;
			/* Drop the lock while sleeping on the blocker. */
			write_unlock_bh(&arm_state->susp_res_lock);
			vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
				"blocked - waiting...", __func__, entity);
			if (wait_for_completion_killable(
					&arm_state->resume_blocker) != 0) {
				vchiq_log_error(vchiq_susp_log_level, "%s %s "
					"wait for resume blocker interrupted",
					__func__, entity);
				ret = VCHIQ_ERROR;
				/* Undo our blocked_count contribution. */
				write_lock_bh(&arm_state->susp_res_lock);
				arm_state->blocked_count--;
				write_unlock_bh(&arm_state->susp_res_lock);
				goto out;
			}
			vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
				"unblocked", __func__, entity);
			write_lock_bh(&arm_state->susp_res_lock);
			/* Last waiter out signals the blocked blocker so a
			 * second force suspend can proceed. */
			if (--arm_state->blocked_count == 0)
				complete_all(&arm_state->blocked_blocker);
		}
	}

	/* We are keeping the videocore in use - no suspend timeout. */
	stop_suspend_timer(arm_state);

	local_uc = ++arm_state->videocore_use_count;
	local_entity_uc = ++(*entity_uc);

	/* If there's a pending request which hasn't yet been serviced then
	 * just clear it. If we're past VC_SUSPEND_REQUESTED state then
	 * vc_resume_complete will block until we either resume or fail to
	 * suspend */
	if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
		set_suspend_state(arm_state, VC_SUSPEND_IDLE);

	if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
		set_resume_state(arm_state, VC_RESUME_REQUESTED);
		vchiq_log_info(vchiq_susp_log_level,
			"%s %s count %d, state count %d",
			__func__, entity, local_entity_uc, local_uc);
		request_poll(state, NULL, 0);
	} else
		vchiq_log_trace(vchiq_susp_log_level,
			"%s %s count %d, state count %d",
			__func__, entity, *entity_uc, local_uc);


	write_unlock_bh(&arm_state->susp_res_lock);

	/* Completion is in a done state when we're not suspended, so this won't
	 * block for the non-suspended case. */
	if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
		vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
			__func__, entity);
		if (wait_for_completion_killable(
				&arm_state->vc_resume_complete) != 0) {
			vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
				"resume interrupted", __func__, entity);
			ret = VCHIQ_ERROR;
			goto out;
		}
		vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
			entity);
	}

	if (ret == VCHIQ_SUCCESS) {
		/* Drain queued "use" acknowledgements to the videocore; on a
		 * send failure, put the unsent count back for a later retry. */
		VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
		while (ack_cnt && (status == VCHIQ_SUCCESS)) {
			/* Send the use notify to videocore */
			status = vchiq_send_remote_use_active(state);
			if (status == VCHIQ_SUCCESS)
				ack_cnt--;
			else
				atomic_add(ack_cnt,
					&arm_state->ka_use_ack_count);
		}
	}

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}
2367
/*
 * Drop a "use" reference on the videocore, on behalf of a service or
 * (when service is NULL) the peer.  When the last reference goes away,
 * either start the suspend timer or suspend immediately if a forced
 * suspend is in progress.  Returns VCHIQ_SUCCESS, or VCHIQ_ERROR if the
 * relevant use count is already zero (unbalanced release).
 */
VCHIQ_STATUS_T
vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
	char entity[16];	/* name used only in log messages */
	int *entity_uc;		/* use count to decrement for this entity */

	if (!arm_state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	/* NOTE(review): the peer label here is "PEER: " whereas
	 * vchiq_use_internal logs it as "VCHIQ: " - log strings only. */
	if (service) {
		snprintf(entity, sizeof(entity), "%c%c%c%c:%8x",
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		snprintf(entity, sizeof(entity), "PEER: ");
		entity_uc = &arm_state->peer_use_count;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	/* Guard against unbalanced release: both counts must be non-zero. */
	if (!arm_state->videocore_use_count || !(*entity_uc)) {
		/* Don't use BUG_ON - don't allow user thread to crash kernel */
		WARN_ON(!arm_state->videocore_use_count);
		WARN_ON(!(*entity_uc));
		ret = VCHIQ_ERROR;
		goto unlock;
	}
	--arm_state->videocore_use_count;
	--(*entity_uc);

	if (!vchiq_videocore_wanted(state)) {
		if (vchiq_platform_use_suspend_timer() &&
				!arm_state->resume_blocked) {
			/* Only use the timer if we're not trying to force
			 * suspend (=> resume_blocked) */
			start_suspend_timer(arm_state);
		} else {
			vchiq_log_info(vchiq_susp_log_level,
				"%s %s count %d, state count %d - suspending",
				__func__, entity, *entity_uc,
				arm_state->videocore_use_count);
			vchiq_arm_vcsuspend(state);
		}
	} else
		vchiq_log_trace(vchiq_susp_log_level,
			"%s %s count %d, state count %d",
			__func__, entity, *entity_uc,
			arm_state->videocore_use_count);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
	return ret;
}
2428
2429 void
2430 vchiq_on_remote_use(VCHIQ_STATE_T *state)
2431 {
2432 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2433 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2434 atomic_inc(&arm_state->ka_use_count);
2435 complete(&arm_state->ka_evt);
2436 }
2437
2438 void
2439 vchiq_on_remote_release(VCHIQ_STATE_T *state)
2440 {
2441 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2442 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2443 atomic_inc(&arm_state->ka_release_count);
2444 complete(&arm_state->ka_evt);
2445 }
2446
2447 VCHIQ_STATUS_T
2448 vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
2449 {
2450 return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
2451 }
2452
2453 VCHIQ_STATUS_T
2454 vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
2455 {
2456 return vchiq_release_internal(service->state, service);
2457 }
2458
2459 static void suspend_timer_callback(unsigned long context)
2460 {
2461 VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
2462 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2463 if (!arm_state)
2464 goto out;
2465 vchiq_log_info(vchiq_susp_log_level,
2466 "%s - suspend timer expired - check suspend", __func__);
2467 vchiq_check_suspend(state);
2468 out:
2469 return;
2470 }
2471
2472 VCHIQ_STATUS_T
2473 vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
2474 {
2475 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2476 VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2477 if (service) {
2478 ret = vchiq_use_internal(service->state, service,
2479 USE_TYPE_SERVICE_NO_RESUME);
2480 unlock_service(service);
2481 }
2482 return ret;
2483 }
2484
2485 VCHIQ_STATUS_T
2486 vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
2487 {
2488 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2489 VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2490 if (service) {
2491 ret = vchiq_use_internal(service->state, service,
2492 USE_TYPE_SERVICE);
2493 unlock_service(service);
2494 }
2495 return ret;
2496 }
2497
2498 VCHIQ_STATUS_T
2499 vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
2500 {
2501 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2502 VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2503 if (service) {
2504 ret = vchiq_release_internal(service->state, service);
2505 unlock_service(service);
2506 }
2507 return ret;
2508 }
2509
2510 void
2511 vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
2512 {
2513 VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2514 int i, j = 0;
2515 /* Only dump 64 services */
2516 #define local_max_services 64
2517 /* If there's more than 64 services, only dump ones with
2518 * non-zero counts */
2519 int only_nonzero = 0;
2520 static const char *nz = "<-- preventing suspend";
2521
2522 enum vc_suspend_status vc_suspend_state;
2523 enum vc_resume_status vc_resume_state;
2524 int peer_count;
2525 int vc_use_count;
2526 int active_services;
2527 struct service_data_struct {
2528 int fourcc;
2529 int clientid;
2530 int use_count;
2531 } service_data[local_max_services];
2532
2533 if (!arm_state)
2534 return;
2535
2536 read_lock_bh(&arm_state->susp_res_lock);
2537 vc_suspend_state = arm_state->vc_suspend_state;
2538 vc_resume_state = arm_state->vc_resume_state;
2539 peer_count = arm_state->peer_use_count;
2540 vc_use_count = arm_state->videocore_use_count;
2541 active_services = state->unused_service;
2542 if (active_services > local_max_services)
2543 only_nonzero = 1;
2544
2545 for (i = 0; (i < active_services) && (j < local_max_services); i++) {
2546 VCHIQ_SERVICE_T *service_ptr = state->services[i];
2547 if (!service_ptr)
2548 continue;
2549
2550 if (only_nonzero && !service_ptr->service_use_count)
2551 continue;
2552
2553 if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
2554 service_data[j].fourcc = service_ptr->base.fourcc;
2555 service_data[j].clientid = service_ptr->client_id;
2556 service_data[j++].use_count = service_ptr->
2557 service_use_count;
2558 }
2559 }
2560
2561 read_unlock_bh(&arm_state->susp_res_lock);
2562
2563 vchiq_log_warning(vchiq_susp_log_level,
2564 "-- Videcore suspend state: %s --",
2565 suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
2566 vchiq_log_warning(vchiq_susp_log_level,
2567 "-- Videcore resume state: %s --",
2568 resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
2569
2570 if (only_nonzero)
2571 vchiq_log_warning(vchiq_susp_log_level, "Too many active "
2572 "services (%d). Only dumping up to first %d services "
2573 "with non-zero use-count", active_services,
2574 local_max_services);
2575
2576 for (i = 0; i < j; i++) {
2577 vchiq_log_warning(vchiq_susp_log_level,
2578 "----- %c%c%c%c:%d service count %d %s",
2579 VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
2580 service_data[i].clientid,
2581 service_data[i].use_count,
2582 service_data[i].use_count ? nz : "");
2583 }
2584 vchiq_log_warning(vchiq_susp_log_level,
2585 "----- VCHIQ use count count %d", peer_count);
2586 vchiq_log_warning(vchiq_susp_log_level,
2587 "--- Overall vchiq instance use count %d", vc_use_count);
2588
2589 vchiq_dump_platform_use_state(state);
2590 }
2591
2592 VCHIQ_STATUS_T
2593 vchiq_check_service(VCHIQ_SERVICE_T *service)
2594 {
2595 VCHIQ_ARM_STATE_T *arm_state;
2596 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2597
2598 if (!service || !service->state)
2599 goto out;
2600
2601 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2602
2603 arm_state = vchiq_platform_get_arm_state(service->state);
2604
2605 read_lock_bh(&arm_state->susp_res_lock);
2606 if (service->service_use_count)
2607 ret = VCHIQ_SUCCESS;
2608 read_unlock_bh(&arm_state->susp_res_lock);
2609
2610 if (ret == VCHIQ_ERROR) {
2611 vchiq_log_error(vchiq_susp_log_level,
2612 "%s ERROR - %c%c%c%c:%8x service count %d, "
2613 "state count %d, videocore suspend state %s", __func__,
2614 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2615 service->client_id, service->service_use_count,
2616 arm_state->videocore_use_count,
2617 suspend_state_names[arm_state->vc_suspend_state +
2618 VC_SUSPEND_NUM_OFFSET]);
2619 vchiq_dump_service_use_state(service->state);
2620 }
2621 out:
2622 return ret;
2623 }
2624
2625 /* stub functions */
2626 void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
2627 {
2628 (void)state;
2629 }
2630
/*
 * Connection state change hook: on the first transition to CONNECTED,
 * spawn the keep-alive thread that services remote use/release events.
 */
void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
	VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
{
	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
		get_conn_state_name(oldstate), get_conn_state_name(newstate));
	if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
		write_lock_bh(&arm_state->susp_res_lock);
		if (!arm_state->first_connect) {
			char threadname[10];
			/* Claim first_connect under the lock, then drop the
			 * lock before the (possibly sleeping) thread create. */
			arm_state->first_connect = 1;
			write_unlock_bh(&arm_state->susp_res_lock);
			snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
				state->id);
			arm_state->ka_thread = vchiq_thread_create(
				&vchiq_keepalive_thread_func,
				(void *)state,
				threadname);
			if (arm_state->ka_thread == NULL) {
				/* Non-fatal here: keep-alive simply won't run. */
				vchiq_log_error(vchiq_susp_log_level,
					"vchiq: FATAL: couldn't create thread %s",
					threadname);
			} else {
				wake_up_process(arm_state->ka_thread);
			}
		} else
			write_unlock_bh(&arm_state->susp_res_lock);
	}
}
2660
2661 /****************************************************************************
2662 *
2663 * vchiq_init - called when the module is loaded.
2664 *
2665 ***************************************************************************/
2666
int __init vchiq_init(void);

/*
 * Module load entry point: initialize the message-queue spinlock and the
 * platform layer.  Returns 0 on success, or the platform error code.
 */
int __init
vchiq_init(void)
{
	int err;

#ifdef notyet
	/* create proc entries */
	/* NOTE(review): the failed_proc_init label is not defined in this
	 * translation unit; this branch only compiles with notyet set. */
	err = vchiq_proc_init();
	if (err != 0)
		goto failed_proc_init;
#endif

	spin_lock_init(&msg_queue_spinlock);

	err = vchiq_platform_init(&g_state);
	if (err != 0)
		goto failed_platform_init;

	vchiq_log_info(vchiq_arm_log_level,
		"vchiq: initialised - version %d (min %d)",
		VCHIQ_VERSION, VCHIQ_VERSION_MIN);

	return 0;

failed_platform_init:
	vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
	return err;
}
2696
2697 #ifdef notyet
/* Sum the use counts of every service owned by this instance. */
static int vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
{
	VCHIQ_SERVICE_T *service;
	int use_count = 0, i;
	i = 0;
	/* next_service_by_instance returns each service locked; release it. */
	while ((service = next_service_by_instance(instance->state,
		instance, &i)) != NULL) {
		use_count += service->service_use_count;
		unlock_service(service);
	}
	return use_count;
}
2710
/* read the per-process use-count */
/* Legacy procfs read callback: formats the instance's total use count. */
static int proc_read_use_count(char *page, char **start,
			       off_t off, int count,
			       int *eof, void *data)
{
	VCHIQ_INSTANCE_T instance = data;
	int len, use_count;

	use_count = vchiq_instance_get_use_count(instance);
	len = snprintf(page+off, count, "%d\n", use_count);

	return len;
}
2724
/* add an instance (process) to the proc entries */
/*
 * Creates <clients>/<pid>/use_count; on partial failure the pid
 * directory is removed again (goto-cleanup).  Returns 0 or -ENOMEM.
 */
static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance)
{
	char pidstr[32];
	struct proc_dir_entry *top, *use_count;
	struct proc_dir_entry *clients = vchiq_clients_top();
	int pid = instance->pid;

	snprintf(pidstr, sizeof(pidstr), "%d", pid);
	top = proc_mkdir(pidstr, clients);
	if (!top)
		goto fail_top;

	use_count = create_proc_read_entry("use_count",
					   0444, top,
					   proc_read_use_count,
					   instance);
	if (!use_count)
		goto fail_use_count;

	instance->proc_entry = top;

	return 0;

fail_use_count:
	remove_proc_entry(top->name, clients);
fail_top:
	return -ENOMEM;
}
2754
/* Remove the proc entries created by vchiq_proc_add_instance. */
static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance)
{
	struct proc_dir_entry *clients = vchiq_clients_top();
	remove_proc_entry("use_count", instance->proc_entry);
	remove_proc_entry(instance->proc_entry->name, clients);
}
2761
2762 #endif
2763
2764 /****************************************************************************
2765 *
2766 * vchiq_exit - called when the module is unloaded.
2767 *
2768 ***************************************************************************/
2769
2770 void vchiq_exit(void);
2771 void
2772 vchiq_exit(void)
2773 {
2774 vchiq_platform_exit(&g_state);
2775 }
2776