aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm/mvp/mvpkm/mutex_kernel.c
blob: 7b76bfcfb7fd3895247045589a1982877a2a5944 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
/*
 * Linux 2.6.32 and later Kernel module for VMware MVP Hypervisor Support
 *
 * Copyright (C) 2010-2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING.  If not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#line 5

/**
 * @file
 *
 * @brief The host kernel mutex functions.  These mutexes can be located in
 *        shared address space with the monitor.
 */

#include <linux/kernel.h>

#include <asm/string.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hardirq.h>

#include "mvp.h"

#include "arm_inline.h"
#include "coproc_defs.h"
#include "mutex_kernel.h"

/*
 * Bit set in mutex->waiters (shifted left by the cvar index) while a poll()
 * is in progress on that condition variable; placed above the plain waiter
 * count so flag bits and the count share one atomic word.
 * See Mutex_UnlPoll()/Mutex_CondSig() for the set/clear protocol.
 */
#define POLL_IN_PROGRESS_FLAG (1<<(30-MUTEX_CVAR_MAX))

/* Initialize a Linux wait queue head stored in an HKVA field. */
#define INITWAITQ(waitQ) do {                         \
   init_waitqueue_head((wait_queue_head_t *)(waitQ)); \
} while (0)

/* Wake every thread sleeping on the given wait queue. */
#define WAKEUPALL(waitQ) do {                  \
   wake_up_all((wait_queue_head_t *)(waitQ));  \
} while (0)

/* Wake a single thread sleeping on the given wait queue. */
#define WAKEUPONE(waitQ) do {                  \
   wake_up((wait_queue_head_t *)(waitQ));      \
} while (0)

/**
 * @brief initialize mutex
 * @param[in,out] mutex mutex to initialize
 *
 * Allocates one contiguous array of MUTEX_CVAR_MAX + 1 wait queue heads:
 * slot 0 backs the lock wait queue and slots 1..MUTEX_CVAR_MAX back the
 * condition-variable wait queues.  Mutex_Destroy() releases the whole
 * array via mutex->lockWaitQ, which points at slot 0.
 */
void
Mutex_Init(Mutex *mutex)
{
   wait_queue_head_t *wq;
   int i;

   /*
    * GFP_KERNEL: this is process context and the caller may sleep (the
    * mutex API asserts as much via MutexCheckSleep); previously 0 was
    * passed, i.e. no allocation flags at all.
    */
   wq = kcalloc(MUTEX_CVAR_MAX + 1, sizeof(wait_queue_head_t), GFP_KERNEL);
   FATAL_IF(wq == NULL);

   memset(mutex, 0, sizeof *mutex);
   mutex->mtxHKVA = (HKVA)mutex;
   mutex->lockWaitQ = (HKVA)&wq[0];
   INITWAITQ(mutex->lockWaitQ);
   for (i = 0; i < MUTEX_CVAR_MAX; i++) {
      mutex->cvarWaitQs[i] = (HKVA)&wq[i + 1];
      INITWAITQ(mutex->cvarWaitQs[i]);
   }
}

/**
 * @brief Check if it is ok to sleep
 * @param file the file of the caller code
 * @param line the line number of the caller code
 *
 * MVP_DEVEL-only diagnostic in the spirit of the kernel's might_sleep():
 * if the current context must not sleep (IRQs disabled, or additionally
 * non-preemptible on CONFIG_PREEMPT kernels) it logs the caller's
 * file:line and a stack trace, rate-limited to once per second.
 * Compiles to an empty function when MVP_DEVEL is not defined.
 */
static void
MutexCheckSleep(const char *file, int line)
{
#ifdef MVP_DEVEL
   static unsigned long prev_jiffy;        /* ratelimiting: 1/s */

#ifdef CONFIG_PREEMPT
   /* Preemptible with interrupts enabled: sleeping is legal, nothing to do. */
   if (preemptible() && !irqs_disabled()) {
      return;
   }
#else
   /* Without CONFIG_PREEMPT only the IRQ state can be checked. */
   if (!irqs_disabled()) {
      return;
   }
#endif
   /* Rate limit: stay quiet if we already complained within the last second. */
   if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) {
      return;
   }
   prev_jiffy = jiffies;
   printk(KERN_ERR
          "BUG: sleeping function called from invalid context at %s:%d\n",
          file, line);
   /* Fixed typo in the emitted message: "preemtible" -> "preemptible". */
   printk(KERN_ERR
          "irqs_disabled(): %d, preemptible(): %d, pid: %d, name: %s\n",
          irqs_disabled(),
          preemptible(),
          current->pid, current->comm);
   dump_stack();
#endif
}

/**
 * @brief destroy mutex
 * @param[in,out] mutex mutex to destroy
 *
 * Frees the single kcalloc'd array of wait queue heads created by
 * Mutex_Init(); lockWaitQ points at element 0 of that array, so freeing
 * it releases the condition-variable wait queues as well.  The caller
 * must ensure no thread is still blocked on any of the queues.
 */
void
Mutex_Destroy(Mutex *mutex)
{
   kfree((void*)mutex->lockWaitQ);
}

/**
 * @brief Lock the mutex.  Also does a data barrier after locking so the
 *        locking is complete before any shared data is accessed.
 * @param[in,out] mutex which mutex to lock
 * @param         mode  mutex lock mode
 * @param file the file of the caller code
 * @param line the line number of the code that called this function
 * @return rc = 0: mutex now locked by caller<br>
 *             < 0: interrupted
 */
int
Mutex_LockLine(Mutex *mutex, MutexMode mode, const char *file, int line)
{
   Mutex_State newState, oldState;

   /* MVP_DEVEL builds warn if we are in a context that cannot sleep. */
   MutexCheckSleep(file, line);

   /*
    * If uncontended, just set new lock state and return success status.
    * If contended, mark state saying there is a waiting thread to wake.
    */
   do {
lock_start:
      /*
       * Get current state and calculate what new state would be.
       * New state adds 1 for shared and 0xFFFF for exclusive.
       * If the 16 bit field overflows, there is contention.
       */
      oldState.state = ATOMIC_GETO(mutex->state);
      newState.mode  = oldState.mode + mode;
      newState.blck  = oldState.blck;

      /*
       * So we are saying there is no contention if new state
       * indicates no overflow.
       *
       * On fairness: The test here allows a new-comer thread to grab
       * the lock even if there is a blocked thread. For example 2
       * threads repeatedly obtaining shared access can starve a third
       * wishing to obtain an exclusive lock. Currently this is only a
       * hypothetical situation as mksck use exclusive lock only and
       * the code never has more than 2 threads using the same mutex.
       */
      if ((uint32)newState.mode >= (uint32)mode) {
         /*
          * ATOMIC_SETIF is a compare-and-swap; if another CPU changed
          * the state under us, re-read and recompute from the top.
          */
         if (!ATOMIC_SETIF(mutex->state, newState.state, oldState.state)) {
            goto lock_start;
         }
         DMB();
         /* Debug bookkeeping: record locker's line, mark not-yet-unlocked. */
         mutex->line    = line;
         mutex->lineUnl = -1;
         return 0;
      }

      /*
       * There is contention, so increment the number of blocking threads.
       */
      newState.mode = oldState.mode;
      newState.blck = oldState.blck + 1;
   } while (!ATOMIC_SETIF(mutex->state, newState.state, oldState.state));

   /*
    * Statistics...
    */
   ATOMIC_ADDV(mutex->blocked, 1);

   /*
    * Mutex is contended, state has been updated to say there is a blocking
    * thread.
    *
    * So now we block till someone wakes us up.
    */
   do {
      DEFINE_WAIT(waiter);

      /*
       * This will make sure we catch any wakes done after we check the lock
       * state again.
       */
      prepare_to_wait((wait_queue_head_t *)mutex->lockWaitQ,
                      &waiter,
                      TASK_INTERRUPTIBLE);

      /*
       * Now that we will catch wakes, check the lock state again.  If now
       * uncontended, mark it locked, abandon the wait and return success.
       */

set_new_state:
      /*
       * Same as the original check for contention above, except that we
       * must decrement the number of waiting threads by one
       * if we are successful in locking the mutex.
       */
      oldState.state = ATOMIC_GETO(mutex->state);
      newState.mode  = oldState.mode + mode;
      newState.blck  = oldState.blck - 1;
      ASSERT(oldState.blck);

      if ((uint32)newState.mode >= (uint32)mode) {
         if (!ATOMIC_SETIF(mutex->state, newState.state, oldState.state)) {
            goto set_new_state;
         }
         /*
          * Mutex is no longer contended and we were able to lock it.
          */
         finish_wait((wait_queue_head_t *)mutex->lockWaitQ, &waiter);
         DMB();
         mutex->line    = line;
         mutex->lineUnl = -1;
         return 0;
      }

      /*
       * Wait for a wake that happens any time after prepare_to_wait()
       * returned.  The 10s timeout is a watchdog; WARN fires only when
       * schedule_timeout() returns 0, i.e. the full timeout elapsed.
       */
      WARN(!schedule_timeout(10*HZ), "Mutex_Lock: soft lockup - stuck for 10s!\n");
      finish_wait((wait_queue_head_t *)mutex->lockWaitQ, &waiter);
   } while (!signal_pending(current));

   /*
    * Interrupted by a signal: we aren't waiting anymore, so decrement
    * the number of waiting threads before bailing out.
    */
   do {
      oldState.state = ATOMIC_GETO(mutex->state);
      newState.mode  = oldState.mode;
      newState.blck  = oldState.blck - 1;

      ASSERT(oldState.blck);

   } while (!ATOMIC_SETIF(mutex->state, newState.state, oldState.state));

   return -ERESTARTSYS;
}


/**
 * @brief Unlock the mutex.  Also does a data barrier before unlocking so any
 *        modifications made before the lock gets released will be completed
 *        before the lock is released.
 * @param mutex as passed to Mutex_Lock()
 * @param mode  as passed to Mutex_Lock()
 * @param line the line number of the code that called this function
 */
void
Mutex_UnlockLine(Mutex *mutex, MutexMode mode, int line)
{
   Mutex_State newState, oldState;

   DMB();   /* publish protected-data writes before releasing the lock */
   do {
      oldState.state = ATOMIC_GETO(mutex->state);
      newState.mode  = oldState.mode - mode;
      newState.blck  = oldState.blck;
      /*
       * NOTE(review): lineUnl is a plain (non-atomic) write performed on
       * every CAS retry, before the release is published -- presumably
       * debug-only bookkeeping where a racy value is acceptable; confirm.
       */
      mutex->lineUnl = line;

      ASSERT(oldState.mode >= mode);
   } while (!ATOMIC_SETIF(mutex->state, newState.state, oldState.state));

   /*
    * If another thread was blocked, then wake it up.
    */
   if (oldState.blck) {
      if (mode == MutexModeSH) {
         /* Shared release: wake a single waiter. */
         WAKEUPONE(mutex->lockWaitQ);
      } else {
         /* Exclusive release: wake all waiters. */
         WAKEUPALL(mutex->lockWaitQ);
      }
   }
}


/**
 * @brief Unlock the mutex and sleep.  Also does a data barrier before
 *        unlocking so any modifications made before the lock gets released
 *        will be completed before the lock is released.
 *
 * Convenience wrapper around Mutex_UnlSleepTestLine() for the common case
 * where there is no pre-sleep condition to test.
 *
 * @param mutex as passed to Mutex_Lock()
 * @param mode  as passed to Mutex_Lock()
 * @param cvi   which condition variable to sleep on
 * @param file the file of the caller code
 * @param line the line number of the caller code
 * @return rc = 0: successfully waited<br>
 *            < 0: error waiting
 */
int
Mutex_UnlSleepLine(Mutex *mutex, MutexMode mode, uint32 cvi, const char *file, int line)
{
   /* NULL test pointer and zero mask: sleep unconditionally. */
   return Mutex_UnlSleepTestLine(mutex, mode, cvi,
                                 /* test */ NULL, /* mask */ 0,
                                 file, line);
}

/**
 * @brief Unlock the mutex and sleep.  Also does a data barrier before
 *        unlocking so any modifications made before the lock gets released
 *        will be completed before the lock is released.
 * @param mutex as passed to Mutex_Lock()
 * @param mode  as passed to Mutex_Lock()
 * @param cvi   which condition variable to sleep on
 * @param test  sleep only if null or pointed atomic value mismatches mask
 * @param mask  bitfield to check test against before sleeping
 * @param file the file of the caller code
 * @param line the line number of the caller code
 * @return rc = 0: successfully waited<br>
 *            < 0: error waiting
 */
int
Mutex_UnlSleepTestLine(Mutex *mutex, MutexMode mode, uint32 cvi, AtmUInt32 *test, uint32 mask, const char *file, int line)
{
   DEFINE_WAIT(waiter);

   MutexCheckSleep(file, line);

   ASSERT(cvi < MUTEX_CVAR_MAX);

   /*
    * Tell anyone who might try to wake us that they need to actually call
    * WAKEUP***().
    */
   ATOMIC_ADDV(mutex->waiters, 1);

   /*
    * Be sure to catch any wake that comes along just after we unlock the mutex
    * but before we call schedule().  Ordering matters: enqueue on the wait
    * queue BEFORE releasing the mutex, or a wake in that window is lost.
    */
   prepare_to_wait_exclusive((wait_queue_head_t *)mutex->cvarWaitQs[cvi],
                   &waiter,
                   TASK_INTERRUPTIBLE);

   /*
    * Release the mutex, someone can wake us up now.
    * They will see mutex->waiters non-zero so will actually do the wake.
    */
   Mutex_Unlock(mutex, mode);

   /*
    * Wait to be woken or interrupted.  If the caller supplied a test and
    * one of the masked bits is already set, the condition was satisfied
    * before we got here, so skip the sleep entirely.
    */
   if (test == NULL || (ATOMIC_GETO(*test) & mask) == 0) {
      schedule();
   }
   finish_wait((wait_queue_head_t *)mutex->cvarWaitQs[cvi], &waiter);

   /*
    * Done waiting, don't need a wake any more.
    */
   ATOMIC_SUBV(mutex->waiters, 1);

   /*
    * If interrupted, return error status.
    */
   if (signal_pending(current)) {
      return -ERESTARTSYS;
   }

   /*
    * Wait completed, return success status.
    */
   return 0;
}


/**
 * @brief Unlock the mutex and prepare to sleep on a kernel polling table
 *        given as anonymous parameters for poll_wait
 * @param mutex as passed to Mutex_Lock()
 * @param mode  as passed to Mutex_Lock()
 * @param cvi   which condition variable to sleep on
 * @param filp  which file to poll_wait upon (struct file *, passed opaquely)
 * @param wait  which poll_table to poll_wait upon (poll_table *, passed opaquely)
 */
void
Mutex_UnlPoll(Mutex *mutex, MutexMode mode, uint32 cvi, void *filp, void *wait)
{
   ASSERT(cvi < MUTEX_CVAR_MAX);

   /* poll_wait is done with mutex locked to prevent any wake that comes and
    * defer them just after we unlock the mutex but before kernel polling
    * tables are used
    * Note that the kernel is probably avoiding an exclusive wait in that case
    * and also increments the usage for the file given in filp
    */
   poll_wait(filp, (wait_queue_head_t *)mutex->cvarWaitQs[cvi], wait);

   /*
    * Tell anyone who might try to wake us that they need to actually call
    * WAKEUP***(). This is done in putting ourselves in a "noisy" mode since
    * there is no guaranty that we would really sleep, or if we would be
    * wakening the sleeping thread with that socket or condition. This is
    * done using a POLL_IN_PROGRESS_FLAG, but unfortunately it has to be
    * a per-cvi flag, in case we would poll independently on different cvi
    */
   DMB();   /* order the poll_wait registration before advertising it */
   ATOMIC_ORO(mutex->waiters, (POLL_IN_PROGRESS_FLAG << cvi));

   /*
    * Release the mutex, someone can wake us up now.
    * They will see mutex->waiters non-zero so will actually do the wake.
    */
   Mutex_Unlock(mutex, mode);
}


/**
 * @brief Unlock the semaphore and wake sleeping threads.  Also does a data
 *        barrier before unlocking so any modifications made before the lock
 *        gets released will be completed before the lock is released.
 *
 * Composition of Mutex_Unlock() followed by Mutex_CondSig(); the lock is
 * dropped before signalling, so woken threads can acquire it immediately.
 *
 * @param mutex as passed to Mutex_Lock()
 * @param mode  as passed to Mutex_Lock()
 * @param cvi   which condition variable to signal
 * @param all   false: wake a single thread<br>
 *              true: wake all threads
 */
void
Mutex_UnlWake(Mutex *mutex, MutexMode mode, uint32 cvi, _Bool all)
{
   /* Release the lock first... */
   Mutex_Unlock(mutex, mode);

   /* ...then signal the condition variable (one waiter, or all of them). */
   Mutex_CondSig(mutex, cvi, all);
}


/**
 * @brief Signal condition variable, ie, wake up anyone waiting.
 *
 * A wake is issued only when mutex->waiters advertises interest (a plain
 * waiter count from Mutex_UnlSleep*() or a POLL_IN_PROGRESS flag from
 * Mutex_UnlPoll()); otherwise the signal is a no-op.
 *
 * @param mutex mutex that holds the condition variable
 * @param cvi   which condition variable to signal
 * @param all   false: wake a single thread<br>
 *              true: wake all threads
 */
void
Mutex_CondSig(Mutex *mutex, uint32 cvi, _Bool all)
{
   uint32 waiters;

   ASSERT(cvi < MUTEX_CVAR_MAX);

   waiters = ATOMIC_GETO(mutex->waiters);
   if (waiters != 0) {
      /* Cleanup the effects of Mutex_UnlPoll() but only when it is SMP safe,
       * considering that atomic and wakeup operations should also do memory
       * barriers accordingly. This is mandatory otherwise rare SMP races are
       * even possible, since Mutex_CondSig is called with the associated mutex
       * unlocked, and that does not prevent from select() to run parallel !
       */
      if ((waiters >= POLL_IN_PROGRESS_FLAG) &&
          !waitqueue_active((wait_queue_head_t *)mutex->cvarWaitQs[cvi])) {
         /* Some poll flag is set but nobody sleeps on this queue: clear
          * this cvi's flag so future signals are not issued needlessly. */
         ATOMIC_ANDO(mutex->waiters, ~(POLL_IN_PROGRESS_FLAG << cvi));
      }
      DMB();   /* order the flag clear before the wakeup */

      if (all) {
         WAKEUPALL(mutex->cvarWaitQs[cvi]);
      } else {
         WAKEUPONE(mutex->cvarWaitQs[cvi]);
      }
   }
}