SAM4S-EK_FreeRTOS+FAT-SL  1.0
An example project to test the functionality of FreeRTOS+FAT-SL using an SD card as the data storage medium
 All Classes Files Functions Variables Typedefs Enumerations Enumerator Macros Modules
queue.c
Go to the documentation of this file.
1 /*
2  FreeRTOS V8.0.1 - Copyright (C) 2014 Real Time Engineers Ltd.
3  All rights reserved
4 
5  VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
6 
7  ***************************************************************************
8  * *
9  * FreeRTOS provides completely free yet professionally developed, *
10  * robust, strictly quality controlled, supported, and cross *
11  * platform software that has become a de facto standard. *
12  * *
13  * Help yourself get started quickly and support the FreeRTOS *
14  * project by purchasing a FreeRTOS tutorial book, reference *
15  * manual, or both from: http://www.FreeRTOS.org/Documentation *
16  * *
17  * Thank you! *
18  * *
19  ***************************************************************************
20 
21  This file is part of the FreeRTOS distribution.
22 
23  FreeRTOS is free software; you can redistribute it and/or modify it under
24  the terms of the GNU General Public License (version 2) as published by the
25  Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.
26 
27  >>! NOTE: The modification to the GPL is included to allow you to !<<
28  >>! distribute a combined work that includes FreeRTOS without being !<<
29  >>! obliged to provide the source code for proprietary components !<<
30  >>! outside of the FreeRTOS kernel. !<<
31 
32  FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
33  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
34  FOR A PARTICULAR PURPOSE. Full license text is available from the following
35  link: http://www.freertos.org/a00114.html
36 
37  1 tab == 4 spaces!
38 
39  ***************************************************************************
40  * *
41  * Having a problem? Start by reading the FAQ "My application does *
42  * not run, what could be wrong?" *
43  * *
44  * http://www.FreeRTOS.org/FAQHelp.html *
45  * *
46  ***************************************************************************
47 
48  http://www.FreeRTOS.org - Documentation, books, training, latest versions,
49  license and Real Time Engineers Ltd. contact details.
50 
51  http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
52  including FreeRTOS+Trace - an indispensable productivity tool, a DOS
53  compatible FAT file system, and our tiny thread aware UDP/IP stack.
54 
55  http://www.OpenRTOS.com - Real Time Engineers ltd license FreeRTOS to High
56  Integrity Systems to sell under the OpenRTOS brand. Low cost OpenRTOS
57  licenses offer ticketed support, indemnification and middleware.
58 
59  http://www.SafeRTOS.com - High Integrity Systems also provide a safety
60  engineered and independently SIL3 certified version for use in safety and
61  mission critical applications that require provable dependability.
62 
63  1 tab == 4 spaces!
64 */
65 
66 #include <stdlib.h>
67 #include <string.h>
68 
69 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
70 all the API functions to use the MPU wrappers. That should only be done when
71 task.h is included from an application file. */
72 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
73 
74 #include "FreeRTOS.h"
75 #include "task.h"
76 #include "queue.h"
77 
78 #if ( configUSE_CO_ROUTINES == 1 )
79  #include "croutine.h"
80 #endif
81 
82 /* Lint e961 and e750 are suppressed as a MISRA exception justified because the
83 MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
84 header files above, but not in this file, in order to generate the correct
85 privileged Vs unprivileged linkage and placement. */
86 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */
87 
88 
/* Constants used with the xRxLock and xTxLock structure members.  A lock
member holding queueUNLOCKED means the queue is not locked; once locked it
counts the items added/removed by ISRs while locked. */
#define queueUNLOCKED					( ( BaseType_t ) -1 )
#define queueLOCKED_UNMODIFIED			( ( BaseType_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder					pcTail
#define uxQueueType						pcHead
#define queueQUEUE_IS_MUTEX				NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH	( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME			( ( TickType_t ) 0U )

#if( configUSE_PREEMPTION == 0 )
	/* If the cooperative scheduler is being used then a yield should not be
	performed just because a higher priority task has been woken. */
	#define queueYIELD_IF_USING_PREEMPTION()
#else
	#define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
120 
121 /*
122  * Definition of the queue used by the scheduler.
123  * Items are queued by copy, not reference.
124  */
125 typedef struct QueueDefinition
126 {
127  int8_t *pcHead; /*< Points to the beginning of the queue storage area. */
128  int8_t *pcTail; /*< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */
129  int8_t *pcWriteTo; /*< Points to the free next place in the storage area. */
130 
131  union /* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
132  {
133  int8_t *pcReadFrom; /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
134  UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
135  } u;
136 
137  List_t xTasksWaitingToSend; /*< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */
138  List_t xTasksWaitingToReceive; /*< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */
139 
140  volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
141  UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
142  UBaseType_t uxItemSize; /*< The size of each items that the queue will hold. */
143 
144  volatile BaseType_t xRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
145  volatile BaseType_t xTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
146 
147  #if ( configUSE_TRACE_FACILITY == 1 )
148  UBaseType_t uxQueueNumber;
149  uint8_t ucQueueType;
150  #endif
151 
152  #if ( configUSE_QUEUE_SETS == 1 )
153  struct QueueDefinition *pxQueueSetContainer;
154  #endif
155 
156 } xQUEUE;
157 
158 /* The old xQUEUE name is maintained above then typedefed to the new Queue_t
159 name below to enable the use of older kernel aware debuggers. */
160 typedef xQUEUE Queue_t;
161 
162 /*-----------------------------------------------------------*/
163 
/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* The type stored within the queue registry array.  This allows a name
	to be assigned to each queue making kernel aware debugging a little
	more user friendly. */
	typedef struct QUEUE_REGISTRY_ITEM
	{
		const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
		QueueHandle_t xHandle;
	} xQueueRegistryItem;

	/* The old xQueueRegistryItem name is maintained above then typedefed to the
	new xQueueRegistryItem name below to enable the use of older kernel aware
	debuggers. */
	typedef xQueueRegistryItem QueueRegistryItem_t;

	/* The queue registry is simply an array of QueueRegistryItem_t structures.
	The pcQueueName member of a structure being NULL is indicative of the
	array position being vacant. */
	QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
190 
191 /*
192  * Unlocks a queue locked by a call to prvLockQueue. Locking a queue does not
193  * prevent an ISR from adding or removing items to the queue, but does prevent
194  * an ISR from removing tasks from the queue event lists. If an ISR finds a
195  * queue is locked it will instead increment the appropriate queue lock count
196  * to indicate that a task may require unblocking. When the queue in unlocked
197  * these lock counts are inspected, and the appropriate action taken.
198  */
199 static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
200 
201 /*
202  * Uses a critical section to determine if there is any data in a queue.
203  *
204  * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
205  */
206 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;
207 
208 /*
209  * Uses a critical section to determine if there is any space in a queue.
210  *
211  * @return pdTRUE if there is no space, otherwise pdFALSE;
212  */
213 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;
214 
215 /*
216  * Copies an item into the queue, either at the front of the queue or the
217  * back of the queue.
218  */
219 static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;
220 
221 /*
222  * Copies an item out of a queue.
223  */
224 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;
225 
226 #if ( configUSE_QUEUE_SETS == 1 )
227  /*
228  * Checks to see if a queue is a member of a queue set, and if so, notifies
229  * the queue set that the queue contains data.
230  */
231  static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
232 #endif
233 
234 /*-----------------------------------------------------------*/
235 
/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 *
 * Note: the macro deliberately ends without a semicolon (the caller supplies
 * it), so it is not wrapped in do { } while( 0 ).
 */
#define prvLockQueue( pxQueue )								\
	taskENTER_CRITICAL();									\
	{														\
		if( ( pxQueue )->xRxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xRxLock = queueLOCKED_UNMODIFIED;	\
		}													\
		if( ( pxQueue )->xTxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->xTxLock = queueLOCKED_UNMODIFIED;	\
		}													\
	}														\
	taskEXIT_CRITICAL()
253 /*-----------------------------------------------------------*/
254 
256 {
257 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
258 
259  configASSERT( pxQueue );
260 
262  {
263  pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
264  pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
265  pxQueue->pcWriteTo = pxQueue->pcHead;
266  pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
267  pxQueue->xRxLock = queueUNLOCKED;
268  pxQueue->xTxLock = queueUNLOCKED;
269 
270  if( xNewQueue == pdFALSE )
271  {
272  /* If there are tasks blocked waiting to read from the queue, then
273  the tasks will remain blocked as after this function exits the queue
274  will still be empty. If there are tasks blocked waiting to write to
275  the queue, then one should be unblocked as after this function exits
276  it will be possible to write to it. */
277  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
278  {
279  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
280  {
282  }
283  else
284  {
286  }
287  }
288  else
289  {
291  }
292  }
293  else
294  {
295  /* Ensure the event queues start in the correct state. */
296  vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
297  vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
298  }
299  }
301 
302  /* A value is returned for calling semantic consistency with previous
303  versions. */
304  return pdPASS;
305 }
306 /*-----------------------------------------------------------*/
307 
308 QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
309 {
310 Queue_t *pxNewQueue;
311 size_t xQueueSizeInBytes;
312 QueueHandle_t xReturn = NULL;
313 
314  /* Remove compiler warnings about unused parameters should
315  configUSE_TRACE_FACILITY not be set to 1. */
316  ( void ) ucQueueType;
317 
318  /* Allocate the new queue structure. */
319  if( uxQueueLength > ( UBaseType_t ) 0 )
320  {
321  pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
322  if( pxNewQueue != NULL )
323  {
324  /* Create the list of pointers to queue items. The queue is one byte
325  longer than asked for to make wrap checking easier/faster. */
326  xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
327 
328  pxNewQueue->pcHead = ( int8_t * ) pvPortMalloc( xQueueSizeInBytes );
329  if( pxNewQueue->pcHead != NULL )
330  {
331  /* Initialise the queue members as described above where the
332  queue type is defined. */
333  pxNewQueue->uxLength = uxQueueLength;
334  pxNewQueue->uxItemSize = uxItemSize;
335  ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );
336 
337  #if ( configUSE_TRACE_FACILITY == 1 )
338  {
339  pxNewQueue->ucQueueType = ucQueueType;
340  }
341  #endif /* configUSE_TRACE_FACILITY */
342 
343  #if( configUSE_QUEUE_SETS == 1 )
344  {
345  pxNewQueue->pxQueueSetContainer = NULL;
346  }
347  #endif /* configUSE_QUEUE_SETS */
348 
349  traceQUEUE_CREATE( pxNewQueue );
350  xReturn = pxNewQueue;
351  }
352  else
353  {
354  traceQUEUE_CREATE_FAILED( ucQueueType );
355  vPortFree( pxNewQueue );
356  }
357  }
358  else
359  {
361  }
362  }
363  else
364  {
366  }
367 
368  configASSERT( xReturn );
369 
370  return xReturn;
371 }
372 /*-----------------------------------------------------------*/
373 
374 #if ( configUSE_MUTEXES == 1 )
375 
376  QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
377  {
378  Queue_t *pxNewQueue;
379 
380  /* Prevent compiler warnings about unused parameters if
381  configUSE_TRACE_FACILITY does not equal 1. */
382  ( void ) ucQueueType;
383 
384  /* Allocate the new queue structure. */
385  pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
386  if( pxNewQueue != NULL )
387  {
388  /* Information required for priority inheritance. */
389  pxNewQueue->pxMutexHolder = NULL;
390  pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;
391 
392  /* Queues used as a mutex no data is actually copied into or out
393  of the queue. */
394  pxNewQueue->pcWriteTo = NULL;
395  pxNewQueue->u.pcReadFrom = NULL;
396 
397  /* Each mutex has a length of 1 (like a binary semaphore) and
398  an item size of 0 as nothing is actually copied into or out
399  of the mutex. */
400  pxNewQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
401  pxNewQueue->uxLength = ( UBaseType_t ) 1U;
402  pxNewQueue->uxItemSize = ( UBaseType_t ) 0U;
403  pxNewQueue->xRxLock = queueUNLOCKED;
404  pxNewQueue->xTxLock = queueUNLOCKED;
405 
406  #if ( configUSE_TRACE_FACILITY == 1 )
407  {
408  pxNewQueue->ucQueueType = ucQueueType;
409  }
410  #endif
411 
412  #if ( configUSE_QUEUE_SETS == 1 )
413  {
414  pxNewQueue->pxQueueSetContainer = NULL;
415  }
416  #endif
417 
418  /* Ensure the event queues start with the correct state. */
419  vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) );
420  vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) );
421 
422  traceCREATE_MUTEX( pxNewQueue );
423 
424  /* Start with the semaphore in the expected state. */
425  ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
426  }
427  else
428  {
430  }
431 
432  configASSERT( pxNewQueue );
433  return pxNewQueue;
434  }
435 
436 #endif /* configUSE_MUTEXES */
437 /*-----------------------------------------------------------*/
438 
439 #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
440 
441  void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
442  {
443  void *pxReturn;
444 
445  /* This function is called by xSemaphoreGetMutexHolder(), and should not
446  be called directly. Note: This is a good way of determining if the
447  calling task is the mutex holder, but not a good way of determining the
448  identity of the mutex holder, as the holder may change between the
449  following critical section exiting and the function returning. */
451  {
452  if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
453  {
454  pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
455  }
456  else
457  {
458  pxReturn = NULL;
459  }
460  }
462 
463  return pxReturn;
464  }
465 
466 #endif
467 /*-----------------------------------------------------------*/
468 
469 #if ( configUSE_RECURSIVE_MUTEXES == 1 )
470 
472  {
473  BaseType_t xReturn;
474  Queue_t * const pxMutex = ( Queue_t * ) xMutex;
475 
476  configASSERT( pxMutex );
477 
478  /* If this is the task that holds the mutex then pxMutexHolder will not
479  change outside of this task. If this task does not hold the mutex then
480  pxMutexHolder can never coincidentally equal the tasks handle, and as
481  this is the only condition we are interested in it does not matter if
482  pxMutexHolder is accessed simultaneously by another task. Therefore no
483  mutual exclusion is required to test the pxMutexHolder variable. */
484  if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
485  {
486  traceGIVE_MUTEX_RECURSIVE( pxMutex );
487 
488  /* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
489  the task handle, therefore no underflow check is required. Also,
490  uxRecursiveCallCount is only modified by the mutex holder, and as
491  there can only be one, no mutual exclusion is required to modify the
492  uxRecursiveCallCount member. */
493  ( pxMutex->u.uxRecursiveCallCount )--;
494 
495  /* Have we unwound the call count? */
496  if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
497  {
498  /* Return the mutex. This will automatically unblock any other
499  task that might be waiting to access the mutex. */
501  }
502  else
503  {
505  }
506 
507  xReturn = pdPASS;
508  }
509  else
510  {
511  /* We cannot give the mutex because we are not the holder. */
512  xReturn = pdFAIL;
513 
515  }
516 
517  return xReturn;
518  }
519 
520 #endif /* configUSE_RECURSIVE_MUTEXES */
521 /*-----------------------------------------------------------*/
522 
523 #if ( configUSE_RECURSIVE_MUTEXES == 1 )
524 
526  {
527  BaseType_t xReturn;
528  Queue_t * const pxMutex = ( Queue_t * ) xMutex;
529 
530  configASSERT( pxMutex );
531 
532  /* Comments regarding mutual exclusion as per those within
533  xQueueGiveMutexRecursive(). */
534 
535  traceTAKE_MUTEX_RECURSIVE( pxMutex );
536 
537  if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
538  {
539  ( pxMutex->u.uxRecursiveCallCount )++;
540  xReturn = pdPASS;
541  }
542  else
543  {
544  xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );
545 
546  /* pdPASS will only be returned if we successfully obtained the mutex,
547  we may have blocked to reach here. */
548  if( xReturn == pdPASS )
549  {
550  ( pxMutex->u.uxRecursiveCallCount )++;
551  }
552  else
553  {
555  }
556  }
557 
558  return xReturn;
559  }
560 
561 #endif /* configUSE_RECURSIVE_MUTEXES */
562 /*-----------------------------------------------------------*/
563 
564 #if ( configUSE_COUNTING_SEMAPHORES == 1 )
565 
566  QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
567  {
568  QueueHandle_t xHandle;
569 
570  configASSERT( uxMaxCount != 0 );
571  configASSERT( uxInitialCount <= uxMaxCount );
572 
574 
575  if( xHandle != NULL )
576  {
577  ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;
578 
580  }
581  else
582  {
584  }
585 
586  configASSERT( xHandle );
587  return xHandle;
588  }
589 
590 #endif /* configUSE_COUNTING_SEMAPHORES */
591 /*-----------------------------------------------------------*/
592 
593 BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
594 {
595 BaseType_t xEntryTimeSet = pdFALSE;
596 TimeOut_t xTimeOut;
597 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
598 
599  configASSERT( pxQueue );
600  configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
601  configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
602  #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
603  {
604  configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
605  }
606  #endif
607 
608 
609  /* This function relaxes the coding standard somewhat to allow return
610  statements within the function itself. This is done in the interest
611  of execution time efficiency. */
612  for( ;; )
613  {
615  {
616  /* Is there room on the queue now? The running task must be
617  the highest priority task wanting to access the queue. If
618  the head item in the queue is to be overwritten then it does
619  not matter if the queue is full. */
620  if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
621  {
622  traceQUEUE_SEND( pxQueue );
623  prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
624 
625  #if ( configUSE_QUEUE_SETS == 1 )
626  {
627  if( pxQueue->pxQueueSetContainer != NULL )
628  {
629  if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
630  {
631  /* The queue is a member of a queue set, and posting
632  to the queue set caused a higher priority task to
633  unblock. A context switch is required. */
635  }
636  else
637  {
639  }
640  }
641  else
642  {
643  /* If there was a task waiting for data to arrive on the
644  queue then unblock it now. */
645  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
646  {
647  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
648  {
649  /* The unblocked task has a priority higher than
650  our own so yield immediately. Yes it is ok to
651  do this from within the critical section - the
652  kernel takes care of that. */
654  }
655  else
656  {
658  }
659  }
660  else
661  {
663  }
664  }
665  }
666  #else /* configUSE_QUEUE_SETS */
667  {
668  /* If there was a task waiting for data to arrive on the
669  queue then unblock it now. */
670  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
671  {
672  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
673  {
674  /* The unblocked task has a priority higher than
675  our own so yield immediately. Yes it is ok to do
676  this from within the critical section - the kernel
677  takes care of that. */
679  }
680  else
681  {
683  }
684  }
685  else
686  {
688  }
689  }
690  #endif /* configUSE_QUEUE_SETS */
691 
693 
694  /* Return to the original privilege level before exiting the
695  function. */
696  return pdPASS;
697  }
698  else
699  {
700  if( xTicksToWait == ( TickType_t ) 0 )
701  {
702  /* The queue was full and no block time is specified (or
703  the block time has expired) so leave now. */
705 
706  /* Return to the original privilege level before exiting
707  the function. */
708  traceQUEUE_SEND_FAILED( pxQueue );
709  return errQUEUE_FULL;
710  }
711  else if( xEntryTimeSet == pdFALSE )
712  {
713  /* The queue was full and a block time was specified so
714  configure the timeout structure. */
715  vTaskSetTimeOutState( &xTimeOut );
716  xEntryTimeSet = pdTRUE;
717  }
718  else
719  {
720  /* Entry time was already set. */
722  }
723  }
724  }
726 
727  /* Interrupts and other tasks can send to and receive from the queue
728  now the critical section has been exited. */
729 
730  vTaskSuspendAll();
731  prvLockQueue( pxQueue );
732 
733  /* Update the timeout state to see if it has expired yet. */
734  if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
735  {
736  if( prvIsQueueFull( pxQueue ) != pdFALSE )
737  {
738  traceBLOCKING_ON_QUEUE_SEND( pxQueue );
739  vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
740 
741  /* Unlocking the queue means queue events can effect the
742  event list. It is possible that interrupts occurring now
743  remove this task from the event list again - but as the
744  scheduler is suspended the task will go onto the pending
745  ready last instead of the actual ready list. */
746  prvUnlockQueue( pxQueue );
747 
748  /* Resuming the scheduler will move tasks from the pending
749  ready list into the ready list - so it is feasible that this
750  task is already in a ready list before it yields - in which
751  case the yield will not cause a context switch unless there
752  is also a higher priority task in the pending ready list. */
753  if( xTaskResumeAll() == pdFALSE )
754  {
756  }
757  }
758  else
759  {
760  /* Try again. */
761  prvUnlockQueue( pxQueue );
762  ( void ) xTaskResumeAll();
763  }
764  }
765  else
766  {
767  /* The timeout has expired. */
768  prvUnlockQueue( pxQueue );
769  ( void ) xTaskResumeAll();
770 
771  /* Return to the original privilege level before exiting the
772  function. */
773  traceQUEUE_SEND_FAILED( pxQueue );
774  return errQUEUE_FULL;
775  }
776  }
777 }
778 /*-----------------------------------------------------------*/
779 
780 #if ( configUSE_ALTERNATIVE_API == 1 )
781 
782  BaseType_t xQueueAltGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition )
783  {
784  BaseType_t xEntryTimeSet = pdFALSE;
785  TimeOut_t xTimeOut;
786  Queue_t * const pxQueue = ( Queue_t * ) xQueue;
787 
788  configASSERT( pxQueue );
789  configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
790 
791  for( ;; )
792  {
794  {
795  /* Is there room on the queue now? To be running we must be
796  the highest priority task wanting to access the queue. */
797  if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
798  {
799  traceQUEUE_SEND( pxQueue );
800  prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
801 
802  /* If there was a task waiting for data to arrive on the
803  queue then unblock it now. */
804  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
805  {
806  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
807  {
808  /* The unblocked task has a priority higher than
809  our own so yield immediately. */
811  }
812  else
813  {
815  }
816  }
817  else
818  {
820  }
821 
823  return pdPASS;
824  }
825  else
826  {
827  if( xTicksToWait == ( TickType_t ) 0 )
828  {
830  return errQUEUE_FULL;
831  }
832  else if( xEntryTimeSet == pdFALSE )
833  {
834  vTaskSetTimeOutState( &xTimeOut );
835  xEntryTimeSet = pdTRUE;
836  }
837  }
838  }
840 
842  {
843  if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
844  {
845  if( prvIsQueueFull( pxQueue ) != pdFALSE )
846  {
847  traceBLOCKING_ON_QUEUE_SEND( pxQueue );
848  vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
850  }
851  else
852  {
854  }
855  }
856  else
857  {
859  traceQUEUE_SEND_FAILED( pxQueue );
860  return errQUEUE_FULL;
861  }
862  }
864  }
865  }
866 
867 #endif /* configUSE_ALTERNATIVE_API */
868 /*-----------------------------------------------------------*/
869 
870 #if ( configUSE_ALTERNATIVE_API == 1 )
871 
872  BaseType_t xQueueAltGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking )
873  {
874  BaseType_t xEntryTimeSet = pdFALSE;
875  TimeOut_t xTimeOut;
876  int8_t *pcOriginalReadPosition;
877  Queue_t * const pxQueue = ( Queue_t * ) xQueue;
878 
879  configASSERT( pxQueue );
880  configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
881 
882  for( ;; )
883  {
885  {
886  if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
887  {
888  /* Remember our read position in case we are just peeking. */
889  pcOriginalReadPosition = pxQueue->u.pcReadFrom;
890 
891  prvCopyDataFromQueue( pxQueue, pvBuffer );
892 
893  if( xJustPeeking == pdFALSE )
894  {
895  traceQUEUE_RECEIVE( pxQueue );
896 
897  /* Data is actually being removed (not just peeked). */
898  --( pxQueue->uxMessagesWaiting );
899 
900  #if ( configUSE_MUTEXES == 1 )
901  {
902  if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
903  {
904  /* Record the information required to implement
905  priority inheritance should it become necessary. */
906  pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle();
907  }
908  else
909  {
911  }
912  }
913  #endif
914 
915  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
916  {
917  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
918  {
920  }
921  else
922  {
924  }
925  }
926  }
927  else
928  {
929  traceQUEUE_PEEK( pxQueue );
930 
931  /* We are not removing the data, so reset our read
932  pointer. */
933  pxQueue->u.pcReadFrom = pcOriginalReadPosition;
934 
935  /* The data is being left in the queue, so see if there are
936  any other tasks waiting for the data. */
937  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
938  {
939  /* Tasks that are removed from the event list will get added to
940  the pending ready list as the scheduler is still suspended. */
942  {
943  /* The task waiting has a higher priority than this task. */
945  }
946  else
947  {
949  }
950  }
951  else
952  {
954  }
955  }
956 
958  return pdPASS;
959  }
960  else
961  {
962  if( xTicksToWait == ( TickType_t ) 0 )
963  {
965  traceQUEUE_RECEIVE_FAILED( pxQueue );
966  return errQUEUE_EMPTY;
967  }
968  else if( xEntryTimeSet == pdFALSE )
969  {
970  vTaskSetTimeOutState( &xTimeOut );
971  xEntryTimeSet = pdTRUE;
972  }
973  }
974  }
976 
978  {
979  if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
980  {
981  if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
982  {
984 
985  #if ( configUSE_MUTEXES == 1 )
986  {
987  if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
988  {
990  {
991  vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
992  }
994  }
995  else
996  {
998  }
999  }
1000  #endif
1001 
1002  vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1004  }
1005  else
1006  {
1008  }
1009  }
1010  else
1011  {
1013  traceQUEUE_RECEIVE_FAILED( pxQueue );
1014  return errQUEUE_EMPTY;
1015  }
1016  }
1018  }
1019  }
1020 
1021 
1022 #endif /* configUSE_ALTERNATIVE_API */
1023 /*-----------------------------------------------------------*/
1024 
1025 BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
1026 {
1027 BaseType_t xReturn;
1028 UBaseType_t uxSavedInterruptStatus;
1029 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1030 
1031  configASSERT( pxQueue );
1032  configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
1033  configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
1034 
1035  /* RTOS ports that support interrupt nesting have the concept of a maximum
1036  system call (or maximum API call) interrupt priority. Interrupts that are
1037  above the maximum system call priority are kept permanently enabled, even
1038  when the RTOS kernel is in a critical section, but cannot make any calls to
1039  FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
1040  then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1041  failure if a FreeRTOS API function is called from an interrupt that has been
1042  assigned a priority above the configured maximum system call priority.
1043  Only FreeRTOS functions that end in FromISR can be called from interrupts
1044  that have been assigned a priority at or (logically) below the maximum
1045  system call interrupt priority. FreeRTOS maintains a separate interrupt
1046  safe API to ensure interrupt entry is as fast and as simple as possible.
1047  More information (albeit Cortex-M specific) is provided on the following
1048  link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
1050 
1051  /* Similar to xQueueGenericSend, except without blocking if there is no room
1052  in the queue. Also don't directly wake a task that was blocked on a queue
1053  read, instead return a flag to say whether a context switch is required or
1054  not (i.e. has a task with a higher priority than us been woken by this
1055  post). */
1056  uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
1057  {
1058  if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
1059  {
1060  traceQUEUE_SEND_FROM_ISR( pxQueue );
1061 
1062  prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
1063 
1064  /* The event list is not altered if the queue is locked. This will
1065  be done when the queue is unlocked later. */
1066  if( pxQueue->xTxLock == queueUNLOCKED )
1067  {
1068  #if ( configUSE_QUEUE_SETS == 1 )
1069  {
1070  if( pxQueue->pxQueueSetContainer != NULL )
1071  {
1072  if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) == pdTRUE )
1073  {
1074  /* The queue is a member of a queue set, and posting
1075  to the queue set caused a higher priority task to
1076  unblock. A context switch is required. */
1077  if( pxHigherPriorityTaskWoken != NULL )
1078  {
1079  *pxHigherPriorityTaskWoken = pdTRUE;
1080  }
1081  else
1082  {
1084  }
1085  }
1086  else
1087  {
1089  }
1090  }
1091  else
1092  {
1093  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1094  {
1095  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1096  {
1097  /* The task waiting has a higher priority so record that a
1098  context switch is required. */
1099  if( pxHigherPriorityTaskWoken != NULL )
1100  {
1101  *pxHigherPriorityTaskWoken = pdTRUE;
1102  }
1103  else
1104  {
1106  }
1107  }
1108  else
1109  {
1111  }
1112  }
1113  else
1114  {
1116  }
1117  }
1118  }
1119  #else /* configUSE_QUEUE_SETS */
1120  {
1121  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1122  {
1123  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1124  {
1125  /* The task waiting has a higher priority so record that a
1126  context switch is required. */
1127  if( pxHigherPriorityTaskWoken != NULL )
1128  {
1129  *pxHigherPriorityTaskWoken = pdTRUE;
1130  }
1131  else
1132  {
1134  }
1135  }
1136  else
1137  {
1139  }
1140  }
1141  else
1142  {
1144  }
1145  }
1146  #endif /* configUSE_QUEUE_SETS */
1147  }
1148  else
1149  {
1150  /* Increment the lock count so the task that unlocks the queue
1151  knows that data was posted while it was locked. */
1152  ++( pxQueue->xTxLock );
1153  }
1154 
1155  xReturn = pdPASS;
1156  }
1157  else
1158  {
1160  xReturn = errQUEUE_FULL;
1161  }
1162  }
1163  portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
1164 
1165  return xReturn;
1166 }
1167 /*-----------------------------------------------------------*/
1168 
1169 BaseType_t xQueueGenericReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking )
1170 {
1171 BaseType_t xEntryTimeSet = pdFALSE;
1172 TimeOut_t xTimeOut;
1173 int8_t *pcOriginalReadPosition;
1174 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1175 
1176  configASSERT( pxQueue );
1177  configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
1178  #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1179  {
1180  configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1181  }
1182  #endif
1183 
1184  /* This function relaxes the coding standard somewhat to allow return
1185  statements within the function itself. This is done in the interest
1186  of execution time efficiency. */
1187 
1188  for( ;; )
1189  {
1191  {
1192  /* Is there data in the queue now? To be running we must be
1193  the highest priority task wanting to access the queue. */
1194  if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
1195  {
1196  /* Remember the read position in case the queue is only being
1197  peeked. */
1198  pcOriginalReadPosition = pxQueue->u.pcReadFrom;
1199 
1200  prvCopyDataFromQueue( pxQueue, pvBuffer );
1201 
1202  if( xJustPeeking == pdFALSE )
1203  {
1204  traceQUEUE_RECEIVE( pxQueue );
1205 
1206  /* Actually removing data, not just peeking. */
1207  --( pxQueue->uxMessagesWaiting );
1208 
1209  #if ( configUSE_MUTEXES == 1 )
1210  {
1211  if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1212  {
1213  /* Record the information required to implement
1214  priority inheritance should it become necessary. */
1215  pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
1216  }
1217  else
1218  {
1220  }
1221  }
1222  #endif
1223 
1224  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1225  {
1226  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
1227  {
1229  }
1230  else
1231  {
1233  }
1234  }
1235  else
1236  {
1238  }
1239  }
1240  else
1241  {
1242  traceQUEUE_PEEK( pxQueue );
1243 
1244  /* The data is not being removed, so reset the read
1245  pointer. */
1246  pxQueue->u.pcReadFrom = pcOriginalReadPosition;
1247 
1248  /* The data is being left in the queue, so see if there are
1249  any other tasks waiting for the data. */
1250  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1251  {
1252  /* Tasks that are removed from the event list will get added to
1253  the pending ready list as the scheduler is still suspended. */
1254  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1255  {
1256  /* The task waiting has a higher priority than this task. */
1258  }
1259  else
1260  {
1262  }
1263  }
1264  else
1265  {
1267  }
1268  }
1269 
1271  return pdPASS;
1272  }
1273  else
1274  {
1275  if( xTicksToWait == ( TickType_t ) 0 )
1276  {
1277  /* The queue was empty and no block time is specified (or
1278  the block time has expired) so leave now. */
1280  traceQUEUE_RECEIVE_FAILED( pxQueue );
1281  return errQUEUE_EMPTY;
1282  }
1283  else if( xEntryTimeSet == pdFALSE )
1284  {
1285  /* The queue was empty and a block time was specified so
1286  configure the timeout structure. */
1287  vTaskSetTimeOutState( &xTimeOut );
1288  xEntryTimeSet = pdTRUE;
1289  }
1290  else
1291  {
1292  /* Entry time was already set. */
1294  }
1295  }
1296  }
1298 
1299  /* Interrupts and other tasks can send to and receive from the queue
1300  now the critical section has been exited. */
1301 
1302  vTaskSuspendAll();
1303  prvLockQueue( pxQueue );
1304 
1305  /* Update the timeout state to see if it has expired yet. */
1306  if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1307  {
1308  if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1309  {
1310  traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
1311 
1312  #if ( configUSE_MUTEXES == 1 )
1313  {
1314  if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1315  {
1317  {
1318  vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
1319  }
1321  }
1322  else
1323  {
1325  }
1326  }
1327  #endif
1328 
1329  vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1330  prvUnlockQueue( pxQueue );
1331  if( xTaskResumeAll() == pdFALSE )
1332  {
1334  }
1335  else
1336  {
1338  }
1339  }
1340  else
1341  {
1342  /* Try again. */
1343  prvUnlockQueue( pxQueue );
1344  ( void ) xTaskResumeAll();
1345  }
1346  }
1347  else
1348  {
1349  prvUnlockQueue( pxQueue );
1350  ( void ) xTaskResumeAll();
1351  traceQUEUE_RECEIVE_FAILED( pxQueue );
1352  return errQUEUE_EMPTY;
1353  }
1354  }
1355 }
1356 /*-----------------------------------------------------------*/
1357 
1358 BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
1359 {
1360 BaseType_t xReturn;
1361 UBaseType_t uxSavedInterruptStatus;
1362 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1363 
1364  configASSERT( pxQueue );
1365  configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
1366 
1367  /* RTOS ports that support interrupt nesting have the concept of a maximum
1368  system call (or maximum API call) interrupt priority. Interrupts that are
1369  above the maximum system call priority are kept permanently enabled, even
1370  when the RTOS kernel is in a critical section, but cannot make any calls to
1371  FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
1372  then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1373  failure if a FreeRTOS API function is called from an interrupt that has been
1374  assigned a priority above the configured maximum system call priority.
1375  Only FreeRTOS functions that end in FromISR can be called from interrupts
1376  that have been assigned a priority at or (logically) below the maximum
1377  system call interrupt priority. FreeRTOS maintains a separate interrupt
1378  safe API to ensure interrupt entry is as fast and as simple as possible.
1379  More information (albeit Cortex-M specific) is provided on the following
1380  link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
1382 
1383  uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
1384  {
1385  /* Cannot block in an ISR, so check there is data available. */
1386  if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
1387  {
1388  traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
1389 
1390  prvCopyDataFromQueue( pxQueue, pvBuffer );
1391  --( pxQueue->uxMessagesWaiting );
1392 
1393  /* If the queue is locked the event list will not be modified.
1394  Instead update the lock count so the task that unlocks the queue
1395  will know that an ISR has removed data while the queue was
1396  locked. */
1397  if( pxQueue->xRxLock == queueUNLOCKED )
1398  {
1399  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1400  {
1401  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1402  {
1403  /* The task waiting has a higher priority than us so
1404  force a context switch. */
1405  if( pxHigherPriorityTaskWoken != NULL )
1406  {
1407  *pxHigherPriorityTaskWoken = pdTRUE;
1408  }
1409  else
1410  {
1412  }
1413  }
1414  else
1415  {
1417  }
1418  }
1419  else
1420  {
1422  }
1423  }
1424  else
1425  {
1426  /* Increment the lock count so the task that unlocks the queue
1427  knows that data was removed while it was locked. */
1428  ++( pxQueue->xRxLock );
1429  }
1430 
1431  xReturn = pdPASS;
1432  }
1433  else
1434  {
1435  xReturn = pdFAIL;
1437  }
1438  }
1439  portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
1440 
1441  return xReturn;
1442 }
1443 /*-----------------------------------------------------------*/
1444 
1445 BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )
1446 {
1447 BaseType_t xReturn;
1448 UBaseType_t uxSavedInterruptStatus;
1449 int8_t *pcOriginalReadPosition;
1450 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1451 
1452  configASSERT( pxQueue );
1453  configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
1454 
1455  /* RTOS ports that support interrupt nesting have the concept of a maximum
1456  system call (or maximum API call) interrupt priority. Interrupts that are
1457  above the maximum system call priority are kept permanently enabled, even
1458  when the RTOS kernel is in a critical section, but cannot make any calls to
1459  FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
1460  then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1461  failure if a FreeRTOS API function is called from an interrupt that has been
1462  assigned a priority above the configured maximum system call priority.
1463  Only FreeRTOS functions that end in FromISR can be called from interrupts
1464  that have been assigned a priority at or (logically) below the maximum
1465  system call interrupt priority. FreeRTOS maintains a separate interrupt
1466  safe API to ensure interrupt entry is as fast and as simple as possible.
1467  More information (albeit Cortex-M specific) is provided on the following
1468  link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
1470 
1471  uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
1472  {
1473  /* Cannot block in an ISR, so check there is data available. */
1474  if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
1475  {
1476  traceQUEUE_PEEK_FROM_ISR( pxQueue );
1477 
1478  /* Remember the read position so it can be reset as nothing is
1479  actually being removed from the queue. */
1480  pcOriginalReadPosition = pxQueue->u.pcReadFrom;
1481  prvCopyDataFromQueue( pxQueue, pvBuffer );
1482  pxQueue->u.pcReadFrom = pcOriginalReadPosition;
1483 
1484  xReturn = pdPASS;
1485  }
1486  else
1487  {
1488  xReturn = pdFAIL;
1490  }
1491  }
1492  portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
1493 
1494  return xReturn;
1495 }
1496 /*-----------------------------------------------------------*/
1497 
1499 {
1500 UBaseType_t uxReturn;
1501 
1502  configASSERT( xQueue );
1503 
1505  {
1506  uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
1507  }
1509 
1510  return uxReturn;
1511 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
1512 /*-----------------------------------------------------------*/
1513 
1515 {
1516 UBaseType_t uxReturn;
1517 Queue_t *pxQueue;
1518 
1519  pxQueue = ( Queue_t * ) xQueue;
1520  configASSERT( pxQueue );
1521 
1523  {
1524  uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
1525  }
1527 
1528  return uxReturn;
1529 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
1530 /*-----------------------------------------------------------*/
1531 
1533 {
1534 UBaseType_t uxReturn;
1535 
1536  configASSERT( xQueue );
1537 
1538  uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
1539 
1540  return uxReturn;
1541 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
1542 /*-----------------------------------------------------------*/
1543 
1545 {
1546 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1547 
1548  configASSERT( pxQueue );
1549 
1550  traceQUEUE_DELETE( pxQueue );
1551  #if ( configQUEUE_REGISTRY_SIZE > 0 )
1552  {
1553  vQueueUnregisterQueue( pxQueue );
1554  }
1555  #endif
1556  if( pxQueue->pcHead != NULL )
1557  {
1558  vPortFree( pxQueue->pcHead );
1559  }
1560  vPortFree( pxQueue );
1561 }
1562 /*-----------------------------------------------------------*/
1563 
1564 #if ( configUSE_TRACE_FACILITY == 1 )
1565 
1567  {
1568  return ( ( Queue_t * ) xQueue )->uxQueueNumber;
1569  }
1570 
1571 #endif /* configUSE_TRACE_FACILITY */
1572 /*-----------------------------------------------------------*/
1573 
1574 #if ( configUSE_TRACE_FACILITY == 1 )
1575 
1576  void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
1577  {
1578  ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
1579  }
1580 
1581 #endif /* configUSE_TRACE_FACILITY */
1582 /*-----------------------------------------------------------*/
1583 
1584 #if ( configUSE_TRACE_FACILITY == 1 )
1585 
1586  uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
1587  {
1588  return ( ( Queue_t * ) xQueue )->ucQueueType;
1589  }
1590 
1591 #endif /* configUSE_TRACE_FACILITY */
1592 /*-----------------------------------------------------------*/
1593 
/* Copy pvItemToQueue into the queue's storage area and bump the message
count.  xPosition selects placement: queueSEND_TO_BACK appends at the write
pointer, otherwise the item is written at the read pointer (front / overwrite).
Zero-item-size queues copy nothing; for a mutex this path instead releases
priority inheritance and clears the holder.
NOTE(review): caller is presumed to hold a critical section or have
interrupts masked - confirm at call sites.  The empty else branches below
held mtCOVERAGE_TEST_MARKER() calls elided by this extraction. */
static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
{
	if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
	{
		#if ( configUSE_MUTEXES == 1 )
		{
			if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				/* The mutex is no longer being held. */
				vTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
				pxQueue->pxMutexHolder = NULL;
			}
			else
			{
			}
		}
		#endif /* configUSE_MUTEXES */
	}
	else if( xPosition == queueSEND_TO_BACK )
	{
		/* Append at the write pointer, then advance it, wrapping back to the
		head of the storage area when it reaches the tail. */
		( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
		pxQueue->pcWriteTo += pxQueue->uxItemSize;
		if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->pcWriteTo = pxQueue->pcHead;
		}
		else
		{
		}
	}
	else
	{
		/* Write at the current read pointer and step it backwards (wrapping
		to the last slot) so this item becomes the next one received. */
		( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
		pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
		}
		else
		{
		}

		if( xPosition == queueOVERWRITE )
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* An item is not being added but overwritten, so subtract
				one from the recorded number of items in the queue so when
				one is added again below the number of recorded items remains
				correct. */
				--( pxQueue->uxMessagesWaiting );
			}
			else
			{
			}
		}
		else
		{
		}
	}

	++( pxQueue->uxMessagesWaiting );
}
1662 /*-----------------------------------------------------------*/
1663 
/* Copy the next item out of the queue into pvBuffer.  The read pointer is
advanced (with wrap-around) BEFORE the copy - pcReadFrom points at the item
most recently read, not the next one.  For a mutex-type queue nothing is
copied (mutexes carry no data).
NOTE(review): caller is presumed to hold a critical section or have
interrupts masked - confirm at call sites.  The empty else branches held
mtCOVERAGE_TEST_MARKER() calls elided by this extraction. */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
{
	if( pxQueue->uxQueueType != queueQUEUE_IS_MUTEX )
	{
		pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
		if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */
		{
			pxQueue->u.pcReadFrom = pxQueue->pcHead;
		}
		else
		{
		}
		( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
	}
	else
	{
	}
}
1684 /*-----------------------------------------------------------*/
1685 
1686 static void prvUnlockQueue( Queue_t * const pxQueue )
1687 {
1688  /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
1689 
1690  /* The lock counts contains the number of extra data items placed or
1691  removed from the queue while the queue was locked. When a queue is
1692  locked items can be added or removed, but the event lists cannot be
1693  updated. */
1695  {
1696  /* See if data was added to the queue while it was locked. */
1697  while( pxQueue->xTxLock > queueLOCKED_UNMODIFIED )
1698  {
1699  /* Data was posted while the queue was locked. Are any tasks
1700  blocked waiting for data to become available? */
1701  #if ( configUSE_QUEUE_SETS == 1 )
1702  {
1703  if( pxQueue->pxQueueSetContainer != NULL )
1704  {
1705  if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
1706  {
1707  /* The queue is a member of a queue set, and posting to
1708  the queue set caused a higher priority task to unblock.
1709  A context switch is required. */
1710  vTaskMissedYield();
1711  }
1712  else
1713  {
1715  }
1716  }
1717  else
1718  {
1719  /* Tasks that are removed from the event list will get added to
1720  the pending ready list as the scheduler is still suspended. */
1721  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1722  {
1723  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1724  {
1725  /* The task waiting has a higher priority so record that a
1726  context switch is required. */
1727  vTaskMissedYield();
1728  }
1729  else
1730  {
1732  }
1733  }
1734  else
1735  {
1736  break;
1737  }
1738  }
1739  }
1740  #else /* configUSE_QUEUE_SETS */
1741  {
1742  /* Tasks that are removed from the event list will get added to
1743  the pending ready list as the scheduler is still suspended. */
1744  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1745  {
1746  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1747  {
1748  /* The task waiting has a higher priority so record that a
1749  context switch is required. */
1750  vTaskMissedYield();
1751  }
1752  else
1753  {
1755  }
1756  }
1757  else
1758  {
1759  break;
1760  }
1761  }
1762  #endif /* configUSE_QUEUE_SETS */
1763 
1764  --( pxQueue->xTxLock );
1765  }
1766 
1767  pxQueue->xTxLock = queueUNLOCKED;
1768  }
1770 
1771  /* Do the same for the Rx lock. */
1773  {
1774  while( pxQueue->xRxLock > queueLOCKED_UNMODIFIED )
1775  {
1776  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1777  {
1778  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1779  {
1780  vTaskMissedYield();
1781  }
1782  else
1783  {
1785  }
1786 
1787  --( pxQueue->xRxLock );
1788  }
1789  else
1790  {
1791  break;
1792  }
1793  }
1794 
1795  pxQueue->xRxLock = queueUNLOCKED;
1796  }
1798 }
1799 /*-----------------------------------------------------------*/
1800 
1801 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
1802 {
1803 BaseType_t xReturn;
1804 
1806  {
1807  if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
1808  {
1809  xReturn = pdTRUE;
1810  }
1811  else
1812  {
1813  xReturn = pdFALSE;
1814  }
1815  }
1817 
1818  return xReturn;
1819 }
1820 /*-----------------------------------------------------------*/
1821 
1823 {
1824 BaseType_t xReturn;
1825 
1826  configASSERT( xQueue );
1827  if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )
1828  {
1829  xReturn = pdTRUE;
1830  }
1831  else
1832  {
1833  xReturn = pdFALSE;
1834  }
1835 
1836  return xReturn;
1837 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
1838 /*-----------------------------------------------------------*/
1839 
1840 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
1841 {
1842 BaseType_t xReturn;
1843 
1845  {
1846  if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
1847  {
1848  xReturn = pdTRUE;
1849  }
1850  else
1851  {
1852  xReturn = pdFALSE;
1853  }
1854  }
1856 
1857  return xReturn;
1858 }
1859 /*-----------------------------------------------------------*/
1860 
1862 {
1863 BaseType_t xReturn;
1864 
1865  configASSERT( xQueue );
1866  if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )
1867  {
1868  xReturn = pdTRUE;
1869  }
1870  else
1871  {
1872  xReturn = pdFALSE;
1873  }
1874 
1875  return xReturn;
1876 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
1877 /*-----------------------------------------------------------*/
1878 
1879 #if ( configUSE_CO_ROUTINES == 1 )
1880 
1881  BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
1882  {
1883  BaseType_t xReturn;
1884  Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1885 
1886  /* If the queue is already full we may have to block. A critical section
1887  is required to prevent an interrupt removing something from the queue
1888  between the check to see if the queue is full and blocking on the queue. */
1890  {
1891  if( prvIsQueueFull( pxQueue ) != pdFALSE )
1892  {
1893  /* The queue is full - do we want to block or just leave without
1894  posting? */
1895  if( xTicksToWait > ( TickType_t ) 0 )
1896  {
1897  /* As this is called from a coroutine we cannot block directly, but
1898  return indicating that we need to block. */
1899  vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
1901  return errQUEUE_BLOCKED;
1902  }
1903  else
1904  {
1906  return errQUEUE_FULL;
1907  }
1908  }
1909  }
1911 
1913  {
1914  if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
1915  {
1916  /* There is room in the queue, copy the data into the queue. */
1917  prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
1918  xReturn = pdPASS;
1919 
1920  /* Were any co-routines waiting for data to become available? */
1921  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1922  {
1923  /* In this instance the co-routine could be placed directly
1924  into the ready list as we are within a critical section.
1925  Instead the same pending ready list mechanism is used as if
1926  the event were caused from within an interrupt. */
1928  {
1929  /* The co-routine waiting has a higher priority so record
1930  that a yield might be appropriate. */
1931  xReturn = errQUEUE_YIELD;
1932  }
1933  else
1934  {
1936  }
1937  }
1938  else
1939  {
1941  }
1942  }
1943  else
1944  {
1945  xReturn = errQUEUE_FULL;
1946  }
1947  }
1949 
1950  return xReturn;
1951  }
1952 
1953 #endif /* configUSE_CO_ROUTINES */
1954 /*-----------------------------------------------------------*/
1955 
1956 #if ( configUSE_CO_ROUTINES == 1 )
1957 
1958  BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
1959  {
1960  BaseType_t xReturn;
1961  Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1962 
1963  /* If the queue is already empty we may have to block. A critical section
1964  is required to prevent an interrupt adding something to the queue
1965  between the check to see if the queue is empty and blocking on the queue. */
1967  {
1968  if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
1969  {
1970  /* There are no messages in the queue, do we want to block or just
1971  leave with nothing? */
1972  if( xTicksToWait > ( TickType_t ) 0 )
1973  {
1974  /* As this is a co-routine we cannot block directly, but return
1975  indicating that we need to block. */
1976  vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
1978  return errQUEUE_BLOCKED;
1979  }
1980  else
1981  {
1983  return errQUEUE_FULL;
1984  }
1985  }
1986  else
1987  {
1989  }
1990  }
1992 
1994  {
1995  if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
1996  {
1997  /* Data is available from the queue. */
1998  pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
1999  if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
2000  {
2001  pxQueue->u.pcReadFrom = pxQueue->pcHead;
2002  }
2003  else
2004  {
2006  }
2007  --( pxQueue->uxMessagesWaiting );
2008  ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
2009 
2010  xReturn = pdPASS;
2011 
2012  /* Were any co-routines waiting for space to become available? */
2013  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2014  {
2015  /* In this instance the co-routine could be placed directly
2016  into the ready list as we are within a critical section.
2017  Instead the same pending ready list mechanism is used as if
2018  the event were caused from within an interrupt. */
2020  {
2021  xReturn = errQUEUE_YIELD;
2022  }
2023  else
2024  {
2026  }
2027  }
2028  else
2029  {
2031  }
2032  }
2033  else
2034  {
2035  xReturn = pdFAIL;
2036  }
2037  }
2039 
2040  return xReturn;
2041  }
2042 
2043 #endif /* configUSE_CO_ROUTINES */
2044 /*-----------------------------------------------------------*/
2045 
2046 #if ( configUSE_CO_ROUTINES == 1 )
2047 
2048  BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
2049  {
2050  Queue_t * const pxQueue = ( Queue_t * ) xQueue;
2051 
2052  /* Cannot block within an ISR so if there is no space on the queue then
2053  exit without doing anything. */
2054  if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
2055  {
2056  prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
2057 
2058  /* We only want to wake one co-routine per ISR, so check that a
2059  co-routine has not already been woken. */
2060  if( xCoRoutinePreviouslyWoken == pdFALSE )
2061  {
2062  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2063  {
2065  {
2066  return pdTRUE;
2067  }
2068  else
2069  {
2071  }
2072  }
2073  else
2074  {
2076  }
2077  }
2078  else
2079  {
2081  }
2082  }
2083  else
2084  {
2086  }
2087 
2088  return xCoRoutinePreviouslyWoken;
2089  }
2090 
2091 #endif /* configUSE_CO_ROUTINES */
2092 /*-----------------------------------------------------------*/
2093 
2094 #if ( configUSE_CO_ROUTINES == 1 )
2095 
2096  BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
2097  {
2098  BaseType_t xReturn;
2099  Queue_t * const pxQueue = ( Queue_t * ) xQueue;
2100 
2101  /* We cannot block from an ISR, so check there is data available. If
2102  not then just leave without doing anything. */
2103  if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2104  {
2105  /* Copy the data from the queue. */
2106  pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
2107  if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
2108  {
2109  pxQueue->u.pcReadFrom = pxQueue->pcHead;
2110  }
2111  else
2112  {
2114  }
2115  --( pxQueue->uxMessagesWaiting );
2116  ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
2117 
2118  if( ( *pxCoRoutineWoken ) == pdFALSE )
2119  {
2120  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2121  {
2123  {
2124  *pxCoRoutineWoken = pdTRUE;
2125  }
2126  else
2127  {
2129  }
2130  }
2131  else
2132  {
2134  }
2135  }
2136  else
2137  {
2139  }
2140 
2141  xReturn = pdPASS;
2142  }
2143  else
2144  {
2145  xReturn = pdFAIL;
2146  }
2147 
2148  return xReturn;
2149  }
2150 
2151 #endif /* configUSE_CO_ROUTINES */
2152 /*-----------------------------------------------------------*/
2153 
2154 #if ( configQUEUE_REGISTRY_SIZE > 0 )
2155 
2156  void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
2157  {
2158  UBaseType_t ux;
2159 
2160  /* See if there is an empty space in the registry. A NULL name denotes
2161  a free slot. */
2162  for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
2163  {
2164  if( xQueueRegistry[ ux ].pcQueueName == NULL )
2165  {
2166  /* Store the information on this queue. */
2167  xQueueRegistry[ ux ].pcQueueName = pcQueueName;
2168  xQueueRegistry[ ux ].xHandle = xQueue;
2169 
2170  traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
2171  break;
2172  }
2173  else
2174  {
2176  }
2177  }
2178  }
2179 
2180 #endif /* configQUEUE_REGISTRY_SIZE */
2181 /*-----------------------------------------------------------*/
2182 
2183 #if ( configQUEUE_REGISTRY_SIZE > 0 )
2184 
2185  void vQueueUnregisterQueue( QueueHandle_t xQueue )
2186  {
2187  UBaseType_t ux;
2188 
2189  /* See if the handle of the queue being unregistered in actually in the
2190  registry. */
2191  for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
2192  {
2193  if( xQueueRegistry[ ux ].xHandle == xQueue )
2194  {
2195  /* Set the name to NULL to show that this slot if free again. */
2196  xQueueRegistry[ ux ].pcQueueName = NULL;
2197  break;
2198  }
2199  else
2200  {
2202  }
2203  }
2204 
2205  } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
2206 
2207 #endif /* configQUEUE_REGISTRY_SIZE */
2208 /*-----------------------------------------------------------*/
2209 
2210 #if ( configUSE_TIMERS == 1 )
2211 
2212  void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait )
2213  {
2214  Queue_t * const pxQueue = ( Queue_t * ) xQueue;
2215 
2216  /* This function should not be called by application code hence the
2217  'Restricted' in its name. It is not part of the public API. It is
2218  designed for use by kernel code, and has special calling requirements.
2219  It can result in vListInsert() being called on a list that can only
2220  possibly ever have one item in it, so the list will be fast, but even
2221  so it should be called with the scheduler locked and not from a critical
2222  section. */
2223 
2224  /* Only do anything if there are no messages in the queue. This function
2225  will not actually cause the task to block, just place it on a blocked
2226  list. It will not block until the scheduler is unlocked - at which
2227  time a yield will be performed. If an item is added to the queue while
2228  the queue is locked, and the calling task blocks on the queue, then the
2229  calling task will be immediately unblocked when the queue is unlocked. */
2230  prvLockQueue( pxQueue );
2231  if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
2232  {
2233  /* There is nothing in the queue, block for the specified period. */
2234  vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
2235  }
2236  else
2237  {
2239  }
2240  prvUnlockQueue( pxQueue );
2241  }
2242 
2243 #endif /* configUSE_TIMERS */
2244 /*-----------------------------------------------------------*/
2245 
2246 #if ( configUSE_QUEUE_SETS == 1 )
2247 
2248  QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
2249  {
2250  QueueSetHandle_t pxQueue;
2251 
2252  pxQueue = xQueueGenericCreate( uxEventQueueLength, sizeof( Queue_t * ), queueQUEUE_TYPE_SET );
2253 
2254  return pxQueue;
2255  }
2256 
2257 #endif /* configUSE_QUEUE_SETS */
2258 /*-----------------------------------------------------------*/
2259 
2260 #if ( configUSE_QUEUE_SETS == 1 )
2261 
2262  BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
2263  {
2264  BaseType_t xReturn;
2265 
2267  {
2268  if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
2269  {
2270  /* Cannot add a queue/semaphore to more than one queue set. */
2271  xReturn = pdFAIL;
2272  }
2273  else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
2274  {
2275  /* Cannot add a queue/semaphore to a queue set if there are already
2276  items in the queue/semaphore. */
2277  xReturn = pdFAIL;
2278  }
2279  else
2280  {
2281  ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
2282  xReturn = pdPASS;
2283  }
2284  }
2286 
2287  return xReturn;
2288  }
2289 
2290 #endif /* configUSE_QUEUE_SETS */
2291 /*-----------------------------------------------------------*/
2292 
2293 #if ( configUSE_QUEUE_SETS == 1 )
2294 
2296  {
2297  BaseType_t xReturn;
2298  Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;
2299 
2300  if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
2301  {
2302  /* The queue was not a member of the set. */
2303  xReturn = pdFAIL;
2304  }
2305  else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
2306  {
2307  /* It is dangerous to remove a queue from a set when the queue is
2308  not empty because the queue set will still hold pending events for
2309  the queue. */
2310  xReturn = pdFAIL;
2311  }
2312  else
2313  {
2315  {
2316  /* The queue is no longer contained in the set. */
2317  pxQueueOrSemaphore->pxQueueSetContainer = NULL;
2318  }
2320  xReturn = pdPASS;
2321  }
2322 
2323  return xReturn;
2324  } /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */
2325 
2326 #endif /* configUSE_QUEUE_SETS */
2327 /*-----------------------------------------------------------*/
2328 
2329 #if ( configUSE_QUEUE_SETS == 1 )
2330 
2332  {
2333  QueueSetMemberHandle_t xReturn = NULL;
2334 
2335  ( void ) xQueueGenericReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait, pdFALSE ); /*lint !e961 Casting from one typedef to another is not redundant. */
2336  return xReturn;
2337  }
2338 
2339 #endif /* configUSE_QUEUE_SETS */
2340 /*-----------------------------------------------------------*/
2341 
2342 #if ( configUSE_QUEUE_SETS == 1 )
2343 
2345  {
2346  QueueSetMemberHandle_t xReturn = NULL;
2347 
2348  ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
2349  return xReturn;
2350  }
2351 
2352 #endif /* configUSE_QUEUE_SETS */
2353 /*-----------------------------------------------------------*/
2354 
2355 #if ( configUSE_QUEUE_SETS == 1 )
2356 
2357  static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
2358  {
2359  Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
2360  BaseType_t xReturn = pdFALSE;
2361 
2362  /* This function must be called form a critical section. */
2363 
2364  configASSERT( pxQueueSetContainer );
2365  configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
2366 
2367  if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
2368  {
2369  traceQUEUE_SEND( pxQueueSetContainer );
2370  /* The data copies is the handle of the queue that contains data. */
2371  prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );
2372  if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
2373  {
2374  if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
2375  {
2376  /* The task waiting has a higher priority */
2377  xReturn = pdTRUE;
2378  }
2379  else
2380  {
2382  }
2383  }
2384  else
2385  {
2387  }
2388  }
2389  else
2390  {
2392  }
2393 
2394  return xReturn;
2395  }
2396 
2397 #endif /* configUSE_QUEUE_SETS */
2398 
2399 
2400 
2401 
2402 
2403 
2404 
2405 
2406 
2407 
2408 
2409 
#define pdTRUE
Definition: projdefs.h:76
BaseType_t xQueueAddToSet(QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet) PRIVILEGED_FUNCTION
#define pxMutexHolder
Definition: queue.c:104
UBaseType_t uxQueueMessagesWaitingFromISR(const QueueHandle_t xQueue)
Definition: queue.c:1532
void vPortFree(void *pv) PRIVILEGED_FUNCTION
Definition: heap_4.c:294
#define queueQUEUE_TYPE_SET
Definition: queue.h:107
#define traceTAKE_MUTEX_RECURSIVE_FAILED(pxMutex)
Definition: FreeRTOS.h:435
BaseType_t xQueueTakeMutexRecursive(QueueHandle_t xMutex, TickType_t xTicksToWait) PRIVILEGED_FUNCTION
#define queueOVERWRITE
Definition: queue.h:103
BaseType_t xQueueCRSendFromISR(QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken)
#define traceQUEUE_CREATE_FAILED(ucQueueType)
Definition: FreeRTOS.h:411
#define traceQUEUE_SEND_FROM_ISR(pxQueue)
Definition: FreeRTOS.h:471
UBaseType_t uxRecursiveCallCount
Definition: queue.c:134
void * QueueSetMemberHandle_t
Definition: queue.h:98
#define queueYIELD_IF_USING_PREEMPTION()
Definition: queue.c:116
void vQueueSetQueueNumber(QueueHandle_t xQueue, UBaseType_t uxQueueNumber) PRIVILEGED_FUNCTION
#define errQUEUE_YIELD
Definition: projdefs.h:86
xQUEUE Queue_t
Definition: queue.c:160
TaskHandle_t xTaskGetCurrentTaskHandle(void) PRIVILEGED_FUNCTION
#define mtCOVERAGE_TEST_MARKER()
Definition: FreeRTOS.h:717
UBaseType_t uxQueueMessagesWaiting(const QueueHandle_t xQueue)
Definition: queue.c:1498
#define queueQUEUE_TYPE_COUNTING_SEMAPHORE
Definition: queue.h:109
#define queueLOCKED_UNMODIFIED
Definition: queue.c:91
#define taskEXIT_CRITICAL()
Definition: task.h:216
List_t xTasksWaitingToReceive
Definition: queue.c:138
void vTaskSuspendAll(void) PRIVILEGED_FUNCTION
Definition: tasks.c:1543
#define vQueueAddToRegistry(xQueue, pcName)
Definition: FreeRTOS.h:314
#define traceQUEUE_RECEIVE_FROM_ISR_FAILED(pxQueue)
Definition: FreeRTOS.h:483
#define traceQUEUE_RECEIVE_FAILED(pxQueue)
Definition: FreeRTOS.h:467
#define portENABLE_INTERRUPTS()
Definition: portmacro.h:130
#define configASSERT(x)
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH
Definition: queue.c:110
int8_t * pcWriteTo
Definition: queue.c:129
void vTaskPlaceOnEventListRestricted(List_t *const pxEventList, const TickType_t xTicksToWait) PRIVILEGED_FUNCTION
void * xQueueGetMutexHolder(QueueHandle_t xSemaphore) PRIVILEGED_FUNCTION
#define portDISABLE_INTERRUPTS()
Definition: portmacro.h:129
#define traceQUEUE_RECEIVE(pxQueue)
Definition: FreeRTOS.h:455
void * pvPortMalloc(size_t xSize) PRIVILEGED_FUNCTION
Definition: heap_4.c:147
#define portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedStatusValue)
Definition: FreeRTOS.h:294
#define portYIELD_WITHIN_API
Definition: FreeRTOS.h:649
UBaseType_t uxItemSize
Definition: queue.c:142
QueueSetHandle_t xQueueCreateSet(const UBaseType_t uxEventQueueLength) PRIVILEGED_FUNCTION
volatile BaseType_t xTxLock
Definition: queue.c:145
#define listLIST_IS_EMPTY(pxList)
Definition: list.h:241
BaseType_t xCoRoutineRemoveFromEventList(const List_t *pxEventList)
Definition: croutine.c:363
void * QueueSetHandle_t
Definition: queue.h:91
#define traceQUEUE_PEEK_FROM_ISR_FAILED(pxQueue)
Definition: FreeRTOS.h:487
unsigned long UBaseType_t
Definition: portmacro.h:95
uint32_t TickType_t
Definition: portmacro.h:101
#define queueSEND_TO_BACK
Definition: queue.h:101
struct QueueDefinition xQUEUE
BaseType_t xQueueGenericReceive(QueueHandle_t xQueue, void *const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking)
Definition: queue.c:1169
#define portSET_INTERRUPT_MASK_FROM_ISR()
Definition: FreeRTOS.h:290
#define traceQUEUE_SEND_FROM_ISR_FAILED(pxQueue)
Definition: FreeRTOS.h:475
QueueSetMemberHandle_t xQueueSelectFromSet(QueueSetHandle_t xQueueSet, const TickType_t xTicksToWait) PRIVILEGED_FUNCTION
BaseType_t xQueuePeekFromISR(QueueHandle_t xQueue, void *const pvBuffer)
Definition: queue.c:1445
#define traceGIVE_MUTEX_RECURSIVE_FAILED(pxMutex)
Definition: FreeRTOS.h:427
#define vQueueUnregisterQueue(xQueue)
Definition: FreeRTOS.h:315
QueueSetMemberHandle_t xQueueSelectFromSetFromISR(QueueSetHandle_t xQueueSet) PRIVILEGED_FUNCTION
UBaseType_t uxLength
Definition: queue.c:141
BaseType_t xQueueIsQueueFullFromISR(const QueueHandle_t xQueue)
Definition: queue.c:1861
#define traceCREATE_MUTEX(pxNewQueue)
Definition: FreeRTOS.h:415
#define traceQUEUE_CREATE(pxNewQueue)
Definition: FreeRTOS.h:407
void vTaskPriorityDisinherit(TaskHandle_t const pxMutexHolder) PRIVILEGED_FUNCTION
#define traceGIVE_MUTEX_RECURSIVE(pxMutex)
Definition: FreeRTOS.h:423
#define queueUNLOCKED
Definition: queue.c:90
union QueueDefinition::@0 u
BaseType_t xQueueAltGenericReceive(QueueHandle_t xQueue, void *const pvBuffer, TickType_t xTicksToWait, BaseType_t xJustPeeking)
int8_t * pcHead
Definition: queue.c:127
BaseType_t xQueueReceiveFromISR(QueueHandle_t xQueue, void *const pvBuffer, BaseType_t *const pxHigherPriorityTaskWoken)
Definition: queue.c:1358
#define traceQUEUE_DELETE(pxQueue)
Definition: FreeRTOS.h:491
#define traceQUEUE_REGISTRY_ADD(xQueue, pcQueueName)
Definition: FreeRTOS.h:615
#define pdFAIL
Definition: projdefs.h:79
void vTaskMissedYield(void) PRIVILEGED_FUNCTION
Definition: tasks.c:2497
long BaseType_t
Definition: portmacro.h:94
BaseType_t xQueueGiveMutexRecursive(QueueHandle_t pxMutex) PRIVILEGED_FUNCTION
UBaseType_t uxQueueGetQueueNumber(QueueHandle_t xQueue) PRIVILEGED_FUNCTION
#define errQUEUE_FULL
Definition: projdefs.h:81
QueueHandle_t xQueueGenericCreate(const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType)
Definition: queue.c:308
#define pdPASS
Definition: projdefs.h:78
#define traceQUEUE_SEND(pxQueue)
Definition: FreeRTOS.h:447
BaseType_t xQueueCRReceiveFromISR(QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxTaskWoken)
uint8_t ucQueueGetQueueType(QueueHandle_t xQueue) PRIVILEGED_FUNCTION
void * QueueHandle_t
Definition: queue.h:84
#define traceQUEUE_SEND_FAILED(pxQueue)
Definition: FreeRTOS.h:451
void vTaskPriorityInherit(TaskHandle_t const pxMutexHolder) PRIVILEGED_FUNCTION
#define traceBLOCKING_ON_QUEUE_SEND(pxQueue)
Definition: FreeRTOS.h:393
BaseType_t xQueueGenericReset(QueueHandle_t xQueue, BaseType_t xNewQueue)
Definition: queue.c:255
void vQueueWaitForMessageRestricted(QueueHandle_t xQueue, TickType_t xTicksToWait) PRIVILEGED_FUNCTION
#define traceCREATE_COUNTING_SEMAPHORE()
Definition: FreeRTOS.h:439
int8_t * pcReadFrom
Definition: queue.c:133
BaseType_t xQueueAltGenericSend(QueueHandle_t xQueue, const void *const pvItemToQueue, TickType_t xTicksToWait, BaseType_t xCopyPosition)
int8_t * pcTail
Definition: queue.c:128
#define pdFALSE
Definition: projdefs.h:75
#define traceTAKE_MUTEX_RECURSIVE(pxMutex)
Definition: FreeRTOS.h:431
BaseType_t xTaskResumeAll(void) PRIVILEGED_FUNCTION
Definition: tasks.c:1581
volatile UBaseType_t uxMessagesWaiting
Definition: queue.c:140
BaseType_t xTaskGetSchedulerState(void) PRIVILEGED_FUNCTION
#define taskENTER_CRITICAL()
Definition: task.h:202
#define uxQueueType
Definition: queue.c:105
BaseType_t xQueueGenericSendFromISR(QueueHandle_t xQueue, const void *const pvItemToQueue, BaseType_t *const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition)
Definition: queue.c:1025
#define traceQUEUE_PEEK_FROM_ISR(pxQueue)
Definition: FreeRTOS.h:463
#define traceQUEUE_PEEK(pxQueue)
Definition: FreeRTOS.h:459
BaseType_t xQueueCRSend(QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait)
void vTaskSetTimeOutState(TimeOut_t *const pxTimeOut) PRIVILEGED_FUNCTION
Definition: tasks.c:2440
QueueHandle_t xQueueCreateMutex(const uint8_t ucQueueType) PRIVILEGED_FUNCTION
#define queueQUEUE_IS_MUTEX
Definition: queue.c:106
#define configQUEUE_REGISTRY_SIZE
List_t xTasksWaitingToSend
Definition: queue.c:137
void vCoRoutineAddToDelayedList(TickType_t xTicksToDelay, List_t *pxEventList)
Definition: croutine.c:194
#define errQUEUE_BLOCKED
Definition: projdefs.h:85
BaseType_t xQueueRemoveFromSet(QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet) PRIVILEGED_FUNCTION
BaseType_t xQueueIsQueueEmptyFromISR(const QueueHandle_t xQueue)
Definition: queue.c:1822
#define PRIVILEGED_FUNCTION
Definition: mpu_wrappers.h:145
#define traceCREATE_COUNTING_SEMAPHORE_FAILED()
Definition: FreeRTOS.h:443
#define traceCREATE_MUTEX_FAILED()
Definition: FreeRTOS.h:419
#define errQUEUE_EMPTY
Definition: projdefs.h:80
#define queueMUTEX_GIVE_BLOCK_TIME
Definition: queue.c:111
#define prvLockQueue(pxQueue)
Definition: queue.c:240
BaseType_t xTaskCheckForTimeOut(TimeOut_t *const pxTimeOut, TickType_t *const pxTicksToWait) PRIVILEGED_FUNCTION
Definition: tasks.c:2448
#define taskSCHEDULER_SUSPENDED
Definition: task.h:241
void vQueueDelete(QueueHandle_t xQueue)
Definition: queue.c:1544
Definition: list.h:157
#define portASSERT_IF_INTERRUPT_PRIORITY_INVALID()
Definition: FreeRTOS.h:709
void vListInitialise(List_t *const pxList)
Definition: list.c:75
BaseType_t xQueueCRReceive(QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait)
BaseType_t xQueueGenericSend(QueueHandle_t xQueue, const void *const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition)
Definition: queue.c:593
BaseType_t xTaskRemoveFromEventList(const List_t *const pxEventList) PRIVILEGED_FUNCTION
Definition: tasks.c:2341
void vTaskPlaceOnEventList(List_t *const pxEventList, const TickType_t xTicksToWait) PRIVILEGED_FUNCTION
Definition: tasks.c:2171
QueueHandle_t xQueueCreateCountingSemaphore(const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount) PRIVILEGED_FUNCTION
#define traceBLOCKING_ON_QUEUE_RECEIVE(pxQueue)
Definition: FreeRTOS.h:385
#define traceQUEUE_RECEIVE_FROM_ISR(pxQueue)
Definition: FreeRTOS.h:479
UBaseType_t uxQueueSpacesAvailable(const QueueHandle_t xQueue)
Definition: queue.c:1514
volatile BaseType_t xRxLock
Definition: queue.c:144