/* Task pool internal include. */\r
#include "private/iot_taskpool_internal.h"\r
\r
-/**\r
- * @brief Enter a critical section by disabling interrupts.\r
- *\r
- */\r
-#define TASKPOOL_ENTER_CRITICAL() taskENTER_CRITICAL()\r
-\r
-/**\r
- * @brief Enter a critical section by disabling interrupts.\r
- *\r
- */\r
-#define TASKPOOL_ENTER_CRITICAL_FROM_ISR() taskENTER_CRITICAL_FROM_ISR()\r
-\r
-/**\r
- * @brief Exit a critical section by re-enabling interrupts.\r
- *\r
- */\r
-#define TASKPOOL_EXIT_CRITICAL() taskEXIT_CRITICAL()\r
-\r
-/**\r
- * @brief Exit a critical section by re-enabling interrupts.\r
- *\r
- */\r
-#define TASKPOOL_EXIT_CRITICAL_FROM_ISR( x ) taskEXIT_CRITICAL_FROM_ISR( x )\r
-\r
/**\r
* @brief Maximum semaphore value for wait operations.\r
*/\r
*\r
* param[in] timer The timer to handle.\r
*/\r
-static void _timerThread( TimerHandle_t xTimer );\r
+static void _timerCallback( TimerHandle_t xTimer );\r
\r
/* -------------- Convenience functions to create/initialize/destroy the task pool -------------- */\r
\r
*\r
*/\r
static IotTaskPoolError_t _scheduleInternal( _taskPool_t * const pTaskPool,\r
- _taskPoolJob_t * const pJob,\r
- uint32_t flags );\r
+ _taskPoolJob_t * const pJob );\r
\r
/**\r
* Matches a deferred job in the timer queue with its timer event wrapper.\r
{\r
TASKPOOL_FUNCTION_ENTRY( IOT_TASKPOOL_SUCCESS );\r
\r
- /* Parameter checking. */\r
- TASKPOOL_ON_ERROR_GOTO_CLEANUP( _performTaskPoolParameterValidation( pInfo ) );\r
+ /* At this time the task pool cannot be created before the scheduler has\r
+ started because the function attempts to block on synchronization\r
+ primitives (although I'm not sure why). */\r
+ configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_NOT_STARTED );\r
\r
- /* Create the system task pool pool. */\r
- TASKPOOL_SET_AND_GOTO_CLEANUP( _createTaskPool( pInfo, &_IotSystemTaskPool ) );\r
+ /* Guard against multiple attempts to create the system task pool in case\r
+ this function is called by more than one library initialization routine. */\r
+ if( _IotSystemTaskPool.running == false )\r
+ {\r
+ /* Parameter checking. */\r
+ TASKPOOL_ON_ERROR_GOTO_CLEANUP( _performTaskPoolParameterValidation( pInfo ) );\r
+\r
+        /* Create the system task pool. */\r
+ TASKPOOL_SET_AND_GOTO_CLEANUP( _createTaskPool( pInfo, &_IotSystemTaskPool ) );\r
+ }\r
\r
TASKPOOL_NO_FUNCTION_CLEANUP();\r
}\r
/* Destroying the task pool should be safe, and therefore we will grab the task pool lock.\r
* No worker thread or application thread should access any data structure\r
* in the task pool while the task pool is being destroyed. */\r
- TASKPOOL_ENTER_CRITICAL();\r
+ taskENTER_CRITICAL();\r
{\r
IotLink_t * pItemLink;\r
\r
/* (4) Set the exit condition. */\r
_signalShutdown( pTaskPool, activeThreads );\r
}\r
- TASKPOOL_EXIT_CRITICAL();\r
+ taskEXIT_CRITICAL();\r
\r
/* (5) Wait for all active threads to reach the end of their life-span. */\r
for( count = 0; count < activeThreads; ++count )\r
\r
/*-----------------------------------------------------------*/\r
\r
+/**\r
+ * @brief Creates a recyclable job on the system task pool.\r
+ *\r
+ * Convenience wrapper that forwards to IotTaskPool_CreateRecyclableJob(),\r
+ * targeting the library-owned system task pool instance.\r
+ *\r
+ * @param[in] userCallback The routine to run when the job executes.\r
+ * @param[in] pUserContext Opaque context passed through to userCallback.\r
+ * @param[out] pJob Receives the handle of the newly created job.\r
+ *\r
+ * @return The result of IotTaskPool_CreateRecyclableJob().\r
+ */\r
+IotTaskPoolError_t IotTaskPool_CreateRecyclableSystemJob( IotTaskPoolRoutine_t userCallback,\r
+                                                          void * pUserContext,\r
+                                                          IotTaskPoolJob_t * const pJob )\r
+{\r
+    return IotTaskPool_CreateRecyclableJob( &_IotSystemTaskPool, userCallback, pUserContext, pJob );\r
+}\r
+\r
+/*-----------------------------------------------------------*/\r
+\r
IotTaskPoolError_t IotTaskPool_CreateRecyclableJob( IotTaskPool_t taskPoolHandle,\r
IotTaskPoolRoutine_t userCallback,\r
void * pUserContext,\r
TASKPOOL_ON_NULL_ARG_GOTO_CLEANUP( userCallback );\r
TASKPOOL_ON_NULL_ARG_GOTO_CLEANUP( ppJob );\r
\r
- TASKPOOL_ENTER_CRITICAL();\r
+ taskENTER_CRITICAL();\r
{\r
/* Bail out early if this task pool is shutting down. */\r
pTempJob = _fetchOrAllocateJob( &pTaskPool->jobsCache );\r
}\r
- TASKPOOL_EXIT_CRITICAL();\r
+ taskEXIT_CRITICAL();\r
\r
if( pTempJob == NULL )\r
{\r
{\r
TASKPOOL_FUNCTION_ENTRY( IOT_TASKPOOL_SUCCESS );\r
\r
- _taskPool_t * pTaskPool = ( _taskPool_t * ) taskPoolHandle;\r
+ ( void ) taskPoolHandle;\r
+\r
_taskPoolJob_t * pJob = ( _taskPoolJob_t * ) pJobHandle;\r
\r
/* Parameter checking. */\r
TASKPOOL_ON_NULL_ARG_GOTO_CLEANUP( taskPoolHandle );\r
TASKPOOL_ON_NULL_ARG_GOTO_CLEANUP( pJob );\r
\r
- TASKPOOL_ENTER_CRITICAL();\r
+ taskENTER_CRITICAL();\r
{\r
IotTaskPool_Assert( IotLink_IsLinked( &pJob->link ) == false );\r
\r
_recycleJob( &pTaskPool->jobsCache, pJob );\r
}\r
- TASKPOOL_EXIT_CRITICAL();\r
+ taskEXIT_CRITICAL();\r
\r
TASKPOOL_NO_FUNCTION_CLEANUP();\r
}\r
\r
_taskPool_t * pTaskPool = ( _taskPool_t * ) taskPoolHandle;\r
\r
+ configASSERT( pTaskPool->running != false );\r
+\r
/* Parameter checking. */\r
TASKPOOL_ON_NULL_ARG_GOTO_CLEANUP( taskPoolHandle );\r
TASKPOOL_ON_NULL_ARG_GOTO_CLEANUP( pJob );\r
TASKPOOL_ON_ARG_ERROR_GOTO_CLEANUP( ( flags != 0UL ) && ( flags != IOT_TASKPOOL_JOB_HIGH_PRIORITY ) );\r
\r
- TASKPOOL_ENTER_CRITICAL();\r
+ taskENTER_CRITICAL(); //_RB_ Critical section is too long - does the whole thing need to be protected?\r
{\r
- _scheduleInternal( pTaskPool, pJob, flags );\r
+ _scheduleInternal( pTaskPool, pJob );\r
}\r
- TASKPOOL_EXIT_CRITICAL();\r
+ taskEXIT_CRITICAL();\r
\r
TASKPOOL_NO_FUNCTION_CLEANUP();\r
}\r
\r
/*-----------------------------------------------------------*/\r
\r
+/**\r
+ * @brief Schedules a job for execution on the system task pool.\r
+ *\r
+ * Convenience wrapper around IotTaskPool_Schedule() that supplies the\r
+ * library-owned system task pool instance as the target pool.\r
+ *\r
+ * @param[in] pJob The job to schedule.\r
+ * @param[in] flags Scheduling flags, forwarded unchanged to IotTaskPool_Schedule().\r
+ *\r
+ * @return The result of IotTaskPool_Schedule().\r
+ */\r
+IotTaskPoolError_t IotTaskPool_ScheduleSystemJob( IotTaskPoolJob_t pJob,\r
+                                                  uint32_t flags )\r
+{\r
+    IotTaskPoolError_t status;\r
+\r
+    /* Delegate to the generic scheduler against the system task pool. */\r
+    status = IotTaskPool_Schedule( &_IotSystemTaskPool, pJob, flags );\r
+\r
+    return status;\r
+}\r
+\r
+/*-----------------------------------------------------------*/\r
+\r
IotTaskPoolError_t IotTaskPool_ScheduleDeferred( IotTaskPool_t taskPoolHandle,\r
IotTaskPoolJob_t job,\r
uint32_t timeMs )\r
TASKPOOL_SET_AND_GOTO_CLEANUP( IotTaskPool_Schedule( pTaskPool, job, 0 ) );\r
}\r
\r
- TASKPOOL_ENTER_CRITICAL();\r
+ taskENTER_CRITICAL();\r
{\r
_taskPoolTimerEvent_t * pTimerEvent = IotTaskPool_MallocTimerEvent( sizeof( _taskPoolTimerEvent_t ) );\r
\r
if( pTimerEvent == NULL )\r
{\r
- TASKPOOL_EXIT_CRITICAL();\r
+ taskEXIT_CRITICAL();\r
\r
TASKPOOL_SET_AND_GOTO_CLEANUP( IOT_TASKPOOL_NO_MEMORY );\r
}\r
_rescheduleDeferredJobsTimer( pTaskPool->timer, pTimerEvent );\r
}\r
}\r
- TASKPOOL_EXIT_CRITICAL();\r
+ taskEXIT_CRITICAL();\r
\r
TASKPOOL_NO_FUNCTION_CLEANUP();\r
}\r
{\r
TASKPOOL_FUNCTION_ENTRY( IOT_TASKPOOL_SUCCESS );\r
\r
- _taskPool_t * pTaskPool = ( _taskPool_t * ) taskPoolHandle;\r
-\r
/* Parameter checking. */\r
TASKPOOL_ON_NULL_ARG_GOTO_CLEANUP( taskPoolHandle );\r
TASKPOOL_ON_NULL_ARG_GOTO_CLEANUP( job );\r
TASKPOOL_ON_NULL_ARG_GOTO_CLEANUP( pStatus );\r
*pStatus = IOT_TASKPOOL_STATUS_UNDEFINED;\r
\r
- TASKPOOL_ENTER_CRITICAL();\r
+ taskENTER_CRITICAL();\r
{\r
*pStatus = job->status;\r
}\r
- TASKPOOL_EXIT_CRITICAL();\r
+ taskEXIT_CRITICAL();\r
\r
TASKPOOL_NO_FUNCTION_CLEANUP();\r
}\r
*pStatus = IOT_TASKPOOL_STATUS_UNDEFINED;\r
}\r
\r
- TASKPOOL_ENTER_CRITICAL();\r
+ taskENTER_CRITICAL();\r
{\r
status = _tryCancelInternal( pTaskPool, job, pStatus );\r
}\r
- TASKPOOL_EXIT_CRITICAL();\r
+ taskEXIT_CRITICAL();\r
\r
TASKPOOL_NO_FUNCTION_CLEANUP();\r
}\r
TASKPOOL_FUNCTION_ENTRY( IOT_TASKPOOL_SUCCESS );\r
\r
uint32_t count;\r
- uint32_t threadsCreated = 0;\r
+ uint32_t threadsCreated;\r
\r
/* Check input values for consistency. */\r
TASKPOOL_ON_NULL_ARG_GOTO_CLEANUP( pTaskPool );\r
/* Initialize all internal data structure prior to creating all threads. */\r
TASKPOOL_ON_ERROR_GOTO_CLEANUP( _initTaskPoolControlStructures( pInfo, pTaskPool ) );\r
\r
- /* Create the timer mutex for a new connection. */\r
- pTaskPool->timer = xTimerCreate( NULL, portMAX_DELAY, pdFALSE, ( void * ) pTaskPool, _timerThread );\r
+ /* Create the timer for a new connection. */\r
+ pTaskPool->timer = xTimerCreate( NULL, portMAX_DELAY, pdFALSE, ( void * ) pTaskPool, _timerCallback );\r
\r
if( pTaskPool->timer == NULL )\r
{\r
TASKPOOL_SET_AND_GOTO_CLEANUP( IOT_TASKPOOL_NO_MEMORY );\r
}\r
\r
- /* The task pool will initialize the minimum number of threads reqeusted by the user upon start. */\r
+ /* The task pool will initialize the minimum number of threads requested by the user upon start. */\r
/* When a thread is created, it will signal a semaphore to signify that it is about to wait on incoming */\r
/* jobs. A thread can be woken up for exit or for new jobs only at that point in time. */\r
/* The exit condition is setting the maximum number of threads to 0. */\r
\r
/* Create the minimum number of threads specified by the user, and if one fails shutdown and return error. */\r
- for( ; threadsCreated < pInfo->minThreads; )\r
+ for( threadsCreated = 0; threadsCreated < pInfo->minThreads; )\r
{\r
TaskHandle_t task = NULL;\r
\r
/* Wait for threads to be ready to wait on the condition, so that threads are actually able to receive messages. */\r
for( count = 0; count < threadsCreated; ++count )\r
{\r
- xSemaphoreTake( pTaskPool->startStopSignal, portMAX_DELAY );\r
+ xSemaphoreTake( pTaskPool->startStopSignal, portMAX_DELAY ); /*_RB_ Is waiting necessary, and if so, is a semaphore necessary? */\r
}\r
\r
/* In case of failure, wait on the created threads to exit. */\r
/* Acquire the lock to check the exit condition, and release the lock if the exit condition is verified,\r
* or before waiting for incoming notifications.\r
*/\r
- TASKPOOL_ENTER_CRITICAL();\r
+ taskENTER_CRITICAL();\r
{\r
/* If the exit condition is verified, update the number of active threads and exit the loop. */\r
if( _IsShutdownStarted( pTaskPool ) )\r
/* Decrease the number of active threads. */\r
pTaskPool->activeThreads--;\r
\r
- TASKPOOL_EXIT_CRITICAL();\r
+ taskEXIT_CRITICAL();\r
\r
/* Signal that this worker is exiting. */\r
xSemaphoreGive( pTaskPool->startStopSignal );\r
userCallback = pJob->userCallback;\r
}\r
}\r
- TASKPOOL_EXIT_CRITICAL();\r
+ taskEXIT_CRITICAL();\r
\r
/* INNER LOOP: it controls the execution of jobs: the exit condition is the lack of a job to execute. */\r
while( pJob != NULL )\r
}\r
\r
/* Acquire the lock before updating the job status. */\r
- TASKPOOL_ENTER_CRITICAL();\r
+ taskENTER_CRITICAL();\r
{\r
/* Try and dequeue the next job in the dispatch queue. */\r
IotLink_t * pItem = NULL;\r
/* If there is no job left in the dispatch queue, update the worker status and leave. */\r
if( pItem == NULL )\r
{\r
- TASKPOOL_EXIT_CRITICAL();\r
+ taskEXIT_CRITICAL();\r
\r
/* Abandon the INNER LOOP. Execution will tranfer back to the OUTER LOOP condition. */\r
break;\r
\r
pJob->status = IOT_TASKPOOL_STATUS_COMPLETED;\r
}\r
- TASKPOOL_EXIT_CRITICAL();\r
+ taskEXIT_CRITICAL();\r
}\r
} while( running == true );\r
\r
/* ---------------------------------------------------------------------------------------------- */\r
\r
static IotTaskPoolError_t _scheduleInternal( _taskPool_t * const pTaskPool,\r
- _taskPoolJob_t * const pJob,\r
- uint32_t flags )\r
+ _taskPoolJob_t * const pJob )\r
{\r
TASKPOOL_FUNCTION_ENTRY( IOT_TASKPOOL_SUCCESS );\r
\r
\r
/*-----------------------------------------------------------*/\r
\r
-static void _timerThread( TimerHandle_t xTimer )\r
+static void _timerCallback( TimerHandle_t xTimer )\r
{\r
_taskPool_t * pTaskPool = pvTimerGetTimerID( xTimer );\r
\r
* If this mutex cannot be locked it means that another thread is manipulating the\r
* timeouts list, and will reset the timer to fire again, although it will be late.\r
*/\r
- TASKPOOL_ENTER_CRITICAL();\r
+ taskENTER_CRITICAL();\r
{\r
/* Check again for shutdown and bail out early in case. */\r
if( _IsShutdownStarted( pTaskPool ) )\r
{\r
- TASKPOOL_EXIT_CRITICAL();\r
+ taskEXIT_CRITICAL();\r
\r
/* Complete the shutdown sequence. */\r
_destroyTaskPool( pTaskPool );\r
IotLogDebug( "Scheduling job from timer event." );\r
\r
/* Queue the job associated with the received timer event. */\r
- ( void ) _scheduleInternal( pTaskPool, pTimerEvent->job, 0 );\r
+ ( void ) _scheduleInternal( pTaskPool, pTimerEvent->job );\r
\r
/* Free the timer event. */\r
IotTaskPool_FreeTimerEvent( pTimerEvent );\r
}\r
}\r
- TASKPOOL_EXIT_CRITICAL();\r
+ taskEXIT_CRITICAL();\r
}\r