Bug Summary

File: cppu/source/threadpool/threadpool.cxx
Location: line 348, column 25
Description: Called C++ object pointer is null
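
The defect is in ThreadPool::enter() (source lines 344-348 below): pQueue is copied from the synchronous slot of m_mapQueue and then dereferenced unconditionally. The preceding OSL_ASSERT( pQueue ) only logs a warning, so when no synchronous JobQueue has been registered for the calling thread id, pQueue->enter( nDisposeId ) dereferences a null pointer. The sketch below is a minimal illustration of the missing null check, reusing the names from the listing; it is not the upstream fix.

    // Hypothetical guard around the dereference reported at line 348.
    void *pReturn = 0;
    if( pQueue )
    {
        pReturn = pQueue->enter( nDisposeId );

        if( pQueue->isCallstackEmpty() && revokeQueue( aThreadId, sal_False ) )
        {
            // remove queue
            delete pQueue;
        }
    }
    return pReturn;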

Annotated Source Code

1/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
2/*
3 * This file is part of the LibreOffice project.
4 *
5 * This Source Code Form is subject to the terms of the Mozilla Public
6 * License, v. 2.0. If a copy of the MPL was not distributed with this
7 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
8 *
9 * This file incorporates work covered by the following license notice:
10 *
11 * Licensed to the Apache Software Foundation (ASF) under one or more
12 * contributor license agreements. See the NOTICE file distributed
13 * with this work for additional information regarding copyright
14 * ownership. The ASF licenses this file to you under the Apache
15 * License, Version 2.0 (the "License"); you may not use this file
16 * except in compliance with the License. You may obtain a copy of
17 * the License at http://www.apache.org/licenses/LICENSE-2.0 .
18 */
19
20#include "sal/config.h"
21
22#include <boost/unordered_map.hpp>
23#include <cassert>
24#include <stdio.h>
25
26#include <osl/diagnose.h>
27#include <osl/mutex.hxx>
28#include <osl/thread.h>
29#include <rtl/instance.hxx>
30
31#include <uno/threadpool.h>
32
33#include "threadpool.hxx"
34#include "thread.hxx"
35
36using namespace ::std;
37using namespace ::osl;
38
39namespace cppu_threadpool
40{
41 struct theDisposedCallerAdmin :
42 public rtl::StaticWithInit< DisposedCallerAdminHolder, theDisposedCallerAdmin >
43 {
44 DisposedCallerAdminHolder operator () () {
45 return DisposedCallerAdminHolder(new DisposedCallerAdmin());
46 }
47 };
48
49 DisposedCallerAdminHolder DisposedCallerAdmin::getInstance()
50 {
51 return theDisposedCallerAdmin::get();
52 }
53
54 DisposedCallerAdmin::~DisposedCallerAdmin()
55 {
56#if OSL_DEBUG_LEVEL > 1
57 if( !m_lst.empty() )
58 {
59 printf( "DisposedCallerList : %lu left\n" , static_cast<unsigned long>(m_lst.size( )));
60 }
61#endif
62 }
63
64 void DisposedCallerAdmin::dispose( sal_Int64 nDisposeId )
65 {
66 MutexGuard guard( m_mutex );
67 m_lst.push_back( nDisposeId );
68 }
69
70 void DisposedCallerAdmin::destroy( sal_Int64 nDisposeId )
71 {
72 MutexGuard guard( m_mutex );
73 for( DisposedCallerList::iterator ii = m_lst.begin() ;
74 ii != m_lst.end() ;
75 ++ ii )
76 {
77 if( (*ii) == nDisposeId )
78 {
79 m_lst.erase( ii );
80 break;
81 }
82 }
83 }
84
85 sal_Bool DisposedCallerAdmin::isDisposed( sal_Int64 nDisposeId )
86 {
87 MutexGuard guard( m_mutex );
88 for( DisposedCallerList::iterator ii = m_lst.begin() ;
89 ii != m_lst.end() ;
90 ++ ii )
91 {
92 if( (*ii) == nDisposeId )
93 {
94 return sal_True;
95 }
96 }
97 return sal_False;
98 }
99
100
101 //-------------------------------------------------------------------------------
102
103 ThreadPool::ThreadPool()
104 {
105 m_DisposedCallerAdmin = DisposedCallerAdmin::getInstance();
106 }
107
108 ThreadPool::~ThreadPool()
109 {
110#if OSL_DEBUG_LEVEL > 1
111 if( m_mapQueue.size() )
112 {
113 printf( "ThreadIdHashMap : %lu left\n" , static_cast<unsigned long>(m_mapQueue.size()) );
114 }
115#endif
116 }
117
118 void ThreadPool::dispose( sal_Int64 nDisposeId )
119 {
120 m_DisposedCallerAdmin->dispose( nDisposeId );
121
122 MutexGuard guard( m_mutex );
123 for( ThreadIdHashMap::iterator ii = m_mapQueue.begin() ;
124 ii != m_mapQueue.end();
125 ++ii)
126 {
127 if( (*ii).second.first )
128 {
129 (*ii).second.first->dispose( nDisposeId );
130 }
131 if( (*ii).second.second )
132 {
133 (*ii).second.second->dispose( nDisposeId );
134 }
135 }
136 }
137
138 void ThreadPool::destroy( sal_Int64 nDisposeId )
139 {
140 m_DisposedCallerAdmin->destroy( nDisposeId );
141 }
142
143 /******************
144 * This method lets the thread wait a certain amount of time. If within this timespan
145 * a new request comes in, this thread is reused. This is done only to improve performance,
146 * it is not required for threadpool functionality.
147 ******************/
148 void ThreadPool::waitInPool( rtl::Reference< ORequestThread > const & pThread )
149 {
150 struct WaitingThread waitingThread;
151 waitingThread.condition = osl_createCondition();
152 waitingThread.thread = pThread;
153 {
154 MutexGuard guard( m_mutexWaitingThreadList );
155 m_lstThreads.push_front( &waitingThread );
156 }
157
158 // let the thread wait 2 seconds
159 TimeValue time = { 2 , 0 };
160 osl_waitCondition( waitingThread.condition , &time );
161
162 {
163 MutexGuard guard ( m_mutexWaitingThreadList );
164 if( waitingThread.thread.is() )
165 {
166 // thread wasn't reused, remove it from the list
167 WaitingThreadList::iterator ii = find(
168 m_lstThreads.begin(), m_lstThreads.end(), &waitingThread );
169 OSL_ASSERT( ii != m_lstThreads.end() );
170 m_lstThreads.erase( ii );
171 }
172 }
173
174 osl_destroyCondition( waitingThread.condition );
175 }
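
The comment above waitInPool() describes the only purpose of this function: a worker that has drained its queue parks itself on a condition for two seconds, and createThread() may hand it a new task during that window instead of spawning a fresh thread. A self-contained sketch of the same timed-wait-for-reuse idea, written with standard C++ primitives rather than the osl C API used here (purely illustrative):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    // 'reused' plays the role of waitingThread.thread being taken over by
    // createThread(); the two-second timeout mirrors the TimeValue above.
    bool waitForReuse( std::mutex &m, std::condition_variable &cv, bool &reused )
    {
        std::unique_lock<std::mutex> lock( m );
        cv.wait_for( lock, std::chrono::seconds( 2 ),
                     [&reused] { return reused; } );
        return reused;   // false: grace period expired, the thread may terminate
    }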
176
177 void ThreadPool::joinWorkers()
178 {
179 {
180 MutexGuard guard( m_mutexWaitingThreadList );
181 for( WaitingThreadList::iterator ii = m_lstThreads.begin() ;
182 ii != m_lstThreads.end() ;
183 ++ ii )
184 {
185 // wake the threads up
186 osl_setCondition( (*ii)->condition );
187 }
188 }
189 m_aThreadAdmin.join();
190 }
191
192 void ThreadPool::createThread( JobQueue *pQueue ,
193 const ByteSequence &aThreadId,
194 sal_Bool bAsynchron )
195 {
196 sal_Bool bCreate = sal_True;
197 {
198 // Can a thread be reused ?
199 MutexGuard guard( m_mutexWaitingThreadList );
200 if( ! m_lstThreads.empty() )
201 {
202 // inform the thread and let it go
203 struct WaitingThread *pWaitingThread = m_lstThreads.back();
204 pWaitingThread->thread->setTask( pQueue , aThreadId , bAsynchron );
205 pWaitingThread->thread = 0;
206
207 // remove from list
208 m_lstThreads.pop_back();
209
210 // let the thread go
211 osl_setCondition( pWaitingThread->condition );
212 bCreate = sal_False;
213 }
214 }
215
216 if( bCreate )
217 {
218 rtl::Reference< ORequestThread > pThread(
219 new ORequestThread( this, pQueue , aThreadId, bAsynchron) );
220 pThread->launch();
221 }
222 }
223
224 sal_Bool ThreadPool::revokeQueue( const ByteSequence &aThreadId, sal_Bool bAsynchron )
225 {
226 MutexGuard guard( m_mutex );
227
228 ThreadIdHashMap::iterator ii = m_mapQueue.find( aThreadId );
229 OSL_ASSERT( ii != m_mapQueue.end() );
230
231 if( bAsynchron )
232 {
233 if( ! (*ii).second.second->isEmpty() )
234 {
235 // another thread has put something into the queue
236 return sal_False;
237 }
238
239 (*ii).second.second = 0;
240 if( (*ii).second.first )
241 {
242 // all oneway requests have been processed, now
243 // synchronous requests may go on
244 (*ii).second.first->resume();
245 }
246 }
247 else
248 {
249 if( ! (*ii).second.first->isEmpty() )
250 {
251 // another thread has put something into the queue
252 return sal_False;
253 }
254 (*ii).second.first = 0;
255 }
256
257 if( 0 == (*ii).second.first && 0 == (*ii).second.second )
258 {
259 m_mapQueue.erase( ii );
260 }
261
262 return sal_True;
263 }
264
265
266 void ThreadPool::addJob(
267 const ByteSequence &aThreadId ,
268 sal_Bool bAsynchron,
269 void *pThreadSpecificData,
270 RequestFun * doRequest )
271 {
272 sal_Bool bCreateThread = sal_False;
273 JobQueue *pQueue = 0;
274 {
275 MutexGuard guard( m_mutex );
276
277 ThreadIdHashMap::iterator ii = m_mapQueue.find( aThreadId );
278
279 if( ii == m_mapQueue.end() )
280 {
281 m_mapQueue[ aThreadId ] = pair < JobQueue * , JobQueue * > ( (JobQueue *)0 , (JobQueue*)0 );
282 ii = m_mapQueue.find( aThreadId );
283 OSL_ASSERT( ii != m_mapQueue.end() );
284 }
285
286 if( bAsynchron )
287 {
288 if( ! (*ii).second.second )
289 {
290 (*ii).second.second = new JobQueue();
291 bCreateThread = sal_True;
292 }
293 pQueue = (*ii).second.second;
294 }
295 else
296 {
297 if( ! (*ii).second.first )
298 {
299 (*ii).second.first = new JobQueue();
300 bCreateThread = sal_True;
301 }
302 pQueue = (*ii).second.first;
303
304 if( (*ii).second.second && ( (*ii).second.second->isBusy() ) )
305 {
306 pQueue->suspend();
307 }
308 }
309 pQueue->add( pThreadSpecificData , doRequest );
310 }
311
312 if( bCreateThread )
313 {
314 createThread( pQueue , aThreadId , bAsynchron);
315 }
316 }
317
318 void ThreadPool::prepare( const ByteSequence &aThreadId )
319 {
320 MutexGuard guard( m_mutex );
321
322 ThreadIdHashMap::iterator ii = m_mapQueue.find( aThreadId );
323
324 if( ii == m_mapQueue.end() )
325 {
326 JobQueue *p = new JobQueue();
327 m_mapQueue[ aThreadId ] = pair< JobQueue * , JobQueue * > ( p , (JobQueue*)0 );
328 }
329 else if( 0 == (*ii).second.first )
330 {
331 (*ii).second.first = new JobQueue();
332 }
333 }
334
335 void * ThreadPool::enter( const ByteSequence & aThreadId , sal_Int64 nDisposeId )
336 {
337 JobQueue *pQueue = 0;
338 {
339 MutexGuard guard( m_mutex );
340
341 ThreadIdHashMap::iterator ii = m_mapQueue.find( aThreadId );
342
343 OSL_ASSERT( ii != m_mapQueue.end() );
344 pQueue = (*ii).second.first;
            (2) Value assigned to 'pQueue'
345 }
346
347 OSL_ASSERT( pQueue );
            (3) Within the expansion of the macro 'OSL_ASSERT': Assuming pointer value is null
348 void *pReturn = pQueue->enter( nDisposeId );
            (4) Called C++ object pointer is null
349
350 if( pQueue->isCallstackEmpty() )
351 {
352 if( revokeQueue( aThreadId , sal_False ) )
353 {
354 // remove queue
355 delete pQueue;
356 }
357 }
358 return pReturn;
359 }
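
As the analyzer path notes show, the warning assumes (*ii).second.first is null when enter() runs. That slot is only filled by prepare() or by a synchronous addJob(); if the map entry for aThreadId holds nothing but an asynchronous (oneway) queue, pQueue is null and line 348 crashes. Below is a hedged sketch of a call order that could leave the synchronous slot empty, using only the C API declared further down in this file; hPool, aTid, pJob and doRequest are placeholders.

    // Hypothetical trigger, for illustration only.
    uno_ThreadPool hPool = uno_threadpool_create();

    // Some thread queues a oneway request for the thread id 'aTid':
    // addJob() then creates only the asynchronous JobQueue,
    // leaving (*ii).second.first == 0.
    uno_threadpool_putJob( hPool, aTid, pJob, doRequest, sal_True );

    // If the thread identified by 'aTid' now calls uno_threadpool_enter()
    // without a prior uno_threadpool_attach() (which would have run
    // prepare() and created the synchronous queue), ThreadPool::enter()
    // dereferences the null synchronous slot at line 348.
    void *pReturn = 0;
    uno_threadpool_enter( hPool, &pReturn );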
360}
361
362// All uno_ThreadPool handles in g_pThreadpoolHashSet with overlapping life
363// spans share one ThreadPool instance. When g_pThreadpoolHashSet becomes empty
364// (within the last uno_threadpool_destroy) all worker threads spawned by that
365// ThreadPool instance are joined (which implies that uno_threadpool_destroy
366// must never be called from a worker thread); afterwards, the next call to
367// uno_threadpool_create (if any) will lead to a new ThreadPool instance.
368
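
The comment above pins down the handle/instance relationship the functions below implement: any number of uno_ThreadPool handles with overlapping lifetimes share one ThreadPool, and only the destruction of the last handle joins the worker threads. A minimal sketch of that lifecycle (illustrative only):

    // Hypothetical handle lifecycle.
    uno_ThreadPool hA = uno_threadpool_create();
    uno_ThreadPool hB = uno_threadpool_create();  // overlaps hA -> same ThreadPool

    uno_threadpool_destroy( hA );   // ThreadPool stays alive, hB still open
    uno_threadpool_destroy( hB );   // last handle: worker threads are joined here,
                                    // so this must not run on a worker thread

    uno_ThreadPool hC = uno_threadpool_create();  // creates a fresh ThreadPool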
369using namespace cppu_threadpool;
370
371struct uno_ThreadPool_Equal
372{
373 sal_Bool operator () ( const uno_ThreadPool &a , const uno_ThreadPool &b ) const
374 {
375 return a == b;
376 }
377};
378
379struct uno_ThreadPool_Hash
380{
381 sal_Size operator () ( const uno_ThreadPool &a ) const
382 {
383 return (sal_Size) a;
384 }
385};
386
387
388
389typedef ::boost::unordered_map< uno_ThreadPool, ThreadPoolHolder, uno_ThreadPool_Hash, uno_ThreadPool_Equal > ThreadpoolHashSet;
390
391static ThreadpoolHashSet *g_pThreadpoolHashSet;
392
393struct _uno_ThreadPool
394{
395 sal_Int32 dummy;
396};
397
398namespace {
399
400ThreadPoolHolder getThreadPool( uno_ThreadPool hPool )
401{
402 MutexGuard guard( Mutex::getGlobalMutex() );
403 assert( g_pThreadpoolHashSet != 0 );
404 ThreadpoolHashSet::iterator i( g_pThreadpoolHashSet->find(hPool) );
405 assert( i != g_pThreadpoolHashSet->end() );
406 return i->second;
407}
408
409}
410
411extern "C" uno_ThreadPool SAL_CALL
412uno_threadpool_create() SAL_THROW_EXTERN_C()
413{
414 MutexGuard guard( Mutex::getGlobalMutex() );
415 ThreadPoolHolder p;
416 if( ! g_pThreadpoolHashSet )
417 {
418 g_pThreadpoolHashSet = new ThreadpoolHashSet();
419 p = new ThreadPool;
420 }
421 else
422 {
423 assert( !g_pThreadpoolHashSet->empty() );
424 p = g_pThreadpoolHashSet->begin()->second;
425 }
426
427 // Just ensure that the handle is unique in the process (via heap)
428 uno_ThreadPool h = new struct _uno_ThreadPool;
429 g_pThreadpoolHashSet->insert( ThreadpoolHashSet::value_type(h, p) );
430 return h;
431}
432
433extern "C" void SAL_CALL
434uno_threadpool_attach( uno_ThreadPool hPool ) SAL_THROW_EXTERN_C()
435{
436 sal_Sequence *pThreadId = 0;
437 uno_getIdOfCurrentThread( &pThreadId );
438 getThreadPool( hPool )->prepare( pThreadId );
439 rtl_byte_sequence_release( pThreadId );
440 uno_releaseIdFromCurrentThread();
441}
442
443extern "C" void SAL_CALL
444uno_threadpool_enter( uno_ThreadPool hPool , void **ppJob )
445 SAL_THROW_EXTERN_C()
446{
447 sal_Sequence *pThreadId = 0;
448 uno_getIdOfCurrentThread( &pThreadId );
449 *ppJob =
450 getThreadPool( hPool )->enter(
            (1) Calling 'enter'
451 pThreadId,
452 sal::static_int_cast< sal_Int64 >(
453 reinterpret_cast< sal_IntPtr >(hPool)) );
454 rtl_byte_sequence_release( pThreadId );
455 uno_releaseIdFromCurrentThread();
456}
457
458extern "C" void SAL_CALL
459uno_threadpool_detach(SAL_UNUSED_PARAMETER uno_ThreadPool) SAL_THROW_EXTERN_C()
460{
461 // we might do some tidying up here in case a thread called attach but never detach
462}
463
464extern "C" void SAL_CALL
465uno_threadpool_putJob(
466 uno_ThreadPool hPool,
467 sal_Sequence *pThreadId,
468 void *pJob,
469 void ( SAL_CALL * doRequest ) ( void *pThreadSpecificData ),
470 sal_Bool bIsOneway ) SAL_THROW_EXTERN_C()
471{
472 getThreadPool(hPool)->addJob( pThreadId, bIsOneway, pJob ,doRequest );
473}
474
475extern "C" void SAL_CALL
476uno_threadpool_dispose( uno_ThreadPool hPool ) SAL_THROW_EXTERN_C()
477{
478 getThreadPool(hPool)->dispose(
479 sal::static_int_cast< sal_Int64 >(
480 reinterpret_cast< sal_IntPtr >(hPool)) );
481}
482
483extern "C" void SAL_CALL
484uno_threadpool_destroy( uno_ThreadPool hPool ) SAL_THROW_EXTERN_C()
485{
486 ThreadPoolHolder p( getThreadPool(hPool) );
487 p->destroy(
488 sal::static_int_cast< sal_Int64 >(
489 reinterpret_cast< sal_IntPtr >(hPool)) );
490
491 bool empty;
492 {
493 OSL_ASSERT( g_pThreadpoolHashSet );
494
495 MutexGuard guard( Mutex::getGlobalMutex() );
496
497 ThreadpoolHashSet::iterator ii = g_pThreadpoolHashSet->find( hPool );
498 OSL_ASSERT( ii != g_pThreadpoolHashSet->end() );
499 g_pThreadpoolHashSet->erase( ii );
500 delete hPool;
501
502 empty = g_pThreadpoolHashSet->empty();
503 if( empty )
504 {
505 delete g_pThreadpoolHashSet;
506 g_pThreadpoolHashSet = 0;
507 }
508 }
509
510 if( empty )
511 {
512 p->joinWorkers();
513 }
514}
515
516/* vim:set shiftwidth=4 softtabstop=4 expandtab: */