#
# Module implementing queues
#
# multiprocessing/queues.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue']

import sys
import os
import threading
import collections
import time
import weakref
import errno

from queue import Empty, Full

import _multiprocessing

from . import connection
from . import context
_ForkingPickler = context.reduction.ForkingPickler

from .util import debug, info, Finalize, register_after_fork, is_exiting

#
# Queue type using a pipe, buffer and thread
#

class Queue(object):

    def __init__(self, maxsize=0, *, ctx):
        if maxsize <= 0:
            # Can raise ImportError (see issues #3770 and #23400)
            from .synchronize import SEM_VALUE_MAX as maxsize
        self._maxsize = maxsize
        self._reader, self._writer = connection.Pipe(duplex=False)
        self._rlock = ctx.Lock()
        self._opid = os.getpid()
        if sys.platform == 'win32':
            self._wlock = None
        else:
            self._wlock = ctx.Lock()
        self._sem = ctx.BoundedSemaphore(maxsize)
        # For use by concurrent.futures
        self._ignore_epipe = False

        self._after_fork()

        if sys.platform != 'win32':
            register_after_fork(self, Queue._after_fork)

    def __getstate__(self):
        context.assert_spawning(self)
        return (self._ignore_epipe, self._maxsize, self._reader, self._writer,
                self._rlock, self._wlock, self._sem, self._opid)

    def __setstate__(self, state):
        (self._ignore_epipe, self._maxsize, self._reader, self._writer,
         self._rlock, self._wlock, self._sem, self._opid) = state
        self._after_fork()

    def _after_fork(self):
        debug('Queue._after_fork()')
        self._notempty = threading.Condition(threading.Lock())
        self._buffer = collections.deque()
        self._thread = None
        self._jointhread = None
        self._joincancelled = False
        self._closed = False
        self._close = None
        self._send_bytes = self._writer.send_bytes
        self._recv_bytes = self._reader.recv_bytes
        self._poll = self._reader.poll

    def put(self, obj, block=True, timeout=None):
        assert not self._closed, "Queue {0!r} has been closed".format(self)
        if not self._sem.acquire(block, timeout):
            raise Full

        with self._notempty:
            if self._thread is None:
                self._start_thread()
            self._buffer.append(obj)
            self._notempty.notify()

    def get(self, block=True, timeout=None):
        if block and timeout is None:
            with self._rlock:
                res = self._recv_bytes()
            self._sem.release()
        else:
            if block:
                deadline = time.monotonic() + timeout
            if not self._rlock.acquire(block, timeout):
                raise Empty
            try:
                if block:
                    timeout = deadline - time.monotonic()
                    if not self._poll(timeout):
                        raise Empty
                elif not self._poll():
                    raise Empty
                res = self._recv_bytes()
                self._sem.release()
            finally:
                self._rlock.release()
        # unserialize the data after having released the lock
        return _ForkingPickler.loads(res)

    def qsize(self):
        # Raises NotImplementedError on Mac OSX because of broken sem_getvalue()
        return self._maxsize - self._sem._semlock._get_value()

    def empty(self):
        return not self._poll()

    def full(self):
        return self._sem._semlock._is_zero()

    def get_nowait(self):
        return self.get(False)

    def put_nowait(self, obj):
        return self.put(obj, False)

    def close(self):
        self._closed = True
        try:
            self._reader.close()
        finally:
            close = self._close
            if close:
                self._close = None
                close()

    def join_thread(self):
        debug('Queue.join_thread()')
        assert self._closed, "Queue {0!r} not closed".format(self)
        if self._jointhread:
            self._jointhread()

    def cancel_join_thread(self):
        debug('Queue.cancel_join_thread()')
        self._joincancelled = True
        try:
            self._jointhread.cancel()
        except AttributeError:
            pass

    def _start_thread(self):
        debug('Queue._start_thread()')

        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(
            target=Queue._feed,
            args=(self._buffer, self._notempty, self._send_bytes,
                  self._wlock, self._writer.close, self._ignore_epipe,
                  self._on_queue_feeder_error, self._sem),
            name='QueueFeederThread'
        )
        self._thread.daemon = True

        debug('doing self._thread.start()')
        self._thread.start()
        debug('... done self._thread.start()')

        if not self._joincancelled:
            self._jointhread = Finalize(
                self._thread, Queue._finalize_join,
                [weakref.ref(self._thread)],
                exitpriority=-5
            )

        # Send sentinel to the thread queue object when garbage collected
        self._close = Finalize(
            self, Queue._finalize_close,
            [self._buffer, self._notempty],
            exitpriority=10
        )

    @staticmethod
    def _finalize_join(twr):
        debug('joining queue thread')
        thread = twr()
        if thread is not None:
            thread.join()
            debug('... queue thread joined')
        else:
            debug('... queue thread already dead')

    @staticmethod
    def _finalize_close(buffer, notempty):
        debug('telling queue thread to quit')
        with notempty:
            buffer.append(_sentinel)
            notempty.notify()

    @staticmethod
    def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe,
              onerror, queue_sem):
        debug('starting thread to feed data to pipe')
        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None

        while 1:
            try:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()
                try:
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            debug('feeder thread got sentinel -- exiting')
                            close()
                            return

                        # serialize the data before acquiring the lock
                        obj = _ForkingPickler.dumps(obj)
                        if wacquire is None:
                            send_bytes(obj)
                        else:
                            wacquire()
                            try:
                                send_bytes(obj)
                            finally:
                                wrelease()
                except IndexError:
                    pass
            except Exception as e:
                if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
                    return
                # Since this runs in a daemon thread the resources it uses
                # may become unusable while the process is cleaning up.
                # We ignore errors which happen after the process has
                # started to clean up.
                if is_exiting():
                    info('error in queue thread: %s', e)
                    return
                else:
                    # Since the object has not been sent in the queue, we need
                    # to decrease the size of the queue. The error acts as
                    # if the object had been silently removed from the queue
                    # and this step is necessary to have a properly working
                    # queue.
                    queue_sem.release()
                    onerror(e, obj)

    @staticmethod
    def _on_queue_feeder_error(e, obj):
        """
        Private API hook called when feeding data in the background thread
        raises an exception. For overriding by concurrent.futures.
        """
        import traceback
        traceback.print_exc()


_sentinel = object()
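
# Example sketch: in application code this class is normally obtained through
# the public multiprocessing API (multiprocessing.Queue() or a context's
# .Queue() method) rather than constructed with ctx by hand.  The worker
# function, the 'spawn' context and the payload below are illustrative
# assumptions, not part of this module.
#
#   import multiprocessing as mp
#
#   def worker(q):
#       # Blocks until the parent puts an item, then unpickles and prints it.
#       print('received:', q.get())
#
#   if __name__ == '__main__':
#       ctx = mp.get_context('spawn')
#       q = ctx.Queue()                  # equivalent to Queue(maxsize=0, ctx=ctx)
#       p = ctx.Process(target=worker, args=(q,))
#       p.start()
#       q.put({'value': 42})             # pickled by the feeder thread, sent over the pipe
#       p.join()
#       q.close()                        # closes the reader and tells the feeder thread to quit
#       q.join_thread()                  # waits for the feeder thread to finish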

#
# A queue type which also supports join() and task_done() methods
#
# Note that if you do not call task_done() for each finished task then
# eventually the counter's semaphore may overflow causing Bad Things
# to happen.
#

class JoinableQueue(Queue):

    def __init__(self, maxsize=0, *, ctx):
        Queue.__init__(self, maxsize, ctx=ctx)
        self._unfinished_tasks = ctx.Semaphore(0)
        self._cond = ctx.Condition()

    def __getstate__(self):
        return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)

    def __setstate__(self, state):
        Queue.__setstate__(self, state[:-2])
        self._cond, self._unfinished_tasks = state[-2:]

    def put(self, obj, block=True, timeout=None):
        assert not self._closed, "Queue {0!r} is closed".format(self)
        if not self._sem.acquire(block, timeout):
            raise Full

        with self._notempty, self._cond:
            if self._thread is None:
                self._start_thread()
            self._buffer.append(obj)
            self._unfinished_tasks.release()
            self._notempty.notify()

    def task_done(self):
        with self._cond:
            if not self._unfinished_tasks.acquire(False):
                raise ValueError('task_done() called too many times')
            if self._unfinished_tasks._semlock._is_zero():
                self._cond.notify_all()

    def join(self):
        with self._cond:
            if not self._unfinished_tasks._semlock._is_zero():
                self._cond.wait()
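
# Example sketch: JoinableQueue pairs each successful get() with a task_done()
# call so that a producer can block in join() until every item it put() has
# been processed.  The consumer function, the 'spawn' context and the None
# shutdown sentinel below are illustrative assumptions.
#
#   import multiprocessing as mp
#
#   def consumer(q):
#       while True:
#           item = q.get()
#           if item is None:             # agreed-upon shutdown sentinel
#               q.task_done()
#               break
#           # ... process item ...
#           q.task_done()                # exactly one task_done() per finished item
#
#   if __name__ == '__main__':
#       ctx = mp.get_context('spawn')
#       q = ctx.JoinableQueue()
#       p = ctx.Process(target=consumer, args=(q,))
#       p.start()
#       for item in range(3):
#           q.put(item)
#       q.put(None)
#       q.join()                         # returns once each put() has a matching task_done()
#       p.join()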

#
# Simplified Queue type -- really just a locked pipe
#

class SimpleQueue(object):

    def __init__(self, *, ctx):
        self._reader, self._writer = connection.Pipe(duplex=False)
        self._rlock = ctx.Lock()
        self._poll = self._reader.poll
        if sys.platform == 'win32':
            self._wlock = None
        else:
            self._wlock = ctx.Lock()

    def empty(self):
        return not self._poll()

    def __getstate__(self):
        context.assert_spawning(self)
        return (self._reader, self._writer, self._rlock, self._wlock)

    def __setstate__(self, state):
        (self._reader, self._writer, self._rlock, self._wlock) = state
        self._poll = self._reader.poll

    def get(self):
        with self._rlock:
            res = self._reader.recv_bytes()
        # unserialize the data after having released the lock
        return _ForkingPickler.loads(res)

    def put(self, obj):
        # serialize the data before acquiring the lock
        obj = _ForkingPickler.dumps(obj)
        if self._wlock is None:
            # writes to a message oriented win32 pipe are atomic
            self._writer.send_bytes(obj)
        else:
            with self._wlock:
                self._writer.send_bytes(obj)
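
# Example sketch: SimpleQueue has no buffer, feeder thread or maxsize; put()
# pickles the object and writes it to the pipe under the write lock, and get()
# blocks directly on the pipe.  The 'spawn' context and the doubling worker
# below are illustrative assumptions.
#
#   import multiprocessing as mp
#
#   def doubler(q_in, q_out):
#       q_out.put(q_in.get() * 2)        # read one item, send back twice its value
#
#   if __name__ == '__main__':
#       ctx = mp.get_context('spawn')
#       q_in, q_out = ctx.SimpleQueue(), ctx.SimpleQueue()
#       p = ctx.Process(target=doubler, args=(q_in, q_out))
#       p.start()
#       q_in.put(21)
#       print(q_out.get())               # prints 42
#       p.join()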