@@ -12,18 +12,18 @@
 import logging
 import multiprocessing
 import socket
-import threading
+
 from multiprocessing import connection
 from multiprocessing.reduction import send_handle
 from typing import List, Optional, Type
 
 from .acceptor import Acceptor
 from .work import Work
 
-from ..event import EventQueue, EventDispatcher
+from ..event import EventQueue
+
 from ...common.flag import flags
-from ...common.constants import DEFAULT_BACKLOG, DEFAULT_ENABLE_EVENTS
-from ...common.constants import DEFAULT_IPV6_HOSTNAME, DEFAULT_NUM_WORKERS, DEFAULT_PORT
+from ...common.constants import DEFAULT_BACKLOG, DEFAULT_IPV6_HOSTNAME, DEFAULT_NUM_WORKERS, DEFAULT_PORT
 
 logger = logging.getLogger(__name__)
 
@@ -37,14 +37,6 @@
     default=DEFAULT_BACKLOG,
     help='Default: 100. Maximum number of pending connections to proxy server')
 
-flags.add_argument(
-    '--enable-events',
-    action='store_true',
-    default=DEFAULT_ENABLE_EVENTS,
-    help='Default: False. Enables core to dispatch lifecycle events. '
-    'Plugins can be used to subscribe for core events.'
-)
-
 flags.add_argument(
     '--hostname',
     type=str,
@@ -79,31 +71,16 @@ class AcceptorPool:
         pool.shutdown()
 
     `work_klass` must implement `work.Work` class.
-
-    Optionally, AcceptorPool also initialize a global event queue.
-    It is a multiprocess safe queue which can be used to build pubsub patterns
-    for message sharing or signaling.
-
-    TODO(abhinavsingh): Decouple event queue setup & teardown into its own class.
     """
 
     def __init__(self, flags: argparse.Namespace,
-                 work_klass: Type[Work]) -> None:
+                 work_klass: Type[Work], event_queue: Optional[EventQueue] = None) -> None:
         self.flags = flags
         self.socket: Optional[socket.socket] = None
         self.acceptors: List[Acceptor] = []
         self.work_queues: List[connection.Connection] = []
         self.work_klass = work_klass
-
-        self.event_queue: Optional[EventQueue] = None
-        self.event_dispatcher: Optional[EventDispatcher] = None
-        self.event_dispatcher_thread: Optional[threading.Thread] = None
-        self.event_dispatcher_shutdown: Optional[threading.Event] = None
-        self.manager: Optional[multiprocessing.managers.SyncManager] = None
-
-        if self.flags.enable_events:
-            self.manager = multiprocessing.Manager()
-            self.event_queue = EventQueue(self.manager.Queue())
+        self.event_queue: Optional[EventQueue] = event_queue
 
     def listen(self) -> None:
         self.socket = socket.socket(self.flags.family, socket.SOCK_STREAM)
@@ -137,42 +114,17 @@ def start_workers(self) -> None:
             self.work_queues.append(work_queue[0])
         logger.info('Started %d workers' % self.flags.num_workers)
 
-    def start_event_dispatcher(self) -> None:
-        self.event_dispatcher_shutdown = threading.Event()
-        assert self.event_dispatcher_shutdown
-        assert self.event_queue
-        self.event_dispatcher = EventDispatcher(
-            shutdown=self.event_dispatcher_shutdown,
-            event_queue=self.event_queue
-        )
-        self.event_dispatcher_thread = threading.Thread(
-            target=self.event_dispatcher.run
-        )
-        self.event_dispatcher_thread.start()
-        logger.debug('Thread ID: %d', self.event_dispatcher_thread.ident)
-
     def shutdown(self) -> None:
         logger.info('Shutting down %d workers' % self.flags.num_workers)
         for acceptor in self.acceptors:
             acceptor.running.set()
-        if self.flags.enable_events:
-            assert self.event_dispatcher_shutdown
-            assert self.event_dispatcher_thread
-            self.event_dispatcher_shutdown.set()
-            self.event_dispatcher_thread.join()
-            logger.debug(
-                'Shutdown of global event dispatcher thread %d successful',
-                self.event_dispatcher_thread.ident)
         for acceptor in self.acceptors:
             acceptor.join()
         logger.debug('Acceptors shutdown')
 
     def setup(self) -> None:
         """Listen on port, setup workers and pass server socket to workers."""
         self.listen()
-        if self.flags.enable_events:
-            logger.info('Core Event enabled')
-            self.start_event_dispatcher()
         self.start_workers()
         # Send server socket to all acceptor processes.
         assert self.socket is not None
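With this change, AcceptorPool no longer owns the event machinery: it only stores an optional `event_queue` supplied by the caller, while the `--enable-events` flag, the internal `EventDispatcher` thread and the `multiprocessing.Manager` all move out of this class. A caller that still wants lifecycle events therefore has to build that plumbing itself. Below is a minimal sketch of that wiring, reassembled from the code removed in this diff; the import paths and the `flags` and `WorkKlass` placeholders are assumptions for illustration, not part of the change.

# Sketch only: caller-side ownership of the event queue and dispatcher,
# assembled from the logic removed from AcceptorPool above.
import threading
import multiprocessing

# Assumed import paths; adjust to the actual package layout.
# from proxy.core.acceptor import AcceptorPool
# from proxy.core.event import EventQueue, EventDispatcher

manager = multiprocessing.Manager()
event_queue = EventQueue(manager.Queue())   # multiprocess-safe queue

dispatcher_shutdown = threading.Event()
dispatcher = EventDispatcher(
    shutdown=dispatcher_shutdown,
    event_queue=event_queue,
)
dispatcher_thread = threading.Thread(target=dispatcher.run)
dispatcher_thread.start()

# AcceptorPool now only receives the queue; `flags` and `WorkKlass` are
# placeholders for the caller's parsed flags and Work subclass.
pool = AcceptorPool(flags=flags, work_klass=WorkKlass, event_queue=event_queue)
try:
    pool.setup()
    # ... serve until interrupted ...
finally:
    pool.shutdown()
    dispatcher_shutdown.set()
    dispatcher_thread.join()
    manager.shutdown()

Passing the queue in keeps AcceptorPool focused on accepting connections, and the process that creates the queue also decides when the dispatcher thread and manager are torn down.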