class Acceptor(multiprocessing.Process):
-    """Socket server acceptor process.
+    """Work acceptor process.

-    Accepts a server socket fd over `work_queue` and start listening for client
-    connections over the passed server socket. By default, it spawns a separate thread
-    to handle each client request.
+    On start-up, `Acceptor` accepts a file descriptor which will be used to
+    accept new work. The file descriptor is received over a `work_queue`, which is
+    closed immediately after the descriptor has been received.

-    However, if `--threadless` option is enabled, Acceptor process will also pre-spawns a `Threadless`
-    process at startup. Accepted client connections are then passed to the `Threadless` process
-    which internally uses asyncio event loop to handle client connections.
+    `Acceptor` then goes on to listen for new work over the received server socket.
+    By default, `Acceptor` spawns a new thread to handle each unit of work.

-    TODO(abhinavsingh): Instead of starting `Threadless` process, can we work with a `Threadless` thread?
-    What are the performance implications of sharing fds between threads vs processes? How much performance
-    degradation happen when processes are running on separate CPU cores?
+    However, when the `--threadless` option is enabled, the `Acceptor` process also pre-spawns a
+    `Threadless` process during start-up. Accepted work is then passed to this `Threadless` process.
+    The `Acceptor` process shares accepted work with its `Threadless` process over a dedicated pipe.
+
+    TODO(abhinavsingh): Open questions:
+    1) Instead of starting a `Threadless` process, can we work with a `Threadless` thread?
+    2) What are the performance implications of sharing fds between threads vs processes?
+    3) How much performance degradation happens when acceptor and threadless processes are
+       running on separate CPU cores?
+    4) Can we ensure both acceptor and threadless processes are pinned to the same CPU core?

    """

    def __init__(
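The file-descriptor handoff described in the new docstring relies on the standard-library fd-passing helpers in `multiprocessing.reduction`. Below is a minimal sketch of what the parent-side (pool) half of that handoff could look like; `AcceptorPool` is not part of this diff, so the function name, address, and port here are illustrative assumptions, not the project's actual code.

    import socket
    from multiprocessing import connection
    from multiprocessing.reduction import send_handle

    def hand_off_listener(pool_end: connection.Connection, acceptor_pid: int) -> None:
        # Listening socket owned by the parent (pool) process; the address
        # and port are illustrative only.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(('127.0.0.1', 8899))
        sock.listen(100)
        # `pool_end` is the parent's end of a multiprocessing.Pipe() whose
        # other end was passed to the already-started Acceptor as `work_queue`.
        # Duplicate the listening fd into that process, then close this end.
        send_handle(pool_end, sock.fileno(), acceptor_pid)
        pool_end.close()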
@@ -67,18 +73,26 @@ def __init__(
        event_queue: Optional[EventQueue] = None,
    ) -> None:
        super().__init__()
+        self.flags = flags
+        # Lock shared by all acceptor processes
+        # to avoid concurrent accept over server socket
+        self.lock = lock
+        # Index assigned by `AcceptorPool`
        self.idd = idd
+        # Queue over which server socket fd is received on start-up
        self.work_queue: connection.Connection = work_queue
-        self.flags = flags
+        # Worker class
        self.work_klass = work_klass
-        self.lock = lock
+        # Eventing core queue
        self.event_queue = event_queue
-
+        # Selector & threadless states
        self.running = multiprocessing.Event()
        self.selector: Optional[selectors.DefaultSelector] = None
-        self.sock: Optional[socket.socket] = None
        self.threadless_process: Optional[Threadless] = None
        self.threadless_client_queue: Optional[connection.Connection] = None
+        # File descriptor used to accept new work
+        # Currently, a socket fd is assumed.
+        self.sock: Optional[socket.socket] = None

    def start_threadless_process(self) -> None:
        pipe = multiprocessing.Pipe()
@@ -99,31 +113,30 @@ def shutdown_threadless_process(self) -> None:
        self.threadless_process.join()
        self.threadless_client_queue.close()

-    def start_work(self, conn: socket.socket, addr: Tuple[str, int]) -> None:
-        if self.flags.threadless and \
-                self.threadless_client_queue and \
-                self.threadless_process:
-            self.threadless_client_queue.send(addr)
-            send_handle(
-                self.threadless_client_queue,
-                conn.fileno(),
-                self.threadless_process.pid,
-            )
-            conn.close()
-        else:
-            work = self.work_klass(
-                TcpClientConnection(conn, addr),
-                flags=self.flags,
-                event_queue=self.event_queue,
-            )
-            work_thread = threading.Thread(target=work.run)
-            work_thread.daemon = True
-            work.publish_event(
-                event_name=eventNames.WORK_STARTED,
-                event_payload={'fileno': conn.fileno(), 'addr': addr},
-                publisher_id=self.__class__.__name__,
-            )
-            work_thread.start()
+    def _start_threadless_work(self, conn: socket.socket, addr: Tuple[str, int]) -> None:
+        assert self.threadless_process and self.threadless_client_queue
+        self.threadless_client_queue.send(addr)
+        send_handle(
+            self.threadless_client_queue,
+            conn.fileno(),
+            self.threadless_process.pid,
+        )
+        conn.close()
+
+    def _start_threaded_work(self, conn: socket.socket, addr: Tuple[str, int]) -> None:
+        work = self.work_klass(
+            TcpClientConnection(conn, addr),
+            flags=self.flags,
+            event_queue=self.event_queue,
+        )
+        work_thread = threading.Thread(target=work.run)
+        work_thread.daemon = True
+        work.publish_event(
+            event_name=eventNames.WORK_STARTED,
+            event_payload={'fileno': conn.fileno(), 'addr': addr},
+            publisher_id=self.__class__.__name__,
+        )
+        work_thread.start()
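`_start_threadless_work` sends the client address first and then duplicates the client fd into the `Threadless` process via `send_handle`; closing `conn` afterwards is safe because the receiving process ends up with its own copy of the descriptor. The `Threadless` side is not part of this diff, so the following receive-side sketch is an assumption built on the standard-library counterpart `recv_handle`, not the project's actual `Threadless` implementation.

    import socket
    from typing import Tuple
    from multiprocessing import connection
    from multiprocessing.reduction import recv_handle

    def receive_client(client_queue: connection.Connection) -> Tuple[socket.socket, Tuple[str, int]]:
        # Mirror image of `_start_threadless_work`: the address arrives first,
        # followed by the duplicated client fd.
        addr = client_queue.recv()
        fileno = recv_handle(client_queue)
        # Wrap the received fd in a socket owned by this process; on Python 3.7+
        # the family and type are detected from the descriptor itself.
        conn = socket.socket(fileno=fileno)
        return conn, addr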

    def run_once(self) -> None:
        with self.lock:
@@ -132,7 +145,14 @@ def run_once(self) -> None:
            if len(events) == 0:
                return
            conn, addr = self.sock.accept()
-        self.start_work(conn, addr)
+        if (
+            self.flags.threadless and
+            self.threadless_client_queue and
+            self.threadless_process
+        ):
+            self._start_threadless_work(conn, addr)
+        else:
+            self._start_threaded_work(conn, addr)

    def run(self) -> None:
        setup_logger(
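Taken together with the (truncated) `run()` method above, `run_once()` amounts to a lock-guarded accept loop over the listening socket received at start-up. The sketch below is a rough, self-contained illustration of that flow under the same `recv_handle` handoff assumption; it is not the project's actual `run()` body.

    import selectors
    import socket
    from multiprocessing import connection, synchronize
    from multiprocessing.reduction import recv_handle

    def accept_loop(work_queue: connection.Connection, lock: synchronize.Lock) -> None:
        # Receive the listening socket fd from the parent and close the pipe,
        # matching the "closed immediately" behaviour described in the docstring.
        fileno = recv_handle(work_queue)
        work_queue.close()
        sock = socket.socket(fileno=fileno)
        selector = selectors.DefaultSelector()
        selector.register(sock, selectors.EVENT_READ)
        while True:
            # The lock shared across acceptor processes ensures only one of
            # them polls and accepts on the listening socket at a time.
            with lock:
                events = selector.select(timeout=1)
                if len(events) == 0:
                    continue
                conn, addr = sock.accept()
            conn.close()  # A real acceptor would dispatch the work here instead.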