@@ -102,7 +102,7 @@ Connection::~Connection()
         m_sync_cleanup_fns.pop_front();
     }
     while (!m_async_cleanup_fns.empty()) {
-        std::unique_lock<std::mutex> lock(m_loop.m_mutex);
+        std::unique_lock<std::mutex> lock(m_loop.m_mutex); // NOLINT(misc-const-correctness)
         m_loop.m_async_fns.emplace_back(std::move(m_async_cleanup_fns.front()));
         m_async_cleanup_fns.pop_front();
     }
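
The NOLINT(misc-const-correctness) markers added throughout this commit suppress the clang-tidy check that flags never-modified local variables as candidates for const. A std::unique_lock used purely as a scope guard triggers the check, but a const lock could not be unlocked early or handed to a condition variable, so suppressing the warning is safer than following it. A minimal standalone sketch of the flagged shape, with hypothetical names:

#include <mutex>

std::mutex g_mutex;
int g_value = 0;

void SetValue(int v)
{
    // clang-tidy suggests const here because no non-const member is called,
    // but a const unique_lock could never be unlocked early or passed to a
    // condition variable, so the check is suppressed instead.
    std::unique_lock<std::mutex> lock(g_mutex); // NOLINT(misc-const-correctness)
    g_value = v;
}
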
@@ -113,7 +113,7 @@ Connection::~Connection()
 
 CleanupIt Connection::addSyncCleanup(std::function<void()> fn)
 {
-    std::unique_lock<std::mutex> lock(m_loop.m_mutex);
+    std::unique_lock<std::mutex> lock(m_loop.m_mutex); // NOLINT(misc-const-correctness)
     // Add cleanup callbacks to the front of list, so sync cleanup functions run
     // in LIFO order. This is a good approach because sync cleanup functions are
     // added as client objects are created, and it is natural to clean up
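
The LIFO behavior described in the comment falls out of inserting at the front of the list and draining from the front. A self-contained sketch of that pattern, separate from the real Connection class:

#include <functional>
#include <iostream>
#include <list>

int main()
{
    std::list<std::function<void()>> sync_cleanup_fns;
    // Registered in creation order A then B...
    sync_cleanup_fns.emplace_front([] { std::cout << "cleanup A\n"; });
    sync_cleanup_fns.emplace_front([] { std::cout << "cleanup B\n"; });
    // ...but drained front-first, so B runs before A: LIFO.
    while (!sync_cleanup_fns.empty()) {
        sync_cleanup_fns.front()();
        sync_cleanup_fns.pop_front();
    }
}
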
@@ -127,13 +127,13 @@ CleanupIt Connection::addSyncCleanup(std::function<void()> fn)
 
 void Connection::removeSyncCleanup(CleanupIt it)
 {
-    std::unique_lock<std::mutex> lock(m_loop.m_mutex);
+    std::unique_lock<std::mutex> lock(m_loop.m_mutex); // NOLINT(misc-const-correctness)
     m_sync_cleanup_fns.erase(it);
 }
 
 void Connection::addAsyncCleanup(std::function<void()> fn)
 {
-    std::unique_lock<std::mutex> lock(m_loop.m_mutex);
+    std::unique_lock<std::mutex> lock(m_loop.m_mutex); // NOLINT(misc-const-correctness)
     // Add async cleanup callbacks to the back of the list. Unlike the sync
     // cleanup list, this list order is more significant because it determines
     // the order server objects are destroyed when there is a sudden disconnect,
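
By contrast, the async list inserts at the back, so callbacks run in registration (FIFO) order when the list is drained, which is what pins down the destruction order on a sudden disconnect. The same sketch with back insertion:

#include <functional>
#include <iostream>
#include <list>

int main()
{
    std::list<std::function<void()>> async_cleanup_fns;
    // Back insertion preserves registration order...
    async_cleanup_fns.emplace_back([] { std::cout << "destroy object 1\n"; });
    async_cleanup_fns.emplace_back([] { std::cout << "destroy object 2\n"; });
    // ...so the earliest-registered callback runs first: FIFO.
    while (!async_cleanup_fns.empty()) {
        async_cleanup_fns.front()();
        async_cleanup_fns.pop_front();
    }
}
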
@@ -169,7 +169,7 @@ EventLoop::EventLoop(const char* exe_name, LogFn log_fn, void* context)
 EventLoop::~EventLoop()
 {
     if (m_async_thread.joinable()) m_async_thread.join();
-    std::lock_guard<std::mutex> lock(m_mutex);
+    std::lock_guard<std::mutex> lock(m_mutex); // NOLINT(misc-const-correctness)
     KJ_ASSERT(m_post_fn == nullptr);
     KJ_ASSERT(m_async_fns.empty());
     KJ_ASSERT(m_wait_fd == -1);
@@ -192,7 +192,7 @@ void EventLoop::loop()
     int post_fd{m_post_fd};
     char buffer = 0;
     for (;;) {
-        size_t read_bytes = wait_stream->read(&buffer, 0, 1).wait(m_io_context.waitScope);
+        const size_t read_bytes = wait_stream->read(&buffer, 0, 1).wait(m_io_context.waitScope);
         if (read_bytes != 1) throw std::logic_error("EventLoop wait_stream closed unexpectedly");
         std::unique_lock<std::mutex> lock(m_mutex);
         if (m_post_fn) {
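
This loop blocks on a one-byte read until another thread writes to the paired post descriptor, a classic self-pipe wakeup. The real code wraps wait_fd in a kj async stream; a bare-POSIX sketch of the same idiom:

#include <unistd.h>
#include <cstdio>

int main()
{
    int fds[2];
    if (pipe(fds) != 0) return 1; // fds[0]: wait end, fds[1]: post end
    const char wake = 0;
    (void)write(fds[1], &wake, 1);                       // "post": wake the loop
    char buffer = 0;
    const ssize_t read_bytes = read(fds[0], &buffer, 1); // loop blocks here
    std::printf("woke with %zd byte(s)\n", read_bytes);
    close(fds[0]);
    close(fds[1]);
}
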
@@ -212,7 +212,7 @@ void EventLoop::loop()
     log() << "EventLoop::loop bye.";
     wait_stream = nullptr;
     KJ_SYSCALL(::close(post_fd));
-    std::unique_lock<std::mutex> lock(m_mutex);
+    std::unique_lock<std::mutex> lock(m_mutex); // NOLINT(misc-const-correctness)
     m_wait_fd = -1;
     m_post_fd = -1;
 }
@@ -258,7 +258,7 @@ void EventLoop::startAsyncThread(std::unique_lock<std::mutex>& lock)
     while (true) {
         if (!m_async_fns.empty()) {
             addClient(lock);
-            std::function<void()> fn = std::move(m_async_fns.front());
+            const std::function<void()> fn = std::move(m_async_fns.front());
             m_async_fns.pop_front();
             Unlock(lock, fn);
             removeClient(lock);
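
Unlock() releases the mutex for the duration of the callback and reacquires it afterwards, so the queued function can take the same mutex (or block) without deadlocking the async thread. A plausible minimal form of the helper, which may differ from the real one (the real helper could, for instance, use an RAII guard for exception safety):

#include <mutex>

template <typename Lock, typename Callback>
void Unlock(Lock& lock, Callback&& callback)
{
    lock.unlock(); // let the callback run without holding the mutex
    callback();
    lock.lock();   // restore the locked state the caller expects
}
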
@@ -282,7 +282,7 @@ bool EventLoop::done(std::unique_lock<std::mutex>& lock)
 
 std::tuple<ConnThread, bool> SetThread(ConnThreads& threads, std::mutex& mutex, Connection* connection, std::function<Thread::Client()> make_thread)
 {
-    std::unique_lock<std::mutex> lock(mutex);
+    std::unique_lock<std::mutex> lock(mutex); // NOLINT(misc-const-correctness)
     auto thread = threads.find(connection);
     if (thread != threads.end()) return {thread, false};
     thread = threads.emplace(
@@ -299,7 +299,7 @@ std::tuple<ConnThread, bool> SetThread(ConnThreads& threads, std::mutex& mutex,
         // try unregister this callback after connection is destroyed.
         thread->second.m_cleanup_it.reset();
         // Remove connection pointer about to be destroyed from the map
-        std::unique_lock<std::mutex> lock(mutex);
+        std::unique_lock<std::mutex> lock(mutex); // NOLINT(misc-const-correctness)
         threads.erase(thread);
     });
     return {thread, true};
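
SetThread() is a find-or-create under a caller-supplied mutex, returning the map iterator plus a flag saying whether a new entry was made. The same shape with hypothetical stand-in types:

#include <map>
#include <mutex>
#include <string>
#include <tuple>

std::tuple<std::map<int, std::string>::iterator, bool>
SetEntry(std::map<int, std::string>& entries, std::mutex& mutex, int key)
{
    const std::lock_guard<std::mutex> lock(mutex);
    auto it = entries.find(key);
    if (it != entries.end()) return {it, false}; // existing entry reused
    it = entries.emplace(key, "new").first;      // created under the same lock
    return {it, true};
}
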
@@ -339,7 +339,7 @@ ProxyServer<Thread>::~ProxyServer()
     assert(m_thread_context.waiter.get());
     std::unique_ptr<Waiter> waiter;
     {
-        std::unique_lock<std::mutex> lock(m_thread_context.waiter->m_mutex);
+        std::unique_lock<std::mutex> lock(m_thread_context.waiter->m_mutex); // NOLINT(misc-const-correctness)
         //! Reset thread context waiter pointer, as shutdown signal for done
         //! lambda passed as waiter->wait() argument in makeThread code below.
         waiter = std::move(m_thread_context.waiter);
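
Moving the waiter out of the thread context under its own mutex is the shutdown signal: per the comments above, the thread created in makeThread() checks the pointer from its wait loop and exits once it is null, while the destructor keeps the moved-to unique_ptr alive until the thread is joined. A sketch of the destructor side, with a hypothetical Waiter standing in for the real class:

#include <condition_variable>
#include <memory>
#include <mutex>

struct Waiter {
    std::mutex m_mutex;
    std::condition_variable m_cv;
};

struct ThreadContext {
    std::unique_ptr<Waiter> waiter;
};

std::unique_ptr<Waiter> SignalShutdown(ThreadContext& context)
{
    std::unique_ptr<Waiter> waiter;
    {
        const std::lock_guard<std::mutex> lock(context.waiter->m_mutex);
        waiter = std::move(context.waiter); // null pointer = shutdown signal
    }
    waiter->m_cv.notify_all(); // wake the waiting thread so it sees the null
    return waiter;             // caller keeps it alive until the thread joins
}
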
@@ -367,7 +367,7 @@ ProxyServer<ThreadMap>::ProxyServer(Connection& connection) : m_connection(conne
 
 kj::Promise<void> ProxyServer<ThreadMap>::makeThread(MakeThreadContext context)
 {
-    std::string from = context.getParams().getName();
+    const std::string from = context.getParams().getName();
     std::promise<ThreadContext*> thread_context;
     std::thread thread([&thread_context, from, this]() {
         g_thread_context.thread_name = ThreadName(m_connection.m_loop.m_exe_name) + " (from " + from + ")";
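
The std::promise here hands a pointer from the newly spawned thread back to the RPC handler, which blocks on the matching future until the thread has published its context. A self-contained sketch of that handshake (a second promise stands in for the wait loop that keeps the real thread alive; names are hypothetical):

#include <future>
#include <iostream>
#include <string>
#include <thread>

struct ThreadContext { std::string thread_name; };
thread_local ThreadContext g_thread_context;

int main()
{
    std::promise<ThreadContext*> thread_context;
    std::promise<void> done; // keeps the worker, and its thread_locals, alive
    std::thread thread([&]() {
        g_thread_context.thread_name = "worker (from example)";
        thread_context.set_value(&g_thread_context); // publish to the parent
        done.get_future().wait(); // the real thread runs a wait loop here
    });
    ThreadContext* context = thread_context.get_future().get(); // blocks
    std::cout << context->thread_name << "\n"; // safe: worker still alive
    done.set_value();
    thread.join();
}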