tornado+gunicorn部署设置max_body_size
背景
想通过 gunicorn 的 max_requests 配置实现重启进程,解决一些内存泄露问题。
因为 gunicorn 启动时配置的是 tornado.web.Application 实例,并非直接使用 tornado.httpserver.HTTPServer,导致无法设置 max_body_size 和 max_buffer_size。
而 gunicorn 配置项中只有 limit_request_line / limit_request_fields / limit_request_field_size,无法满足 tornado 的限制配置。
现状
依赖版本如下:
- Python3.9
- tornado==6.5.2
- gunicorn==23.0.0
app.py
# Define the application instance (url_patterns is assumed to be defined elsewhere in app.py)
app = tornado.web.Application(url_patterns)
gunicorn.conf.py
# Docker deployment already provides a restart policy, so do not daemonize.
daemon = False
# Must stay False: Tornado's IOLoop cannot be shared across forked workers.
preload_app = False
# Recycle each worker after this many requests — effectively works around the OOM / memory-leak issue.
max_requests = 102400
max_requests_jitter = 1024
# Idle connection timeout in seconds (corresponds to Tornado's idle_connection_timeout=60).
keepalive = 60
# Worker timeout (corresponds to body_timeout / request-handling timeout).
timeout = 60
# bind
# port = os.environ.get("PROJECT_PORT", 8000)
# bind = "0.0.0.0:{}".format(port)
worker_class = "tornado"
启动命令
gunicorn -c gunicorn.conf.py --bind 0.0.0.0:8000 --workers 2 app:app
自定义 TornadoWorker
解决方式是自定义 TornadoWorker。代码基于 gunicorn 源码修改,主要改动是 server_class 初始化的部分,增加了 max_body_size 相关配置。
最后还需要同步修改 gunicorn 启动的 worker_class 配置:
worker_class = "gunicorn_worker.MyTornadoWorker"
gunicorn_worker 代码实现如下:
#!/usr/bin/env python
# coding=utf-8
"""Custom gunicorn Tornado worker that raises the HTTP body/buffer limits.

gunicorn's stock ``TornadoWorker`` constructs ``tornado.httpserver.HTTPServer``
without exposing ``max_body_size`` / ``max_buffer_size``.  This subclass
re-implements ``run()`` (based on the gunicorn source) so that both limits can
be passed to the server.
"""
import sys
import typing

from gunicorn.sock import ssl_context
from gunicorn.workers.gtornado import TornadoWorker

import tornado
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado.wsgi import WSGIContainer

# Upper bound for a request body and for the connection read buffer.
MAX_FILE_SIZE = 100 * 1024 * 1024  # 100MB


class MyTornadoWorker(TornadoWorker):
    """Tailored for tornado>=6, because max_body_size has to be configurable.

    ``run()`` is a copy of ``gunicorn.workers.gtornado.TornadoWorker.run``
    whose only functional change is constructing the ``HTTPServer`` with
    ``max_body_size`` / ``max_buffer_size``.
    """

    def run(self) -> None:
        self.ioloop = IOLoop.instance()
        self.alive = True
        self.server_alive = False

        # tornado >= 5: PeriodicCallback takes no io_loop argument anymore.
        self.callbacks = []
        self.callbacks.append(PeriodicCallback(self.watchdog, 1000))
        self.callbacks.append(PeriodicCallback(self.heartbeat, 1000))
        for callback in self.callbacks:
            callback.start()

        # Assume the app is a WSGI callable if it's not an
        # instance of tornado.web.Application or is an
        # instance of tornado.wsgi.WSGIApplication.
        app = self.wsgi
        if not isinstance(app, (WSGIContainer, tornado.web.Application)):
            app = WSGIContainer(app)

        # Monkey-patch HTTPConnection.finish to count the requests handled by
        # Tornado, so gunicorn can shut the worker down once max_requests is
        # exceeded.
        # FIX: upstream gunicorn checks the *module* tornado.httpserver for
        # HTTPConnection; the original code here checked the HTTPServer
        # *class*, which never carries that attribute.  tornado >= 4 removed
        # HTTPConnection, so tornado 6 takes the else branch either way, but
        # the module check keeps the legacy-tornado path working as intended.
        httpserver = sys.modules["tornado.httpserver"]
        if hasattr(httpserver, "HTTPConnection"):
            old_connection_finish = httpserver.HTTPConnection.finish

            def finish(other: typing.Any) -> None:
                self.handle_request()
                old_connection_finish(other)

            httpserver.HTTPConnection.finish = finish
            server_class = tornado.httpserver.HTTPServer
        else:

            class _HTTPServer(tornado.httpserver.HTTPServer):
                # ``instance`` is the server object; ``self`` (from the
                # enclosing closure) is the worker, so handle_request() is
                # invoked on the worker for every closed connection.
                def on_close(instance: typing.Any, server_conn: typing.Any) -> None:
                    self.handle_request()
                    super().on_close(server_conn)

            server_class = _HTTPServer

        # The whole point of this subclass: pass the size limits through.
        if self.cfg.is_ssl:
            server = server_class(
                app,
                ssl_options=ssl_context(self.cfg),
                max_body_size=MAX_FILE_SIZE,
                max_buffer_size=MAX_FILE_SIZE,
            )
        else:
            server = server_class(
                app,
                max_body_size=MAX_FILE_SIZE,
                max_buffer_size=MAX_FILE_SIZE,
            )
        self.server = server
        self.server_alive = True

        for s in self.sockets:
            s.setblocking(0)
            if hasattr(server, "add_socket"):  # tornado > 2.0
                server.add_socket(s)
            elif hasattr(server, "_sockets"):  # tornado 2.0
                server._sockets[s.fileno()] = s

        server.no_keep_alive = self.cfg.keepalive <= 0
        server.start(num_processes=1)

        self.ioloop.start()