diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..cbb5442
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,20 @@
+FROM python:3.10-slim
+
+RUN apt-get update \
+    && apt-get install -y gcc curl \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+WORKDIR /usr/src/app
+
+COPY . .
+
+# Download the model weights from the GitHub release
+ARG MODEL_URL=https://github.com/dblasko/low-light-event-img-enhancer/releases/download/mirnet-finetuned-1.0.0-100epochs/mirnet_lowlight-enhance_finetuned_100-epochs_early-stopped_64x64.pth
+RUN curl -L -o model/weights/Mirnet_enhance_finetune-35-early-stopped_64x64.pth ${MODEL_URL}
+
+RUN pip install -r requirements.txt
+
+EXPOSE 5000
+
+CMD ["python", "app/api.py"]
diff --git a/README.md b/README.md
index 36bb7df..7ee3d1e 100644
--- a/README.md
+++ b/README.md
@@ -26,6 +26,8 @@ Deep-learning-based low-light image enhancer specialized on restoring dark image
   - [Running tests](#running-tests)
 - [Usage of the web-application and REST API based on the model](#usage-of-the-web-application-and-rest-api-based-on-the-model)
   - [Running the inference endpoint](#running-the-inference-endpoint)
+    - [Running the endpoint locally and querying the API](#running-the-endpoint-locally-and-querying-the-api)
+    - [Deploying the endpoint using Docker](#deploying-the-endpoint-using-docker)
   - [Running the web application](#running-the-web-application)
@@ -182,6 +184,8 @@ To add further tests, simply add a new file in the `tests` folder, and name it `
 # Usage of the web-application and REST API based on the model
 ## Running the inference endpoint
+
+### Running the endpoint locally and querying the API
 To start the inference endpoint (an API implemented with Flask), run the following command from the root directory of the project:
 ```bash
 python app/api.py
 ```
@@ -221,6 +225,16 @@ Two routes are available:
         f.write(response.content)
 ```
 
+### Deploying the endpoint using Docker
+The same inference endpoint can be run without setting up the environment locally, using the Docker image built from the `Dockerfile` in the root directory of the project. To do so, run the following commands from the root directory of the project:
+```bash
+docker build -t low-light-enhancer .
+docker run -p 5000:5000 low-light-enhancer
+```
+The image automatically downloads and sets up the model weights from the GitHub release. If you would like to use the weights of a particular release, specify their URL with the `--build-arg MODEL_URL=...` argument when building the image.
+
+The inference endpoint should then be accessible at `localhost:5000` and allow you to send POST requests with images to enhance, as described in the previous section.
+
 ## Running the web application
 To start the inference web application, run the following command from the root directory of the project:
 ```bash
diff --git a/app/api.py b/app/api.py
index 814a626..0a020c9 100644
--- a/app/api.py
+++ b/app/api.py
@@ -60,6 +60,7 @@ def run_model(input_image):
 # Endpoint for single image
 @app.route("/enhance", methods=["POST"])
 def enhance_image():
+    print("ENHANCE ENDPOINT CALLED")
     if "image" not in request.files:
         return jsonify({"error": "No image provided"}), 400
     file = request.files["image"]
@@ -100,4 +101,4 @@ def batch_enhance_images():
 
 
 if __name__ == "__main__":
-    app.run(debug=True)
+    app.run(host="0.0.0.0", port=5000)
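
As a quick way to exercise the Dockerized endpoint, here is a minimal client sketch. It assumes the container is running via `docker run -p 5000:5000 low-light-enhancer`; the `/enhance` route and the `image` form field match `app/api.py`, while the file names are hypothetical placeholders.

```python
import requests

# Minimal sketch: send a dark image to the containerized /enhance endpoint
# and save the enhanced result. File names below are placeholders.
with open("dark_photo.png", "rb") as f:
    response = requests.post(
        "http://localhost:5000/enhance",
        files={"image": f},
    )

response.raise_for_status()

# The endpoint responds with the enhanced image bytes.
with open("enhanced_photo.png", "wb") as out:
    out.write(response.content)
```

Binding the Flask app to `0.0.0.0` in `app/api.py` is what makes the endpoint reachable through the port published by `docker run -p 5000:5000`.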