
With Django

python manage.py runserver


Start the app (myapp). The home screen (index.html) has a form for uploading an image, and the uploaded image is saved to a specific directory ('/Users/downloads/django_app/myapp/image/myapp').
(Reference sites for the app: https://qiita.com/narupo/items/e3dbdd5d030952d10661, https://qiita.com/okoppe8/items/86776b8df566a4513e96)

main.py contains CNN code that reads an image from a specific directory and returns a prediction result.


When an image is submitted through the form, I want to run main.py as part of the POST processing, receive the resulting variable, and render it in index.html.
・Question
How can I make main.py run from views.py when handling a POST request?

My own idea is to store the prediction result of main.py in a variable (LABEL), import main.py from views.py, and render that result (see the rough sketch after my code below).

# views.py
from django.shortcuts import render, redirect
from .forms import PhotoForm
from .models import Photo

def index(req):
    if req.method == 'GET':
        return render(req, 'myapp/index.html', {
            'form': PhotoForm(),
        })
    if req.method == 'POST':
        # == Processing at POST ==
# index.html
<form action="{% url 'index' %}" method="POST" enctype="multipart/form-data">
    {% csrf_token %}
    {{ form }}
    <input type="submit" value="post" />
</form>
# main.py
from __future__ import print_function
from __future__ import absolute_import
import warnings
import os
import numpy as np
import tensorflow as tf
from keras.optimizers import SGD
from keras import layers
from keras.preprocessing import image
from keras.applications.imagenet_utils import decode_predictions
from keras.models import Model
from keras.layers import Activation, AveragePooling2D, BatchNormalization, Concatenate, Conv2D, Dense
from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, Input, Lambda, MaxPooling2D
from keras import backend as K
from keras import metrics
# preprocess_input is used below but was not imported; assuming the InceptionResNetV2 version is intended
from keras.applications.inception_resnet_v2 import preprocess_input
import matplotlib.pyplot as plt
from keras.callbacks import EarlyStopping

tf.logging.set_verbosity(tf.logging.ERROR)

# NOTE: conv2d_bn and inception_resnet_block are helper functions from the
# keras_applications InceptionResNetV2 implementation; they are assumed to be
# defined (or imported) elsewhere in this file.
def InceptionResNetV2(img_input, include_top=True,
                      pooling=None, classes=13):
    # Stem block: 35 x 35 x 192
    x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid')
    x = conv2d_bn(x, 32, 3, padding='valid')
    x = conv2d_bn(x, 64, 3)
    x = MaxPooling2D(3, strides=2)(x)
    x = conv2d_bn(x, 80, 1, padding='valid')
    x = conv2d_bn(x, 192, 3, padding='valid')
    x = MaxPooling2D(3, strides=2)(x)
    # Mixed 5b (Inception-A block): 35 x 35 x 320
    branch_0 = conv2d_bn(x, 96, 1)
    branch_1 = conv2d_bn(x, 48, 1)
    branch_1 = conv2d_bn(branch_1, 64, 5)
    branch_2 = conv2d_bn(x, 64, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3)
    branch_pool = AveragePooling2D(3, strides=1, padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
    x = Concatenate(axis=channel_axis, name='mixed_5b')(branches)
    # 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
    for block_idx in range(1, 11):
        x = inception_resnet_block(x,
                                   scale=0.17,
                                   block_type='block35',
                                   block_idx=block_idx)
    # Mixed 6a (Reduction-A block): 17 x 17 x 1088
    branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid')
    branch_1 = conv2d_bn(x, 256, 1)
    branch_1 = conv2d_bn(branch_1, 256, 3)
    branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid')
    branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
    branches = [branch_0, branch_1, branch_pool]
    x = Concatenate(axis=channel_axis, name='mixed_6a')(branches)
    # 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088
    for block_idx in range(1, 21):
        x = inception_resnet_block(x,
                                   scale=0.1,
                                   block_type='block17',
                                   block_idx=block_idx)
    # Mixed 7a (Reduction-B block): 8 x 8 x 2080
    branch_0 = conv2d_bn(x, 256, 1)
    branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid')
    branch_1 = conv2d_bn(x, 256, 1)
    branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid')
    branch_2 = conv2d_bn(x, 256, 1)
    branch_2 = conv2d_bn(branch_2, 288, 3)
    branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid')
    branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    x = Concatenate(axis=channel_axis, name='mixed_7a')(branches)
    # 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
    for block_idx in range(1, 10):
        x = inception_resnet_block(x,
                                   scale=0.2,
                                   block_type='block8',
                                   block_idx=block_idx)
    x = inception_resnet_block(x,
                               scale=1.,
                               activation=None,
                               block_type='block8',
                               block_idx=10)
    # Final convolution block: 8 x 8 x 1536
    x = conv2d_bn(x, 1536, 1, name='conv_7b')
    # Classification block
    x = GlobalAveragePooling2D(name='avg_pool')(x)
    return Dense(classes, activation='softmax', name='predictions')(x)
input_ = Input(batch_shape=(None, 100, 100, 3))
output_ = InceptionResNetV2(img_input=input_)
model = Model(input_, output_, name='inception_resnet_v2')
model.load_weights('/Users/downloads/Downloads/django_app/myapp/weght_dir/model.h5')
model.compile(optimizer=SGD(decay=0.1, momentum=0.9, nesterov=True),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
img_path = '/Users/downloads/django_app/myapp/image/myapp/images.jpeg'
img = image.load_img(img_path, target_size=(100, 100))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
# Note: decode_predictions assumes ImageNet's 1000 classes; for a 13-class model,
# something like np.argmax(preds, axis=1) would be used instead.
print('Predicted:', decode_predictions(preds))
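
To make my idea concrete, what I imagine for the POST branch is something like the following. This is just a rough sketch, not working code: predict() is a function I would still have to add to main.py, and I am assuming PhotoForm is a ModelForm over Photo with an ImageField named image.

# views.py (rough sketch of the idea; predict() would have to be added to main.py)
from django.shortcuts import render
from .forms import PhotoForm
from .main import predict  # assuming main.py lives next to views.py and exposes predict()

def index(req):
    if req.method == 'GET':
        return render(req, 'myapp/index.html', {'form': PhotoForm()})
    if req.method == 'POST':
        form = PhotoForm(req.POST, req.FILES)
        if form.is_valid():
            photo = form.save()                # saves the uploaded image file
            LABEL = predict(photo.image.path)  # run the CNN on the saved image
            return render(req, 'myapp/index.html', {'form': PhotoForm(), 'label': LABEL})
        return render(req, 'myapp/index.html', {'form': form})

index.html would then display the result with {{ label }}.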
  • Answer # 1

    I feel like there was a sample project similar to this on GitHub.

    1, Save the trained CNN model in advance.
    How to save a Keras model?
    2, Modularize main.py.
    2-1, Write code that loads the trained model.
    2-2, Create a function that performs the preprocessing and model.predict(x) on the image and returns the result. Call it from the views.py side (see the sketch below).
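
    For 2-1 and 2-2, a minimal sketch could look like the following (untested). It assumes the trained model was saved whole with model.save('model.h5'); if only the weights were saved, the architecture has to be rebuilt and load_weights used as in the question's code. CLASS_NAMES is a placeholder for the 13 real class names.

# main.py (modularized, sketch)
import numpy as np
from keras.models import load_model
from keras.preprocessing import image
from keras.applications.inception_resnet_v2 import preprocess_input

MODEL_PATH = '/Users/downloads/Downloads/django_app/myapp/weght_dir/model.h5'  # path from the question
CLASS_NAMES = ['class_%d' % i for i in range(13)]  # placeholder: replace with the real 13 class names

_model = None  # cache so the model is loaded once, not on every request

def _get_model():
    global _model
    if _model is None:
        _model = load_model(MODEL_PATH)  # only works for a model saved with model.save()
    return _model

def predict(img_path):
    """Preprocess the image at img_path, run the CNN, and return the predicted class name."""
    img = image.load_img(img_path, target_size=(100, 100))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = _get_model().predict(x)
    return CLASS_NAMES[int(np.argmax(preds[0]))]

    On the views.py side, the POST branch then only has to import predict from main and pass it the path of the saved file, as in the sketch at the end of the question.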