# !pip install keras==3.0.4
# !pip install keras-cv==0.8.1
import os
import requests
import json
import tempfile
import shutil
os.environ["KERAS_BACKEND"] = "tensorflow" # Set the Keras backend environment variable to "tensorflow"
import keras
import keras_cv
import numpy as np
BATCH_SIZE = 64  # Number of images per inference batch
IMAGE_SIZE = (224, 224)  # (height, width) each image is resized to before inference
target = "logo" # Predicted target class: "album", "book", "logo", "screenshot"
label_mode = "binary" # Model type: "binary" for binary classification, "categorical" for multiclass
model_path = "/content/logo_detection/model/logo_max_all.keras" # Path to a trained Keras model with ".keras" extension
# Load the trained model from disk (raises if the path does not exist)
model = keras.models.load_model(model_path)
# Input data in JSON format, each containing filename, URL, and target class.
# NOTE(review): every entry below has target "logo", so the "out_of_domain"
# class directory created later stays empty for this sample input.
input_data = [
    {
        "filename": "Elizabeth_Drive_-_border_of_Edensor_Park_and_Bonnyrigg_Heights_in_New_South_Wales_62.jpg",
        "url": "https://phab.wmfusercontent.org/file/data/bfjukphf6khghlic2rgv/PHID-FILE-34q3qs3oe6ea5o4p22lv/Elizabeth_Drive_-_border_of_Edensor_Park_and_Bonnyrigg_Heights_in_New_South_Wales_62.jpg",
        "target": "logo"
    },
    {
        "filename": "Cambia_logo.png",
        "url": "https://phab.wmfusercontent.org/file/data/mb6wynlvf3bdfw5e443f/PHID-FILE-wc27fvtkl6yv4rjdlqzn/Cambia_logo.png",
        "target": "logo"
    },
    {
        "filename": "Blooming_bush_(14248894271).jpg",
        "url": "https://phab.wmfusercontent.org/file/data/46i23voto2a4aqwo6iyb/PHID-FILE-eldmzjv4p3vwsiwsuxya/Blooming_bush_%2814248894271%29.jpg",
        "target": "logo"
    },
    {
        "filename": "BackupVault_Logo_2019.png",
        "url": "https://phab.wmfusercontent.org/file/data/licxzubl2357mpyw5hai/PHID-FILE-kygwsboczktnzfe3u2ne/BackupVault_Logo_2019.png",
        "target": "logo"
    },
    {
        "filename": "Abv.png",
        "url": "https://phab.wmfusercontent.org/file/data/l5rkhcd3vv2kk4czp2y2/PHID-FILE-wj5balvrsa73eo35j7eg/Abv.png",
        "target": "logo"
    },
    {
        "filename": "12_rue_de_Condé_-_detail.jpg",
        "url": "https://phab.wmfusercontent.org/file/data/wxtr7be45udzyjzrojr6/PHID-FILE-tnu6mrji2smn2hpm6nhv/12_rue_de_Cond%C3%A9_-_detail.jpg",
        "target": "logo"
    }
]
# Stage the downloaded images in a throw-away directory tree:
#   <temp_dir>/logo/           -> images labelled "logo"
#   <temp_dir>/out_of_domain/  -> images labelled "out_of_domain"
# (image_dataset_from_directory later infers labels from these names)
temp_dir = tempfile.mkdtemp()
logo_dir = os.path.join(temp_dir, "logo")
out_of_domain_dir = os.path.join(temp_dir, "out_of_domain")
for class_dir in (logo_dir, out_of_domain_dir):
    os.makedirs(class_dir)
# Download each image into the class sub-directory matching its "target",
# so keras.utils.image_dataset_from_directory can infer the label from
# the directory name.
for data in input_data:
    # Fetch first and fail fast on HTTP errors: the original wrote
    # response.content unconditionally, so a 404/500 would leave an HTML
    # error page on disk masquerading as an image. A timeout prevents
    # the script from hanging forever on a stalled connection.
    response = requests.get(data["url"], timeout=30)
    response.raise_for_status()
    image_filename = os.path.join(temp_dir, data["target"], data["filename"])
    with open(image_filename, "wb") as f:
        f.write(response.content)
# Use keras.utils.image_dataset_from_directory to create test_set.
# Labels are inferred from the sub-directory names; class_names pins the
# label order, so index 0 = "out_of_domain" and index 1 = "logo".
# shuffle=False keeps dataset iteration order aligned with
# test_set.file_paths, which the prediction loop below relies on.
test_set = keras.utils.image_dataset_from_directory(
    temp_dir,
    labels="inferred",
    label_mode=label_mode,
    class_names=["out_of_domain", "logo"],
    batch_size=BATCH_SIZE,
    image_size=IMAGE_SIZE,
    shuffle=False,
)
predictions_response = []
try:
    # test_set.file_paths is a flat list over the WHOLE dataset; keep a
    # running offset so filenames stay aligned with predictions beyond the
    # first batch. (The original indexed file_paths[i] with the
    # *within-batch* index, which misattributes every prediction once the
    # dataset exceeds BATCH_SIZE images.)
    file_paths = test_set.file_paths
    offset = 0
    for images, labels in test_set:
        predictions = model(images)  # forward pass; one score row per image
        for i in range(len(predictions)):
            file_name = os.path.basename(file_paths[offset + i])
            # Round for readable JSON output; astype(float) yields float64
            # values that json.dumps can serialize directly.
            scores = np.around(predictions[i].numpy(), decimals=2).astype(float)
            # NOTE(review): assumes the model emits two scores per image
            # ordered [out_of_domain, logo], matching the class_names order
            # used when building test_set — confirm the output layer really
            # has two units (a single-sigmoid "binary" head would break this
            # indexing).
            predictions_response.append({
                "filename": file_name,
                "target": target,
                "prediction": {
                    "logo": scores[1],
                    "out_of_domain": scores[0],
                },
            })
        offset += len(predictions)
    # Output response in JSON format
    print(json.dumps(predictions_response, indent=4))
finally:
    # Always remove the temporary image tree, even if inference fails
    # (the original leaked temp_dir on any exception above).
    shutil.rmtree(temp_dir)