Commit e8f8d4e3 authored by PHILIPPE's avatar PHILIPPE
Browse files

ATb to tag without script and ML part


Signed-off-by: Philippe Anne-Charlotte <Anne.Charlotte.Philippe@ifremer.fr>
parent 29f9c374
Pipeline #12426 failed with stage
in 28 seconds
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 4 15:55:14 2021
@author: Administrateur
"""
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import sys
import json
import datetime
import numpy as np
import skimage.draw
import cv2
import matplotlib.pyplot as plt
import keras
from imgaug import augmenters as iaa
from mrcnn.config import Config
from mrcnn.visualize import display_instances
from mrcnn import model as modellib, utils
# Run parameters for the training script below.
# NOTE(review): all paths are hard-coded to a local Windows account
# (C:/Users/Administrateur) — parameterize (argparse/config file) before sharing.
commands = "train"  # intended action; NOTE(review): never read below — confirm it is used elsewhere
dataset = 'C:/Users/Administrateur/Documents/DATA/buccin/dataset/'  # dataset root; not used in this visible chunk
weights = 'C:/Users/Administrateur/Desktop/mask_rcnn_balloon.h5'  # starting weights for transfer learning
path_logs = 'C:/Users/Administrateur/Documents/DATA/buccin/'  # MaskRCNN model_dir (checkpoints + logs)
label = "crabe"  # class name; becomes CustomConfig.NAME
############################################################
# Configurations
############################################################
class CustomConfig(Config):
    """Mask R-CNN configuration for single-class training.

    Derives from the base mrcnn Config class and overrides some values;
    NAME is taken from the module-level ``label`` constant.
    """
    # Give the configuration a recognizable name
    NAME = label
    # We use a GPU with 12GB memory, which can fit two images.
    # Adjust down if you use a smaller GPU.
    IMAGES_PER_GPU = 2
    # Number of classes (including background)
    NUM_CLASSES = 1 + 1  # Background + the single labelled class
    # Number of training steps per epoch
    STEPS_PER_EPOCH = 100
    # Skip detections with < 90% confidence
    DETECTION_MIN_CONFIDENCE = 0.9
#####################################################
# Configurations
config = CustomConfig()
config.display()  # echo every config value into the run log

# Create model in training mode; checkpoints and TensorBoard logs go under path_logs
model = modellib.MaskRCNN(mode="training", config=config,
                          model_dir=path_logs)
# Select weights file to load
weights_path = weights
# Load weights
print("Loading weights ", weights_path)
# Exclude the class-specific head layers so they are re-initialized for the
# new NUM_CLASSES (standard transfer learning from the balloon weights).
model.load_weights(weights_path, by_name=True, exclude=[
    "mrcnn_class_logits", "mrcnn_bbox_fc",
    "mrcnn_bbox", "mrcnn_mask"])
# Train or evaluate
# NOTE(review): train() is not defined or imported anywhere in this file —
# this call raises NameError at runtime. The training routine appears to
# have been removed ("without script and ML part"); restore or import it
# before running this script.
train(model)
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 1 21:42:47 2021
@author: Administrateur
"""
import os, csv, json, collections, random, shutil
from argparse import ArgumentParser
from PIL import Image
import pandas as pd
from tkinter import *
def prepare_data_maskrcnn(
        label: str,
        path_data: str,
        path_img: str,
        path_out: str) -> None:
    """Convert an ATb rectangle-annotation CSV into VIA-style JSON for Mask R-CNN.

    Reads ``path_data`` (``;``-separated, latin-1) which must contain the
    columns ``img_filename``, ``pos1x_rect``, ``pos2x_rect``, ``pos1y_rect``
    and ``pos2y_rect``. Each rectangle becomes a 4-point polygon region
    tagged with ``label``. Annotated images are then split 80/20 into
    ``train``/``val`` folders under ``path_out`` (each with a
    ``via_region_data.json``) and the referenced images are copied in.

    Parameters
    ----------
    label : str
        Class name written into every region's ``region_attributes``.
    path_data : str
        Path to the input CSV file.
    path_img : str
        Directory holding the images; must end with a path separator since
        filenames are concatenated directly.
    path_out : str
        Output directory (must end with a path separator); created if missing.
    """
    if not os.path.exists(path_out):
        os.makedirs(path_out)

    # Build the VIA dictionary: one entry per "<filename><size-in-bytes>" key,
    # the format expected by the Mask R-CNN balloon-sample data loader.
    data = collections.defaultdict(dict)
    df = pd.read_csv(path_data, sep=';', encoding='latin-1', low_memory=False)
    for i in df.index:
        img_filename = df['img_filename'][i]
        img_size = os.path.getsize(path_img + img_filename)  # size on disk, bytes
        label_size = img_filename + str(img_size)
        data[label_size]['filename'] = img_filename
        data[label_size]['size'] = img_size
        # The CSV stores an axis-aligned rectangle as two opposite corners
        # (pos1x, pos1y)/(pos2x, pos2y); expand it into a 4-point polygon
        # ordered up-left, down-left, down-right, up-right.
        x1 = int(df['pos1x_rect'][i])
        x2 = int(df['pos2x_rect'][i])
        y1 = int(df['pos1y_rect'][i])
        y2 = int(df['pos2y_rect'][i])
        new_region = {
            "shape_attributes": {
                "name": "polygon",
                "all_points_x": [x1, x1, x2, x2],
                "all_points_y": [y1, y2, y2, y1],
            },
            "region_attributes": {"type": label},
        }
        if 'regions' not in data[label_size]:
            data[label_size]['regions'] = collections.defaultdict()
        # Append under the next integer key (serialized as "0", "1", ... in JSON).
        data[label_size]['regions'][len(data[label_size]['regions'])] = new_region
        data[label_size]["file_attributes"] = {}

    # Split data into train & val subdictionaries before dumping to JSON.
    # random.sample requires a sequence (dict_items raises TypeError on 3.11+).
    num_train = int(len(data.keys()) * 4 / 5)
    train_data = dict(random.sample(list(data.items()), num_train))
    print("Number of training examples: {}".format(len(train_data)))
    val_data = {k: v for k, v in data.items() if k not in train_data}
    print("Number of validation examples: {}".format(len(val_data)))

    # Dump the full set plus both splits.
    with open(path_out + "annotations_json.json", "w") as json_file:
        json.dump(data, json_file, indent=2)
    with open(path_out + "train_annotations.json", "w") as json_file:
        json.dump(train_data, json_file, indent=2)
    with open(path_out + "val_annotations.json", "w") as json_file:
        json.dump(val_data, json_file, indent=2)

    # Populate one image folder per split, next to its annotations.
    print('Creating training folder')
    _fill_split_folder(train_data, path_out + 'train/',
                       path_out + "train_annotations.json", path_img)
    print('Creating validation folder')
    _fill_split_folder(val_data, path_out + 'val/',
                       path_out + "val_annotations.json", path_img)


def _fill_split_folder(split_data, folder, annotations_json, path_img):
    """Recreate *folder*, copy the split's JSON as via_region_data.json and
    copy every annotated image into it (best effort: a missing image is
    reported, not fatal)."""
    if os.path.exists(folder):
        # Delete the entire directory tree so the split starts clean.
        shutil.rmtree(folder)
    os.makedirs(folder)
    shutil.copy(annotations_json, folder + "via_region_data.json")
    for f in (entry['filename'] for entry in split_data.values()):
        try:
            shutil.copy(path_img + f, folder)
        except OSError:  # missing or unreadable image — keep going
            print("Couldn't find image: {}".format(f))
......@@ -24,7 +24,6 @@ import image_annotations.metadata.add_images_metadata as ee4
import image_annotations.visualization.genere_annoted_images as ff
import image_annotations.analyse.report_pdf as gg
import image_annotations.cleaning.check_catalog as hh
import image_annotations.ML_mrcnn.prepare_maskrcnn as ii
from tkinter import *
import pandas as pd
......@@ -53,10 +52,6 @@ def image_annotations_pipeline():
FrameIII.config(background='#a3decb')
Label(FrameIII, text="IMAGES GENERATION").pack(padx=5, pady=5)
FrameIV = Frame(fenetre, borderwidth=2, relief=GROOVE)
FrameIV.pack(side = LEFT)
FrameIV.config(background='white')
Label(FrameIV, text="MACHINE LEARNING").pack(padx=5, pady=5)
Frame1 = Frame(FrameI, borderwidth=1, relief=GROOVE)
Frame1.pack(padx=5, pady=5)
......@@ -231,46 +226,7 @@ def image_annotations_pipeline():
entree = Entry(Frame8, textvariable=thbn2cancel, width=40)
entree.pack()
Frame9 = Frame(FrameIV, borderwidth=2, relief=GROOVE)
Frame9.pack(padx=5, pady=5)
pmrcnn = IntVar()
bouton = Checkbutton(Frame9, text="Prepare data", variable=pmrcnn, onvalue=1, offvalue=0)
bouton.pack(side = LEFT)
def createNewWindow():
newWindow = Toplevel()
Label(newWindow, text='Prepare data for Mask R-CNN.').pack(padx=30, pady=30)
buttonq = Button(Frame9, text='?', command=createNewWindow)
buttonq.pack(side = RIGHT)
label_mrcnn = StringVar()
label_mrcnn.set("label")
entree = Entry(Frame9, textvariable=label_mrcnn, width=40)
entree.pack()
path_data_mrcnn = StringVar()
path_data_mrcnn.set("path to the ATb-based data")
entree = Entry(Frame9, textvariable=path_data_mrcnn, width=40)
entree.pack()
path_out_mrcnn = StringVar()
path_out_mrcnn.set("output directory")
entree = Entry(Frame9, textvariable=path_out_mrcnn, width=40)
entree.pack()
path_img_mrcnn = StringVar()
path_img_mrcnn.set("path to the images")
entree = Entry(Frame9, textvariable=path_img_mrcnn, width=40)
entree.pack()
Frame91 = Frame(FrameIV, borderwidth=2, relief=GROOVE)
Frame91.pack(padx=5, pady=5)
mmrcnn = IntVar()
bouton = Checkbutton(Frame91, text="Mask R-CNN model training", variable=pmrcnn, onvalue=1, offvalue=0)
bouton.pack(side = LEFT)
def createNewWindow():
newWindow = Toplevel()
Label(newWindow, text='Compute Mask R-CNN model training.').pack(padx=30, pady=30)
buttonq = Button(Frame91, text='?', command=createNewWindow)
buttonq.pack(side = RIGHT)
monBoutonQuitter = Button(fenetre, text='OK', command=fenetre.destroy)
monBoutonQuitter.pack(side = RIGHT)
......@@ -312,13 +268,7 @@ def image_annotations_pipeline():
csv_name_catalog = csv_name_catalog.get()
thbn2cancel = thbn2cancel.get()
pmrcnn = pmrcnn.get()
label_mrcnn = label_mrcnn.get()
path_data_mrcnn = path_data_mrcnn.get()
path_out_mrcnn = path_out_mrcnn.get()
path_img_mrcnn = path_img_mrcnn.get()
mmrcnn = mmrcnn.get()
if dss == 1:
......@@ -409,16 +359,9 @@ def image_annotations_pipeline():
if clean == 1:
print('Delete bad thumbnails to the catalog of '+csv_name_catalag)
print('Delete bad thumbnails to the catalog' + csv_name_catalog)
hh.check_catalog(path_catalog, csv_name_catalog, thbn2cancel)
if pmrcnn == 1:
print('Data for Mask R-CNN preparation...')
ii.prepare_maskrcnn(label_mrcnn, path_data_mrcnn, path_out_mrcnn, path_img_mrcnn)
print('Data preparation of Mask R-CNN ok.')
if mmrcnn == 1:
print('Not available yet')
......
This directory contains scripts to run outside of ATb, directly using a Python interface.
\ No newline at end of file
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 22 20:20:43 2021
@author: Administrateur
"""
# script to perform a 2zoom-based separation of dataframe
import os
import numpy as np
import pandas as pd
# Split an annotation dataframe by zoom level and camera prefix.
# NOTE(review): input/output paths are hard-coded to a local Windows
# machine — parameterize before reuse.
path_data = 'C:/Users/Administrateur/Documents/MISSIONS/imageJ_marjolaine/buccin_expert_imageJ/zoom_separation/data.csv'
df = pd.read_csv(path_data, sep=';', encoding='latin-1', low_memory=False)

# 1- add a 'zoom' column. The filename encodes a timestamp: a shot taken
# exactly on the hour (minutes == '00') is a dezoomed/wide image (zoom 0),
# otherwise it is a zoomed image (zoom 1). The first character of the
# filename identifies the camera ('M' for MOMAR, 'C' for CAM), so the
# column value is e.g. '0C' or '1C'.
df = df.assign(zoom=1)
for filename in df['img_filename'].unique():
    print(filename)
    if pd.notna(filename):  # skip missing filenames (NaN)
        minute = filename[-8:-6]  # minutes field of the timestamped name
        prefix = '0' if minute == '00' else '1'
        df.loc[df['img_filename'] == filename, 'zoom'] = prefix + filename[0]

# 2- split the dataframe into one dataframe per zoom group
# (raises KeyError if a group is absent from the data, as before)
group = df.groupby(df.zoom)
df0C = group.get_group('0C')
df1C = group.get_group('1C')

# 3- delete the helper column zoom
df0C = df0C.drop('zoom', axis=1)
df1C = df1C.drop('zoom', axis=1)

# 4- save
path_data0C = 'C:/Users/Administrateur/Documents/MISSIONS/imageJ_marjolaine/buccin_expert_imageJ/zoom_separation/0C/data.csv'
df0C.to_csv(path_data0C, sep=';')
path_data1C = 'C:/Users/Administrateur/Documents/MISSIONS/imageJ_marjolaine/buccin_expert_imageJ/zoom_separation/1C/data.csv'
df1C.to_csv(path_data1C, sep=';')
......@@ -31,6 +31,20 @@ setup(
'scipy',
'typing',
'reportlab',
'folium',
'folium',
],
)
#
# 'Pillow',
# 'cython',
# 'tensorflow',
# 'keras>=2.0.8',
# 'opencv-python',
# 'h5py',
# 'imgaug',
# 'IPython[all]',
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment