-
Notifications
You must be signed in to change notification settings - Fork 257
added evaluation script for PPHumanSeg model #130
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 5 commits
0d0875c
db4aaca
4193e38
e4b799b
6073f5d
a59e39c
086a717
d449064
c7c2700
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,206 @@ | ||
| import os | ||
| import cv2 as cv | ||
| import numpy as np | ||
| from tqdm import tqdm | ||
|
|
||
|
|
||
class MiniSupervisely:
    """Evaluator for the Mini Supervisely human-segmentation validation set.

    Reads the (input image, label image) path pairs listed in ``val.txt``
    under ``root`` and evaluates a PP-HumanSeg style model with mean IoU
    and pixel accuracy over two classes (background, person).
    """

    def __init__(self, root):
        """
        Args:
            root (str): dataset root directory containing val.txt and images.
        """
        self.root = root
        self.val_path = os.path.join(root, 'val.txt')
        self.image_set = self.load_data(self.val_path)
        self.num_classes = 2  # background + person
        # Metrics hold the -1 sentinel until eval() has been run.
        self.miou = -1
        self.class_miou = -1
        self.acc = -1
        self.class_acc = -1

    @property
    def name(self):
        # Dataset name used in progress-bar descriptions.
        return self.__class__.__name__

    def load_data(self, val_path):
        """
        Load validation image set from val.txt file.

        Args:
            val_path (str): path to val.txt file
        Returns:
            image_set (list): list of [input_path, label_path] pairs
        """
        # Blank lines are skipped: they would otherwise produce empty
        # entries that crash tuple unpacking in eval().
        with open(val_path, 'r') as f:
            return [line.split() for line in f if line.strip()]

    def eval(self, model):
        """
        Evaluate model on validation set, storing mIoU and accuracy.

        Args:
            model (object): PP_HumanSeg model object exposing .infer()
                            and a .name attribute
        """
        # Per-class running totals, accumulated element-wise across images.
        # (The original used Python-list `+`, which CONCATENATES the
        # per-image class lists instead of summing them, so the "per-class"
        # metrics were computed over num_images * num_classes entries.)
        intersect_area_all = np.zeros(self.num_classes, dtype=np.float64)
        pred_area_all = np.zeros(self.num_classes, dtype=np.float64)
        label_area_all = np.zeros(self.num_classes, dtype=np.float64)

        pbar = tqdm(self.image_set)
        for input_image, expected_image in pbar:
            pbar.set_description(
                "Evaluating {} with {} val set".format(model.name, self.name))

            input_image = cv.imread(os.path.join(self.root, input_image))
            input_image = cv.resize(input_image, (192, 192))

            expected_image = cv.imread(os.path.join(self.root, expected_image),
                                       cv.IMREAD_GRAYSCALE)
            # Nearest-neighbour resize: linear interpolation would invent
            # fractional values that are not valid class ids.
            expected_image = cv.resize(
                expected_image, (192, 192),
                interpolation=cv.INTER_NEAREST)[np.newaxis, :, :]

            output_image = model.infer(input_image)

            intersect_area, pred_area, label_area = self.calculate_area(
                output_image,
                expected_image,
                self.num_classes)
            intersect_area_all += np.asarray(intersect_area, dtype=np.float64)
            pred_area_all += np.asarray(pred_area, dtype=np.float64)
            label_area_all += np.asarray(label_area, dtype=np.float64)

        # Store under class_miou/class_acc so get_results() reflects the
        # evaluation (the original assigned self.class_iou, leaving
        # self.class_miou at its -1 sentinel forever).
        self.class_miou, self.miou = self.mean_iou(
            intersect_area_all, pred_area_all, label_area_all)
        self.class_acc, self.acc = self.accuracy(
            intersect_area_all, pred_area_all)

    def get_results(self):
        """
        Get evaluation results.

        Returns:
            miou (float) : mean iou
            class_miou (list) : iou on all classes
            acc (float) : mean accuracy
            class_acc (list) : accuracy on all classes
        """
        return self.miou, self.class_miou, self.acc, self.class_acc

    def print_result(self):
        """Print evaluation results."""
        print("Mean IoU : ", self.miou)
        print("Mean Accuracy : ", self.acc)

    def one_hot(self, arr, max_size):
        # Row i of the identity matrix is the one-hot encoding of class i;
        # fancy indexing maps every element of `arr` to its encoding.
        return np.eye(max_size)[arr]

    def calculate_area(self, pred, label, num_classes, ignore_index=255):
        """
        Calculate intersect, prediction and label area.

        Args:
            pred (Tensor): The prediction by model.
            label (Tensor): The ground truth of image.
            num_classes (int): The unique number of target classes.
            ignore_index (int): Specifies a target value that is ignored.
                Default: 255.
        Returns:
            Tensor: The intersection area of prediction and the ground
                truth on all classes.
            Tensor: The prediction area on all classes.
            Tensor: The ground truth area on all classes.
        """
        # Integer class ids are required for one-hot fancy indexing.
        pred = np.asarray(pred).astype(np.int64)
        label = np.asarray(label).astype(np.int64)

        # Pixels whose label equals ignore_index contribute to no class.
        mask = label != ignore_index
        # Shift every class id up by one so that masked-out pixels
        # (zeroed below) fall into a dummy channel stripped afterwards.
        pred = (pred + 1) * mask
        label = (label + 1) * mask

        pred = self.one_hot(pred, num_classes + 1)
        label = self.one_hot(label, num_classes + 1)

        # Drop the dummy channel 0 that collects the ignored pixels.
        pred = pred[:, :, :, 1:]
        label = label[:, :, :, 1:]

        pred_area = []
        label_area = []
        intersect_area = []

        # Iterate over all classes and calculate their respective areas.
        for i in range(num_classes):
            pred_i = pred[:, :, :, i]
            label_i = label[:, :, :, i]
            pred_area.append(np.sum(pred_i))
            label_area.append(np.sum(label_i))
            intersect_area.append(np.sum(pred_i * label_i))

        return intersect_area, pred_area, label_area

    def mean_iou(self, intersect_area, pred_area, label_area):
        """
        Calculate iou.

        Args:
            intersect_area (Tensor): The intersection area of prediction
                and ground truth on all classes.
            pred_area (Tensor): The prediction area on all classes.
            label_area (Tensor): The ground truth area on all classes.
        Returns:
            np.ndarray: iou on all classes.
            float: mean iou of all classes.
        """
        intersect_area = np.array(intersect_area)
        pred_area = np.array(pred_area)
        label_area = np.array(label_area)

        union = pred_area + label_area - intersect_area

        # A class absent from both prediction and ground truth gets iou 0.
        class_iou = []
        for i in range(len(intersect_area)):
            if union[i] == 0:
                class_iou.append(0)
            else:
                class_iou.append(intersect_area[i] / union[i])

        miou = np.mean(class_iou)

        return np.array(class_iou), miou

    def accuracy(self, intersect_area, pred_area):
        """
        Calculate accuracy.

        Args:
            intersect_area (Tensor): The intersection area of prediction
                and ground truth on all classes.
            pred_area (Tensor): The prediction area on all classes.
        Returns:
            np.ndarray: accuracy on all classes.
            float: mean accuracy.
        """
        intersect_area = np.array(intersect_area)
        pred_area = np.array(pred_area)

        # A class the model never predicted gets accuracy 0.
        class_acc = []
        for i in range(len(intersect_area)):
            if pred_area[i] == 0:
                class_acc.append(0)
            else:
                class_acc.append(intersect_area[i] / pred_area[i])

        # Global pixel accuracy; guard against an empty prediction to
        # avoid a 0/0 NaN.
        total_pred = np.sum(pred_area)
        macc = np.sum(intersect_area) / total_pred if total_pred > 0 else 0

        return np.array(class_acc), macc
Uh oh!
There was an error while loading. Please reload this page.