-
Notifications
You must be signed in to change notification settings - Fork 63
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
1 parent
9dce44f
commit 3324fb1
Showing
82 changed files
with
5,176 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,13 @@ | ||
*.o
*.so
*log
*.pyc
*.png
*.ppm
*.flo
*.jpg
*.tar
*.mp4
*.pth
checkpoints/
data/
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,52 @@ | ||
import os, sys, argparse, subprocess

### custom lib
import utils


if __name__ == "__main__":

    parser = argparse.ArgumentParser(description='Fast Blind Video Temporal Consistency')

    ### model options
    parser.add_argument('-method', type=str, required=True, help='full model name')
    parser.add_argument('-epoch', type=int, default=-1, help='model epoch')
    parser.add_argument('-gpu', type=int, default=0, help='gpu device id')
    parser.add_argument('-metric', type=str, required=True, choices=["LPIPS", "WarpError"])
    parser.add_argument('-redo', action="store_true", help='redo evaluation')

    opts = parser.parse_args()
    print(opts)

    # A concrete epoch selects the per-epoch output folder of the method.
    if opts.epoch != -1:
        opts.method = os.path.join("output", opts.method, "epoch_%d" % opts.epoch)

    ### load (dataset, task) pairs; '#' lines are comments
    filename = "lists/test_tasks.txt"
    dataset_task_list = []
    with open(filename) as f:
        for line in f:
            line = line.strip()
            # Bug fix: skip blank lines too — `line[0]` on an empty line
            # raised IndexError in the original.
            if line and not line.startswith("#"):
                dataset_task_list.append(line.split())

    ### run the metric script for every (dataset, task) pair that has no result yet
    for dataset, task in dataset_task_list:

        filename = '../../data/test/%s/%s/%s/%s.txt' % (opts.method, task, dataset, opts.metric)

        if not os.path.exists(filename) or opts.redo:

            # NOTE(review): shell-string command built from CLI options; inputs
            # are operator-controlled here, but a list + shell=False would be safer.
            cmd = "CUDA_VISIBLE_DEVICES=%d python evaluate_%s.py -dataset %s -phase test -task %s -method %s -redo %d" \
                % (opts.gpu, opts.metric, dataset, task, opts.method, opts.redo)

            utils.run_cmd(cmd)

    ### print the last line (final score) of every result file
    print("%s:" % opts.metric)
    for dataset, task in dataset_task_list:

        cmd = "tail -n1 ../../data/test/%s/%s/%s/%s.txt" % (opts.method, task, dataset, opts.metric)
        subprocess.call(cmd, shell=True)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,35 @@ | ||
import os, sys, argparse, subprocess


if __name__ == "__main__":

    parser = argparse.ArgumentParser(description='Fast Blind Video Temporal Consistency')

    ### model options
    parser.add_argument('-method', type=str, required=True, help='full model name')
    parser.add_argument('-epoch', type=int, required=True, help='epoch to test')
    parser.add_argument('-gpu', type=int, default=0, help='gpu device id')
    parser.add_argument('-reverse', action="store_true", help='reverse task list')

    opts = parser.parse_args()

    ### load (dataset, task) pairs; '#' lines are comments
    filename = "lists/test_tasks.txt"
    dataset_task_list = []
    with open(filename) as f:
        for line in f:
            line = line.strip()
            # Bug fix: skip blank lines too — `line[0]` on an empty line
            # raised IndexError in the original.
            if line and not line.startswith("#"):
                dataset_task_list.append(line.split())

    # Optionally process the list back-to-front (useful to split work
    # between two machines running in opposite directions).
    if opts.reverse:
        dataset_task_list.reverse()

    ### run test.py once per (dataset, task) pair
    for dataset, task in dataset_task_list:

        cmd = "CUDA_VISIBLE_DEVICES=%d python test.py -dataset %s -phase test -task %s -method %s -epoch %d" \
            % (opts.gpu, dataset, task, opts.method, opts.epoch)

        print(cmd)
        subprocess.call(cmd, shell=True)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,148 @@ | ||
#!/usr/bin/python | ||
from __future__ import print_function | ||
|
||
### python lib | ||
import os, sys, argparse, glob, re, math, pickle, cv2 | ||
from datetime import datetime | ||
import numpy as np | ||
|
||
### torch lib | ||
import torch | ||
import torch.nn as nn | ||
from torch.autograd import Variable | ||
from torch.utils.data import DataLoader | ||
import torchvision.transforms as transforms | ||
|
||
### custom lib | ||
import networks | ||
import utils | ||
|
||
|
||
|
||
if __name__ == "__main__": | ||
|
||
parser = argparse.ArgumentParser(description='optical flow estimation') | ||
|
||
### testing options | ||
parser.add_argument('-model', type=str, default="FlowNet2", help='Flow model name') | ||
|
||
parser.add_argument('-dataset', type=str, required=True, help='testing datasets') | ||
parser.add_argument('-phase', type=str, default="test", choices=["train", "test"]) | ||
parser.add_argument('-data_dir', type=str, default='data', help='path to data folder') | ||
parser.add_argument('-list_dir', type=str, default='lists', help='path to list folder') | ||
parser.add_argument('-gpu', type=int, default=0, help='gpu device id') | ||
parser.add_argument('-cpu', action='store_true', help='use cpu?') | ||
|
||
|
||
opts = parser.parse_args() | ||
|
||
### update options | ||
opts.cuda = (opts.cpu != True) | ||
opts.grads = {} # dict to collect activation gradients (for training debug purpose) | ||
|
||
### FlowNet options | ||
opts.rgb_max = 1.0 | ||
opts.fp16 = False | ||
|
||
print(opts) | ||
|
||
if opts.cuda and not torch.cuda.is_available(): | ||
raise Exception("No GPU found, please run without -cuda") | ||
|
||
### initialize FlowNet | ||
print('===> Initializing model from %s...' %opts.model) | ||
model = networks.__dict__[opts.model](opts) | ||
|
||
### load pre-trained FlowNet | ||
model_filename = os.path.join("pretrained_models", "%s_checkpoint.pth.tar" %opts.model) | ||
print("===> Load %s" %model_filename) | ||
checkpoint = torch.load(model_filename) | ||
model.load_state_dict(checkpoint['state_dict']) | ||
|
||
device = torch.device("cuda" if opts.cuda else "cpu") | ||
model = model.to(device) | ||
model.eval() | ||
|
||
### load image list | ||
list_filename = os.path.join(opts.list_dir, "%s_%s.txt" %(opts.dataset, opts.phase)) | ||
with open(list_filename) as f: | ||
video_list = [line.rstrip() for line in f.readlines()] | ||
|
||
|
||
for video in video_list: | ||
|
||
frame_dir = os.path.join(opts.data_dir, opts.phase, "input", opts.dataset, video) | ||
fw_flow_dir = os.path.join(opts.data_dir, opts.phase, "fw_flow", opts.dataset, video) | ||
if not os.path.isdir(fw_flow_dir): | ||
os.makedirs(fw_flow_dir) | ||
|
||
fw_occ_dir = os.path.join(opts.data_dir, opts.phase, "fw_occlusion", opts.dataset, video) | ||
if not os.path.isdir(fw_occ_dir): | ||
os.makedirs(fw_occ_dir) | ||
|
||
fw_rgb_dir = os.path.join(opts.data_dir, opts.phase, "fw_flow_rgb", opts.dataset, video) | ||
if not os.path.isdir(fw_rgb_dir): | ||
os.makedirs(fw_rgb_dir) | ||
|
||
frame_list = glob.glob(os.path.join(frame_dir, "*.jpg")) | ||
|
||
for t in range(len(frame_list) - 1): | ||
|
||
print("Compute flow on %s-%s frame %d" %(opts.dataset, opts.phase, t)) | ||
|
||
### load input images | ||
img1 = utils.read_img(os.path.join(frame_dir, "%05d.jpg" %(t))) | ||
img2 = utils.read_img(os.path.join(frame_dir, "%05d.jpg" %(t + 1))) | ||
|
||
### resize image | ||
size_multiplier = 64 | ||
H_orig = img1.shape[0] | ||
W_orig = img1.shape[1] | ||
|
||
H_sc = int(math.ceil(float(H_orig) / size_multiplier) * size_multiplier) | ||
W_sc = int(math.ceil(float(W_orig) / size_multiplier) * size_multiplier) | ||
|
||
img1 = cv2.resize(img1, (W_sc, H_sc)) | ||
img2 = cv2.resize(img2, (W_sc, H_sc)) | ||
|
||
with torch.no_grad(): | ||
|
||
### convert to tensor | ||
img1 = utils.img2tensor(img1).to(device) | ||
img2 = utils.img2tensor(img2).to(device) | ||
|
||
### compute fw flow | ||
fw_flow = model(img1, img2) | ||
fw_flow = utils.tensor2img(fw_flow) | ||
|
||
### compute bw flow | ||
bw_flow = model(img2, img1) | ||
bw_flow = utils.tensor2img(bw_flow) | ||
|
||
|
||
### resize flow | ||
fw_flow = utils.resize_flow(fw_flow, W_out = W_orig, H_out = H_orig) | ||
bw_flow = utils.resize_flow(bw_flow, W_out = W_orig, H_out = H_orig) | ||
|
||
### compute occlusion | ||
fw_occ = utils.detect_occlusion(bw_flow, fw_flow) | ||
|
||
### save flow | ||
output_flow_filename = os.path.join(fw_flow_dir, "%05d.flo" %t) | ||
if not os.path.exists(output_flow_filename): | ||
utils.save_flo(fw_flow, output_flow_filename) | ||
|
||
### save occlusion map | ||
output_occ_filename = os.path.join(fw_occ_dir, "%05d.png" %t) | ||
if not os.path.exists(output_occ_filename): | ||
utils.save_img(fw_occ, output_occ_filename) | ||
|
||
### save rgb flow | ||
output_filename = os.path.join(fw_rgb_dir, "%05d.png" %t) | ||
if not os.path.exists(output_filename): | ||
flow_rgb = utils.flow_to_rgb(fw_flow) | ||
utils.save_img(flow_rgb, output_filename) | ||
|
||
|
||
|
||
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,36 @@ | ||
#!/bin/bash
# Download the dataset (train / test / all / results) for the
# fast-blind-video-consistency project.

SET=$1

if [[ $SET != "train" && $SET != "test" && $SET != "all" && $SET != "results" ]]; then
    echo "Usage: ./download_dataset.sh SET"
    echo "SET options:"
    # Bug fix: plain `echo " \t ..."` prints a literal backslash-t;
    # -e makes the escape render as a tab.
    echo -e " \t train - download training data (25 GB)"
    echo -e " \t test - download testing data ( GB)"
    echo -e " \t all - download both training and testing data ( GB)"
    echo -e " \t results - download results of Bonneel et al. and our approach ( GB)"
    exit 1
fi

URL=https://vllab.ucmerced.edu/wlai24/video_consistency/data

# Bug fix: wget -O ./data/... fails if the folder does not exist yet.
mkdir -p ./data

if [[ $SET == "train" ]]; then
    wget -N $URL/train.zip -O ./data/train.zip
    unzip ./data/train.zip -d ./data
fi

if [[ $SET == "test" ]]; then
    wget -N $URL/test.zip -O ./data/test.zip
    unzip ./data/test.zip -d ./data
fi

if [[ $SET == "all" ]]; then
    wget -N $URL/train.zip -O ./data/train.zip
    unzip ./data/train.zip -d ./data

    wget -N $URL/test.zip -O ./data/test.zip
    unzip ./data/test.zip -d ./data
fi
Oops, something went wrong.