Initial commit

pull/4/head
Shean 2017-09-18 00:16:09 +08:00
parent 6993c951b0
commit dd35b4b29b
9 changed files with 770 additions and 0 deletions

43
Dockerfile Normal file

@@ -0,0 +1,43 @@
FROM nvidia/cuda:8.0-cudnn6-devel
ENV LANG=C.UTF-8 LC_ALL=C.UTF-8
RUN apt-get update --fix-missing && apt-get install -y wget bzip2 ca-certificates \
libglib2.0-0 libxext6 libsm6 libxrender1 \
git mercurial subversion
RUN echo 'export PATH=/opt/conda/bin:$PATH' > /etc/profile.d/conda.sh && \
wget --quiet https://repo.continuum.io/archive/Anaconda3-4.4.0-Linux-x86_64.sh -O ~/anaconda.sh && \
/bin/bash ~/anaconda.sh -b -p /opt/conda && \
rm ~/anaconda.sh && \
chmod -R a+r /opt/conda
RUN apt-get install -y curl grep sed dpkg && \
apt-get install -y wget git libhdf5-dev g++ graphviz openmpi-bin && \
apt-get install -y build-essential cmake pkg-config && \
apt-get install -y libjpeg8-dev libtiff5-dev libjasper-dev libpng12-dev && \
apt-get install -y libavcodec-dev libavformat-dev libswscale-dev libv4l-dev && \
apt-get install -y libxvidcore-dev libx264-dev && \
apt-get install -y libgtk-3-dev && \
apt-get install -y libatlas-base-dev gfortran && \
apt-get install -y libav-tools && \
apt-get clean
ENV PATH /opt/conda/bin:$PATH
# Python + OpenCV + Tensorflow + Keras
ARG python_version=3.5
RUN conda install -y python=${python_version} && \
pip install --upgrade pip && \
pip install tensorflow-gpu && \
pip install sk-video && \
pip install tqdm coloredlogs && \
pip install opencv-contrib-python && \
conda install -y Pillow scikit-learn scikit-image graphviz pydot notebook pandas matplotlib mkl nose pyyaml six h5py && \
pip install keras && \
conda clean -yt
ENV PYTHONPATH='/src/:$PYTHONPATH'
WORKDIR /share

44
README.md Normal file

@@ -0,0 +1,44 @@
# abnormal-spatiotemporal-ae
Code for "Abnormal Event Detection in Videos Using Spatiotemporal Autoencoder".
The paper is available from [Springer](https://link.springer.com/chapter/10.1007/978-3-319-59081-3_23) and [arXiv](https://arxiv.org/abs/1701.01546).
Prerequisites:
- keras
- tensorflow
- h5py
- scikit-image
- scikit-learn
- sk-video
- tqdm (for the progress bar)
- coloredlogs (optional, for colored terminal logs only)
You can use the provided `Dockerfile` to build the environment, then enter it with `nvidia-docker run --rm -it -v HOST_FOLDER:/share DOCKER_IMAGE bash`.
To train the model, run `python start_train.py`. The default configuration is in `config.yml`. Prepare the video datasets you plan to train and evaluate on; the benchmark dataset videos can be obtained from their respective authors. For each dataset, put the training videos into `VIDEO_ROOT_PATH/DATASET_NAME/training_videos` and the testing videos into `VIDEO_ROOT_PATH/DATASET_NAME/testing_videos`. Example structure of the training videos for the `avenue` dataset:
`VIDEO_ROOT_PATH/avenue/training_videos`
- `01.avi`
- `02.avi`
- ...
- `16.avi`
Once the model is trained, run `python start_test.py` after setting the parameters at the beginning of that file, as sketched below.
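For example, the settings block at the top of `start_test.py` looks like this (all values are placeholders; use the job UUID and the snapshot epoch/val-loss produced by your own training run):

```python
dataset = 'avenue'                        # one of: avenue, enter, exit, ped1, ped2
job_uuid = 'REPLACE-WITH-YOUR-JOB-UUID'   # job folder name created by start_train.py
epoch = 100                               # epoch of the model snapshot to load
val_loss = 0.002345                       # val loss embedded in the snapshot filename
time_length = 8                           # must match the config.yml used for training
```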
Please cite the following paper if you use our code or paper:
```
@inbook{Chong2017,
author = {Chong, Yong Shean and
Tay, Yong Haur},
editor = {Cong, Fengyu and
Leung, Andrew and
Wei, Qinglai},
title = {Abnormal Event Detection in Videos Using Spatiotemporal Autoencoder},
bookTitle = {Advances in Neural Networks - ISNN 2017: 14th International Symposium, ISNN 2017, Sapporo, Hakodate, and Muroran, Hokkaido, Japan, June 21--26, 2017, Proceedings, Part II},
year = {2017},
publisher = {Springer International Publishing},
address = {Cham},
pages = {189--196},
isbn = {978-3-319-59081-3},
doi = {10.1007/978-3-319-59081-3_23},
url = {https://doi.org/10.1007/978-3-319-59081-3_23}
}
```

309
classifier.py Normal file

@@ -0,0 +1,309 @@
from dataset import preprocess_data
import os
from keras import backend as K
import matplotlib
matplotlib.use('Agg')
assert(K.image_data_format() == 'channels_last')
def get_model(t):
from keras.models import Model
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.convolutional_recurrent import ConvLSTM2D
from keras.layers.normalization import BatchNormalization
from keras.layers.wrappers import TimeDistributed
from keras.layers.core import Activation
from keras.layers import Input
input_tensor = Input(shape=(t, 224, 224, 1))
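# Shape walkthrough for reference: with 'same' padding, the per-time-step
# tensor shapes through the autoencoder are:
#   input               (t, 224, 224,   1)
#   conv1   (stride 4)  (t,  56,  56, 128)
#   conv2   (stride 2)  (t,  28,  28,  64)
#   convlstm1..3        (t,  28,  28,  64 / 32 / 64)
#   deconv1 (stride 2)  (t,  56,  56, 128)
#   output  (stride 4)  (t, 224, 224,   1)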
conv1 = TimeDistributed(Conv2D(128, kernel_size=(11, 11), padding='same', strides=(4, 4), name='conv1'),
input_shape=(t, 224, 224, 1))(input_tensor)
conv1 = TimeDistributed(BatchNormalization())(conv1)
conv1 = TimeDistributed(Activation('relu'))(conv1)
conv2 = TimeDistributed(Conv2D(64, kernel_size=(5, 5), padding='same', strides=(2, 2), name='conv2'))(conv1)
conv2 = TimeDistributed(BatchNormalization())(conv2)
conv2 = TimeDistributed(Activation('relu'))(conv2)
convlstm1 = ConvLSTM2D(64, kernel_size=(3, 3), padding='same', return_sequences=True, name='convlstm1')(conv2)
convlstm2 = ConvLSTM2D(32, kernel_size=(3, 3), padding='same', return_sequences=True, name='convlstm2')(convlstm1)
convlstm3 = ConvLSTM2D(64, kernel_size=(3, 3), padding='same', return_sequences=True, name='convlstm3')(convlstm2)
deconv1 = TimeDistributed(Conv2DTranspose(128, kernel_size=(5, 5), padding='same', strides=(2, 2), name='deconv1'))(convlstm3)
deconv1 = TimeDistributed(BatchNormalization())(deconv1)
deconv1 = TimeDistributed(Activation('relu'))(deconv1)
decoded = TimeDistributed(Conv2DTranspose(1, kernel_size=(11, 11), padding='same', strides=(4, 4), name='deconv2'))(
deconv1)
return Model(inputs=input_tensor, outputs=decoded)
def compile_model(model, loss, optimizer):
"""Compiles the given model (from get_model) with given loss (from get_loss) and optimizer (from get_optimizer)
"""
from keras import optimizers
model.summary()
if optimizer == 'sgd':
opt = optimizers.SGD(nesterov=True)
else:
opt = optimizer
model.compile(loss=loss, optimizer=opt)
def train(dataset, job_folder, logger, video_root_path='/share/data/videos'):
"""Build and train the model
"""
import yaml
import numpy as np
from keras.callbacks import ModelCheckpoint, EarlyStopping
from custom_callback import LossHistory
import matplotlib.pyplot as plt
from keras.utils.io_utils import HDF5Matrix
logger.debug("Loading configs from {}".format(os.path.join(job_folder, 'config.yml')))
with open(os.path.join(job_folder, 'config.yml'), 'r') as ymlfile:
cfg = yaml.load(ymlfile)
nb_epoch = cfg['epochs']
batch_size = cfg['batch_size']
loss = cfg['cost']
optimizer = cfg['optimizer']
time_length = cfg['time_length']
# shuffle = cfg['shuffle']
logger.info("Building model of type {} and activation {}".format(model_type, activation))
model = get_model(time_length)
logger.info("Compiling model with {} and {} optimizer".format(loss, optimizer))
compile_model(model, loss, optimizer)
logger.info("Saving model configuration to {}".format(os.path.join(job_folder, 'model.yml')))
yaml_string = model.to_yaml()
with open(os.path.join(job_folder, 'model.yml'), 'w') as outfile:
yaml.dump(yaml_string, outfile)
logger.info("Preparing training and testing data")
preprocess_data(logger, dataset, time_length, video_root_path)
data = HDF5Matrix(os.path.join(video_root_path, '{0}/{0}_train_t{1}.h5'.format(dataset, time_length)),
'data')
snapshot = ModelCheckpoint(os.path.join(job_folder,
'model_snapshot_e{epoch:03d}_{val_loss:.6f}.h5'))
earlystop = EarlyStopping(patience=10)
history_log = LossHistory(job_folder=job_folder, logger=logger)
logger.info("Initializing training...")
history = model.fit(
data, data,
batch_size=batch_size,
epochs=nb_epoch,
validation_split=0.15,
shuffle='batch',
callbacks=[snapshot, earlystop, history_log]
)
logger.info("Training completed!")
np.save(os.path.join(job_folder, 'train_profile.npy'), history.history)
n_epoch = len(history.history['loss'])
logger.info("Plotting training profile for {} epochs".format(n_epoch))
plt.plot(range(1, n_epoch+1),
history.history['val_loss'],
'g-',
label='Val Loss')
plt.plot(range(1, n_epoch+1),
history.history['loss'],
'g--',
label='Training Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.savefig(os.path.join(job_folder, 'train_val_loss.png'))
def get_gt_vid(dataset, vid_idx, pred_vid):
import numpy as np
if dataset in ("indoor", "plaza", "lawn"):
gt_vid = np.load('/share/data/groundtruths/{0}_test_gt.npy'.format(dataset))
else:
gt_vid_raw = np.loadtxt('/share/data/groundtruths/gt_{0}_vid{1:02d}.txt'.format(dataset, vid_idx+1))
gt_vid = np.zeros_like(pred_vid)
try:
for event in range(gt_vid_raw.shape[1]):
start = int(gt_vid_raw[0, event])
end = int(gt_vid_raw[1, event]) + 1
gt_vid[start:end] = 1
except IndexError:
start = int(gt_vid_raw[0])
end = int(gt_vid_raw[1])
gt_vid[start:end] = 1
return gt_vid
def compute_eer(far, frr):
"""Return the equal error rate (EER): the mean of FAR and FRR at the threshold where the two are closest."""
min_dist = float('inf')
eer = None
for item_far, item_frr in zip(far, frr):
dist = abs(item_far - item_frr)
if dist < min_dist:
min_dist = dist
eer = (item_far + item_frr) / 2
return eer
def calc_auc_overall(logger, dataset, n_vid, save_path):
import numpy as np
from sklearn.metrics import roc_auc_score, roc_curve
import matplotlib.pyplot as plt
all_gt = []
all_pred = []
for vid in range(n_vid):
pred_vid = np.loadtxt(os.path.join(save_path, 'frame_costs_{0}_video_{1:02d}.txt'.format(dataset, vid+1)))
gt_vid = get_gt_vid(dataset, vid, pred_vid)
all_gt.append(gt_vid)
all_pred.append(pred_vid)
all_gt = np.asarray(all_gt)
all_pred = np.asarray(all_pred)
all_gt = np.concatenate(all_gt).ravel()
all_pred = np.concatenate(all_pred).ravel()
auc = roc_auc_score(all_gt, all_pred)
fpr, tpr, thresholds = roc_curve(all_gt, all_pred, pos_label=1)
frr = 1 - tpr
far = fpr
eer = compute_eer(far, frr)
logger.info("Dataset {}: Overall AUC = {:.2f}%, Overall EER = {:.2f}%".format(dataset, auc*100, eer*100))
plt.plot(fpr, tpr)
plt.plot([0,1],[1,0],'--')
plt.xlim(0,1.01)
plt.ylim(0,1.01)
plt.title('{0} AUC: {1:.3f}, EER: {2:.3f}'.format(dataset, auc, eer))
plt.savefig(os.path.join(save_path, '{}_auc.png'.format(dataset)))
plt.close()
return auc, eer
def test(logger, dataset, t, job_uuid, epoch, val_loss, visualize_score=True, visualize_frame=False,
video_root_path='/share/data/videos'):
import numpy as np
from keras.models import load_model
import os
import h5py
from keras.utils.io_utils import HDF5Matrix
import matplotlib.pyplot as plt
from scipy.misc import imresize
n_videos = {'avenue': 21, 'enter': 6, 'exit': 4, 'ped1': 36, 'ped2': 12}
test_dir = os.path.join(video_root_path, '{0}/testing_h5_t{1}'.format(dataset, t))
job_folder = os.path.join('/share/clean/{}/jobs'.format(dataset), job_uuid)
model_filename = 'model_snapshot_e{:03d}_{:.6f}.h5'.format(epoch, val_loss)
temporal_model = load_model(os.path.join(job_folder, model_filename))
save_path = os.path.join(job_folder, 'result')
os.makedirs(save_path, exist_ok=True)
for videoid in range(n_videos[dataset]):
videoname = '{0}_{1:02d}.h5'.format(dataset, videoid+1)
filepath = os.path.join(test_dir, videoname)
logger.info("==> {}".format(filepath))
f = h5py.File(filepath, 'r')
filesize = f['data'].shape[0]
f.close()
gt_vid_raw = np.loadtxt('/share/data/groundtruths/gt_{0}_vid{1:02d}.txt'.format(dataset, videoid+1))
logger.debug("Predicting using {}".format(os.path.join(job_folder, model_filename)))
X_test = HDF5Matrix(filepath, 'data')
res = temporal_model.predict(X_test, batch_size=4)
X_test = np.array(X_test)
if visualize_score:
logger.debug("Calculating volume reconstruction error")
vol_costs = np.zeros((filesize,))
for j in range(filesize):
vol_costs[j] = np.linalg.norm(np.squeeze(res[j])-np.squeeze(X_test[j]))
file_name_prefix = 'vol_costs_{0}_video'.format(dataset)
np.savetxt(os.path.join(save_path,file_name_prefix+'_'+'%02d'%(videoid+1)+'.txt'),vol_costs)
logger.debug("Calculating frame reconstruction error")
raw_costs = imresize(np.expand_dims(vol_costs, 1), (filesize+t, 1))  # stretch volume costs to per-frame length; imresize byte-scales to 0-255, which the min-max normalization below makes harmless
raw_costs = np.squeeze(raw_costs)
gt_vid = np.zeros_like(raw_costs)
file_name_prefix = 'frame_costs_{0}_video'.format(dataset)
np.savetxt(os.path.join(save_path, file_name_prefix+'_'+'%02d'%(videoid+1)+'.txt'), raw_costs)
score_vid = raw_costs - min(raw_costs)
score_vid = 1 - (score_vid / max(score_vid))
file_name_prefix = 'frame_costs_scaled_{0}_video'.format(dataset)
np.savetxt(os.path.join(save_path, file_name_prefix + '_' + '%02d' % (videoid + 1) + '.txt'), 1-score_vid)
logger.debug("Plotting frame reconstruction error")
plt.plot(np.arange(1, raw_costs.shape[0]+1), raw_costs)
plt.savefig(os.path.join(save_path, '{}_video_{:02d}_err.png'.format(dataset, videoid+1)))
plt.clf()
logger.debug("Plotting regularity scores")
ax = plt.subplot(111)
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height*0.1, box.width, box.height*0.9])
ax.plot(np.arange(1, score_vid.shape[0]+1), score_vid, color='b', linewidth=2.0)
plt.xlabel('Frame number')
plt.ylabel('Regularity score')
plt.ylim(0, 1)
plt.xlim(1, score_vid.shape[0]+1)
try:
for event in range(gt_vid_raw.shape[1]):
start = int(gt_vid_raw[0, event])
end = int(gt_vid_raw[1, event]) + 1
gt_vid[start:end] = 1
plt.fill_between(np.arange(start, end), 0, 1, facecolor='red', alpha=0.4)
except IndexError:
start = int(gt_vid_raw[0])
end = int(gt_vid_raw[1])
gt_vid[start:end] = 1
plt.fill_between(np.arange(start, end), 0, 1, facecolor='red', alpha=0.4)
plt.savefig(os.path.join(save_path, 'scores_{0}_video_{1:02d}.png'.format(dataset, videoid+1)), dpi=300)
plt.close()
if visualize_frame:
logger.debug("Calculating pixel reconstruction error")
pixel_costs = np.zeros((filesize+t, 224, 224, 1))
count = 0
for vol in range(filesize):
for i in range(t):
pixel_costs[vol+i, :, :, :] += np.sqrt((res[count, i, :, :, :] - X_test[count, i, :, :, :])**2)
count += 1
file_name_prefix = 'pixel_costs_{0}_video'.format(dataset)
np.save(os.path.join(save_path,file_name_prefix+'_'+'%02d'%(videoid+1)+'.npy'),pixel_costs)
logger.debug("Drawing pixel reconstruction error")
for idx in range(filesize+t):
plt.imshow(np.squeeze(pixel_costs[idx]), vmin=np.amin(pixel_costs), vmax=np.amax(pixel_costs), cmap='jet')
plt.colorbar()
plt.savefig(os.path.join(save_path, '{}_err_vid{:02d}_frm{:03d}.png'.format(dataset, videoid+1, idx+1)))
plt.clf()
logger.info("{}: Calculating overall metrics".format(dataset))
auc_overall, eer_overall = calc_auc_overall(logger, dataset, n_videos[dataset], save_path)
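A quick smoke test of the model-building code in `classifier.py` (a sketch; it only builds and compiles the network, no data required):

```python
from classifier import get_model, compile_model

model = get_model(8)                # 8-frame volumes, matching config.yml
compile_model(model, 'mse', 'sgd')  # prints a summary, compiles with Nesterov SGD
print(model.output_shape)           # (None, 8, 224, 224, 1)
```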

5
config.yml Normal file

@@ -0,0 +1,5 @@
time_length: 8   # t: number of frames per input volume
cost: mse        # training loss passed to model.compile
epochs: 2000     # upper bound; EarlyStopping(patience=10) usually stops sooner
optimizer: sgd   # 'sgd' is mapped to Nesterov SGD (see classifier.compile_model)
batch_size: 16

44
convert_video_to_frame.py Normal file

@@ -0,0 +1,44 @@
import os
import skvideo.io
from skimage.transform import resize
from skimage.io import imsave
video_root_path = '/share/data/videos'
size = (224, 224)
def video_to_frame(dataset, train_or_test):
video_path = os.path.join(video_root_path, dataset, '{}_videos'.format(train_or_test))
frame_path = os.path.join(video_root_path, dataset, '{}_frames'.format(train_or_test))
os.makedirs(frame_path, exist_ok=True)
for video_file in os.listdir(video_path):
if video_file.lower().endswith(('.avi', '.mp4')):
print('==> ' + os.path.join(video_path, video_file))
vid_frame_path = os.path.join(frame_path, os.path.basename(video_file).split('.')[0])
os.makedirs(vid_frame_path, exist_ok=True)
vidcap = skvideo.io.vreader(os.path.join(video_path, video_file))
count = 1
for image in vidcap:
image = resize(image, size, mode='reflect')
imsave(os.path.join(vid_frame_path, '{:05d}.jpg'.format(count)), image) # save frame as JPEG file
count += 1
for dataset in ('avenue', 'ped1', 'ped2', 'enter', 'exit'):
    video_to_frame(dataset, 'training')
    video_to_frame(dataset, 'testing')

32
custom_callback.py Normal file

@@ -0,0 +1,32 @@
from keras.callbacks import Callback
import numpy as np
import os
import csv
class LossHistory(Callback):
def __init__(self, job_folder, logger):
super(LossHistory, self).__init__()
self.save_path = job_folder
self.logger = logger
def on_train_begin(self, logs={}):
self.logger.debug("Training started!")
self.train_losses = []
self.val_losses = []
def on_epoch_end(self, epoch, logs={}):
self.logger.debug("Training loss for epoch {} is {}".format(epoch+1, logs.get('loss')))
self.logger.debug("Validation loss for epoch {} is {}".format(epoch+1, logs.get('val_loss')))
self.train_losses.append(logs.get('loss'))
self.val_losses.append(logs.get('val_loss'))
with open(os.path.join(self.save_path, 'train_losses.csv'), 'a') as f:
writer = csv.writer(f)
writer.writerow([logs.get('loss')])
with open(os.path.join(self.save_path, 'val_losses.csv'), 'a') as f:
writer = csv.writer(f)
writer.writerow([logs.get('val_loss')])
def on_train_end(self, logs={}):
self.logger.info('Saving training and validation loss history to file...')
np.save(os.path.join(self.save_path, 'train_losses.npy'), np.array(self.train_losses))
np.save(os.path.join(self.save_path, 'val_losses.npy'), np.array(self.val_losses))
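`LossHistory` is wired into training in `classifier.train`; the minimal usage pattern is (a sketch, assuming a compiled `model`, a `logger`, and a writable `job_folder` already exist):

```python
from custom_callback import LossHistory

history_log = LossHistory(job_folder=job_folder, logger=logger)
model.fit(x, x, epochs=10, validation_split=0.15, callbacks=[history_log])
# Appends per-epoch losses to train_losses.csv / val_losses.csv during training,
# then dumps train_losses.npy / val_losses.npy when training ends.
```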

185
dataset.py Normal file

@@ -0,0 +1,185 @@
def calc_mean(dataset, video_root_path='/share/data/videos'):
import os
from skimage.io import imread
import numpy as np
frame_path = os.path.join(video_root_path, dataset, 'training_frames')
count = 0
frame_sum = np.zeros((224, 224)).astype('float64')
for frame_folder in os.listdir(frame_path):
print('==> ' + os.path.join(frame_path, frame_folder))
for frame_file in os.listdir(os.path.join(frame_path, frame_folder)):
frame_filename = os.path.join(frame_path, frame_folder, frame_file)
frame_value = imread(frame_filename, as_grey=True)
assert 0. <= frame_value.min() and frame_value.max() <= 1.  # pixel values expected in [0, 1]
frame_sum += frame_value
count += 1
frame_mean = frame_sum / count
assert 0. <= frame_mean.min() and frame_mean.max() <= 1.
np.save(os.path.join(video_root_path, dataset, 'mean_frame_224.npy'), frame_mean)
def subtract_mean(dataset, video_root_path='/share/data/videos'):
import os
from skimage.io import imread
import numpy as np
frame_mean = np.load(os.path.join(video_root_path, dataset, 'mean_frame_224.npy'))
frame_path = os.path.join(video_root_path, dataset, 'training_frames')
for frame_folder in os.listdir(frame_path):
print('==> ' + os.path.join(frame_path, frame_folder))
training_frames_vid = []
for frame_file in sorted(os.listdir(os.path.join(frame_path, frame_folder))):
frame_filename = os.path.join(frame_path, frame_folder, frame_file)
frame_value = imread(frame_filename, as_grey=True)
assert 0. <= frame_value.min() and frame_value.max() <= 1.
frame_value -= frame_mean
training_frames_vid.append(frame_value)
training_frames_vid = np.array(training_frames_vid)
np.save(os.path.join(video_root_path, dataset, 'training_frames_{}.npy'.format(frame_folder)), training_frames_vid)
frame_path = os.path.join(video_root_path, dataset, 'testing_frames')
for frame_folder in os.listdir(frame_path):
print('==> ' + os.path.join(frame_path, frame_folder))
testing_frames_vid = []
for frame_file in sorted(os.listdir(os.path.join(frame_path, frame_folder))):
frame_filename = os.path.join(frame_path, frame_folder, frame_file)
frame_value = imread(frame_filename, as_grey=True)
assert 0. <= frame_value.min() and frame_value.max() <= 1.
frame_value -= frame_mean
testing_frames_vid.append(frame_value)
testing_frames_vid = np.array(testing_frames_vid)
np.save(os.path.join(video_root_path, dataset, 'testing_frames_{}.npy'.format(frame_folder)), testing_frames_vid)
def build_h5(dataset, train_or_test, t, video_root_path='/share/data/videos'):
import h5py
from tqdm import tqdm
import os
import numpy as np
print("==> {} {}".format(dataset, train_or_test))
def build_volume(train_or_test, num_videos, time_length):
for i in tqdm(range(num_videos)):
data_frames = np.load(os.path.join(video_root_path, '{}/{}_frames_{:02d}.npy'.format(dataset, train_or_test, i+1)))
data_frames = np.expand_dims(data_frames, axis=-1)
num_frames = data_frames.shape[0]
data_only_frames = np.zeros((num_frames-time_length, time_length, 224, 224, 1)).astype('float16')
vol = 0
for j in range(num_frames-time_length):
data_only_frames[vol] = data_frames[j:j+time_length] # Read a single volume
vol += 1
with h5py.File(os.path.join(video_root_path, '{0}/{1}_h5_t{2}/{0}_{3:02d}.h5'.format(dataset, train_or_test, time_length, i+1)), 'w') as f:
if train_or_test == 'training':
np.random.shuffle(data_only_frames)
f['data'] = data_only_frames
os.makedirs(os.path.join(video_root_path, '{}/{}_h5_t{}'.format(dataset, train_or_test, t)), exist_ok=True)
num_videos = len(os.listdir(os.path.join(video_root_path, '{}/{}_frames'.format(dataset, train_or_test))))
build_volume(train_or_test, num_videos, time_length=t)
def combine_dataset(dataset, t, video_root_path='/share/data/videos'):
import h5py
import os
from tqdm import tqdm
print("==> {}".format(dataset))
output_file = h5py.File(os.path.join(video_root_path, '{0}/{0}_train_t{1}.h5'.format(dataset, t)), 'w')
h5_folder = os.path.join(video_root_path, '{0}/training_h5_t{1}'.format(dataset, t))
filelist = sorted([os.path.join(h5_folder, item) for item in os.listdir(h5_folder)])
# keep track of the total number of rows
total_rows = 0
for n, f in enumerate(tqdm(filelist)):
your_data_file = h5py.File(f, 'r')
your_data = your_data_file['data']
total_rows = total_rows + your_data.shape[0]
if n == 0:
# first file: create the dataset, resizable along axis 0 (maxshape None = unlimited rows)
create_dataset = output_file.create_dataset('data', (total_rows, t, 224, 224, 1), maxshape=(None, t, 224, 224, 1))
# fill the first section of the dataset
create_dataset[:,:] = your_data
where_to_start_appending = total_rows
else:
# resize the dataset to accommodate the new data
create_dataset.resize(total_rows, axis=0)
create_dataset[where_to_start_appending:total_rows, :] = your_data
where_to_start_appending = total_rows
output_file.close()
def preprocess_data(logger, dataset, t, video_root_path='/share/data/videos'):
import os
# Step 1: Calculate the mean frame of all training frames
# Check if mean frame file exists for the dataset
# If the file exists, then we can skip re-generating the file
# Else calculate and generate mean file
logger.debug("Step 1/4: Check if mean frame exists for {}".format(dataset))
mean_frame_file = os.path.join(video_root_path, dataset, 'mean_frame_224.npy')
training_frame_path = os.path.join(video_root_path, dataset, 'training_frames')
testing_frame_path = os.path.join(video_root_path, dataset, 'testing_frames')
if not os.path.isfile(mean_frame_file):
# The frames must have already been extracted from training and testing videos
assert(os.path.isdir(training_frame_path))
assert(os.path.isdir(testing_frame_path))
logger.info("Step 1/4: Calculating mean frame for {}".format(dataset))
calc_mean(dataset, video_root_path)
# Step 2: Subtract mean frame from each training and testing frames
# Check whether the training & testing frames have already been mean-subtracted
# If the file exists, then we can skip re-generating the file
logger.debug("Step 2/4: Check if training/testing_frames_videoID.npy exists for {}".format(dataset))
try:
# the try block completes without AssertionError only if all frames have already been subtracted
for frame_folder in os.listdir(training_frame_path):
training_frame_npy = os.path.join(video_root_path, dataset, 'training_frames_{}.npy'.format(frame_folder))
assert(os.path.isfile(training_frame_npy))
for frame_folder in os.listdir(testing_frame_path):
testing_frame_npy = os.path.join(video_root_path, dataset, 'testing_frames_{}.npy'.format(frame_folder))
assert(os.path.isfile(testing_frame_npy))
except AssertionError:
# if all or some frames have not been subtracted, then generate those files
logger.info("Step 2/4: Subtracting mean frame for {}".format(dataset))
subtract_mean(dataset, video_root_path)
# Step 3: Generate small video volumes from the mean-subtracted frames and dump into h5 files (grouped by video ID)
# Check if those h5 files have already been generated
# If the file exists, then skip this step
logger.debug("Step 3/4: Check if individual h5 files exists for {}".format(dataset))
for train_or_test in ('training', 'testing'):
try:
h5_folder = os.path.join(video_root_path, '{}/{}_h5_t{}'.format(dataset, train_or_test, t))
assert(os.path.isdir(h5_folder))
num_videos = len(os.listdir(os.path.join(video_root_path, '{}/{}_frames'.format(dataset, train_or_test))))
for i in range(num_videos):
h5_file = os.path.join(video_root_path, '{0}/{1}_h5_t{2}/{0}_{3:02d}.h5'.format(dataset, train_or_test, t, i+1))
assert(os.path.isfile(h5_file))
except AssertionError:
logger.info("Step 3/4: Generating volumes for {} {} set".format(dataset, train_or_test))
build_h5(dataset, train_or_test, t, video_root_path)
# Step 4: Combine small h5 files into one big h5 file
# Check if this big h5 file has already been generated
# If the file exists, then skip this step
logger.debug("Step 4/4: Check if individual h5 files have already been combined for {}".format(dataset))
training_h5 = os.path.join(video_root_path, '{0}/{0}_train_t{1}.h5'.format(dataset, t))
if not os.path.isfile(training_h5):
logger.info("Step 4/4: Combining h5 files for {}".format(dataset))
combine_dataset(dataset, t, video_root_path)
logger.info("Preprocessing is completed")

53
start_test.py Normal file

@@ -0,0 +1,53 @@
import logging
import datetime
import os
import sys
import coloredlogs
from classifier import test
device = 'cpu'
dataset = 'ped1'
job_uuid = '86f47b9c-d0ca-49a8-beb6-84373ea9e880'
epoch = 586
val_loss = 0.001069
time_length = 8
job_folder = os.path.join('/share/clean/{}/jobs'.format(dataset), job_uuid)
log_path = os.path.join(job_folder, 'logs')
os.makedirs(log_path, exist_ok=True)
logging.basicConfig(filename=os.path.join(log_path, "test-{}.log".format(datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))),
level=logging.DEBUG,
format="%(asctime)s [%(levelname)s] %(message)s")
coloredlogs.install()
logger = logging.getLogger()
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
logger.warning("Ctrl + C triggered by user, testing ended prematurely")
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
logger.critical("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
sys.excepthook = handle_exception
if device == 'cpu':
os.environ['CUDA_VISIBLE_DEVICES'] = ''
logger.debug("Using CPU only")
elif device == 'gpu0':
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
logger.debug("Using GPU 0")
elif device == 'gpu1':
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
logger.debug("Using GPU 1")
elif device == 'gpu':
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
logger.debug("Using GPU 0 and 1")
test(logger=logger, dataset=dataset, t=time_length, job_uuid=job_uuid, epoch=epoch, val_loss=val_loss,
visualize_score=True, visualize_frame=False)
logger.info("Job {} ({}) has finished testing.".format(job_uuid, dataset))

55
start_train.py Normal file

@@ -0,0 +1,55 @@
import logging
import datetime
import os
import sys
import coloredlogs
from classifier import train
import uuid
from shutil import copyfile
dataset = 'ped1'
device = 'gpu1'
job_uuid = str(uuid.uuid4())
job_folder = os.path.join('/share/clean/{}/jobs'.format(dataset), job_uuid)
os.makedirs(job_folder)
copyfile('config.yml', os.path.join(job_folder, 'config.yml'))
log_path = os.path.join(job_folder, 'logs')
os.makedirs(log_path, exist_ok=True)
logging.basicConfig(filename=os.path.join(log_path,
"train-{}.log".format(datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))),
level=logging.DEBUG,
format="%(asctime)s [%(levelname)s] %(message)s")
coloredlogs.install(level=logging.INFO)
logger = logging.getLogger()
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
logger.warning("Ctrl + C triggered by user, training ended prematurely")
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
logger.critical("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
sys.excepthook = handle_exception
if device == 'cpu':
os.environ['CUDA_VISIBLE_DEVICES'] = ''
logger.debug("Using CPU only")
elif device == 'gpu0':
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
logger.debug("Using GPU 0")
elif device == 'gpu1':
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
logger.debug("Using GPU 1")
elif device == 'gpu':
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
logger.debug("Using GPU 0 and 1")
train(dataset=dataset, job_folder=job_folder, logger=logger)
logger.info("Job {} has finished training.".format(job_uuid))