From dfbdc27202075b500577c64d3f0d6c8438b86cfd Mon Sep 17 00:00:00 2001
From: VitorGuizilini-TRI <58576956+VitorGuizilini-TRI@users.noreply.github.com>
Date: Wed, 10 Jun 2020 09:50:03 -0700
Subject: [PATCH] DGP Multi-Cam + Velocity Loss + FP16 Inference + PackNetSlim
 (#30)

* Support for multi-camera loading on the DGP dataset
* Support for fp16 at inference time
* Velocity loss (see VelSupModel)
* PackNetSlim01 (faster version of PackNet)
---
 configs/default_config.py                      |   18 +-
 configs/eval_ddad.yaml                         |    2 +-
 configs/overfit_ddad.yaml                      |    6 +-
 configs/train_ddad.yaml                        |    6 +-
 .../packnet_sfm/datasets/dgp_dataset.html      |  128 +-
 .../packnet_sfm/datasets/kitti_dataset.html    |   13 +-
 .../packnet_sfm/datasets/transforms.html       |    9 +-
 .../losses/multiview_photometric_loss.html     |    2 +-
 .../packnet_sfm/models/SelfSupModel.html       |   24 +-
 .../packnet_sfm/models/SemiSupModel.html       |   22 +-
 docs/_modules/packnet_sfm/models/SfmModel.html |   47 +-
 .../packnet_sfm/models/model_utils.html        |   30 +
 .../packnet_sfm/models/model_wrapper.html      |   54 +-
 .../packnet_sfm/networks/depth/PackNet01.html  |    8 +-
 .../packnet_sfm/trainers/base_trainer.html     |   11 +-
 docs/_modules/packnet_sfm/utils/config.html    |   11 +-
 docs/_modules/packnet_sfm/utils/logging.html   |   11 +-
 docs/_modules/packnet_sfm/utils/save.html      |    3 -
 docs/_modules/scripts/eval.html                |    9 +-
 docs/_modules/scripts/infer.html               |  111 +-
 docs/_static/basic.css                         |  121 +-
 .../{jquery-3.4.1.js => jquery-3.5.1.js}       | 1238 ++++++++++-------
 docs/_static/jquery.js                         |    4 +-
 docs/datasets/datasets.DGPDataset.html         |   42 +
 docs/datasets/datasets.KITTIDataset.html       |    3 +-
 docs/genindex.html                             |   59 +-
 .../losses.multiview_photometric_loss.html     |    2 +-
 docs/models/models.SelfSupModel.html           |   28 +-
 docs/models/models.SemiSupModel.html           |   20 -
 docs/models/models.SfmModel.html               |   62 +-
 docs/models/models.Utilities.html              |   17 +
 docs/models/models.Wrapper.html                |   12 +-
 docs/objects.inv                               |  Bin 4180 -> 4169 bytes
 docs/scripts/scripts.eval.html                 |    2 +-
 docs/scripts/scripts.infer.html                |   40 +-
 docs/searchindex.js                            |    2 +-
 docs/trainers/trainers.BaseTrainer.html        |    2 +-
 packnet_sfm/datasets/dgp_dataset.py            |  128 +-
 packnet_sfm/datasets/kitti_dataset.py          |   13 +-
 packnet_sfm/datasets/transforms.py             |    9 +-
 .../losses/multiview_photometric_loss.py       |    2 +-
 packnet_sfm/losses/velocity_loss.py            |   42 +
 packnet_sfm/models/SelfSupModel.py             |   24 +-
 packnet_sfm/models/SemiSupModel.py             |   22 +-
 packnet_sfm/models/SfmModel.py                 |   47 +-
 packnet_sfm/models/VelSupModel.py              |   52 +
 packnet_sfm/models/model_utils.py              |   30 +
 packnet_sfm/models/model_wrapper.py            |   54 +-
 packnet_sfm/networks/depth/PackNet01.py        |    8 +-
 packnet_sfm/networks/depth/PackNetSlim01.py    |  183 +++
 packnet_sfm/trainers/base_trainer.py           |   11 +-
 packnet_sfm/trainers/horovod_trainer.py        |    6 +-
 packnet_sfm/utils/config.py                    |   11 +-
 packnet_sfm/utils/logging.py                   |   11 +-
 packnet_sfm/utils/save.py                      |    3 -
 scripts/eval.py                                |    9 +-
 scripts/infer.py                               |  111 +-
 57 files changed, 1972 insertions(+), 983 deletions(-)
 rename docs/_static/{jquery-3.4.1.js => jquery-3.5.1.js} (91%)
 create mode 100644 packnet_sfm/losses/velocity_loss.py
 create mode 100644 packnet_sfm/models/VelSupModel.py
 create mode 100644 packnet_sfm/networks/depth/PackNetSlim01.py

diff --git a/configs/default_config.py b/configs/default_config.py
index 2e2b23e2..fa2c09d3 100644
--- a/configs/default_config.py
+++ b/configs/default_config.py
@@ -80,11 +80,11 @@
 ########################################################################################################################
 cfg.model.loss = CN()
 #
-cfg.model.loss.num_scales = 4                   # Number of inverse depth scales to use
-cfg.model.loss.progressive_scaling = 0.0        # Training percentage to decay number of scales
-cfg.model.loss.flip_lr_prob = 0.5               # Probablity of horizontal flippping
-cfg.model.loss.rotation_mode = 'euler'          # Rotation mode
-cfg.model.loss.upsample_depth_maps = True       # Resize depth maps to highest resolution
+cfg.model.loss.num_scales = 4                   # Number of inverse depth scales to use
+cfg.model.loss.progressive_scaling = 0.0        # Training percentage to decay number of scales
+cfg.model.loss.flip_lr_prob = 0.5               # Probability of horizontal flipping
+cfg.model.loss.rotation_mode = 'euler'          # Rotation mode
+cfg.model.loss.upsample_depth_maps = True       # Resize depth maps to highest resolution
 #
 cfg.model.loss.ssim_loss_weight = 0.85          # SSIM loss weight
 cfg.model.loss.occ_reg_weight = 0.1             # Occlusion regularizer loss weight
@@ -97,6 +97,8 @@
 cfg.model.loss.padding_mode = 'zeros'           # Photometric loss padding mode
 cfg.model.loss.automask_loss = True             # Automasking to remove static pixels
 #
+cfg.model.loss.velocity_loss_weight = 0.1       # Velocity supervision loss weight
+#
 cfg.model.loss.supervised_method = 'sparse-l1'  # Method for depth supervision
 cfg.model.loss.supervised_num_scales = 4        # Number of scales for supervised learning
 cfg.model.loss.supervised_loss_weight = 0.9     # Supervised loss weight
@@ -138,7 +140,7 @@
 cfg.datasets.train.path = []                    # Training data path
 cfg.datasets.train.split = []                   # Training split
 cfg.datasets.train.depth_type = ['']            # Training depth type
-cfg.datasets.train.cameras = []                 # Training cameras
+cfg.datasets.train.cameras = [[]]               # Training cameras (double list, one for each dataset)
 cfg.datasets.train.repeat = [1]                 # Number of times training dataset is repeated per epoch
 cfg.datasets.train.num_logs = 5                 # Number of training images to log
 ########################################################################################################################
@@ -153,7 +155,7 @@
 cfg.datasets.validation.path = []               # Validation data path
 cfg.datasets.validation.split = []              # Validation split
 cfg.datasets.validation.depth_type = ['']       # Validation depth type
-cfg.datasets.validation.cameras = []            # Validation cameras
+cfg.datasets.validation.cameras = [[]]          # Validation cameras (double list, one for each dataset)
 cfg.datasets.validation.num_logs = 5            # Number of validation images to log
 ########################################################################################################################
 ### DATASETS.TEST
@@ -167,7 +169,7 @@
 cfg.datasets.test.path = []                     # Test data path
 cfg.datasets.test.split = []                    # Test split
 cfg.datasets.test.depth_type = ['']             # Test depth type
-cfg.datasets.test.cameras = []                  # Test cameras
+cfg.datasets.test.cameras = [[]]                # Test cameras (double list, one for each dataset)
 cfg.datasets.test.num_logs = 5                  # Number of test images to log
 ########################################################################################################################
 ### THESE SHOULD NOT BE CHANGED
diff --git a/configs/eval_ddad.yaml b/configs/eval_ddad.yaml
index ca490fee..bc1e153b 100644
--- a/configs/eval_ddad.yaml
+++ b/configs/eval_ddad.yaml
@@ -18,7 +18,7 @@ datasets:
         path: ['/data/datasets/DDAD/ddad.json']
         split: ['val']
         depth_type: ['lidar']
-        cameras: ['camera_01']
+        cameras: [['camera_01']]
 save:
     folder: '/data/save'
     viz: True
diff --git a/configs/overfit_ddad.yaml b/configs/overfit_ddad.yaml
index 7cfdd482..72e4447a 100644
--- a/configs/overfit_ddad.yaml
+++ b/configs/overfit_ddad.yaml
@@ -31,17 +31,17 @@ datasets:
         path: ['/data/datasets/DDAD_tiny/ddad_tiny.json']
         split: ['train']
         depth_type: ['lidar']
-        cameras: ['camera_01']
+        cameras: [['camera_01']]
         repeat: [500]
     validation:
         dataset: ['DGP']
         path: ['/data/datasets/DDAD_tiny/ddad_tiny.json']
         split: ['train']
        depth_type: ['lidar']
-        cameras: ['camera_01']
+        cameras: [['camera_01']]
     test:
         dataset: ['DGP']
        path: ['/data/datasets/DDAD_tiny/ddad_tiny.json']
         split: ['train']
         depth_type: ['lidar']
-        cameras: ['camera_01']
+        cameras: [['camera_01']]
diff --git a/configs/train_ddad.yaml b/configs/train_ddad.yaml
index ea4c7c31..a047b48f 100644
--- a/configs/train_ddad.yaml
+++ b/configs/train_ddad.yaml
@@ -30,7 +30,7 @@ datasets:
         path: ['/data/datasets/DDAD/ddad.json']
         split: ['train']
         depth_type: ['lidar']
-        cameras: ['camera_01']
+        cameras: [['camera_01']]
         repeat: [5]
     validation:
         num_workers: 8
@@ -38,11 +38,11 @@ datasets:
         path: ['/data/datasets/DDAD/ddad.json']
         split: ['val']
         depth_type: ['lidar']
-        cameras: ['camera_01']
+        cameras: [['camera_01']]
     test:
         num_workers: 8
         dataset: ['DGP']
         path: ['/data/datasets/DDAD/ddad.json']
         split: ['val']
         depth_type: ['lidar']
-        cameras: ['camera_01']
+        cameras: [['camera_01']]
diff --git a/docs/_modules/packnet_sfm/datasets/dgp_dataset.html b/docs/_modules/packnet_sfm/datasets/dgp_dataset.html
index 14efb8da..7459d2b2 100644
--- a/docs/_modules/packnet_sfm/datasets/dgp_dataset.html
+++ b/docs/_modules/packnet_sfm/datasets/dgp_dataset.html
@@ -165,10 +165,16 @@
 # Copyright 2020 Toyota Research Institute.  All rights reserved.
 
+import os
 import torch
-from packnet_sfm.utils.misc import make_list
-from packnet_sfm.utils.types import is_tensor
+import numpy as np
+
 from dgp.datasets.synchronized_dataset import SynchronizedSceneDataset
+from dgp.utils.camera import Camera, generate_depth_map
+from dgp.utils.geometry import Pose
+
+from packnet_sfm.utils.misc import make_list
+from packnet_sfm.utils.types import is_tensor, is_numpy, is_list
 
 ########################################################################################################################
 #### FUNCTIONS
@@ -189,7 +195,24 @@ 
         else:
             # Stack torch tensors
             if is_tensor(sample[0][key]):
-                stacked_sample[key] = torch.cat([s[key].unsqueeze(0) for s in sample], 0)
+                stacked_sample[key] = torch.stack([s[key] for s in sample], 0)
+            # Stack numpy arrays
+            elif is_numpy(sample[0][key]):
+                stacked_sample[key] = np.stack([s[key] for s in sample], 0)
+            # Stack list
+            elif is_list(sample[0][key]):
+                stacked_sample[key] = []
+                # Stack list of torch tensors
+                if is_tensor(sample[0][key][0]):
+                    for i in range(len(sample[0][key])):
+                        stacked_sample[key].append(
+                            torch.stack([s[key][i] for s in sample], 0))
+                # Stack list of numpy arrays
+                if is_numpy(sample[0][key][0]):
+                    for i in range(len(sample[0][key])):
+                        stacked_sample[key].append(
+                            np.stack([s[key][i] for s in sample], 0))
+    # Return stacked sample
     return stacked_sample
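Note: the reworked helper above acts as a collate function for multi-camera samples, stacking per-camera tensors, numpy arrays, and lists of either along a new leading dimension. A minimal sketch of the resulting shape contract, with hypothetical sizes (two cameras, 3x384x640 images) and assuming the enclosing function is stack_sample(sample) as in this module:

    import torch

    # Two per-camera sample dicts, as produced for a single timestep
    sample = [{'rgb': torch.rand(3, 384, 640)} for _ in range(2)]
    stacked = stack_sample(sample)
    print(stacked['rgb'].shape)  # torch.Size([2, 3, 384, 640])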
@@ -231,6 +254,7 @@
                  forward_context=0,
                  data_transform=None,
                  ):
+        self.path = path
         self.split = split
         self.dataset_idx = 0
@@ -241,6 +265,7 @@
         self.num_cameras = len(cameras)
         self.data_transform = data_transform
+        self.depth_type = depth_type
         self.with_depth = depth_type is not None
         self.with_pose = with_pose
         self.with_semantic = with_semantic
@@ -250,11 +275,57 @@
             datum_names=cameras,
             backward_context=back_context,
             forward_context=forward_context,
-            generate_depth_from_datum=depth_type,
             requested_annotations=None,
             only_annotated_datums=False,
         )
+
+    def generate_depth_map(self, sample_idx, datum_idx, filename):
+        """
+        Generates the depth map for a camera by projecting LiDAR information.
+        It also caches the depth map following the DGP folder structure, so it's not recalculated.
+
+        Parameters
+        ----------
+        sample_idx : int
+            Sample index
+        datum_idx : int
+            Datum index
+        filename : str
+            Filename used for loading / saving
+
+        Returns
+        -------
+        depth : np.array [H, W]
+            Depth map for that datum in that sample
+        """
+        # Generate depth filename
+        filename = '{}/{}.npz'.format(
+            os.path.dirname(self.path), filename.format('depth/{}'.format(self.depth_type)))
+        # Load and return if exists
+        if os.path.exists(filename):
+            return np.load(filename)['depth']
+        # Otherwise, create, save and return
+        else:
+            # Get pointcloud
+            scene_idx, sample_idx_in_scene, _ = self.dataset.dataset_item_index[sample_idx]
+            pc_datum_idx_in_sample = self.dataset.get_datum_index_for_datum_name(
+                scene_idx, sample_idx_in_scene, self.depth_type)
+            pc_datum_data = self.dataset.get_point_cloud_from_datum(
+                scene_idx, sample_idx_in_scene, pc_datum_idx_in_sample)
+            # Create camera
+            camera_rgb = self.get_current('rgb', datum_idx)
+            camera_pose = self.get_current('pose', datum_idx)
+            camera_intrinsics = self.get_current('intrinsics', datum_idx)
+            camera = Camera(K=camera_intrinsics, p_cw=camera_pose.inverse())
+            # Generate depth map
+            world_points = pc_datum_data['pose'] * pc_datum_data['point_cloud']
+            depth = generate_depth_map(camera, world_points, camera_rgb.size[::-1])
+            # Save depth map
+            os.makedirs(os.path.dirname(filename), exist_ok=True)
+            np.savez_compressed(filename, depth=depth)
+            # Return depth map
+            return depth
+
     def get_current(self, key, sensor_idx):
         """Return current timestep of a key from a sensor"""
         return self.sample_dgp[self.bwd][sensor_idx][key]
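Note: generate_depth_map above caches each projected LiDAR depth map as a compressed .npz next to the DGP scene data, so the projection runs once per frame. A small sketch of the cache round-trip it relies on (path and array are hypothetical):

    import os
    import numpy as np

    cache_file = '/data/datasets/DDAD/scene_01/depth/lidar/CAMERA_01/0000000000.npz'  # hypothetical
    depth = np.random.rand(384, 640).astype(np.float32)
    os.makedirs(os.path.dirname(cache_file), exist_ok=True)
    np.savez_compressed(cache_file, depth=depth)   # first call: save
    reloaded = np.load(cache_file)['depth']        # later calls: cheap load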
@@ -275,6 +346,29 @@
         """Get both backward and forward contexts"""
         return self.get_backward(key, sensor_idx) + self.get_forward(key, sensor_idx)
+
+    def get_filename(self, sample_idx, datum_idx):
+        """
+        Returns the filename for an index, following DGP structure
+
+        Parameters
+        ----------
+        sample_idx : int
+            Sample index
+        datum_idx : int
+            Datum index
+
+        Returns
+        -------
+        filename : str
+            Filename for the datum in that sample
+        """
+        scene_idx, sample_idx_in_scene, datum_indices = self.dataset.dataset_item_index[sample_idx]
+        scene_dir = self.dataset.get_scene_directory(scene_idx)
+        filename = self.dataset.get_datum(
+            scene_idx, sample_idx_in_scene, datum_indices[datum_idx]).datum.image.filename
+        return os.path.splitext(os.path.join(os.path.basename(scene_dir),
+                                             filename.replace('rgb', '{}')))[0]
+
     def __len__(self):
         """Length of dataset"""
         return len(self.dataset)
@@ -292,27 +386,45 @@
                 'idx': idx,
                 'dataset_idx': self.dataset_idx,
                 'sensor_name': self.get_current('datum_name', i),
-                'filename': '%s_%010d' % (self.split, idx),
+                #
+                'filename': self.get_filename(idx, i),
+                'splitname': '%s_%010d' % (self.split, idx),
                 #
                 'rgb': self.get_current('rgb', i),
                 'intrinsics': self.get_current('intrinsics', i),
             }
+            # If depth is returned
             if self.with_depth:
                 data.update({
-                    'depth': self.get_current('depth', i),
+                    'depth': self.generate_depth_map(idx, i, data['filename'])
                 })
+            # If pose is returned
             if self.with_pose:
                 data.update({
-                    'extrinsics': [pose.matrix for pose in self.get_current('extrinsics', i)],
-                    'pose': [pose.matrix for pose in self.get_current('pose', i)],
+                    'extrinsics': self.get_current('extrinsics', i).matrix,
+                    'pose': self.get_current('pose', i).matrix,
                 })
+            # If context is returned
             if self.has_context:
                 data.update({
                     'rgb_context': self.get_context('rgb', i),
                 })
+                # If context pose is returned
+                if self.with_pose:
+                    # Get original values to calculate relative motion
+                    orig_extrinsics = Pose.from_matrix(data['extrinsics'])
+                    orig_pose = Pose.from_matrix(data['pose'])
+                    data.update({
+                        'extrinsics_context':
+                            [(orig_extrinsics.inverse() * extrinsics).matrix
+                             for extrinsics in self.get_context('extrinsics', i)],
+                        'pose_context':
+                            [(orig_pose.inverse() * pose).matrix
+                             for pose in self.get_context('pose', i)],
+                    })

             sample.append(data)
diff --git a/docs/_modules/packnet_sfm/datasets/kitti_dataset.html b/docs/_modules/packnet_sfm/datasets/kitti_dataset.html
index f720cd8e..f9ecb61e 100644
--- a/docs/_modules/packnet_sfm/datasets/kitti_dataset.html
+++ b/docs/_modules/packnet_sfm/datasets/kitti_dataset.html
@@ -223,8 +223,6 @@
         Split file, with paths to the images to be used
     train : bool
         True if the dataset will be used for training
-    mode : str
-        Dataset mode (stereo or mono)
     data_transform : Function
         Transformations applied to the sample
     depth_type : str
     strides : tuple
         List of context strides
     """
-    def __init__(self, root_dir, file_list, train=True, mode='mono',
+    def __init__(self, root_dir, file_list, train=True,
                  data_transform=None, depth_type=None, with_pose=False,
                  back_context=0, forward_context=0, strides=(1,)):
         # Assertions
@@ -459,9 +457,12 @@
     def _get_oxts_file(image_file):
         """Gets the oxts file from an image file."""
         # find oxts pose file
-        oxts_file = image_file.replace(IMAGE_FOLDER['left'], OXTS_POSE_DATA)
-        oxts_file = oxts_file.replace('png', 'txt')
-        return oxts_file
+        for cam in ['left', 'right']:
+            # Check for both cameras, if found replace and return file name
+            if IMAGE_FOLDER[cam] in image_file:
+                return image_file.replace(IMAGE_FOLDER[cam], OXTS_POSE_DATA).replace('.png', '.txt')
+        # Something went wrong (invalid image file)
+        raise ValueError('Invalid KITTI path for pose supervision.')

     def _get_oxts_data(self, image_file):
         """Gets the oxts data from an image file."""
diff --git a/docs/_modules/packnet_sfm/datasets/transforms.html b/docs/_modules/packnet_sfm/datasets/transforms.html
index d15a6d3c..e0437de1 100644
--- a/docs/_modules/packnet_sfm/datasets/transforms.html
+++ b/docs/_modules/packnet_sfm/datasets/transforms.html
@@ -189,7 +189,8 @@
     sample : dict
         Augmented sample
     """
-    sample = resize_sample(sample, image_shape)
+    if len(image_shape) > 0:
+        sample = resize_sample(sample, image_shape)
     sample = duplicate_sample(sample)
     if len(jittering) > 0:
         sample = colorjitter_sample(sample, jittering)
@@ -212,7 +213,8 @@
     sample : dict
         Augmented sample
     """
-    sample['rgb'] = resize_image(sample['rgb'], image_shape)
+    if len(image_shape) > 0:
+        sample['rgb'] = resize_image(sample['rgb'], image_shape)
     sample = to_tensor_sample(sample)
     return sample
@@ -232,7 +234,8 @@
     sample : dict
         Augmented sample
     """
-    sample['rgb'] = resize_image(sample['rgb'], image_shape)
+    if len(image_shape) > 0:
+        sample['rgb'] = resize_image(sample['rgb'], image_shape)
     sample = to_tensor_sample(sample)
     return sample
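Note: the same guard is added to all three transform functions above, so an empty image_shape now skips resizing entirely and keeps the native resolution. A usage sketch, assuming the call shape validation_transforms(sample, image_shape) used in this module (the hunks above do not show the full signatures) and a hypothetical PIL image:

    sample = {'rgb': image}                                      # hypothetical input image
    out = validation_transforms(sample, image_shape=())          # keep native resolution
    out = validation_transforms(sample, image_shape=(384, 640))  # resize as before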
diff --git a/docs/_modules/packnet_sfm/losses/multiview_photometric_loss.html b/docs/_modules/packnet_sfm/losses/multiview_photometric_loss.html
index c6389f26..df6f1620 100644
--- a/docs/_modules/packnet_sfm/losses/multiview_photometric_loss.html
+++ b/docs/_modules/packnet_sfm/losses/multiview_photometric_loss.html
@@ -178,7 +178,7 @@
 def SSIM(x, y, C1=1e-4, C2=9e-4, kernel_size=3, stride=1):
     """
-    Structural SIMlilarity (SSIM) distance between two images.
+    Structural SIMilarity (SSIM) distance between two images.

     Parameters
     ----------
diff --git a/docs/_modules/packnet_sfm/models/SelfSupModel.html b/docs/_modules/packnet_sfm/models/SelfSupModel.html
index dfd9a143..80e6e81b 100644
--- a/docs/_modules/packnet_sfm/models/SelfSupModel.html
+++ b/docs/_modules/packnet_sfm/models/SelfSupModel.html
@@ -177,16 +177,12 @@
     Parameters
     ----------
-    depth_net : nn.Module
-        Depth network to be used
-    pose_net : nn.Module
-        Pose network to be used
     kwargs : dict
         Extra parameters
     """
-    def __init__(self, depth_net=None, pose_net=None, **kwargs):
+    def __init__(self, **kwargs):
         # Initializes SfmModel
-        super().__init__(depth_net, pose_net, **kwargs)
+        super().__init__(**kwargs)
         # Initializes the photometric loss
         self._photometric_loss = MultiViewPhotometricLoss(**kwargs)
@@ -198,22 +194,6 @@
             **self._photometric_loss.logs
         }

-    @property
-    def requires_depth_net(self):
-        return True
-
-    @property
-    def requires_pose_net(self):
-        return True
-
-    @property
-    def requires_gt_depth(self):
-        return False
-
-    @property
-    def requires_gt_pose(self):
-        return False
-
     def self_supervised_loss(self, image, ref_images, inv_depths, poses,
                              intrinsics, return_logs=False, progress=0.0):
         """
diff --git a/docs/_modules/packnet_sfm/models/SemiSupModel.html b/docs/_modules/packnet_sfm/models/SemiSupModel.html
index d9d3ab11..06324768 100644
--- a/docs/_modules/packnet_sfm/models/SemiSupModel.html
+++ b/docs/_modules/packnet_sfm/models/SemiSupModel.html
@@ -186,6 +186,7 @@
         Extra parameters
     """
     def __init__(self, supervised_loss_weight=0.9, **kwargs):
+        # Initializes SelfSupModel
         super().__init__(**kwargs)
         # If supervision weight is 0.0, use SelfSupModel directly
         assert 0. < supervised_loss_weight <= 1., "Model requires (0, 1] supervision"
@@ -193,6 +194,11 @@
         self.supervised_loss_weight = supervised_loss_weight
         self._supervised_loss = SupervisedLoss(**kwargs)

+        # Pose network is only required if there is self-supervision
+        self._network_requirements['pose_net'] = self.supervised_loss_weight < 1
+        # GT depth is only required if there is supervision
+        self._train_requirements['gt_depth'] = self.supervised_loss_weight > 0
+
     @property
     def logs(self):
         """Return logs."""
@@ -201,22 +207,6 @@
             **self._supervised_loss.logs
         }

-    @property
-    def requires_depth_net(self):
-        return True
-
-    @property
-    def requires_pose_net(self):
-        return self.supervised_loss_weight < 1.
-
-    @property
-    def requires_gt_depth(self):
-        return self.supervised_loss_weight > 0.
-
-    @property
-    def requires_gt_pose(self):
-        return False
-
     def supervised_loss(self, inv_depths, gt_inv_depths,
                         return_logs=False, progress=0.0):
         """
diff --git a/docs/_modules/packnet_sfm/models/SfmModel.html b/docs/_modules/packnet_sfm/models/SfmModel.html
index 208b0e48..c2d14059 100644
--- a/docs/_modules/packnet_sfm/models/SfmModel.html
+++ b/docs/_modules/packnet_sfm/models/SfmModel.html
@@ -187,7 +187,7 @@
     flip_lr_prob : float
         Probability of flipping when using the depth network
     upsample_depth_maps : bool
-        True if detph map scales are upsampled to highest resolution
+        True if depth map scales are upsampled to highest resolution
     kwargs : dict
         Extra parameters
     """
@@ -203,6 +203,15 @@ 
         self._logs = {}
         self._losses = {}
 
+        self._network_requirements = {
+                'depth_net': True,  # Depth network required
+                'pose_net': True,   # Pose network required
+            }
+        self._train_requirements = {
+                'gt_depth': False,  # No ground-truth depth required
+                'gt_pose': False,   # No ground-truth pose required
+            }
+
     @property
     def logs(self):
         """Return logs."""
@@ -218,25 +227,41 @@ 
         self._losses[key] = val.detach()

     @property
-    def requires_depth_net(self):
-        return True
+    def network_requirements(self):
+        """
+        Networks required to run the model

-    @property
-    def requires_pose_net(self):
-        return True
+        Returns
+        -------
+        requirements : dict
+            depth_net : bool
+                Whether a depth network is required by the model
+            pose_net : bool
+                Whether a pose network is required by the model
+        """
+        return self._network_requirements

     @property
-    def requires_gt_depth(self):
-        return False
+    def train_requirements(self):
+        """
+        Information required by the model at training stage

-    @property
-    def requires_gt_pose(self):
-        return False
+        Returns
+        -------
+        requirements : dict
+            gt_depth : bool
+                Whether ground truth depth is required by the model at training time
+            gt_pose : bool
+                Whether ground truth pose is required by the model at training time
+        """
+        return self._train_requirements
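Note: these two dicts replace the four requires_* properties removed above, so subclasses now toggle requirements by mutating the dicts instead of overriding properties, exactly as the SemiSupModel hunk earlier in this patch does. A condensed sketch of the pattern (MyModel is hypothetical):

    class MyModel(SfmModel):
        def __init__(self, supervised_loss_weight=0.5, **kwargs):
            super().__init__(**kwargs)
            # Pose network only needed while some self-supervision remains
            self._network_requirements['pose_net'] = supervised_loss_weight < 1
            # Ground-truth depth only needed when supervision is used
            self._train_requirements['gt_depth'] = supervised_loss_weight > 0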
@property - def requires_depth_net(self): - return True + def network_requirements(self): + """ + Networks required to run the model - @property - def requires_pose_net(self): - return True + Returns + ------- + requirements : dict + depth_net : bool + Whether a depth network is required by the model + pose_net : bool + Whether a depth network is required by the model + """ + return self._network_requirements @property - def requires_gt_depth(self): - return False + def train_requirements(self): + """ + Information required by the model at training stage - @property - def requires_gt_pose(self): - return False + Returns + ------- + requirements : dict + gt_depth : bool + Whether ground truth depth is required by the model at training time + gt_pose : bool + Whether ground truth pose is required by the model at training time + """ + return self._train_requirements
     def add_depth_net(self, depth_net):
+        """Add a depth network to the model"""
         self.depth_net = depth_net
[docs] def add_pose_net(self, pose_net): + """Add a pose network to the model""" self.pose_net = pose_net
[docs] def compute_inv_depths(self, image): diff --git a/docs/_modules/packnet_sfm/models/model_utils.html b/docs/_modules/packnet_sfm/models/model_utils.html index d11f0fb3..e9bd6828 100644 --- a/docs/_modules/packnet_sfm/models/model_utils.html +++ b/docs/_modules/packnet_sfm/models/model_utils.html @@ -165,6 +165,7 @@
 # Copyright 2020 Toyota Research Institute.  All rights reserved.
 
+from packnet_sfm.utils.types import is_tensor, is_list, is_numpy
 
 
 def merge_outputs(*outputs):
     """
@@ -199,6 +200,35 @@
                 'Adding duplicated key {}'.format(key)
             merge[key] = val
     return merge

+
+
+def stack_batch(batch):
+    """
+    Stack multi-camera batches (B,N,C,H,W becomes BN,C,H,W)
+
+    Parameters
+    ----------
+    batch : dict
+        Batch
+
+    Returns
+    -------
+    batch : dict
+        Stacked batch
+    """
+    # If there is multi-camera information
+    if len(batch['rgb'].shape) == 5:
+        assert batch['rgb'].shape[0] == 1, 'Only batch size 1 is supported for multi-cameras'
+        # Loop over all keys
+        for key in batch.keys():
+            # If list, stack every item
+            if is_list(batch[key]):
+                if is_tensor(batch[key][0]) or is_numpy(batch[key][0]):
+                    batch[key] = [sample[0] for sample in batch[key]]
+            # Else, stack single item
+            else:
+                batch[key] = batch[key][0]
+    return batch
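Note: stack_batch is the training-side counterpart of the multi-camera loader: the dataloader runs with batch size 1 over [B,N,...] samples, and the leading batch dimension is squeezed away so the N cameras act as the effective batch. A sketch of the shape contract (hypothetical sizes):

    import torch

    batch = {'rgb': torch.rand(1, 6, 3, 384, 640)}  # B=1, N=6 cameras
    batch = stack_batch(batch)
    print(batch['rgb'].shape)  # torch.Size([6, 3, 384, 640])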
diff --git a/docs/_modules/packnet_sfm/models/model_wrapper.html b/docs/_modules/packnet_sfm/models/model_wrapper.html
index ef000b77..c1eae876 100644
--- a/docs/_modules/packnet_sfm/models/model_wrapper.html
+++ b/docs/_modules/packnet_sfm/models/model_wrapper.html
@@ -184,6 +184,7 @@
 from packnet_sfm.utils.reduce import all_reduce_metrics, reduce_dict, \
     create_dict, average_loss_and_metrics
 from packnet_sfm.utils.save import save_depth
+from packnet_sfm.models.model_utils import stack_batch
 class ModelWrapper(torch.nn.Module):
@@ -223,7 +224,10 @@
         # Prepare datasets
         if load_datasets:
-            self.prepare_datasets()
+            # Requirements for validation (we only evaluate depth for now)
+            validation_requirements = {'gt_depth': True, 'gt_pose': False}
+            test_requirements = validation_requirements
+            self.prepare_datasets(validation_requirements, test_requirements)
         # Preparations done
         self.config.prepared = True
@@ -241,20 +245,24 @@
         if 'epoch' in resume:
             self.current_epoch = resume['epoch']

-    def prepare_datasets(self):
+    def prepare_datasets(self, validation_requirements, test_requirements):
         """Prepare datasets for training, validation and test."""
-        # Prepare datasets
         print0(pcolor('### Preparing Datasets', 'green'))

         augmentation = self.config.datasets.augmentation
+        # Setup train dataset (requirements are given by the model itself)
         self.train_dataset = setup_dataset(
             self.config.datasets.train, 'train',
-            self.model.requires_gt_depth, **augmentation)
+            self.model.train_requirements, **augmentation)
+        # Setup validation dataset
         self.validation_dataset = setup_dataset(
-            self.config.datasets.validation, 'validation', **augmentation)
+            self.config.datasets.validation, 'validation',
+            validation_requirements, **augmentation)
+        # Setup test dataset
         self.test_dataset = setup_dataset(
-            self.config.datasets.test, 'test', **augmentation)
+            self.config.datasets.test, 'test',
+            test_requirements, **augmentation)

     @property
     def depth_net(self):
@@ -272,12 +280,17 @@
         params = OrderedDict()
         for param in self.optimizer.param_groups:
             params['{}_learning_rate'.format(param['name'].lower())] = param['lr']
-        params['progress'] = self.current_epoch / self.config.arch.max_epochs
+        params['progress'] = self.progress
         return {
             **params,
             **self.model.logs,
         }

+    @property
+    def progress(self):
+        """Returns training progress (current epoch / max. number of epochs)"""
+        return self.current_epoch / self.config.arch.max_epochs
+
     def configure_optimizers(self):
         """Configure depth and pose optimizers and the corresponding scheduler."""
@@ -335,8 +348,8 @@
     def training_step(self, batch, *args):
         """Processes a training batch."""
-        # loss = self.model(batch)[-1]
-        output = self.model(batch)
+        batch = stack_batch(batch)
+        output = self.model(batch, progress=self.progress)
         return {
             'loss': output['loss'],
             'metrics': output['metrics']
@@ -501,8 +514,11 @@
         print(met_line.format(*(('METRIC',) + self.metrics_keys)))
         for n, metrics in enumerate(metrics_data):
             print(hor_line)
-            print(wrap(pcolor('*** {:<87}'.format(
-                os.path.join(dataset.path[n], dataset.split[n])), 'magenta', attrs=['bold'])))
+            path_line = '{}'.format(
+                os.path.join(dataset.path[n], dataset.split[n]))
+            if len(dataset.cameras[n]) == 1:  # only allows single cameras
+                path_line += ' ({})'.format(dataset.cameras[n][0])
+            print(wrap(pcolor('*** {:<87}'.format(path_line), 'magenta', attrs=['bold'])))
             print(hor_line)
             for key, metric in metrics.items():
                 if self.metrics_name in key:
@@ -607,10 +623,10 @@
     model = load_class(config.name, paths=['packnet_sfm.models',])(
         **{**config.loss, **kwargs})
     # Add depth network if required
-    if model.requires_depth_net:
+    if model.network_requirements['depth_net']:
         model.add_depth_net(setup_depth_net(config.depth_net, prepared))
     # Add pose network if required
-    if model.requires_pose_net:
+    if model.network_requirements['pose_net']:
         model.add_pose_net(setup_pose_net(config.pose_net, prepared))
     # If a checkpoint is provided, load pretrained model
     if not prepared and config.checkpoint_path is not '':
@@ -619,7 +635,7 @@
     return model


-def setup_dataset(config, mode, requires_gt_depth=True, **kwargs):
+def setup_dataset(config, mode, requirements, **kwargs):
     """
     Create a dataset class
@@ -629,8 +645,8 @@
         Configuration (cf. configs/default_config.py)
     mode : str {'train', 'validation', 'test'}
         Mode from which we want the dataset
-    requires_gt_depth : bool
-        True if the model requires ground-truth depth maps at training time
+    requirements : dict (string -> bool)
+        Different requirements for dataset loading (gt_depth, gt_pose, etc)
     kwargs : dict
         Extra parameters for dataset creation
@@ -659,7 +675,8 @@
         # Individual shared dataset arguments
         dataset_args_i = {
-            'depth_type': config.depth_type[i] if requires_gt_depth else None,
+            'depth_type': config.depth_type[i] if requirements['gt_depth'] else None,
+            'with_pose': requirements['gt_pose'],
         }

         # KITTI dataset
@@ -667,14 +684,13 @@
             dataset = KITTIDataset(
                 config.path[i], path_split,
                 **dataset_args, **dataset_args_i,
-                mode='mono',
             )
         # DGP dataset
         elif config.dataset[i] == 'DGP':
             dataset = DGPDataset(
                 config.path[i], config.split[i],
                 **dataset_args, **dataset_args_i,
-                cameras=config.cameras,
+                cameras=config.cameras[i],
             )
         # Image dataset
         elif config.dataset[i] == 'Image':
diff --git a/docs/_modules/packnet_sfm/networks/depth/PackNet01.html b/docs/_modules/packnet_sfm/networks/depth/PackNet01.html
index 72c2b3c3..aa2ea717 100644
--- a/docs/_modules/packnet_sfm/networks/depth/PackNet01.html
+++ b/docs/_modules/packnet_sfm/networks/depth/PackNet01.html
@@ -170,8 +170,6 @@
 from packnet_sfm.networks.layers.packnet.layers01 import \
     PackLayerConv3d, UnpackLayerConv3d, Conv2D, ResidualBlock, InvDepth

-########################################################################################################################
-
 class PackNet01(nn.Module):
     """
     PackNet network with 3d convolutions (version 01, from the CVPR paper).
@@ -205,13 +203,13 @@
         # Initial convolutional layer
         self.pre_calc = Conv2D(in_channels, ni, 5, 1)
         # Support for different versions
-        if self.version == 'A':
+        if self.version == 'A':  # Channel concatenation
             n1o, n1i = n1, n1 + ni + no
             n2o, n2i = n2, n2 + n1 + no
             n3o, n3i = n3, n3 + n2 + no
             n4o, n4i = n4, n4 + n3
             n5o, n5i = n5, n5 + n4
-        elif self.version == 'B':
+        elif self.version == 'B':  # Channel addition
             n1o, n1i = n1, n1 + no
             n2o, n2i = n2, n2 + no
             n3o, n3i = n3//2, n3//2 + no
@@ -346,8 +344,6 @@
             return [disp1, disp2, disp3, disp4]
         else:
             return disp1
-
-########################################################################################################################
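Note: the new inline comments make the two decoder variants explicit: version 'A' concatenates skip features along the channel dimension (hence the wider n*i input sizes above), while 'B' adds them. A generic sketch of the two skip-connection styles, not the actual PackNet layers:

    import torch

    x = torch.rand(1, 64, 48, 80)     # decoder features
    skip = torch.rand(1, 64, 48, 80)  # unpacked encoder features
    a = torch.cat([x, skip], 1)       # version 'A': concatenation -> 128 channels
    b = x + skip                      # version 'B': addition -> 64 channels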
diff --git a/docs/_modules/packnet_sfm/trainers/base_trainer.html b/docs/_modules/packnet_sfm/trainers/base_trainer.html
index 41f1860f..2b1933ba 100644
--- a/docs/_modules/packnet_sfm/trainers/base_trainer.html
+++ b/docs/_modules/packnet_sfm/trainers/base_trainer.html
@@ -165,19 +165,22 @@
 # Copyright 2020 Toyota Research Institute.  All rights reserved.
 
+import torch
 from tqdm import tqdm
 from packnet_sfm.utils.logging import prepare_dataset_prefix
 
 
-def sample_to_cuda(data):
+def sample_to_cuda(data, dtype=None):
     if isinstance(data, str):
         return data
     elif isinstance(data, dict):
-        return {key: sample_to_cuda(data[key]) for key in data.keys()}
+        return {key: sample_to_cuda(data[key], dtype) for key in data.keys()}
     elif isinstance(data, list):
-        return [sample_to_cuda(key) for key in data]
+        return [sample_to_cuda(val, dtype) for val in data]
     else:
-        return data.to('cuda')
+        # only convert floats (e.g., to half), otherwise preserve (e.g., ints)
+        dtype = dtype if torch.is_floating_point(data) else None
+        return data.to('cuda', dtype=dtype)
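Note: the new dtype argument makes fp16 inference safe for mixed batches: only floating-point tensors are cast, while integer fields keep their types. A small usage sketch (hypothetical batch, CUDA assumed available):

    import torch

    batch = {'rgb': torch.rand(1, 3, 384, 640), 'idx': torch.tensor([0])}
    batch = sample_to_cuda(batch, dtype=torch.float16)
    # batch['rgb'].dtype -> torch.float16; batch['idx'] keeps torch.int64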
 class BaseTrainer:
diff --git a/docs/_modules/packnet_sfm/utils/config.html b/docs/_modules/packnet_sfm/utils/config.html
index cec49f5c..454e4d25 100644
--- a/docs/_modules/packnet_sfm/utils/config.html
+++ b/docs/_modules/packnet_sfm/utils/config.html
@@ -171,7 +171,7 @@
 
 from packnet_sfm.utils.logging import s3_url, prepare_dataset_prefix
 from packnet_sfm.utils.horovod import on_rank_0
-from packnet_sfm.utils.types import is_cfg
+from packnet_sfm.utils.types import is_cfg, is_list
 from packnet_sfm.utils.misc import make_list
 from packnet_sfm.utils.load import load_class, backwards_state_dict
 
@@ -193,11 +193,16 @@ 
     # If there is no dataset, do nothing
     if len(config.path) == 0:
         return config
-    # Get split length and expand other arguments to the same length
-    n = len(config.split)
+    # If cameras is not a double list, make it so
+    if not config.cameras or not is_list(config.cameras[0]):
+        config.cameras = [config.cameras]
+    # Get maximum length and expand other arguments to the same length
+    n = max(len(config.split), len(config.cameras), len(config.depth_type))
     config.dataset = make_list(config.dataset, n)
     config.path = make_list(config.path, n)
+    config.split = make_list(config.split, n)
     config.depth_type = make_list(config.depth_type, n)
+    config.cameras = make_list(config.cameras, n)
     if 'repeat' in config:
         config.repeat = make_list(config.repeat, n)
     # Return updated configuration
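Note: with this change a plain single list of cameras is first wrapped into a double list and then, like every other per-dataset argument, broadcast with make_list to the maximum configured length. A sketch of the expansion, using the module's own helpers (values hypothetical):

    cameras = ['camera_01']              # single list in the YAML
    if not cameras or not is_list(cameras[0]):
        cameras = [cameras]              # wrapped: [['camera_01']]
    cameras = make_list(cameras, 2)      # two datasets -> [['camera_01'], ['camera_01']]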
diff --git a/docs/_modules/packnet_sfm/utils/logging.html b/docs/_modules/packnet_sfm/utils/logging.html
index 7545d457..f4ce9ad1 100644
--- a/docs/_modules/packnet_sfm/utils/logging.html
+++ b/docs/_modules/packnet_sfm/utils/logging.html
@@ -211,11 +211,14 @@ 
     prefix : str
         Dataset prefix for metrics logging
     """
-    return '{}-{}-{}'.format(
+    prefix = '{}-{}'.format(
         os.path.splitext(config.path[n].split('/')[-1])[0],
-        os.path.splitext(os.path.basename(config.split[n]))[0],
-        config.depth_type[n],
-    )
+        os.path.splitext(os.path.basename(config.split[n]))[0])
+    if config.depth_type[n] is not '':
+        prefix += '-{}'.format(config.depth_type[n])
+    if len(config.cameras[n]) == 1:  # only allows single cameras
+        prefix += '-{}'.format(config.cameras[n][0])
+    return prefix
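Note: the prefix is now built incrementally, appending depth type and camera only when present. Worked through by hand for the eval_ddad.yaml values earlier in this patch (path '/data/datasets/DDAD/ddad.json', split 'val', depth_type 'lidar', cameras [['camera_01']]):

    # 'ddad' + '-val'            -> 'ddad-val'
    # depth_type 'lidar'         -> 'ddad-val-lidar'
    # single camera 'camera_01'  -> 'ddad-val-lidar-camera_01'
    prepare_dataset_prefix(config, 0)  # -> 'ddad-val-lidar-camera_01'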
 def s3_url(config):
diff --git a/docs/_modules/packnet_sfm/utils/save.html b/docs/_modules/packnet_sfm/utils/save.html
index d847c312..b599f045 100644
--- a/docs/_modules/packnet_sfm/utils/save.html
+++ b/docs/_modules/packnet_sfm/utils/save.html
@@ -172,7 +172,6 @@
 from packnet_sfm.utils.logging import prepare_dataset_prefix
 from packnet_sfm.utils.depth import inv2depth, viz_inv_depth
 
-########################################################################################################################
 
 
 def save_depth(batch, output, args, dataset, save):
     """
@@ -229,8 +228,6 @@
                 # Write to disk
                 cv2.imwrite('{}/{}.png'.format(
                     save_path, filename[i]), image[:, :, ::-1])
-
-########################################################################################################################
diff --git a/docs/_modules/scripts/eval.html b/docs/_modules/scripts/eval.html
index 6a529b94..1ee7dbb6 100644
--- a/docs/_modules/scripts/eval.html
+++ b/docs/_modules/scripts/eval.html
@@ -166,6 +166,7 @@
 # Copyright 2020 Toyota Research Institute.  All rights reserved.
 
 import argparse
+import torch
 
 from packnet_sfm import ModelWrapper, HorovodTrainer
 from packnet_sfm.utils.config import parse_test_file
@@ -178,6 +179,7 @@ 
     parser = argparse.ArgumentParser(description='PackNet-SfM evaluation script')
     parser.add_argument('--checkpoint', type=str, help='Checkpoint (.ckpt)')
     parser.add_argument('--config', type=str, default=None, help='Configuration (.yaml)')
+    parser.add_argument('--half', action="store_true", help='Use half precision (fp16)')
     args = parser.parse_args()
     assert args.checkpoint.endswith('.ckpt'), \
         'You need to provide a .ckpt file as checkpoint'
@@ -186,7 +188,7 @@ 
     return args


-def test(ckpt_file, cfg_file):
+def test(ckpt_file, cfg_file, half):
     """
     Monocular depth estimation test script.
@@ -211,6 +213,9 @@
     # Restore model state
     model_wrapper.load_state_dict(state_dict)
 
+    # change to half precision for evaluation if requested
+    config.arch["dtype"] = torch.float16 if half else None
+
     # Create trainer with args.arch parameters
     trainer = HorovodTrainer(**config.arch)
 
@@ -220,7 +225,7 @@ 
 
 if __name__ == '__main__':
     args = parse_args()
-    test(args.checkpoint, args.config)
+    test(args.checkpoint, args.config, args.half)
 
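Note: half-precision evaluation is opt-in via the new --half flag; internally it only sets config.arch["dtype"] to torch.float16 before the trainer is created, and sample_to_cuda (in base_trainer above) casts the floating-point inputs accordingly. A sketch of the equivalent direct call (checkpoint path hypothetical):

    # Same effect as: python3 scripts/eval.py --checkpoint model.ckpt --half
    test('model.ckpt', None, half=True)  # sets config.arch['dtype'] = torch.float16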
diff --git a/docs/_modules/scripts/infer.html b/docs/_modules/scripts/infer.html
index 41b3cc69..f48efe30 100644
--- a/docs/_modules/scripts/infer.html
+++ b/docs/_modules/scripts/infer.html
@@ -178,7 +178,7 @@
 from packnet_sfm.utils.image import load_image
 from packnet_sfm.utils.config import parse_test_file
 from packnet_sfm.utils.load import set_debug
-from packnet_sfm.utils.depth import viz_inv_depth
+from packnet_sfm.utils.depth import inv2depth, viz_inv_depth
 from packnet_sfm.utils.logging import pcolor
 
 
@@ -188,14 +188,15 @@ 
 
 
 
 def parse_args():
-    """Parse arguments for training script"""
-    parser = argparse.ArgumentParser(description='PackNet-SfM evaluation script')
+    parser = argparse.ArgumentParser(description='PackNet-SfM inference of depth maps from images')
     parser.add_argument('--checkpoint', type=str, help='Checkpoint (.ckpt)')
     parser.add_argument('--input', type=str, help='Input file or folder')
-    parser.add_argument('--output', type=str, help='Output file or foler')
+    parser.add_argument('--output', type=str, help='Output file or folder')
     parser.add_argument('--image_shape', type=tuple, default=None, help='Input and output image shape '
                         '(default: checkpoint\'s config.datasets.augmentation.image_shape)')
+    parser.add_argument('--half', action="store_true", help='Use half precision (fp16)')
+    parser.add_argument('--save_npz', action='store_true', help='save in .npz format')
     args = parser.parse_args()
     assert args.checkpoint.endswith('.ckpt'), \
         'You need to provide a .ckpt file as checkpoint'
@@ -207,7 +208,8 @@
     return args


-def process(input_file, output_file, model_wrapper, image_shape):
+@torch.no_grad()
+def infer_and_save_depth(input_file, output_file, model_wrapper, image_shape, half, save_npz):
     """
     Process a single input file to produce and save visualization
@@ -221,11 +223,20 @@
         Model wrapper used for inference
     image_shape : tuple
         Input image shape
-
-    Returns
-    -------
+    half: bool
+        use half precision (fp16)
+    save_npz: bool
+        save .npz output depth maps if True, else save as png
 
     """
+    if not is_image(output_file):
+        # If not an image, assume it's a folder and append the input name
+        os.makedirs(output_file, exist_ok=True)
+        output_file = os.path.join(output_file, os.path.basename(input_file))
+
+    # change to half precision for evaluation if requested
+    dtype = torch.float16 if half else None
+
     # Load image
     image = load_image(input_file)
     # Resize and to tensor
@@ -234,50 +245,43 @@ 
 
     # Send image to GPU if available
     if torch.cuda.is_available():
-        image = image.to('cuda:{}'.format(rank()))
-
-    # Depth inference
-    depth = model_wrapper.depth(image)[0]
-
-    # Prepare RGB image
-    rgb_i = image[0].permute(1, 2, 0).detach().cpu().numpy() * 255
-    # Prepare inverse depth
-    pred_inv_depth_i = viz_inv_depth(depth[0]) * 255
-    # Concatenate both vertically
-    image = np.concatenate([rgb_i, pred_inv_depth_i], 0)
-    if not is_image(output_file):
-        # If not an image, assume it's a folder and append the input name
-        os.makedirs(output_file, exist_ok=True)
-        output_file = os.path.join(output_file, os.path.basename(input_file))
-    # Save visualization
-    print('Saving {} to {}'.format(
-        pcolor(input_file, 'cyan', attrs=['bold']),
-        pcolor(output_file, 'magenta', attrs=['bold'])))
-    imwrite(output_file, image[:, :, ::-1])
+        image = image.to('cuda:{}'.format(rank()), dtype=dtype)
+
+    # Depth inference (returns predicted inverse depth)
+    pred_inv_depth = model_wrapper.depth(image)[0]
+
+    if save_npz:
+        # Get depth from the predicted inverse depth map and save to .npz
+        depth = inv2depth(pred_inv_depth).squeeze().detach().cpu().numpy()
+        output_file = os.path.splitext(output_file)[0] + ".npz"
+        print('Saving {} to {}'.format(
+            pcolor(input_file, 'cyan', attrs=['bold']),
+            pcolor(output_file, 'magenta', attrs=['bold'])))
+        np.savez_compressed(output_file, depth=depth)
+    else:
+        # Prepare RGB image
+        rgb = image[0].permute(1, 2, 0).detach().cpu().numpy() * 255
+        # Prepare inverse depth
+        viz_pred_inv_depth = viz_inv_depth(pred_inv_depth[0]) * 255
+        # Concatenate both vertically
+        image = np.concatenate([rgb, viz_pred_inv_depth], 0)
+        # Save visualization
+        print('Saving {} to {}'.format(
+            pcolor(input_file, 'cyan', attrs=['bold']),
+            pcolor(output_file, 'magenta', attrs=['bold'])))
+        imwrite(output_file, image[:, :, ::-1])
-def infer(ckpt_file, input_file, output_file, image_shape):
-    """
-    Monocular depth estimation test script.
+def main(args):

-    Parameters
-    ----------
-    ckpt_file : str
-        Checkpoint path for a pretrained model
-    input_file : str
-        File or folder with input images
-    output_file : str
-        File or folder with output images
-    image_shape : tuple
-        Input image shape (H,W)
-    """
     # Initialize horovod
     hvd_init()

     # Parse arguments
-    config, state_dict = parse_test_file(ckpt_file)
+    config, state_dict = parse_test_file(args.checkpoint)

     # If no image shape is provided, use the checkpoint one
+    image_shape = args.image_shape
     if image_shape is None:
         image_shape = config.datasets.augmentation.image_shape
     # Restore monodepth_model state
     model_wrapper.load_state_dict(state_dict)
 
+    # change to half precision for evaluation if requested
+    dtype = torch.float16 if args.half else None
+
     # Send model to GPU if available
     if torch.cuda.is_available():
-        model_wrapper = model_wrapper.to('cuda:{}'.format(rank()))
+        model_wrapper = model_wrapper.to('cuda:{}'.format(rank()), dtype=dtype)
+
+    # Set to eval mode
+    model_wrapper.eval()
 
-    if os.path.isdir(input_file):
+    if os.path.isdir(args.input):
         # If input file is a folder, search for image files
         files = []
         for ext in ['png', 'jpg']:
-            files.extend(glob((os.path.join(input_file, '*.{}'.format(ext)))))
+            files.extend(glob((os.path.join(args.input, '*.{}'.format(ext)))))
         files.sort()
         print0('Found {} files'.format(len(files)))
     else:
         # Otherwise, use it as is
-        files = [input_file]
+        files = [args.input]
 
     # Process each file
-    for file in files[rank()::world_size()]:
-        process(file, output_file, model_wrapper, image_shape)
+    for fn in files[rank()::world_size()]:
+        infer_and_save_depth(
+            fn, args.output, model_wrapper, image_shape, args.half, args.save_npz)
 if __name__ == '__main__':
     args = parse_args()
-    infer(args.checkpoint, args.input, args.output, args.image_shape)
+    main(args)
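Note: with --save_npz each prediction is written via np.savez_compressed under the key 'depth' (inverse depth converted with inv2depth); without it the stacked RGB / inverse-depth visualization PNG is saved as before. Reading a saved map back (filename hypothetical):

    import numpy as np

    depth = np.load('0000000000.npz')['depth']  # H x W depth map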
diff --git a/docs/_static/basic.css b/docs/_static/basic.css index 01192852..56f5efc6 100644 --- a/docs/_static/basic.css +++ b/docs/_static/basic.css @@ -316,22 +316,29 @@ img.align-default, .figure.align-default { div.sidebar { margin: 0 0 0.5em 1em; border: 1px solid #ddb; - padding: 7px 7px 0 7px; + padding: 7px; background-color: #ffe; width: 40%; float: right; + clear: right; + overflow-x: auto; } p.sidebar-title { font-weight: bold; } +div.admonition, div.topic, pre, div[class|="highlight"] { + clear: both; +} + /* -- topics ---------------------------------------------------------------- */ div.topic { border: 1px solid #ccc; - padding: 7px 7px 0 7px; + padding: 7px; margin: 10px 0 10px 0; + overflow-x: auto; } p.topic-title { @@ -346,16 +353,13 @@ div.admonition { margin-top: 10px; margin-bottom: 10px; padding: 7px; + overflow-x: auto; } div.admonition dt { font-weight: bold; } -div.admonition dl { - margin-bottom: 0; -} - p.admonition-title { margin: 0px 10px 5px 0px; font-weight: bold; @@ -366,9 +370,19 @@ div.body p.centered { margin-top: 25px; } +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + /* -- tables ---------------------------------------------------------------- */ table.docutils { + margin-top: 10px; + margin-bottom: 10px; border: 0; border-collapse: collapse; } @@ -416,13 +430,13 @@ table.citation td { border-bottom: none; } -th > p:first-child, -td > p:first-child { +th > :first-child, +td > :first-child { margin-top: 0px; } -th > p:last-child, -td > p:last-child { +th > :last-child, +td > :last-child { margin-bottom: 0px; } @@ -468,6 +482,10 @@ table.field-list td, table.field-list th { /* -- hlist styles ---------------------------------------------------------- */ +table.hlist { + margin: 1em 0; +} + table.hlist td { vertical-align: top; } @@ -495,17 +513,34 @@ ol.upperroman { list-style: upper-roman; } -li > p:first-child { +ol > li:first-child > :first-child, +ul > li:first-child > :first-child { margin-top: 0px; } -li > p:last-child { +ol ol > li:first-child > :first-child, +ol ul > li:first-child > :first-child, +ul ol > li:first-child > :first-child, +ul ul > li:first-child > :first-child { + margin-top: revert; +} + +ol > li:last-child > :last-child, +ul > li:last-child > :last-child { margin-bottom: 0px; } +ol ol > li:last-child > :last-child, +ol ul > li:last-child > :last-child, +ul ol > li:last-child > :last-child, +ul ul > li:last-child > :last-child { + margin-bottom: revert; +} + dl.footnote > dt, dl.citation > dt { float: left; + margin-right: 0.5em; } dl.footnote > dd, @@ -546,7 +581,7 @@ dl { margin-bottom: 15px; } -dd > p:first-child { +dd > :first-child { margin-top: 0px; } @@ -560,6 +595,11 @@ dd { margin-left: 30px; } +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + dt:target, span.highlighted { background-color: #fbe54e; } @@ -644,22 +684,57 @@ span.pre { hyphens: none; } +div[class^="highlight-"] { + margin: 1em 0; +} + td.linenos pre { - padding: 5px 0px; border: 0; background-color: transparent; color: #aaa; } table.highlighttable { - margin-left: 0.5em; + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; } table.highlighttable td { - padding: 0 0.5em 0 0.5em; + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; 
+ overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; } div.code-block-caption { + margin-top: 1em; padding: 2px 5px; font-size: small; } @@ -668,10 +743,7 @@ div.code-block-caption code { background-color: transparent; } -div.code-block-caption + div > div.highlight > pre { - margin-top: 0; -} - +table.highlighttable td.linenos, div.doctest > div.highlight span.gp { /* gp: Generic.Prompt */ user-select: none; } @@ -685,11 +757,7 @@ div.code-block-caption span.caption-text { } div.literal-block-wrapper { - padding: 1em 1em 0; -} - -div.literal-block-wrapper div.highlight { - margin: 0; + margin: 1em 0; } code.descname { @@ -740,8 +808,7 @@ span.eqno { } span.eqno a.headerlink { - position: relative; - left: 0px; + position: absolute; z-index: 1; } diff --git a/docs/_static/jquery-3.4.1.js b/docs/_static/jquery-3.5.1.js similarity index 91% rename from docs/_static/jquery-3.4.1.js rename to docs/_static/jquery-3.5.1.js index 773ad95c..50937333 100644 --- a/docs/_static/jquery-3.4.1.js +++ b/docs/_static/jquery-3.5.1.js @@ -1,5 +1,5 @@ /*! - * jQuery JavaScript Library v3.4.1 + * jQuery JavaScript Library v3.5.1 * https://jquery.com/ * * Includes Sizzle.js @@ -9,7 +9,7 @@ * Released under the MIT license * https://jquery.org/license * - * Date: 2019-05-01T21:04Z + * Date: 2020-05-04T22:49Z */ ( function( global, factory ) { @@ -47,13 +47,16 @@ var arr = []; -var document = window.document; - var getProto = Object.getPrototypeOf; var slice = arr.slice; -var concat = arr.concat; +var flat = arr.flat ? function( array ) { + return arr.flat.call( array ); +} : function( array ) { + return arr.concat.apply( [], array ); +}; + var push = arr.push; @@ -86,6 +89,8 @@ var isWindow = function isWindow( obj ) { }; +var document = window.document; + var preservedScriptAttributes = { @@ -142,7 +147,7 @@ function toType( obj ) { var - version = "3.4.1", + version = "3.5.1", // Define a local copy of jQuery jQuery = function( selector, context ) { @@ -150,11 +155,7 @@ var // The jQuery object is actually just the init constructor 'enhanced' // Need init if jQuery is called (just allow error to be thrown if not included) return new jQuery.fn.init( selector, context ); - }, - - // Support: Android <=4.0 only - // Make sure we trim BOM and NBSP - rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g; + }; jQuery.fn = jQuery.prototype = { @@ -220,6 +221,18 @@ jQuery.fn = jQuery.prototype = { return this.eq( -1 ); }, + even: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return ( i + 1 ) % 2; + } ) ); + }, + + odd: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return i % 2; + } ) ); + }, + eq: function( i ) { var len = this.length, j = +i + ( i < 0 ? len : 0 ); @@ -353,9 +366,10 @@ jQuery.extend( { return true; }, - // Evaluates a script in a global context - globalEval: function( code, options ) { - DOMEval( code, { nonce: options && options.nonce } ); + // Evaluates a script in a provided context; falls back to the global one + // if not specified. + globalEval: function( code, options, doc ) { + DOMEval( code, { nonce: options && options.nonce }, doc ); }, each: function( obj, callback ) { @@ -379,13 +393,6 @@ jQuery.extend( { return obj; }, - // Support: Android <=4.0 only - trim: function( text ) { - return text == null ? 
- "" : - ( text + "" ).replace( rtrim, "" ); - }, - // results is for internal usage only makeArray: function( arr, results ) { var ret = results || []; @@ -472,7 +479,7 @@ jQuery.extend( { } // Flatten any nested arrays - return concat.apply( [], ret ); + return flat( ret ); }, // A global GUID counter for objects @@ -489,7 +496,7 @@ if ( typeof Symbol === "function" ) { // Populate the class2type map jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), -function( i, name ) { +function( _i, name ) { class2type[ "[object " + name + "]" ] = name.toLowerCase(); } ); @@ -511,17 +518,16 @@ function isArrayLike( obj ) { } var Sizzle = /*! - * Sizzle CSS Selector Engine v2.3.4 + * Sizzle CSS Selector Engine v2.3.5 * https://sizzlejs.com/ * * Copyright JS Foundation and other contributors * Released under the MIT license * https://js.foundation/ * - * Date: 2019-04-08 + * Date: 2020-03-14 */ -(function( window ) { - +( function( window ) { var i, support, Expr, @@ -561,59 +567,70 @@ var i, }, // Instance methods - hasOwn = ({}).hasOwnProperty, + hasOwn = ( {} ).hasOwnProperty, arr = [], pop = arr.pop, - push_native = arr.push, + pushNative = arr.push, push = arr.push, slice = arr.slice, + // Use a stripped-down indexOf as it's faster than native // https://jsperf.com/thor-indexof-vs-for/5 indexOf = function( list, elem ) { var i = 0, len = list.length; for ( ; i < len; i++ ) { - if ( list[i] === elem ) { + if ( list[ i ] === elem ) { return i; } } return -1; }, - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" + + "ismap|loop|multiple|open|readonly|required|scoped", // Regular expressions // http://www.w3.org/TR/css3-selectors/#whitespace whitespace = "[\\x20\\t\\r\\n\\f]", - // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier - identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", + // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram + identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace + + "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+", // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + // Operator (capture 2) "*([*^$|!~]?=)" + whitespace + - // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + - "*\\]", + + // "Attribute values must be CSS identifiers [capture 5] + // or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + + whitespace + "*\\]", pseudos = ":(" + identifier + ")(?:\\((" + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: // 1. quoted (capture 3; capture 4 or capture 5) "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + // 2. simple (capture 6) "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + // 3. 
anything else (capture 2) ".*" + ")\\)|)", // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + + whitespace + "+$", "g" ), rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + + "*" ), rdescend = new RegExp( whitespace + "|>" ), rpseudo = new RegExp( pseudos ), @@ -625,14 +642,16 @@ var i, "TAG": new RegExp( "^(" + identifier + "|[*])" ), "ATTR": new RegExp( "^" + attributes ), "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + - "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + - "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + + whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + + whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + // For use in libraries implementing .is() // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + - whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + "needsContext": new RegExp( "^" + whitespace + + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) }, rhtml = /HTML$/i, @@ -648,18 +667,21 @@ var i, // CSS escapes // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), - funescape = function( _, escaped, escapedWhitespace ) { - var high = "0x" + escaped - 0x10000; - // NaN means non-codepoint - // Support: Firefox<24 - // Workaround erroneous numeric interpretation of +"0x" - return high !== high || escapedWhitespace ? - escaped : + runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ), + funescape = function( escape, nonHex ) { + var high = "0x" + escape.slice( 1 ) - 0x10000; + + return nonHex ? + + // Strip the backslash prefix from a non-hex escape sequence + nonHex : + + // Replace a hexadecimal escape sequence with the encoded Unicode code point + // Support: IE <=11+ + // For values outside the Basic Multilingual Plane (BMP), manually construct a + // surrogate pair high < 0 ? 
- // BMP codepoint String.fromCharCode( high + 0x10000 ) : - // Supplemental Plane codepoint (surrogate pair) String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); }, @@ -675,7 +697,8 @@ var i, } // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + return ch.slice( 0, -1 ) + "\\" + + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; } // Other potentially-special ASCII characters get backslash-escaped @@ -700,18 +723,20 @@ var i, // Optimize for push.apply( _, NodeList ) try { push.apply( - (arr = slice.call( preferredDoc.childNodes )), + ( arr = slice.call( preferredDoc.childNodes ) ), preferredDoc.childNodes ); + // Support: Android<4.0 // Detect silently failing push.apply + // eslint-disable-next-line no-unused-expressions arr[ preferredDoc.childNodes.length ].nodeType; } catch ( e ) { push = { apply: arr.length ? // Leverage slice if possible function( target, els ) { - push_native.apply( target, slice.call(els) ); + pushNative.apply( target, slice.call( els ) ); } : // Support: IE<9 @@ -719,8 +744,9 @@ try { function( target, els ) { var j = target.length, i = 0; + // Can't trust NodeList.length - while ( (target[j++] = els[i++]) ) {} + while ( ( target[ j++ ] = els[ i++ ] ) ) {} target.length = j - 1; } }; @@ -744,24 +770,21 @@ function Sizzle( selector, context, results, seed ) { // Try to shortcut find operations (as opposed to filters) in HTML documents if ( !seed ) { - - if ( ( context ? context.ownerDocument || context : preferredDoc ) !== document ) { - setDocument( context ); - } + setDocument( context ); context = context || document; if ( documentIsHTML ) { // If the selector is sufficiently simple, try using a "get*By*" DOM method // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { + if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) { // ID selector - if ( (m = match[1]) ) { + if ( ( m = match[ 1 ] ) ) { // Document context if ( nodeType === 9 ) { - if ( (elem = context.getElementById( m )) ) { + if ( ( elem = context.getElementById( m ) ) ) { // Support: IE, Opera, Webkit // TODO: identify versions @@ -780,7 +803,7 @@ function Sizzle( selector, context, results, seed ) { // Support: IE, Opera, Webkit // TODO: identify versions // getElementById can match elements by name instead of ID - if ( newContext && (elem = newContext.getElementById( m )) && + if ( newContext && ( elem = newContext.getElementById( m ) ) && contains( context, elem ) && elem.id === m ) { @@ -790,12 +813,12 @@ function Sizzle( selector, context, results, seed ) { } // Type selector - } else if ( match[2] ) { + } else if ( match[ 2 ] ) { push.apply( results, context.getElementsByTagName( selector ) ); return results; // Class selector - } else if ( (m = match[3]) && support.getElementsByClassName && + } else if ( ( m = match[ 3 ] ) && support.getElementsByClassName && context.getElementsByClassName ) { push.apply( results, context.getElementsByClassName( m ) ); @@ -806,11 +829,11 @@ function Sizzle( selector, context, results, seed ) { // Take advantage of querySelectorAll if ( support.qsa && !nonnativeSelectorCache[ selector + " " ] && - (!rbuggyQSA || !rbuggyQSA.test( selector )) && + ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) && // Support: IE 8 only // Exclude object elements - (nodeType !== 1 || context.nodeName.toLowerCase() !== "object") ) { + ( nodeType 
!== 1 || context.nodeName.toLowerCase() !== "object" ) ) { newSelector = selector; newContext = context; @@ -819,27 +842,36 @@ function Sizzle( selector, context, results, seed ) { // descendant combinators, which is not what we want. // In such cases, we work around the behavior by prefixing every selector in the // list with an ID selector referencing the scope context. + // The technique has to be used as well when a leading combinator is used + // as such selectors are not recognized by querySelectorAll. // Thanks to Andrew Dupont for this technique. - if ( nodeType === 1 && rdescend.test( selector ) ) { + if ( nodeType === 1 && + ( rdescend.test( selector ) || rcombinators.test( selector ) ) ) { - // Capture the context ID, setting it first if necessary - if ( (nid = context.getAttribute( "id" )) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", (nid = expando) ); + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + + // We can use :scope instead of the ID hack if the browser + // supports it & if we're not changing the context. + if ( newContext !== context || !support.scope ) { + + // Capture the context ID, setting it first if necessary + if ( ( nid = context.getAttribute( "id" ) ) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", ( nid = expando ) ); + } } // Prefix every selector in the list groups = tokenize( selector ); i = groups.length; while ( i-- ) { - groups[i] = "#" + nid + " " + toSelector( groups[i] ); + groups[ i ] = ( nid ? "#" + nid : ":scope" ) + " " + + toSelector( groups[ i ] ); } newSelector = groups.join( "," ); - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; } try { @@ -872,12 +904,14 @@ function createCache() { var keys = []; function cache( key, value ) { + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) if ( keys.push( key + " " ) > Expr.cacheLength ) { + // Only keep the most recent entries delete cache[ keys.shift() ]; } - return (cache[ key + " " ] = value); + return ( cache[ key + " " ] = value ); } return cache; } @@ -896,17 +930,19 @@ function markFunction( fn ) { * @param {Function} fn Passed the created element and returns a boolean result */ function assert( fn ) { - var el = document.createElement("fieldset"); + var el = document.createElement( "fieldset" ); try { return !!fn( el ); - } catch (e) { + } catch ( e ) { return false; } finally { + // Remove from its parent by default if ( el.parentNode ) { el.parentNode.removeChild( el ); } + // release memory in IE el = null; } @@ -918,11 +954,11 @@ function assert( fn ) { * @param {Function} handler The method that will be applied */ function addHandle( attrs, handler ) { - var arr = attrs.split("|"), + var arr = attrs.split( "|" ), i = arr.length; while ( i-- ) { - Expr.attrHandle[ arr[i] ] = handler; + Expr.attrHandle[ arr[ i ] ] = handler; } } @@ -944,7 +980,7 @@ function siblingCheck( a, b ) { // Check if b follows a if ( cur ) { - while ( (cur = cur.nextSibling) ) { + while ( ( cur = cur.nextSibling ) ) { if ( cur === b ) { return -1; } @@ -972,7 +1008,7 @@ function createInputPseudo( type ) { function createButtonPseudo( type ) { return function( elem ) { var name = elem.nodeName.toLowerCase(); - return (name === "input" || name === "button") && elem.type === type; + return ( name === "input" || name === 
"button" ) && elem.type === type; }; } @@ -1015,7 +1051,7 @@ function createDisabledPseudo( disabled ) { // Where there is no isDisabled, check manually /* jshint -W018 */ elem.isDisabled !== !disabled && - inDisabledFieldset( elem ) === disabled; + inDisabledFieldset( elem ) === disabled; } return elem.disabled === disabled; @@ -1037,21 +1073,21 @@ function createDisabledPseudo( disabled ) { * @param {Function} fn */ function createPositionalPseudo( fn ) { - return markFunction(function( argument ) { + return markFunction( function( argument ) { argument = +argument; - return markFunction(function( seed, matches ) { + return markFunction( function( seed, matches ) { var j, matchIndexes = fn( [], seed.length, argument ), i = matchIndexes.length; // Match elements found at the specified indexes while ( i-- ) { - if ( seed[ (j = matchIndexes[i]) ] ) { - seed[j] = !(matches[j] = seed[j]); + if ( seed[ ( j = matchIndexes[ i ] ) ] ) { + seed[ j ] = !( matches[ j ] = seed[ j ] ); } } - }); - }); + } ); + } ); } /** @@ -1073,7 +1109,7 @@ support = Sizzle.support = {}; */ isXML = Sizzle.isXML = function( elem ) { var namespace = elem.namespaceURI, - docElem = (elem.ownerDocument || elem).documentElement; + docElem = ( elem.ownerDocument || elem ).documentElement; // Support: IE <=8 // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes @@ -1091,7 +1127,11 @@ setDocument = Sizzle.setDocument = function( node ) { doc = node ? node.ownerDocument || node : preferredDoc; // Return early if doc is invalid or already selected - if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) { return document; } @@ -1100,10 +1140,14 @@ setDocument = Sizzle.setDocument = function( node ) { docElem = document.documentElement; documentIsHTML = !isXML( document ); - // Support: IE 9-11, Edge + // Support: IE 9 - 11+, Edge 12 - 18+ // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) - if ( preferredDoc !== document && - (subWindow = document.defaultView) && subWindow.top !== subWindow ) { + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( preferredDoc != document && + ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) { // Support: IE 11, Edge if ( subWindow.addEventListener ) { @@ -1115,25 +1159,36 @@ setDocument = Sizzle.setDocument = function( node ) { } } + // Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only, + // Safari 4 - 5 only, Opera <=11.6 - 12.x only + // IE/Edge & older browsers don't support the :scope pseudo-class. + // Support: Safari 6.0 only + // Safari 6.0 supports :scope but it's an alias of :root there. 
+ support.scope = assert( function( el ) { + docElem.appendChild( el ).appendChild( document.createElement( "div" ) ); + return typeof el.querySelectorAll !== "undefined" && + !el.querySelectorAll( ":scope fieldset div" ).length; + } ); + /* Attributes ---------------------------------------------------------------------- */ // Support: IE<8 // Verify that getAttribute really returns attributes and not properties // (excepting IE8 booleans) - support.attributes = assert(function( el ) { + support.attributes = assert( function( el ) { el.className = "i"; - return !el.getAttribute("className"); - }); + return !el.getAttribute( "className" ); + } ); /* getElement(s)By* ---------------------------------------------------------------------- */ // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert(function( el ) { - el.appendChild( document.createComment("") ); - return !el.getElementsByTagName("*").length; - }); + support.getElementsByTagName = assert( function( el ) { + el.appendChild( document.createComment( "" ) ); + return !el.getElementsByTagName( "*" ).length; + } ); // Support: IE<9 support.getElementsByClassName = rnative.test( document.getElementsByClassName ); @@ -1142,38 +1197,38 @@ setDocument = Sizzle.setDocument = function( node ) { // Check if getElementById returns elements by name // The broken getElementById methods don't pick up programmatically-set names, // so use a roundabout getElementsByName test - support.getById = assert(function( el ) { + support.getById = assert( function( el ) { docElem.appendChild( el ).id = expando; return !document.getElementsByName || !document.getElementsByName( expando ).length; - }); + } ); // ID filter and find if ( support.getById ) { - Expr.filter["ID"] = function( id ) { + Expr.filter[ "ID" ] = function( id ) { var attrId = id.replace( runescape, funescape ); return function( elem ) { - return elem.getAttribute("id") === attrId; + return elem.getAttribute( "id" ) === attrId; }; }; - Expr.find["ID"] = function( id, context ) { + Expr.find[ "ID" ] = function( id, context ) { if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { var elem = context.getElementById( id ); return elem ? 
[ elem ] : []; } }; } else { - Expr.filter["ID"] = function( id ) { + Expr.filter[ "ID" ] = function( id ) { var attrId = id.replace( runescape, funescape ); return function( elem ) { var node = typeof elem.getAttributeNode !== "undefined" && - elem.getAttributeNode("id"); + elem.getAttributeNode( "id" ); return node && node.value === attrId; }; }; // Support: IE 6 - 7 only // getElementById is not reliable as a find shortcut - Expr.find["ID"] = function( id, context ) { + Expr.find[ "ID" ] = function( id, context ) { if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { var node, i, elems, elem = context.getElementById( id ); @@ -1181,7 +1236,7 @@ setDocument = Sizzle.setDocument = function( node ) { if ( elem ) { // Verify the id attribute - node = elem.getAttributeNode("id"); + node = elem.getAttributeNode( "id" ); if ( node && node.value === id ) { return [ elem ]; } @@ -1189,8 +1244,8 @@ setDocument = Sizzle.setDocument = function( node ) { // Fall back on getElementsByName elems = context.getElementsByName( id ); i = 0; - while ( (elem = elems[i++]) ) { - node = elem.getAttributeNode("id"); + while ( ( elem = elems[ i++ ] ) ) { + node = elem.getAttributeNode( "id" ); if ( node && node.value === id ) { return [ elem ]; } @@ -1203,7 +1258,7 @@ setDocument = Sizzle.setDocument = function( node ) { } // Tag - Expr.find["TAG"] = support.getElementsByTagName ? + Expr.find[ "TAG" ] = support.getElementsByTagName ? function( tag, context ) { if ( typeof context.getElementsByTagName !== "undefined" ) { return context.getElementsByTagName( tag ); @@ -1218,12 +1273,13 @@ setDocument = Sizzle.setDocument = function( node ) { var elem, tmp = [], i = 0, + // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too results = context.getElementsByTagName( tag ); // Filter out possible comments if ( tag === "*" ) { - while ( (elem = results[i++]) ) { + while ( ( elem = results[ i++ ] ) ) { if ( elem.nodeType === 1 ) { tmp.push( elem ); } @@ -1235,7 +1291,7 @@ setDocument = Sizzle.setDocument = function( node ) { }; // Class - Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { + Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) { if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { return context.getElementsByClassName( className ); } @@ -1256,10 +1312,14 @@ setDocument = Sizzle.setDocument = function( node ) { // See https://bugs.jquery.com/ticket/13378 rbuggyQSA = []; - if ( (support.qsa = rnative.test( document.querySelectorAll )) ) { + if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) { + // Build QSA regex // Regex strategy adopted from Diego Perini - assert(function( el ) { + assert( function( el ) { + + var input; + // Select is set to empty string on purpose // This is to test IE's treatment of not explicitly // setting a boolean content attribute, @@ -1273,78 +1333,98 @@ setDocument = Sizzle.setDocument = function( node ) { // Nothing should be selected when empty strings follow ^= or $= or *= // The test attribute must be unknown in Opera but "safe" for WinRT // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section - if ( el.querySelectorAll("[msallowcapture^='']").length ) { + if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) { rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); } // Support: IE8 // Boolean attributes and "value" are not treated correctly - if ( 
!el.querySelectorAll("[selected]").length ) {
+			if ( !el.querySelectorAll( "[selected]" ).length ) {
 				rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" );
 			}

 			// Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+
 			if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) {
-				rbuggyQSA.push("~=");
+				rbuggyQSA.push( "~=" );
+			}
+
+			// Support: IE 11+, Edge 15 - 18+
+			// IE 11/Edge don't find elements on a `[name='']` query in some cases.
+			// Adding a temporary attribute to the document before the selection works
+			// around the issue.
+			// Interestingly, IE 10 & older don't seem to have the issue.
+			input = document.createElement( "input" );
+			input.setAttribute( "name", "" );
+			el.appendChild( input );
+			if ( !el.querySelectorAll( "[name='']" ).length ) {
+				rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" +
+					whitespace + "*(?:''|\"\")" );
 			}

 			// Webkit/Opera - :checked should return selected option elements
 			// http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
 			// IE8 throws error here and will not see later tests
-			if ( !el.querySelectorAll(":checked").length ) {
-				rbuggyQSA.push(":checked");
+			if ( !el.querySelectorAll( ":checked" ).length ) {
+				rbuggyQSA.push( ":checked" );
 			}

 			// Support: Safari 8+, iOS 8+
 			// https://bugs.webkit.org/show_bug.cgi?id=136851
 			// In-page `selector#id sibling-combinator selector` fails
 			if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) {
-				rbuggyQSA.push(".#.+[+~]");
+				rbuggyQSA.push( ".#.+[+~]" );
 			}
-		});

-		assert(function( el ) {
+			// Support: Firefox <=3.6 - 5 only
+			// Old Firefox doesn't throw on a badly-escaped identifier.
+			el.querySelectorAll( "\\\f" );
+			rbuggyQSA.push( "[\\r\\n\\f]" );
+		} );
+
+		assert( function( el ) {
 			el.innerHTML = "<a href='' disabled='disabled'></a>" +
 				"<select disabled='disabled'><option/></select>";

 			// Support: Windows 8 Native Apps
 			// The type and name attributes are restricted during .innerHTML assignment
-			var input = document.createElement("input");
+			var input = document.createElement( "input" );
 			input.setAttribute( "type", "hidden" );
 			el.appendChild( input ).setAttribute( "name", "D" );

 			// Support: IE8
 			// Enforce case-sensitivity of name attribute
-			if ( el.querySelectorAll("[name=d]").length ) {
+			if ( el.querySelectorAll( "[name=d]" ).length ) {
 				rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" );
 			}

 			// FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled)
 			// IE8 throws error here and will not see later tests
-			if ( el.querySelectorAll(":enabled").length !== 2 ) {
+			if ( el.querySelectorAll( ":enabled" ).length !== 2 ) {
 				rbuggyQSA.push( ":enabled", ":disabled" );
 			}

 			// Support: IE9-11+
 			// IE's :disabled selector does not pick up the children of disabled fieldsets
 			docElem.appendChild( el ).disabled = true;
-			if ( el.querySelectorAll(":disabled").length !== 2 ) {
+			if ( el.querySelectorAll( ":disabled" ).length !== 2 ) {
 				rbuggyQSA.push( ":enabled", ":disabled" );
 			}

+			// Support: Opera 10 - 11 only
 			// Opera 10-11 does not throw on post-comma invalid pseudos
-			el.querySelectorAll("*,:x");
-			rbuggyQSA.push(",.*:");
-		});
+			el.querySelectorAll( "*,:x" );
+			rbuggyQSA.push( ",.*:" );
+		} );
 	}

-	if ( (support.matchesSelector = rnative.test( (matches = docElem.matches ||
+	if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches ||
 		docElem.webkitMatchesSelector ||
 		docElem.mozMatchesSelector ||
 		docElem.oMatchesSelector ||
-		docElem.msMatchesSelector) )) ) {
+		docElem.msMatchesSelector ) ) ) ) {
+
+		assert( function( el ) {

-		assert(function( el ) {
 			// Check to see if it's possible to do 
matchesSelector // on a disconnected node (IE 9) support.disconnectedMatch = matches.call( el, "*" ); @@ -1353,11 +1433,11 @@ setDocument = Sizzle.setDocument = function( node ) { // Gecko does not error, returns false instead matches.call( el, "[s!='']:x" ); rbuggyMatches.push( "!=", pseudos ); - }); + } ); } - rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); - rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); + rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) ); /* Contains ---------------------------------------------------------------------- */ @@ -1374,11 +1454,11 @@ setDocument = Sizzle.setDocument = function( node ) { adown.contains ? adown.contains( bup ) : a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - )); + ) ); } : function( a, b ) { if ( b ) { - while ( (b = b.parentNode) ) { + while ( ( b = b.parentNode ) ) { if ( b === a ) { return true; } @@ -1407,7 +1487,11 @@ setDocument = Sizzle.setDocument = function( node ) { } // Calculate position if both inputs belong to the same document - compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ? a.compareDocumentPosition( b ) : // Otherwise we know they are disconnected @@ -1415,13 +1499,24 @@ setDocument = Sizzle.setDocument = function( node ) { // Disconnected nodes if ( compare & 1 || - (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { + ( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) { // Choose the first element that is related to our preferred document - if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( a == document || a.ownerDocument == preferredDoc && + contains( preferredDoc, a ) ) { return -1; } - if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( b == document || b.ownerDocument == preferredDoc && + contains( preferredDoc, b ) ) { return 1; } @@ -1434,6 +1529,7 @@ setDocument = Sizzle.setDocument = function( node ) { return compare & 4 ? -1 : 1; } : function( a, b ) { + // Exit early if the nodes are identical if ( a === b ) { hasDuplicate = true; @@ -1449,8 +1545,14 @@ setDocument = Sizzle.setDocument = function( node ) { // Parentless nodes are either documents or disconnected if ( !aup || !bup ) { - return a === document ? -1 : - b === document ? 1 : + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + /* eslint-disable eqeqeq */ + return a == document ? -1 : + b == document ? 1 : + /* eslint-enable eqeqeq */ aup ? -1 : bup ? 1 : sortInput ? 
@@ -1464,26 +1566,32 @@ setDocument = Sizzle.setDocument = function( node ) { // Otherwise we need full lists of their ancestors for comparison cur = a; - while ( (cur = cur.parentNode) ) { + while ( ( cur = cur.parentNode ) ) { ap.unshift( cur ); } cur = b; - while ( (cur = cur.parentNode) ) { + while ( ( cur = cur.parentNode ) ) { bp.unshift( cur ); } // Walk down the tree looking for a discrepancy - while ( ap[i] === bp[i] ) { + while ( ap[ i ] === bp[ i ] ) { i++; } return i ? + // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[i], bp[i] ) : + siblingCheck( ap[ i ], bp[ i ] ) : // Otherwise nodes in our document sort first - ap[i] === preferredDoc ? -1 : - bp[i] === preferredDoc ? 1 : + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + /* eslint-disable eqeqeq */ + ap[ i ] == preferredDoc ? -1 : + bp[ i ] == preferredDoc ? 1 : + /* eslint-enable eqeqeq */ 0; }; @@ -1495,10 +1603,7 @@ Sizzle.matches = function( expr, elements ) { }; Sizzle.matchesSelector = function( elem, expr ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } + setDocument( elem ); if ( support.matchesSelector && documentIsHTML && !nonnativeSelectorCache[ expr + " " ] && @@ -1510,12 +1615,13 @@ Sizzle.matchesSelector = function( elem, expr ) { // IE 9's matchesSelector returns false on disconnected nodes if ( ret || support.disconnectedMatch || - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { + + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { return ret; } - } catch (e) { + } catch ( e ) { nonnativeSelectorCache( expr, true ); } } @@ -1524,20 +1630,31 @@ Sizzle.matchesSelector = function( elem, expr ) { }; Sizzle.contains = function( context, elem ) { + // Set document vars if needed - if ( ( context.ownerDocument || context ) !== document ) { + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( ( context.ownerDocument || context ) != document ) { setDocument( context ); } return contains( context, elem ); }; Sizzle.attr = function( elem, name ) { + // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( ( elem.ownerDocument || elem ) != document ) { setDocument( elem ); } var fn = Expr.attrHandle[ name.toLowerCase() ], + // Don't get fooled by Object.prototype properties (jQuery #13807) val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? fn( elem, name, !documentIsHTML ) : @@ -1547,13 +1664,13 @@ Sizzle.attr = function( elem, name ) { val : support.attributes || !documentIsHTML ? elem.getAttribute( name ) : - (val = elem.getAttributeNode(name)) && val.specified ? + ( val = elem.getAttributeNode( name ) ) && val.specified ? 
val.value : null; }; Sizzle.escape = function( sel ) { - return (sel + "").replace( rcssescape, fcssescape ); + return ( sel + "" ).replace( rcssescape, fcssescape ); }; Sizzle.error = function( msg ) { @@ -1576,7 +1693,7 @@ Sizzle.uniqueSort = function( results ) { results.sort( sortOrder ); if ( hasDuplicate ) { - while ( (elem = results[i++]) ) { + while ( ( elem = results[ i++ ] ) ) { if ( elem === results[ i ] ) { j = duplicates.push( i ); } @@ -1604,17 +1721,21 @@ getText = Sizzle.getText = function( elem ) { nodeType = elem.nodeType; if ( !nodeType ) { + // If no nodeType, this is expected to be an array - while ( (node = elem[i++]) ) { + while ( ( node = elem[ i++ ] ) ) { + // Do not traverse comment nodes ret += getText( node ); } } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + // Use textContent for elements // innerText usage removed for consistency of new lines (jQuery #11153) if ( typeof elem.textContent === "string" ) { return elem.textContent; } else { + // Traverse its children for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { ret += getText( elem ); @@ -1623,6 +1744,7 @@ getText = Sizzle.getText = function( elem ) { } else if ( nodeType === 3 || nodeType === 4 ) { return elem.nodeValue; } + // Do not include comment or processing instruction nodes return ret; @@ -1650,19 +1772,21 @@ Expr = Sizzle.selectors = { preFilter: { "ATTR": function( match ) { - match[1] = match[1].replace( runescape, funescape ); + match[ 1 ] = match[ 1 ].replace( runescape, funescape ); // Move the given value to match[3] whether quoted or unquoted - match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); + match[ 3 ] = ( match[ 3 ] || match[ 4 ] || + match[ 5 ] || "" ).replace( runescape, funescape ); - if ( match[2] === "~=" ) { - match[3] = " " + match[3] + " "; + if ( match[ 2 ] === "~=" ) { + match[ 3 ] = " " + match[ 3 ] + " "; } return match.slice( 0, 4 ); }, "CHILD": function( match ) { + /* matches from matchExpr["CHILD"] 1 type (only|nth|...) 2 what (child|of-type) @@ -1673,22 +1797,25 @@ Expr = Sizzle.selectors = { 7 sign of y-component 8 y of y-component */ - match[1] = match[1].toLowerCase(); + match[ 1 ] = match[ 1 ].toLowerCase(); + + if ( match[ 1 ].slice( 0, 3 ) === "nth" ) { - if ( match[1].slice( 0, 3 ) === "nth" ) { // nth-* requires argument - if ( !match[3] ) { - Sizzle.error( match[0] ); + if ( !match[ 3 ] ) { + Sizzle.error( match[ 0 ] ); } // numeric x and y parameters for Expr.filter.CHILD // remember that false/true cast respectively to 0/1 - match[4] = +( match[4] ? match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); - match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); + match[ 4 ] = +( match[ 4 ] ? 
+ match[ 5 ] + ( match[ 6 ] || 1 ) : + 2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) ); + match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" ); - // other types prohibit arguments - } else if ( match[3] ) { - Sizzle.error( match[0] ); + // other types prohibit arguments + } else if ( match[ 3 ] ) { + Sizzle.error( match[ 0 ] ); } return match; @@ -1696,26 +1823,28 @@ Expr = Sizzle.selectors = { "PSEUDO": function( match ) { var excess, - unquoted = !match[6] && match[2]; + unquoted = !match[ 6 ] && match[ 2 ]; - if ( matchExpr["CHILD"].test( match[0] ) ) { + if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) { return null; } // Accept quoted arguments as-is - if ( match[3] ) { - match[2] = match[4] || match[5] || ""; + if ( match[ 3 ] ) { + match[ 2 ] = match[ 4 ] || match[ 5 ] || ""; // Strip excess characters from unquoted arguments } else if ( unquoted && rpseudo.test( unquoted ) && + // Get excess from tokenize (recursively) - (excess = tokenize( unquoted, true )) && + ( excess = tokenize( unquoted, true ) ) && + // advance to the next closing parenthesis - (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { + ( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) { // excess is a negative index - match[0] = match[0].slice( 0, excess ); - match[2] = unquoted.slice( 0, excess ); + match[ 0 ] = match[ 0 ].slice( 0, excess ); + match[ 2 ] = unquoted.slice( 0, excess ); } // Return only captures needed by the pseudo filter method (type and argument) @@ -1728,7 +1857,9 @@ Expr = Sizzle.selectors = { "TAG": function( nodeNameSelector ) { var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); return nodeNameSelector === "*" ? - function() { return true; } : + function() { + return true; + } : function( elem ) { return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; }; @@ -1738,10 +1869,16 @@ Expr = Sizzle.selectors = { var pattern = classCache[ className + " " ]; return pattern || - (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && - classCache( className, function( elem ) { - return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); - }); + ( pattern = new RegExp( "(^|" + whitespace + + ")" + className + "(" + whitespace + "|$)" ) ) && classCache( + className, function( elem ) { + return pattern.test( + typeof elem.className === "string" && elem.className || + typeof elem.getAttribute !== "undefined" && + elem.getAttribute( "class" ) || + "" + ); + } ); }, "ATTR": function( name, operator, check ) { @@ -1757,6 +1894,8 @@ Expr = Sizzle.selectors = { result += ""; + /* eslint-disable max-len */ + return operator === "=" ? result === check : operator === "!=" ? result !== check : operator === "^=" ? check && result.indexOf( check ) === 0 : @@ -1765,10 +1904,12 @@ Expr = Sizzle.selectors = { operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : operator === "|=" ? 
result === check || result.slice( 0, check.length + 1 ) === check + "-" : false; + /* eslint-enable max-len */ + }; }, - "CHILD": function( type, what, argument, first, last ) { + "CHILD": function( type, what, _argument, first, last ) { var simple = type.slice( 0, 3 ) !== "nth", forward = type.slice( -4 ) !== "last", ofType = what === "of-type"; @@ -1780,7 +1921,7 @@ Expr = Sizzle.selectors = { return !!elem.parentNode; } : - function( elem, context, xml ) { + function( elem, _context, xml ) { var cache, uniqueCache, outerCache, node, nodeIndex, start, dir = simple !== forward ? "nextSibling" : "previousSibling", parent = elem.parentNode, @@ -1794,7 +1935,7 @@ Expr = Sizzle.selectors = { if ( simple ) { while ( dir ) { node = elem; - while ( (node = node[ dir ]) ) { + while ( ( node = node[ dir ] ) ) { if ( ofType ? node.nodeName.toLowerCase() === name : node.nodeType === 1 ) { @@ -1802,6 +1943,7 @@ Expr = Sizzle.selectors = { return false; } } + // Reverse direction for :only-* (if we haven't yet done so) start = dir = type === "only" && !start && "nextSibling"; } @@ -1817,22 +1959,22 @@ Expr = Sizzle.selectors = { // ...in a gzip-friendly way node = parent; - outerCache = node[ expando ] || (node[ expando ] = {}); + outerCache = node[ expando ] || ( node[ expando ] = {} ); // Support: IE <9 only // Defend against cloned attroperties (jQuery gh-1709) uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); + ( outerCache[ node.uniqueID ] = {} ); cache = uniqueCache[ type ] || []; nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; diff = nodeIndex && cache[ 2 ]; node = nodeIndex && parent.childNodes[ nodeIndex ]; - while ( (node = ++nodeIndex && node && node[ dir ] || + while ( ( node = ++nodeIndex && node && node[ dir ] || // Fallback to seeking `elem` from the start - (diff = nodeIndex = 0) || start.pop()) ) { + ( diff = nodeIndex = 0 ) || start.pop() ) ) { // When found, cache indexes on `parent` and break if ( node.nodeType === 1 && ++diff && node === elem ) { @@ -1842,16 +1984,18 @@ Expr = Sizzle.selectors = { } } else { + // Use previously-cached element index if available if ( useCache ) { + // ...in a gzip-friendly way node = elem; - outerCache = node[ expando ] || (node[ expando ] = {}); + outerCache = node[ expando ] || ( node[ expando ] = {} ); // Support: IE <9 only // Defend against cloned attroperties (jQuery gh-1709) uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); + ( outerCache[ node.uniqueID ] = {} ); cache = uniqueCache[ type ] || []; nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; @@ -1861,9 +2005,10 @@ Expr = Sizzle.selectors = { // xml :nth-child(...) // or :nth-last-child(...) or :nth(-last)?-of-type(...) if ( diff === false ) { + // Use the same loop as above to seek `elem` from the start - while ( (node = ++nodeIndex && node && node[ dir ] || - (diff = nodeIndex = 0) || start.pop()) ) { + while ( ( node = ++nodeIndex && node && node[ dir ] || + ( diff = nodeIndex = 0 ) || start.pop() ) ) { if ( ( ofType ? 
node.nodeName.toLowerCase() === name : @@ -1872,12 +2017,13 @@ Expr = Sizzle.selectors = { // Cache the index of each encountered element if ( useCache ) { - outerCache = node[ expando ] || (node[ expando ] = {}); + outerCache = node[ expando ] || + ( node[ expando ] = {} ); // Support: IE <9 only // Defend against cloned attroperties (jQuery gh-1709) uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); + ( outerCache[ node.uniqueID ] = {} ); uniqueCache[ type ] = [ dirruns, diff ]; } @@ -1898,6 +2044,7 @@ Expr = Sizzle.selectors = { }, "PSEUDO": function( pseudo, argument ) { + // pseudo-class names are case-insensitive // http://www.w3.org/TR/selectors/#pseudo-classes // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters @@ -1917,15 +2064,15 @@ Expr = Sizzle.selectors = { if ( fn.length > 1 ) { args = [ pseudo, pseudo, "", argument ]; return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? - markFunction(function( seed, matches ) { + markFunction( function( seed, matches ) { var idx, matched = fn( seed, argument ), i = matched.length; while ( i-- ) { - idx = indexOf( seed, matched[i] ); - seed[ idx ] = !( matches[ idx ] = matched[i] ); + idx = indexOf( seed, matched[ i ] ); + seed[ idx ] = !( matches[ idx ] = matched[ i ] ); } - }) : + } ) : function( elem ) { return fn( elem, 0, args ); }; @@ -1936,8 +2083,10 @@ Expr = Sizzle.selectors = { }, pseudos: { + // Potentially complex pseudos - "not": markFunction(function( selector ) { + "not": markFunction( function( selector ) { + // Trim the selector passed to compile // to avoid treating leading and trailing // spaces as combinators @@ -1946,39 +2095,40 @@ Expr = Sizzle.selectors = { matcher = compile( selector.replace( rtrim, "$1" ) ); return matcher[ expando ] ? - markFunction(function( seed, matches, context, xml ) { + markFunction( function( seed, matches, _context, xml ) { var elem, unmatched = matcher( seed, null, xml, [] ), i = seed.length; // Match elements unmatched by `matcher` while ( i-- ) { - if ( (elem = unmatched[i]) ) { - seed[i] = !(matches[i] = elem); + if ( ( elem = unmatched[ i ] ) ) { + seed[ i ] = !( matches[ i ] = elem ); } } - }) : - function( elem, context, xml ) { - input[0] = elem; + } ) : + function( elem, _context, xml ) { + input[ 0 ] = elem; matcher( input, null, xml, results ); + // Don't keep the element (issue #299) - input[0] = null; + input[ 0 ] = null; return !results.pop(); }; - }), + } ), - "has": markFunction(function( selector ) { + "has": markFunction( function( selector ) { return function( elem ) { return Sizzle( selector, elem ).length > 0; }; - }), + } ), - "contains": markFunction(function( text ) { + "contains": markFunction( function( text ) { text = text.replace( runescape, funescape ); return function( elem ) { return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; }; - }), + } ), // "Whether an element is represented by a :lang() selector // is based solely on the element's language value @@ -1988,25 +2138,26 @@ Expr = Sizzle.selectors = { // The identifier C does not have to be a valid language name." // http://www.w3.org/TR/selectors/#lang-pseudo "lang": markFunction( function( lang ) { + // lang value must be a valid identifier - if ( !ridentifier.test(lang || "") ) { + if ( !ridentifier.test( lang || "" ) ) { Sizzle.error( "unsupported lang: " + lang ); } lang = lang.replace( runescape, funescape ).toLowerCase(); return function( elem ) { var elemLang; do { - if ( (elemLang = documentIsHTML ? 
+ if ( ( elemLang = documentIsHTML ? elem.lang : - elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { + elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) { elemLang = elemLang.toLowerCase(); return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; } - } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); + } while ( ( elem = elem.parentNode ) && elem.nodeType === 1 ); return false; }; - }), + } ), // Miscellaneous "target": function( elem ) { @@ -2019,7 +2170,9 @@ Expr = Sizzle.selectors = { }, "focus": function( elem ) { - return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); + return elem === document.activeElement && + ( !document.hasFocus || document.hasFocus() ) && + !!( elem.type || elem.href || ~elem.tabIndex ); }, // Boolean properties @@ -2027,16 +2180,20 @@ Expr = Sizzle.selectors = { "disabled": createDisabledPseudo( true ), "checked": function( elem ) { + // In CSS3, :checked should return both checked and selected elements // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked var nodeName = elem.nodeName.toLowerCase(); - return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); + return ( nodeName === "input" && !!elem.checked ) || + ( nodeName === "option" && !!elem.selected ); }, "selected": function( elem ) { + // Accessing this property makes selected-by-default // options in Safari work properly if ( elem.parentNode ) { + // eslint-disable-next-line no-unused-expressions elem.parentNode.selectedIndex; } @@ -2045,6 +2202,7 @@ Expr = Sizzle.selectors = { // Contents "empty": function( elem ) { + // http://www.w3.org/TR/selectors/#empty-pseudo // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), // but not by others (comment: 8; processing instruction: 7; etc.) @@ -2058,7 +2216,7 @@ Expr = Sizzle.selectors = { }, "parent": function( elem ) { - return !Expr.pseudos["empty"]( elem ); + return !Expr.pseudos[ "empty" ]( elem ); }, // Element/input types @@ -2082,39 +2240,40 @@ Expr = Sizzle.selectors = { // Support: IE<8 // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" - ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); + ( ( attr = elem.getAttribute( "type" ) ) == null || + attr.toLowerCase() === "text" ); }, // Position-in-collection - "first": createPositionalPseudo(function() { + "first": createPositionalPseudo( function() { return [ 0 ]; - }), + } ), - "last": createPositionalPseudo(function( matchIndexes, length ) { + "last": createPositionalPseudo( function( _matchIndexes, length ) { return [ length - 1 ]; - }), + } ), - "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { + "eq": createPositionalPseudo( function( _matchIndexes, length, argument ) { return [ argument < 0 ? 
argument + length : argument ]; - }), + } ), - "even": createPositionalPseudo(function( matchIndexes, length ) { + "even": createPositionalPseudo( function( matchIndexes, length ) { var i = 0; for ( ; i < length; i += 2 ) { matchIndexes.push( i ); } return matchIndexes; - }), + } ), - "odd": createPositionalPseudo(function( matchIndexes, length ) { + "odd": createPositionalPseudo( function( matchIndexes, length ) { var i = 1; for ( ; i < length; i += 2 ) { matchIndexes.push( i ); } return matchIndexes; - }), + } ), - "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { + "lt": createPositionalPseudo( function( matchIndexes, length, argument ) { var i = argument < 0 ? argument + length : argument > length ? @@ -2124,19 +2283,19 @@ Expr = Sizzle.selectors = { matchIndexes.push( i ); } return matchIndexes; - }), + } ), - "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { + "gt": createPositionalPseudo( function( matchIndexes, length, argument ) { var i = argument < 0 ? argument + length : argument; for ( ; ++i < length; ) { matchIndexes.push( i ); } return matchIndexes; - }) + } ) } }; -Expr.pseudos["nth"] = Expr.pseudos["eq"]; +Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ]; // Add button/input type pseudos for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { @@ -2167,37 +2326,39 @@ tokenize = Sizzle.tokenize = function( selector, parseOnly ) { while ( soFar ) { // Comma and first run - if ( !matched || (match = rcomma.exec( soFar )) ) { + if ( !matched || ( match = rcomma.exec( soFar ) ) ) { if ( match ) { + // Don't consume trailing commas as valid - soFar = soFar.slice( match[0].length ) || soFar; + soFar = soFar.slice( match[ 0 ].length ) || soFar; } - groups.push( (tokens = []) ); + groups.push( ( tokens = [] ) ); } matched = false; // Combinators - if ( (match = rcombinators.exec( soFar )) ) { + if ( ( match = rcombinators.exec( soFar ) ) ) { matched = match.shift(); - tokens.push({ + tokens.push( { value: matched, + // Cast descendant combinators to space - type: match[0].replace( rtrim, " " ) - }); + type: match[ 0 ].replace( rtrim, " " ) + } ); soFar = soFar.slice( matched.length ); } // Filters for ( type in Expr.filter ) { - if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || - (match = preFilters[ type ]( match ))) ) { + if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] || + ( match = preFilters[ type ]( match ) ) ) ) { matched = match.shift(); - tokens.push({ + tokens.push( { value: matched, type: type, matches: match - }); + } ); soFar = soFar.slice( matched.length ); } } @@ -2214,6 +2375,7 @@ tokenize = Sizzle.tokenize = function( selector, parseOnly ) { soFar.length : soFar ? Sizzle.error( selector ) : + // Cache the tokens tokenCache( selector, groups ).slice( 0 ); }; @@ -2223,7 +2385,7 @@ function toSelector( tokens ) { len = tokens.length, selector = ""; for ( ; i < len; i++ ) { - selector += tokens[i].value; + selector += tokens[ i ].value; } return selector; } @@ -2236,9 +2398,10 @@ function addCombinator( matcher, combinator, base ) { doneName = done++; return combinator.first ? 
+ // Check against closest ancestor/preceding element function( elem, context, xml ) { - while ( (elem = elem[ dir ]) ) { + while ( ( elem = elem[ dir ] ) ) { if ( elem.nodeType === 1 || checkNonElements ) { return matcher( elem, context, xml ); } @@ -2253,7 +2416,7 @@ function addCombinator( matcher, combinator, base ) { // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching if ( xml ) { - while ( (elem = elem[ dir ]) ) { + while ( ( elem = elem[ dir ] ) ) { if ( elem.nodeType === 1 || checkNonElements ) { if ( matcher( elem, context, xml ) ) { return true; @@ -2261,27 +2424,29 @@ function addCombinator( matcher, combinator, base ) { } } } else { - while ( (elem = elem[ dir ]) ) { + while ( ( elem = elem[ dir ] ) ) { if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || (elem[ expando ] = {}); + outerCache = elem[ expando ] || ( elem[ expando ] = {} ); // Support: IE <9 only // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); + uniqueCache = outerCache[ elem.uniqueID ] || + ( outerCache[ elem.uniqueID ] = {} ); if ( skip && skip === elem.nodeName.toLowerCase() ) { elem = elem[ dir ] || elem; - } else if ( (oldCache = uniqueCache[ key ]) && + } else if ( ( oldCache = uniqueCache[ key ] ) && oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { // Assign to newCache so results back-propagate to previous elements - return (newCache[ 2 ] = oldCache[ 2 ]); + return ( newCache[ 2 ] = oldCache[ 2 ] ); } else { + // Reuse newcache so results back-propagate to previous elements uniqueCache[ key ] = newCache; // A match means we're done; a fail means we have to keep checking - if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { + if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) { return true; } } @@ -2297,20 +2462,20 @@ function elementMatcher( matchers ) { function( elem, context, xml ) { var i = matchers.length; while ( i-- ) { - if ( !matchers[i]( elem, context, xml ) ) { + if ( !matchers[ i ]( elem, context, xml ) ) { return false; } } return true; } : - matchers[0]; + matchers[ 0 ]; } function multipleContexts( selector, contexts, results ) { var i = 0, len = contexts.length; for ( ; i < len; i++ ) { - Sizzle( selector, contexts[i], results ); + Sizzle( selector, contexts[ i ], results ); } return results; } @@ -2323,7 +2488,7 @@ function condense( unmatched, map, filter, context, xml ) { mapped = map != null; for ( ; i < len; i++ ) { - if ( (elem = unmatched[i]) ) { + if ( ( elem = unmatched[ i ] ) ) { if ( !filter || filter( elem, context, xml ) ) { newUnmatched.push( elem ); if ( mapped ) { @@ -2343,14 +2508,18 @@ function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postS if ( postFinder && !postFinder[ expando ] ) { postFinder = setMatcher( postFinder, postSelector ); } - return markFunction(function( seed, results, context, xml ) { + return markFunction( function( seed, results, context, xml ) { var temp, i, elem, preMap = [], postMap = [], preexisting = results.length, // Get initial elements from seed or context - elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), + elems = seed || multipleContexts( + selector || "*", + context.nodeType ? [ context ] : context, + [] + ), // Prefilter to get matcher input, preserving a map for seed-results synchronization matcherIn = preFilter && ( seed || !selector ) ? 
@@ -2358,6 +2527,7 @@ function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postS elems, matcherOut = matcher ? + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, postFinder || ( seed ? preFilter : preexisting || postFilter ) ? @@ -2381,8 +2551,8 @@ function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postS // Un-match failing elements by moving them back to matcherIn i = temp.length; while ( i-- ) { - if ( (elem = temp[i]) ) { - matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); + if ( ( elem = temp[ i ] ) ) { + matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem ); } } } @@ -2390,25 +2560,27 @@ function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postS if ( seed ) { if ( postFinder || preFilter ) { if ( postFinder ) { + // Get the final matcherOut by condensing this intermediate into postFinder contexts temp = []; i = matcherOut.length; while ( i-- ) { - if ( (elem = matcherOut[i]) ) { + if ( ( elem = matcherOut[ i ] ) ) { + // Restore matcherIn since elem is not yet a final match - temp.push( (matcherIn[i] = elem) ); + temp.push( ( matcherIn[ i ] = elem ) ); } } - postFinder( null, (matcherOut = []), temp, xml ); + postFinder( null, ( matcherOut = [] ), temp, xml ); } // Move matched elements from seed to results to keep them synchronized i = matcherOut.length; while ( i-- ) { - if ( (elem = matcherOut[i]) && - (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { + if ( ( elem = matcherOut[ i ] ) && + ( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) { - seed[temp] = !(results[temp] = elem); + seed[ temp ] = !( results[ temp ] = elem ); } } } @@ -2426,14 +2598,14 @@ function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postS push.apply( results, matcherOut ); } } - }); + } ); } function matcherFromTokens( tokens ) { var checkContext, matcher, j, len = tokens.length, - leadingRelative = Expr.relative[ tokens[0].type ], - implicitRelative = leadingRelative || Expr.relative[" "], + leadingRelative = Expr.relative[ tokens[ 0 ].type ], + implicitRelative = leadingRelative || Expr.relative[ " " ], i = leadingRelative ? 1 : 0, // The foundational matcher ensures that elements are reachable from top-level context(s) @@ -2445,38 +2617,43 @@ function matcherFromTokens( tokens ) { }, implicitRelative, true ), matchers = [ function( elem, context, xml ) { var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - (checkContext = context).nodeType ? + ( checkContext = context ).nodeType ? 
matchContext( elem, context, xml ) :
 				matchAnyContext( elem, context, xml ) );
+
 			// Avoid hanging onto element (issue #299)
 			checkContext = null;
 			return ret;
 		} ];

 	for ( ; i < len; i++ ) {
-		if ( (matcher = Expr.relative[ tokens[i].type ]) ) {
-			matchers = [ addCombinator(elementMatcher( matchers ), matcher) ];
+		if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) {
+			matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ];
 		} else {
-			matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches );
+			matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches );

 			// Return special upon seeing a positional matcher
 			if ( matcher[ expando ] ) {
+
 				// Find the next relative operator (if any) for proper handling
 				j = ++i;
 				for ( ; j < len; j++ ) {
-					if ( Expr.relative[ tokens[j].type ] ) {
+					if ( Expr.relative[ tokens[ j ].type ] ) {
 						break;
 					}
 				}
 				return setMatcher(
 					i > 1 && elementMatcher( matchers ),
 					i > 1 && toSelector(
-						// If the preceding token was a descendant combinator, insert an implicit any-element `*`
-						tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? "*" : "" })
+
+						// If the preceding token was a descendant combinator, insert an implicit any-element `*`
+						tokens
+							.slice( 0, i - 1 )
+							.concat( { value: tokens[ i - 2 ].type === " " ? "*" : "" } )
 					).replace( rtrim, "$1" ),
 					matcher,
 					i < j && matcherFromTokens( tokens.slice( i, j ) ),
-					j < len && matcherFromTokens( (tokens = tokens.slice( j )) ),
+					j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ),
 					j < len && toSelector( tokens )
 				);
 			}
@@ -2497,28 +2674,40 @@ function matcherFromGroupMatchers( elementMatchers, setMatchers ) {
 			unmatched = seed && [],
 			setMatched = [],
 			contextBackup = outermostContext,
+
 			// We must always have either seed elements or outermost context
-			elems = seed || byElement && Expr.find["TAG"]( "*", outermost ),
+			elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ),
+
 			// Use integer dirruns iff this is the outermost matcher
-			dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1),
+			dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ),
 			len = elems.length;

 		if ( outermost ) {
-			outermostContext = context === document || context || outermost;
+
+			// Support: IE 11+, Edge 17 - 18+
+			// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
+			// two documents; shallow comparisons work.
+			// eslint-disable-next-line eqeqeq
+			outermostContext = context == document || context || outermost;
 		}

 		// Add elements passing elementMatchers directly to results
 		// Support: IE<9, Safari
 		// Tolerate NodeList properties (IE: "length"; Safari: <number>) matching elements by id
-		for ( ; i !== len && (elem = elems[i]) != null; i++ ) {
+		for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) {
 			if ( byElement && elem ) {
 				j = 0;
+
+				// Support: IE 11+, Edge 17 - 18+
+				// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
+				// two documents; shallow comparisons work. 
+ // eslint-disable-next-line eqeqeq + if ( !context && elem.ownerDocument != document ) { setDocument( elem ); xml = !documentIsHTML; } - while ( (matcher = elementMatchers[j++]) ) { - if ( matcher( elem, context || document, xml) ) { + while ( ( matcher = elementMatchers[ j++ ] ) ) { + if ( matcher( elem, context || document, xml ) ) { results.push( elem ); break; } @@ -2530,8 +2719,9 @@ function matcherFromGroupMatchers( elementMatchers, setMatchers ) { // Track unmatched elements for set filters if ( bySet ) { + // They will have gone through all possible matchers - if ( (elem = !matcher && elem) ) { + if ( ( elem = !matcher && elem ) ) { matchedCount--; } @@ -2555,16 +2745,17 @@ function matcherFromGroupMatchers( elementMatchers, setMatchers ) { // numerically zero. if ( bySet && i !== matchedCount ) { j = 0; - while ( (matcher = setMatchers[j++]) ) { + while ( ( matcher = setMatchers[ j++ ] ) ) { matcher( unmatched, setMatched, context, xml ); } if ( seed ) { + // Reintegrate element matches to eliminate the need for sorting if ( matchedCount > 0 ) { while ( i-- ) { - if ( !(unmatched[i] || setMatched[i]) ) { - setMatched[i] = pop.call( results ); + if ( !( unmatched[ i ] || setMatched[ i ] ) ) { + setMatched[ i ] = pop.call( results ); } } } @@ -2605,13 +2796,14 @@ compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { cached = compilerCache[ selector + " " ]; if ( !cached ) { + // Generate a function of recursive functions that can be used to check each element if ( !match ) { match = tokenize( selector ); } i = match.length; while ( i-- ) { - cached = matcherFromTokens( match[i] ); + cached = matcherFromTokens( match[ i ] ); if ( cached[ expando ] ) { setMatchers.push( cached ); } else { @@ -2620,7 +2812,10 @@ compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { } // Cache the compiled function - cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); + cached = compilerCache( + selector, + matcherFromGroupMatchers( elementMatchers, setMatchers ) + ); // Save selector and tokenization cached.selector = selector; @@ -2640,7 +2835,7 @@ compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { select = Sizzle.select = function( selector, context, results, seed ) { var i, tokens, token, type, find, compiled = typeof selector === "function" && selector, - match = !seed && tokenize( (selector = compiled.selector || selector) ); + match = !seed && tokenize( ( selector = compiled.selector || selector ) ); results = results || []; @@ -2649,11 +2844,12 @@ select = Sizzle.select = function( selector, context, results, seed ) { if ( match.length === 1 ) { // Reduce context if the leading compound selector is an ID - tokens = match[0] = match[0].slice( 0 ); - if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && - context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { + tokens = match[ 0 ] = match[ 0 ].slice( 0 ); + if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" && + context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) { - context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; + context = ( Expr.find[ "ID" ]( token.matches[ 0 ] + .replace( runescape, funescape ), context ) || [] )[ 0 ]; if ( !context ) { return results; @@ -2666,20 +2862,22 @@ select = Sizzle.select = function( selector, context, results, seed ) { } // Fetch a seed set for right-to-left 
matching - i = matchExpr["needsContext"].test( selector ) ? 0 : tokens.length; + i = matchExpr[ "needsContext" ].test( selector ) ? 0 : tokens.length; while ( i-- ) { - token = tokens[i]; + token = tokens[ i ]; // Abort if we hit a combinator - if ( Expr.relative[ (type = token.type) ] ) { + if ( Expr.relative[ ( type = token.type ) ] ) { break; } - if ( (find = Expr.find[ type ]) ) { + if ( ( find = Expr.find[ type ] ) ) { + // Search, expanding context for leading sibling combinators - if ( (seed = find( - token.matches[0].replace( runescape, funescape ), - rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context - )) ) { + if ( ( seed = find( + token.matches[ 0 ].replace( runescape, funescape ), + rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) || + context + ) ) ) { // If seed is empty or no tokens remain, we can return early tokens.splice( i, 1 ); @@ -2710,7 +2908,7 @@ select = Sizzle.select = function( selector, context, results, seed ) { // One-time assignments // Sort stability -support.sortStable = expando.split("").sort( sortOrder ).join("") === expando; +support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando; // Support: Chrome 14-35+ // Always assume duplicates if they aren't passed to the comparison function @@ -2721,58 +2919,59 @@ setDocument(); // Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) // Detached nodes confoundingly follow *each other* -support.sortDetached = assert(function( el ) { +support.sortDetached = assert( function( el ) { + // Should return 1, but returns 4 (following) - return el.compareDocumentPosition( document.createElement("fieldset") ) & 1; -}); + return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1; +} ); // Support: IE<8 // Prevent attribute/property "interpolation" // https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx -if ( !assert(function( el ) { +if ( !assert( function( el ) { el.innerHTML = ""; - return el.firstChild.getAttribute("href") === "#" ; -}) ) { + return el.firstChild.getAttribute( "href" ) === "#"; +} ) ) { addHandle( "type|href|height|width", function( elem, name, isXML ) { if ( !isXML ) { return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); } - }); + } ); } // Support: IE<9 // Use defaultValue in place of getAttribute("value") -if ( !support.attributes || !assert(function( el ) { +if ( !support.attributes || !assert( function( el ) { el.innerHTML = ""; el.firstChild.setAttribute( "value", "" ); return el.firstChild.getAttribute( "value" ) === ""; -}) ) { - addHandle( "value", function( elem, name, isXML ) { +} ) ) { + addHandle( "value", function( elem, _name, isXML ) { if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { return elem.defaultValue; } - }); + } ); } // Support: IE<9 // Use getAttributeNode to fetch booleans when getAttribute lies -if ( !assert(function( el ) { - return el.getAttribute("disabled") == null; -}) ) { +if ( !assert( function( el ) { + return el.getAttribute( "disabled" ) == null; +} ) ) { addHandle( booleans, function( elem, name, isXML ) { var val; if ( !isXML ) { return elem[ name ] === true ? name.toLowerCase() : - (val = elem.getAttributeNode( name )) && val.specified ? + ( val = elem.getAttributeNode( name ) ) && val.specified ? 
val.value : - null; + null; } - }); + } ); } return Sizzle; -})( window ); +} )( window ); @@ -3141,7 +3340,7 @@ jQuery.each( { parents: function( elem ) { return dir( elem, "parentNode" ); }, - parentsUntil: function( elem, i, until ) { + parentsUntil: function( elem, _i, until ) { return dir( elem, "parentNode", until ); }, next: function( elem ) { @@ -3156,10 +3355,10 @@ jQuery.each( { prevAll: function( elem ) { return dir( elem, "previousSibling" ); }, - nextUntil: function( elem, i, until ) { + nextUntil: function( elem, _i, until ) { return dir( elem, "nextSibling", until ); }, - prevUntil: function( elem, i, until ) { + prevUntil: function( elem, _i, until ) { return dir( elem, "previousSibling", until ); }, siblings: function( elem ) { @@ -3169,7 +3368,13 @@ jQuery.each( { return siblings( elem.firstChild ); }, contents: function( elem ) { - if ( typeof elem.contentDocument !== "undefined" ) { + if ( elem.contentDocument != null && + + // Support: IE 11+ + // elements with no `data` attribute has an object + // `contentDocument` with a `null` prototype. + getProto( elem.contentDocument ) ) { + return elem.contentDocument; } @@ -3512,7 +3717,7 @@ jQuery.extend( { var fns = arguments; return jQuery.Deferred( function( newDefer ) { - jQuery.each( tuples, function( i, tuple ) { + jQuery.each( tuples, function( _i, tuple ) { // Map tuples (progress, done, fail) to arguments (done, fail, progress) var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; @@ -3965,7 +4170,7 @@ var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { // ...except when executing function values } else { bulk = fn; - fn = function( elem, key, value ) { + fn = function( elem, _key, value ) { return bulk.call( jQuery( elem ), value ); }; } @@ -4000,7 +4205,7 @@ var rmsPrefix = /^-ms-/, rdashAlpha = /-([a-z])/g; // Used by camelCase as callback to replace() -function fcamelCase( all, letter ) { +function fcamelCase( _all, letter ) { return letter.toUpperCase(); } @@ -4528,27 +4733,6 @@ var isHiddenWithinTree = function( elem, el ) { jQuery.css( elem, "display" ) === "none"; }; -var swap = function( elem, options, callback, args ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.apply( elem, args || [] ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - function adjustCSS( elem, prop, valueParts, tween ) { @@ -4719,11 +4903,40 @@ var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i ); -// We have to close these tags to support XHTML (#13200) -var wrapMap = { +( function() { + var fragment = document.createDocumentFragment(), + div = fragment.appendChild( document.createElement( "div" ) ), + input = document.createElement( "input" ); + + // Support: Android 4.0 - 4.3 only + // Check state lost if the name is set (#11217) + // Support: Windows Web Apps (WWA) + // `name` and `type` must use .setAttribute for WWA (#14901) + input.setAttribute( "type", "radio" ); + input.setAttribute( "checked", "checked" ); + input.setAttribute( "name", "t" ); + + div.appendChild( input ); + + // Support: Android <=4.1 only + // Older WebKit doesn't clone checked state correctly in fragments + support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; + + // Support: IE <=11 only + // Make sure textarea (and checkbox) defaultValue is properly cloned + 
div.innerHTML = ""; + support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; // Support: IE <=9 only - option: [ 1, "" ], + // IE <=9 replaces "; + support.option = !!div.lastChild; +} )(); + + +// We have to close these tags to support XHTML (#13200) +var wrapMap = { // XHTML parsers do not magically insert elements in the // same way that tag soup parsers do. So we cannot shorten @@ -4736,12 +4949,14 @@ var wrapMap = { _default: [ 0, "", "" ] }; -// Support: IE <=9 only -wrapMap.optgroup = wrapMap.option; - wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; wrapMap.th = wrapMap.td; +// Support: IE <=9 only +if ( !support.option ) { + wrapMap.optgroup = wrapMap.option = [ 1, "" ]; +} + function getAll( context, tag ) { @@ -4874,32 +5089,6 @@ function buildFragment( elems, context, scripts, selection, ignored ) { } -( function() { - var fragment = document.createDocumentFragment(), - div = fragment.appendChild( document.createElement( "div" ) ), - input = document.createElement( "input" ); - - // Support: Android 4.0 - 4.3 only - // Check state lost if the name is set (#11217) - // Support: Windows Web Apps (WWA) - // `name` and `type` must use .setAttribute for WWA (#14901) - input.setAttribute( "type", "radio" ); - input.setAttribute( "checked", "checked" ); - input.setAttribute( "name", "t" ); - - div.appendChild( input ); - - // Support: Android <=4.1 only - // Older WebKit doesn't clone checked state correctly in fragments - support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Support: IE <=11 only - // Make sure textarea (and checkbox) defaultValue is properly cloned - div.innerHTML = ""; - support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; -} )(); - - var rkeyEvent = /^key/, rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, @@ -5008,8 +5197,8 @@ jQuery.event = { special, handlers, type, namespaces, origType, elemData = dataPriv.get( elem ); - // Don't attach events to noData or text/comment nodes (but allow plain objects) - if ( !elemData ) { + // Only attach events to objects that accept data + if ( !acceptData( elem ) ) { return; } @@ -5033,7 +5222,7 @@ jQuery.event = { // Init the element's event structure and main handler, if this is the first if ( !( events = elemData.events ) ) { - events = elemData.events = {}; + events = elemData.events = Object.create( null ); } if ( !( eventHandle = elemData.handle ) ) { eventHandle = elemData.handle = function( e ) { @@ -5191,12 +5380,15 @@ jQuery.event = { dispatch: function( nativeEvent ) { - // Make a writable jQuery.Event from the native event object - var event = jQuery.event.fix( nativeEvent ); - var i, j, ret, matched, handleObj, handlerQueue, args = new Array( arguments.length ), - handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], + + // Make a writable jQuery.Event from the native event object + event = jQuery.event.fix( nativeEvent ), + + handlers = ( + dataPriv.get( this, "events" ) || Object.create( null ) + )[ event.type ] || [], special = jQuery.event.special[ event.type ] || {}; // Use the fix-ed jQuery.Event rather than the (read-only) native event @@ -5771,13 +5963,6 @@ jQuery.fn.extend( { var - /* eslint-disable max-len */ - - // See https://github.com/eslint/eslint/issues/3229 - rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi, - - /* eslint-enable */ - // Support: IE <=10 - 11, Edge 12 - 13 only // In IE/Edge using regex 
groups here causes severe slowdowns. // See https://connect.microsoft.com/IE/feedback/details/1736512/ @@ -5814,7 +5999,7 @@ function restoreScript( elem ) { } function cloneCopyEvent( src, dest ) { - var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events; + var i, l, type, pdataOld, udataOld, udataCur, events; if ( dest.nodeType !== 1 ) { return; @@ -5822,13 +6007,11 @@ function cloneCopyEvent( src, dest ) { // 1. Copy private data: events, handlers, etc. if ( dataPriv.hasData( src ) ) { - pdataOld = dataPriv.access( src ); - pdataCur = dataPriv.set( dest, pdataOld ); + pdataOld = dataPriv.get( src ); events = pdataOld.events; if ( events ) { - delete pdataCur.handle; - pdataCur.events = {}; + dataPriv.remove( dest, "handle events" ); for ( type in events ) { for ( i = 0, l = events[ type ].length; i < l; i++ ) { @@ -5864,7 +6047,7 @@ function fixInput( src, dest ) { function domManip( collection, args, callback, ignored ) { // Flatten any nested arrays - args = concat.apply( [], args ); + args = flat( args ); var fragment, first, scripts, hasScripts, node, doc, i = 0, @@ -5939,7 +6122,7 @@ function domManip( collection, args, callback, ignored ) { if ( jQuery._evalUrl && !node.noModule ) { jQuery._evalUrl( node.src, { nonce: node.nonce || node.getAttribute( "nonce" ) - } ); + }, doc ); } } else { DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); @@ -5976,7 +6159,7 @@ function remove( elem, selector, keepData ) { jQuery.extend( { htmlPrefilter: function( html ) { - return html.replace( rxhtmlTag, "<$1>" ); + return html; }, clone: function( elem, dataAndEvents, deepDataAndEvents ) { @@ -6238,6 +6421,27 @@ var getStyles = function( elem ) { return view.getComputedStyle( elem ); }; +var swap = function( elem, options, callback ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.call( elem ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); @@ -6295,7 +6499,7 @@ var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); } var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, - reliableMarginLeftVal, + reliableTrDimensionsVal, reliableMarginLeftVal, container = document.createElement( "div" ), div = document.createElement( "div" ); @@ -6330,6 +6534,35 @@ var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); scrollboxSize: function() { computeStyleTests(); return scrollboxSizeVal; + }, + + // Support: IE 9 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Behavior in IE 9 is more subtle than in newer versions & it passes + // some versions of this test; make sure not to make it pass there! 
+ reliableTrDimensions: function() { + var table, tr, trChild, trStyle; + if ( reliableTrDimensionsVal == null ) { + table = document.createElement( "table" ); + tr = document.createElement( "tr" ); + trChild = document.createElement( "div" ); + + table.style.cssText = "position:absolute;left:-11111px"; + tr.style.height = "1px"; + trChild.style.height = "9px"; + + documentElement + .appendChild( table ) + .appendChild( tr ) + .appendChild( trChild ); + + trStyle = window.getComputedStyle( tr ); + reliableTrDimensionsVal = parseInt( trStyle.height ) > 3; + + documentElement.removeChild( table ); + } + return reliableTrDimensionsVal; } } ); } )(); @@ -6454,7 +6687,7 @@ var fontWeight: "400" }; -function setPositiveNumber( elem, value, subtract ) { +function setPositiveNumber( _elem, value, subtract ) { // Any relative (+/-) values have already been // normalized at this point @@ -6559,17 +6792,26 @@ function getWidthOrHeight( elem, dimension, extra ) { } - // Fall back to offsetWidth/offsetHeight when value is "auto" - // This happens for inline elements with no explicit setting (gh-3571) - // Support: Android <=4.1 - 4.3 only - // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) - // Support: IE 9-11 only - // Also use offsetWidth/offsetHeight for when box sizing is unreliable - // We use getClientRects() to check for hidden/disconnected. - // In those cases, the computed value can be trusted to be border-box + // Support: IE 9 - 11 only + // Use offsetWidth/offsetHeight for when box sizing is unreliable. + // In those cases, the computed value can be trusted to be border-box. if ( ( !support.boxSizingReliable() && isBorderBox || + + // Support: IE 10 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Interestingly, in some cases IE 9 doesn't suffer from this issue. + !support.reliableTrDimensions() && nodeName( elem, "tr" ) || + + // Fall back to offsetWidth/offsetHeight when value is "auto" + // This happens for inline elements with no explicit setting (gh-3571) val === "auto" || + + // Support: Android <=4.1 - 4.3 only + // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && + + // Make sure the element is visible & connected elem.getClientRects().length ) { isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; @@ -6764,7 +7006,7 @@ jQuery.extend( { } } ); -jQuery.each( [ "height", "width" ], function( i, dimension ) { +jQuery.each( [ "height", "width" ], function( _i, dimension ) { jQuery.cssHooks[ dimension ] = { get: function( elem, computed, extra ) { if ( computed ) { @@ -7537,7 +7779,7 @@ jQuery.fn.extend( { clearQueue = type; type = undefined; } - if ( clearQueue && type !== false ) { + if ( clearQueue ) { this.queue( type || "fx", [] ); } @@ -7620,7 +7862,7 @@ jQuery.fn.extend( { } } ); -jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { +jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) { var cssFn = jQuery.fn[ name ]; jQuery.fn[ name ] = function( speed, easing, callback ) { return speed == null || typeof speed === "boolean" ? 
@@ -7841,7 +8083,7 @@ boolHook = { } }; -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { +jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) { var getter = attrHandle[ name ] || jQuery.find.attr; attrHandle[ name ] = function( elem, name, isXML ) { @@ -8465,7 +8707,9 @@ jQuery.extend( jQuery.event, { special.bindType || type; // jQuery handler - handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && + handle = ( + dataPriv.get( cur, "events" ) || Object.create( null ) + )[ event.type ] && dataPriv.get( cur, "handle" ); if ( handle ) { handle.apply( cur, data ); @@ -8576,7 +8820,10 @@ if ( !support.focusin ) { jQuery.event.special[ fix ] = { setup: function() { - var doc = this.ownerDocument || this, + + // Handle: regular nodes (via `this.ownerDocument`), window + // (via `this.document`) & document (via `this`). + var doc = this.ownerDocument || this.document || this, attaches = dataPriv.access( doc, fix ); if ( !attaches ) { @@ -8585,7 +8832,7 @@ if ( !support.focusin ) { dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); }, teardown: function() { - var doc = this.ownerDocument || this, + var doc = this.ownerDocument || this.document || this, attaches = dataPriv.access( doc, fix ) - 1; if ( !attaches ) { @@ -8601,7 +8848,7 @@ if ( !support.focusin ) { } var location = window.location; -var nonce = Date.now(); +var nonce = { guid: Date.now() }; var rquery = ( /\?/ ); @@ -8733,7 +8980,7 @@ jQuery.fn.extend( { rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && ( this.checked || !rcheckableType.test( type ) ); } ) - .map( function( i, elem ) { + .map( function( _i, elem ) { var val = jQuery( this ).val(); if ( val == null ) { @@ -9346,7 +9593,8 @@ jQuery.extend( { // Add or update anti-cache param if needed if ( s.cache === false ) { cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; + uncached = ( rquery.test( cacheURL ) ? "&" : "?" 
) + "_=" + ( nonce.guid++ ) + + uncached; } // Put hash and anti-cache on the URL that will be requested (gh-1732) @@ -9479,6 +9727,11 @@ jQuery.extend( { response = ajaxHandleResponses( s, jqXHR, responses ); } + // Use a noop converter for missing script + if ( !isSuccess && jQuery.inArray( "script", s.dataTypes ) > -1 ) { + s.converters[ "text script" ] = function() {}; + } + // Convert no matter what (that way responseXXX fields are always set) response = ajaxConvert( s, response, jqXHR, isSuccess ); @@ -9569,7 +9822,7 @@ jQuery.extend( { } } ); -jQuery.each( [ "get", "post" ], function( i, method ) { +jQuery.each( [ "get", "post" ], function( _i, method ) { jQuery[ method ] = function( url, data, callback, type ) { // Shift arguments if data argument was omitted @@ -9590,8 +9843,17 @@ jQuery.each( [ "get", "post" ], function( i, method ) { }; } ); +jQuery.ajaxPrefilter( function( s ) { + var i; + for ( i in s.headers ) { + if ( i.toLowerCase() === "content-type" ) { + s.contentType = s.headers[ i ] || ""; + } + } +} ); + -jQuery._evalUrl = function( url, options ) { +jQuery._evalUrl = function( url, options, doc ) { return jQuery.ajax( { url: url, @@ -9609,7 +9871,7 @@ jQuery._evalUrl = function( url, options ) { "text script": function() {} }, dataFilter: function( response ) { - jQuery.globalEval( response, options ); + jQuery.globalEval( response, options, doc ); } } ); }; @@ -9931,7 +10193,7 @@ var oldCallbacks = [], jQuery.ajaxSetup( { jsonp: "callback", jsonpCallback: function() { - var callback = oldCallbacks.pop() || ( jQuery.expando + "_" + ( nonce++ ) ); + var callback = oldCallbacks.pop() || ( jQuery.expando + "_" + ( nonce.guid++ ) ); this[ callback ] = true; return callback; } @@ -10148,23 +10410,6 @@ jQuery.fn.load = function( url, params, callback ) { -// Attach a bunch of functions for handling common AJAX events -jQuery.each( [ - "ajaxStart", - "ajaxStop", - "ajaxComplete", - "ajaxError", - "ajaxSuccess", - "ajaxSend" -], function( i, type ) { - jQuery.fn[ type ] = function( fn ) { - return this.on( type, fn ); - }; -} ); - - - - jQuery.expr.pseudos.animated = function( elem ) { return jQuery.grep( jQuery.timers, function( fn ) { return elem === fn.elem; @@ -10221,6 +10466,12 @@ jQuery.offset = { options.using.call( elem, props ); } else { + if ( typeof props.top === "number" ) { + props.top += "px"; + } + if ( typeof props.left === "number" ) { + props.left += "px"; + } curElem.css( props ); } } @@ -10371,7 +10622,7 @@ jQuery.each( { scrollLeft: "pageXOffset", scrollTop: "pageYOffset" }, function( // Blink bug: https://bugs.chromium.org/p/chromium/issues/detail?id=589347 // getComputedStyle returns percent when specified for top/left/bottom/right; // rather than make the css module depend on the offset module, just check for it here -jQuery.each( [ "top", "left" ], function( i, prop ) { +jQuery.each( [ "top", "left" ], function( _i, prop ) { jQuery.cssHooks[ prop ] = addGetHookIf( support.pixelPosition, function( elem, computed ) { if ( computed ) { @@ -10434,25 +10685,19 @@ jQuery.each( { Height: "height", Width: "width" }, function( name, type ) { } ); -jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " + - "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + - "change select submit keydown keypress keyup contextmenu" ).split( " " ), - function( i, name ) { - - // Handle event binding - jQuery.fn[ name ] = function( data, fn ) { - return arguments.length > 0 ? 
- this.on( name, null, data, fn ) : - this.trigger( name ); +jQuery.each( [ + "ajaxStart", + "ajaxStop", + "ajaxComplete", + "ajaxError", + "ajaxSuccess", + "ajaxSend" +], function( _i, type ) { + jQuery.fn[ type ] = function( fn ) { + return this.on( type, fn ); }; } ); -jQuery.fn.extend( { - hover: function( fnOver, fnOut ) { - return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); - } -} ); - @@ -10474,9 +10719,33 @@ jQuery.fn.extend( { return arguments.length === 1 ? this.off( selector, "**" ) : this.off( types, selector || "**", fn ); + }, + + hover: function( fnOver, fnOut ) { + return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); } } ); +jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " + + "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + + "change select submit keydown keypress keyup contextmenu" ).split( " " ), + function( _i, name ) { + + // Handle event binding + jQuery.fn[ name ] = function( data, fn ) { + return arguments.length > 0 ? + this.on( name, null, data, fn ) : + this.trigger( name ); + }; + } ); + + + + +// Support: Android <=4.0 only +// Make sure we trim BOM and NBSP +var rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g; + // Bind a function to a context, optionally partially applying any // arguments. // jQuery.proxy is deprecated to promote standards (specifically Function#bind) @@ -10539,6 +10808,11 @@ jQuery.isNumeric = function( obj ) { !isNaN( obj - parseFloat( obj ) ); }; +jQuery.trim = function( text ) { + return text == null ? + "" : + ( text + "" ).replace( rtrim, "" ); +}; @@ -10587,7 +10861,7 @@ jQuery.noConflict = function( deep ) { // Expose jQuery and $ identifiers, even in AMD // (#7102#comment:10, https://github.com/jquery/jquery/pull/557) // and CommonJS for browser emulators (#13566) -if ( !noGlobal ) { +if ( typeof noGlobal === "undefined" ) { window.jQuery = window.$ = jQuery; } diff --git a/docs/_static/jquery.js b/docs/_static/jquery.js index a1c07fd8..b0614034 100644 --- a/docs/_static/jquery.js +++ b/docs/_static/jquery.js @@ -1,2 +1,2 @@ -/*! 
jQuery v3.4.1 | (c) JS Foundation and other contributors | jquery.org/license */
[minified bundle hunk omitted: docs/_static/jquery.js is the one-line minified build; the v3.4.1 line is replaced wholesale by the v3.5.1 line, mirroring the unminified jquery-3.5.1.js changes above and not human-reviewable]
+
+generate_depth_map(sample_idx, datum_idx, filename)[source]
+

Generates the depth map for a camera by projecting LiDAR information. +It also caches the depth map following the DGP folder structure, so it’s not recalculated later.

+
+
Parameters
+
    +
  • sample_idx (int) – Sample index

  • +
  • datum_idx (int) – Datum index

  • +
  • filename – Filename used for loading / saving

  • +
+
+
Returns
+

depth – Depth map for that datum in that sample

+
+
Return type
+

np.array [H, W]

+
+
+
+
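The projection-and-caching pattern this method describes can be sketched as follows. The transform and intrinsics names (T_cam_world, K) and the .npz cache layout are illustrative assumptions, not the DGP API:

```python
import os
import numpy as np

def project_lidar_to_depth(points_world, T_cam_world, K, image_shape):
    """Sketch: project LiDAR points (world frame) into a pinhole camera as a sparse depth map."""
    H, W = image_shape
    # World -> camera frame in homogeneous coordinates (T_cam_world: assumed 4x4 extrinsics)
    pts = np.hstack([points_world, np.ones((len(points_world), 1))])
    pts_cam = (T_cam_world @ pts.T).T[:, :3]
    pts_cam = pts_cam[pts_cam[:, 2] > 0]              # keep points in front of the camera
    # Pinhole projection to pixel coordinates (K: assumed 3x3 intrinsics)
    uvw = (K @ pts_cam.T).T
    u = (uvw[:, 0] / uvw[:, 2]).astype(int)
    v = (uvw[:, 1] / uvw[:, 2]).astype(int)
    z = pts_cam[:, 2]
    valid = (u >= 0) & (u < W) & (v >= 0) & (v < H)   # discard out-of-image projections
    depth = np.zeros((H, W), dtype=np.float32)
    depth[v[valid], u[valid]] = z[valid]
    return depth

def cached_depth_map(cache_path, *proj_args):
    # Mirror the caching behavior described above: reuse the map if it was already computed
    if os.path.exists(cache_path):
        return np.load(cache_path)['depth']
    depth = project_lidar_to_depth(*proj_args)
    np.savez_compressed(cache_path, depth=depth)
    return depth
```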
get_backward(key, sensor_idx)[source]
@@ -215,6 +237,26 @@

Return current timestep of a key from a sensor

+
+
+get_filename(sample_idx, datum_idx)[source]
+

Returns the filename for an index, following DGP structure

+
+
Parameters
+
    +
  • sample_idx (int) – Sample index

  • +
  • datum_idx (int) – Datum index

  • +
+
+
Returns
+

filename – Filename for the datum in that sample

+
+
Return type
+

str

+
+
+
+
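As a rough illustration of an index-to-filename mapping of this kind (the directory layout below is a hypothetical example, not the actual DGP schema):

```python
import os

def get_filename_sketch(root_dir, scene, camera, timestamp):
    # Hypothetical layout: <root>/<scene>/rgb/<camera>/<timestamp>.png
    return os.path.join(root_dir, scene, 'rgb', camera, '{}.png'.format(timestamp))
```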
get_forward(key, sensor_idx)[source]
diff --git a/docs/datasets/datasets.KITTIDataset.html index 8014fd31..772b2f50 100644 --- a/docs/datasets/datasets.KITTIDataset.html +++ b/docs/datasets/datasets.KITTIDataset.html @@ -185,7 +185,7 @@
diff --git a/docs/datasets/datasets.KITTIDataset.html b/docs/datasets/datasets.KITTIDataset.html index 8014fd31..772b2f50 100644 --- a/docs/datasets/datasets.KITTIDataset.html +++ b/docs/datasets/datasets.KITTIDataset.html @@ -185,7 +185,7 @@

KITTIDataset

-class packnet_sfm.datasets.kitti_dataset.KITTIDataset(root_dir, file_list, train=True, mode='mono', data_transform=None, depth_type=None, with_pose=False, back_context=0, forward_context=0, strides=1)[source]
+class packnet_sfm.datasets.kitti_dataset.KITTIDataset(root_dir, file_list, train=True, data_transform=None, depth_type=None, with_pose=False, back_context=0, forward_context=0, strides=1)[source]

Bases: torch.utils.data.dataset.Dataset

KITTI dataset class.

@@ -194,7 +194,6 @@
  • root_dir (str) – Path to the dataset

  • file_list (str) – Split file, with paths to the images to be used

  • train (bool) – True if the dataset will be used for training

  • -
  • mode (str) – Dataset mode (stereo or mono)

  • data_transform (Function) – Transformations applied to the sample

  • depth_type (str) – Which depth type to load

  • with_pose (bool) – True if returning ground-truth pose
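With the mode argument gone, constructing the dataset looks like the following sketch; the paths, split file, and depth type value are placeholders, not values taken from this patch:

```python
from packnet_sfm.datasets.kitti_dataset import KITTIDataset

# Placeholder paths and depth type, mirroring the parameters listed above
dataset = KITTIDataset(
    root_dir='/path/to/KITTI_raw',              # hypothetical dataset location
    file_list='/path/to/eigen_train_files.txt',  # hypothetical split file
    train=True,
    depth_type='velodyne',                       # hypothetical depth type name
    with_pose=False,
)
```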

  • diff --git a/docs/genindex.html b/docs/genindex.html index ef54c7ba..7557f7bd 100644 --- a/docs/genindex.html +++ b/docs/genindex.html @@ -1,6 +1,5 @@ - @@ -422,6 +421,8 @@

    F

    G

    • get_default_config() (in module packnet_sfm.utils.config) +
    • +
    • get_filename() (packnet_sfm.datasets.dgp_dataset.DGPDataset method)
    • get_forward() (packnet_sfm.datasets.dgp_dataset.DGPDataset method)
    • @@ -472,7 +475,7 @@

      I

    • imwrite() (in module scripts.infer)
    • -
    • infer() (in module scripts.infer) +
    • infer_and_save_depth() (in module scripts.infer)
    • init_weights() (packnet_sfm.networks.depth.PackNet01.PackNet01 method) @@ -584,6 +587,8 @@

      L

      M

        +
      • main() (in module scripts.infer) +
      • make_list() (in module packnet_sfm.utils.misc)
      • match_scales() (in module packnet_sfm.utils.image) @@ -706,10 +711,12 @@

        N

        +
      • stack_batch() (in module packnet_sfm.models.model_utils) +
      • stack_sample() (in module packnet_sfm.datasets.dgp_dataset)
      • supervised_loss() (packnet_sfm.models.SemiSupModel.SemiSupModel method) @@ -1309,15 +1286,17 @@

        T

      • to_tensor() (in module packnet_sfm.datasets.augmentations)
      • -
        • train() (in module scripts.train)
        • train_dataloader() (packnet_sfm.models.model_wrapper.ModelWrapper method)
        • train_progress_bar() (packnet_sfm.trainers.base_trainer.BaseTrainer method) +
        • +
        • train_requirements() (packnet_sfm.models.SfmModel.SfmModel property)
        • train_transforms() (in module packnet_sfm.datasets.transforms)
        • diff --git a/docs/losses/losses.multiview_photometric_loss.html b/docs/losses/losses.multiview_photometric_loss.html index 1388a610..27e1a7f1 100644 --- a/docs/losses/losses.multiview_photometric_loss.html +++ b/docs/losses/losses.multiview_photometric_loss.html @@ -336,7 +336,7 @@
          packnet_sfm.losses.multiview_photometric_loss.SSIM(x, y, C1=0.0001, C2=0.0009, kernel_size=3, stride=1)[source]
          -

          Structural SIMlilarity (SSIM) distance between two images.

          +

          Structural SIMilarity (SSIM) distance between two images.

          Parameters
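A minimal sketch of an SSIM distance with the documented defaults (C1=0.0001, C2=0.0009, kernel_size=3, stride=1), using average pooling for the local statistics; this is an illustration of the standard formula, not the repository implementation:

```python
import torch.nn as nn

def ssim_sketch(x, y, C1=1e-4, C2=9e-4, kernel_size=3, stride=1):
    """Per-pixel SSIM map between two image batches [B,3,H,W]."""
    pool = nn.AvgPool2d(kernel_size, stride)
    mu_x, mu_y = pool(x), pool(y)
    # Local variances and covariance from pooled second moments
    sigma_x = pool(x ** 2) - mu_x ** 2
    sigma_y = pool(y ** 2) - mu_y ** 2
    sigma_xy = pool(x * y) - mu_x * mu_y
    num = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2)
    den = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2)
    return num / den
```

A photometric loss typically converts this map into a distance with torch.clamp((1 - ssim) / 2, 0, 1).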
            diff --git a/docs/models/models.SelfSupModel.html b/docs/models/models.SelfSupModel.html index b9137645..31fdb554 100644 --- a/docs/models/models.SelfSupModel.html +++ b/docs/models/models.SelfSupModel.html @@ -180,17 +180,13 @@

            SelfSupModel

            -class packnet_sfm.models.SelfSupModel.SelfSupModel(depth_net=None, pose_net=None, **kwargs)[source]
            +class packnet_sfm.models.SelfSupModel.SelfSupModel(**kwargs)[source]

            Bases: packnet_sfm.models.SfmModel.SfmModel

            Model that inherits a depth and pose network from SfmModel and includes the photometric loss for self-supervised training.

            Parameters
            -
              -
            • depth_net (nn.Module) – Depth network to be used

            • -
            • pose_net (nn.Module) – Pose network to be used

            • -
            • kwargs (dict) – Extra parameters

            • -
            +

            kwargs (dict) – Extra parameters

            @@ -221,26 +217,6 @@

            Return logs.

            -
            -
            -property requires_depth_net
            -
            - -
            -
            -property requires_gt_depth
            -
            - -
            -
            -property requires_gt_pose
            -
            - -
            -
            -property requires_pose_net
            -
            -
            self_supervised_loss(image, ref_images, inv_depths, poses, intrinsics, return_logs=False, progress=0.0)[source]
            diff --git a/docs/models/models.SemiSupModel.html b/docs/models/models.SemiSupModel.html index c66abfe0..82b3e66f 100644 --- a/docs/models/models.SemiSupModel.html +++ b/docs/models/models.SemiSupModel.html @@ -220,26 +220,6 @@

            Return logs.

            -
            -
            -property requires_depth_net
            -
            - -
            -
            -property requires_gt_depth
            -
            - -
            -
            -property requires_gt_pose
            -
            - -
            -
            -property requires_pose_net
            -
            -
            supervised_loss(inv_depths, gt_inv_depths, return_logs=False, progress=0.0)[source]
            diff --git a/docs/models/models.SfmModel.html b/docs/models/models.SfmModel.html index 57a7ad81..b5645b0a 100644 --- a/docs/models/models.SfmModel.html +++ b/docs/models/models.SfmModel.html @@ -190,7 +190,7 @@
          • pose_net (nn.Module) – Pose network to be used

          • rotation_mode (str) – Rotation mode for the pose network

          • flip_lr_prob (float) – Probability of flipping when using the depth network

          • -
          • upsample_depth_maps (bool) – True if detph map scales are upsampled to highest resolution

          • +
          • upsample_depth_maps (bool) – True if depth map scales are upsampled to highest resolution

          • kwargs (dict) – Extra parameters

          @@ -198,7 +198,8 @@
          add_depth_net(depth_net)[source]
          -
          +

          Add a depth network to the model

          +
          @@ -209,7 +210,8 @@
          add_pose_net(pose_net)[source]
          -
          +

          Add a pose network to the model

          +
          @@ -256,24 +258,46 @@
          -
          -property requires_depth_net
          -
          - -
          -
          -property requires_gt_depth
          -
          - -
          -
          -property requires_gt_pose
          -
          +
          +property network_requirements
          +

          Networks required to run the model

          +
          +
          Returns
          +

          requirements –

          +
          +
depth_net (bool)

          Whether a depth network is required by the model

          +
          +
pose_net (bool)

Whether a pose network is required by the model

          +
          +
          +

          +
          +
          Return type
          +

          dict

          +
          +
          +
          -
          -property requires_pose_net
          -
          +
          +property train_requirements
          +

Information required by the model at training time

          +
          +
          Returns
          +

          requirements –

          +
          +
gt_depth (bool)

          Whether ground truth depth is required by the model at training time

          +
          +
gt_pose (bool)

          Whether ground truth pose is required by the model at training time

          +
          +
          +

          +
          +
          Return type
          +

          dict

          +
          +
          +
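Taken together, the two properties amount to small dictionaries of flags; a sketch of the shape they document (not the repository source):

```python
class RequirementsSketch:
    """Sketch of the two requirement dictionaries documented above."""

    @property
    def network_requirements(self):
        # Which networks the model needs instantiated
        return {'depth_net': True, 'pose_net': True}

    @property
    def train_requirements(self):
        # Which ground-truth signals the datasets must provide at training time
        return {'gt_depth': False, 'gt_pose': False}
```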
          diff --git a/docs/models/models.Utilities.html b/docs/models/models.Utilities.html index a4706a7d..90887600 100644 --- a/docs/models/models.Utilities.html +++ b/docs/models/models.Utilities.html @@ -196,6 +196,23 @@ +
          +
          +packnet_sfm.models.model_utils.stack_batch(batch)[source]
          +

          Stack multi-camera batches (B,N,C,H,W becomes BN,C,H,W)

          +
          +
          Parameters
          +

          batch (dict) – Batch

          +
          +
          Returns
          +

          batch – Stacked batch

          +
          +
          Return type
          +

          dict

          +
          +
          +
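A minimal sketch of the reshape this documents, folding the camera dimension N into the batch dimension for every 5-D tensor in the batch (non-tensor entries are assumed to pass through unchanged):

```python
import torch

def stack_batch_sketch(batch):
    # Fold the camera dimension into the batch: [B,N,C,H,W] -> [B*N,C,H,W]
    return {key: val.reshape(-1, *val.shape[2:])
            if torch.is_tensor(val) and val.dim() == 5 else val
            for key, val in batch.items()}
```

This lets a multi-camera batch run through networks that expect a single batch dimension.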
          + diff --git a/docs/models/models.Wrapper.html b/docs/models/models.Wrapper.html index 34663748..5a127a0f 100644 --- a/docs/models/models.Wrapper.html +++ b/docs/models/models.Wrapper.html @@ -239,7 +239,7 @@
          -prepare_datasets()[source]
          +prepare_datasets(validation_requirements, test_requirements)[source]

          Prepare datasets for training, validation and test.

          @@ -254,6 +254,12 @@ print_metrics(**kwargs)
          +
          +
          +property progress
          +

          Returns training progress (current epoch / max. number of epochs)

          +
          +
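This is the scalar that feeds the progress=0.0 arguments in the loss signatures above; a sketch assuming hypothetical current_epoch and max_epochs attributes:

```python
class ProgressSketch:
    def __init__(self, current_epoch, max_epochs):
        self.current_epoch, self.max_epochs = current_epoch, max_epochs

    @property
    def progress(self):
        # Fraction of training completed: current epoch / max. number of epochs
        return self.current_epoch / self.max_epochs
```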
          test_dataloader()[source]
          @@ -344,14 +350,14 @@
          -packnet_sfm.models.model_wrapper.setup_dataset(config, mode, requires_gt_depth=True, **kwargs)[source]
          +packnet_sfm.models.model_wrapper.setup_dataset(config, mode, requirements, **kwargs)[source]

          Create a dataset class

          Parameters
          • config (CfgNode) – Configuration (cf. configs/default_config.py)

          • mode (str {'train', 'validation', 'test'}) – Mode from which we want the dataset

          • -
          • requires_gt_depth (bool) – True if the model requires ground-truth depth maps at training time

          • +
  • requirements (dict (string -> bool)) – Different requirements for dataset loading (gt_depth, gt_pose, etc.; see the sketch after this list)

          • kwargs (dict) – Extra parameters for dataset creation
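A sketch of the kind of requirements dictionary this signature expects, with keys following the train_requirements property documented earlier; the helper and the 'lidar' value are illustrative:

```python
# Illustrative requirements dict for setup_dataset
requirements = {'gt_depth': True, 'gt_pose': False}

def apply_requirements(dataset_kwargs, requirements):
    # Hypothetical helper: only request ground-truth signals the model actually needs
    if requirements.get('gt_depth'):
        dataset_kwargs.setdefault('depth_type', 'lidar')
    if requirements.get('gt_pose'):
        dataset_kwargs['with_pose'] = True
    return dataset_kwargs
```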

diff --git a/docs/objects.inv index de4315f4fb6c4f15228890705f91741795937958..5ecd625cedf213f8799a05a5bba79cc573e1819b 100644
GIT binary patch (binary delta elided)
diff --git a/docs/scripts/scripts.eval.html index 31fccea4..2e3e556f 100644 --- a/docs/scripts/scripts.eval.html +++ b/docs/scripts/scripts.eval.html @@ -183,7 +183,7 @@
          -scripts.eval.test(ckpt_file, cfg_file)[source]
          +scripts.eval.test(ckpt_file, cfg_file, half)[source]

          Monocular depth estimation test script.

          Parameters
          diff --git a/docs/scripts/scripts.infer.html b/docs/scripts/scripts.infer.html index d15c06a3..eceb0546 100644 --- a/docs/scripts/scripts.infer.html +++ b/docs/scripts/scripts.infer.html @@ -206,16 +206,18 @@
          -
          -scripts.infer.infer(ckpt_file, input_file, output_file, image_shape)[source]
          -

          Monocular depth estimation test script.

          +
          +scripts.infer.infer_and_save_depth(*args, **kwargs)[source]
          +

Process a single input file to produce and save a visualization

          Parameters
            -
          • ckpt_file (str) – Checkpoint path for a pretrained model

          • -
          • input_file (str) – File or folder with input images

          • -
          • output_file (str) – File or folder with output images

          • -
          • image_shape (tuple) – Input image shape (H,W)

          • +
          • input_file (str) – Image file

          • +
          • output_file (str) – Output file, or folder where the output will be saved

          • +
          • model_wrapper (nn.Module) – Model wrapper used for inference

          • +
  • image_shape (tuple) – Input image shape (H,W)

          • +
  • half (bool) – Use half precision (fp16); see the sketch after this list

          • +
  • save_npz (bool) – Save .npz output depth maps if True, else save as png
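Since the half flag drives fp16 inference, here is a minimal sketch of how such a flag is typically honored; the model and tensor names are assumptions, not the script's actual code:

```python
import torch

@torch.no_grad()
def infer_depth_sketch(model, image, half=False):
    # Cast network and input to fp16 when requested
    dtype = torch.float16 if half else torch.float32
    model = model.to(device='cuda', dtype=dtype).eval()
    image = image.to(device='cuda', dtype=dtype)
    inv_depth = model(image)                    # network predicts inverse depth
    depth = 1.0 / inv_depth.clamp(min=1e-6)     # invert, guarding against zeros
    return depth.float()                        # back to fp32 for saving / visualization
```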

          @@ -228,26 +230,14 @@
          -
          -scripts.infer.parse_args()[source]
          -

          Parse arguments for training script

          -
          +
          +scripts.infer.main(args)[source]
          +
          -
          -scripts.infer.process(input_file, output_file, model_wrapper, image_shape)[source]
          -

          Process a single input file to produce and save visualization

          -
          -
          Parameters
          -
            -
          • input_file (str) – Image file

          • -
          • output_file (str) – Output file, or folder where the output will be saved

          • -
          • model_wrapper (nn.Module) – Model wrapper used for inference

          • -
          • image_shape (Image shape) – Input image shape

          • -
          -
          -
          -
          +
          +scripts.infer.parse_args()[source]
          +
diff --git a/docs/searchindex.js index 318becdb..759b0d80 100644 --- a/docs/searchindex.js +++ b/docs/searchindex.js @@ -1 +1 @@
(regenerated single-line Sphinx search index elided)
],url:[26,66],usag:[33,34],use:[0,2,11,19,24,37,40,55,65,66],use_gt_scal:62,use_refl:45,use_skip:44,used:[0,2,11,13,24,30,33,35,40,55,61,62,64],uses:[0,24,26],using:[0,21,24,35,55,64],util:[12,13,24,31,61,62,63,64,65,66,67,68,69,70],val:[3,7,11,28,35],val_dataload:37,val_loss:32,val_progress_bar:58,valid:[2,5,6,7,8,16,37],validation_epoch_end:37,validation_step:37,validation_transform:16,valu:[2,14,15,40,42,61,62,68],var_list:67,vari:48,variabl:[0,24,67],varianc:2,variat:40,variou:[36,37,42,69],vec:[21,22],vector:[14,21],vel_accuraci:14,velmod:14,veloc:[0,24],velodyn:[4,6,8],verifi:[0,24],version:[0,2,3,4,5,6,7,8,15,19,24,26,39,40,51],via:[0,24],video:[0,24],view:29,view_synthesi:20,vision:[0,24,48],visual:[0,2,24,29,30,55,62],vitor:[0,24],viz:[2,3,4],viz_inv_depth:62,wai:[0,24,69],wandb:[0,2,24,26],wandb_api_kei:[0,24],wandb_ent:[0,2,24],wandb_project:[0,2,24],wandblogg:[24,25],want:[0,16,24,37],warp:[20,29],warp_ref_imag:29,weak:[0,24],websit:[0,24],weight:[0,2,24,29,34,40,61,65],weight_decai:2,well:[0,24],were:[0,24],when:[0,24,35,65],where:[0,2,24,26,28,39,40,51,55,66],which:[0,2,11,13,16,24,30,37,65],whose:14,width:[16,64],wise:[19,29],with_pos:[11,13],with_semant:11,within:[0,19,24,30,44,45,46,48,50],work:[0,24],worker:[2,37],worker_id:37,worker_init_fn:37,world:19,world_siz:[58,59,63],would:[0,24],wrap:67,wrapper:[24,31,55],x_scale:[19,20],xml:55,xxx_transform:16,y_scale:[19,20],yac:[56,61],yaml:[0,24,55,56,61],yaw:14,year:[0,24],you:[0,24,42,65],your:[0,24],zero:[2,20,29,62],zhou:[0,24]},titles:["PackNet-SfM: 3D Packing for Self-Supervised Monocular Depth Estimation","Configs","default_config","eval_ddad","eval_kitti","overfit_ddad","overfit_kitti","train_ddad","train_kitti","KITTIDataset","Datasets","DGPDataset","ImageDataset","KITTIDataset","kitti_dataset_utils","Augmentations","Transforms","Camera","Geometry","Camera","Camera_utils","Pose","Pose_utils","Pose","PackNet-SfM: 3D Packing for Self-Supervised Monocular Depth Estimation","Loggers","WandbLogger","Losses","LossBase","MultiViewPhotometricLoss","SupervisedLoss","Models","Checkpoint","SelfSupModel","SemiSupModel","SfmModel","Utilities","Wrapper","Depth Networks","DepthResNet","PackNet01","Network Layers","layers01","PackNet","depth_decoder","layers","pose_decoder","ResNet","resnet_encoder","Networks","PoseNet","PoseResNet","Pose 
Networks","Scripts","Evaluation","Inference","Training","Trainers","BaseTrainer","HorovodTrainer","Utils","Config","Depth","Horovod","Image","Loading","Logging","Misc.","Reduce","Saving","Types"],titleterms:{augment:15,autonom:[0,24],basetrain:58,camera:[17,19],camera_util:20,checkpoint:32,config:[1,61],dataset:[0,10,24],ddad:[0,24],default_config:2,dens:[0,24],depth:[0,24,38,62],depth_decod:44,depthresnet:39,dgpdataset:11,drive:[0,24],estim:[0,24],eval_ddad:3,eval_kitti:4,evalu:[0,24,54],geometri:18,horovod:63,horovodtrain:59,imag:64,imagedataset:12,infer:55,instal:[0,24],kitti:[0,24],kitti_dataset_util:14,kittidataset:[9,13],layer:[41,45],layers01:42,licens:[0,24],load:65,log:66,logger:25,loss:27,lossbas:28,map:[0,24],misc:67,model:[0,24,31],monocular:[0,24],multiviewphotometricloss:29,network:[38,41,49,52],overfit_ddad:5,overfit_kitti:6,pack:[0,24],packnet01:40,packnet:[0,24,43],pose:[21,23,52],pose_decod:46,pose_util:22,posenet:50,poseresnet:51,precomput:[0,24],reduc:68,refer:[0,24],resnet:47,resnet_encod:48,save:69,script:53,self:[0,24],selfsupmodel:33,semisupmodel:34,sfm:[0,24],sfmmodel:35,supervis:[0,24],supervisedloss:30,tini:[0,24],train:[0,24,56],train_ddad:7,train_kitti:8,trainer:57,transform:16,type:70,util:[36,60],wandblogg:26,wrapper:37}}) \ No newline at end of file +Search.setIndex({docnames:["README","configs/configs","configs/configs.default_config","configs/configs.eval_ddad","configs/configs.eval_kitti","configs/configs.overfit_ddad","configs/configs.overfit_kitti","configs/configs.train_ddad","configs/configs.train_kitti","datasets/KITTIDataset","datasets/datasets","datasets/datasets.DGPDataset","datasets/datasets.ImageDataset","datasets/datasets.KITTIDataset","datasets/datasets.KITTIDataset_utils","datasets/datasets.augmentations","datasets/datasets.transforms","geometry/camera","geometry/geometry","geometry/geometry.camera.camera","geometry/geometry.camera.camera_utils","geometry/geometry.pose.pose","geometry/geometry.pose.pose_utils","geometry/pose","index","loggers/loggers","loggers/loggers.WandbLogger","losses/losses","losses/losses.loss_base","losses/losses.multiview_photometric_loss","losses/losses.supervised_loss","models/models","models/models.Checkpoint","models/models.SelfSupModel","models/models.SemiSupModel","models/models.SfmModel","models/models.Utilities","models/models.Wrapper","networks/depth/depth","networks/depth/networks.depth.DepthResNet","networks/depth/networks.depth.PackNet01","networks/layers/layers","networks/layers/packnet/layers01","networks/layers/packnet/packnet","networks/layers/resnet/depth_decoder","networks/layers/resnet/layers","networks/layers/resnet/pose_decoder","networks/layers/resnet/resnet","networks/layers/resnet/resnet_encoder","networks/networks","networks/pose/networks.pose.PoseNet","networks/pose/networks.pose.PoseResNet","networks/pose/pose","scripts/scripts","scripts/scripts.eval","scripts/scripts.infer","scripts/scripts.train","trainers/trainers","trainers/trainers.BaseTrainer","trainers/trainers.HorovodTrainer","utils/utils","utils/utils.config","utils/utils.depth","utils/utils.horovod","utils/utils.image","utils/utils.load","utils/utils.logging","utils/utils.misc","utils/utils.reduce","utils/utils.save","utils/utils.types"],envversion:{"sphinx.domains.c":2,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":3,"sphinx.domains.index":1,"sphinx.domains.javascript":2,"sphinx.domains.math":2,"sphinx.domains.python":2,"sphinx.domains.rst":2,"sphinx.domains.std":1,"sphinx.ext.viewcode":1,sphinx:56},f
ilenames:["README.rst","configs/configs.rst","configs/configs.default_config.rst","configs/configs.eval_ddad.rst","configs/configs.eval_kitti.rst","configs/configs.overfit_ddad.rst","configs/configs.overfit_kitti.rst","configs/configs.train_ddad.rst","configs/configs.train_kitti.rst","datasets/KITTIDataset.rst","datasets/datasets.rst","datasets/datasets.DGPDataset.rst","datasets/datasets.ImageDataset.rst","datasets/datasets.KITTIDataset.rst","datasets/datasets.KITTIDataset_utils.rst","datasets/datasets.augmentations.rst","datasets/datasets.transforms.rst","geometry/camera.rst","geometry/geometry.rst","geometry/geometry.camera.camera.rst","geometry/geometry.camera.camera_utils.rst","geometry/geometry.pose.pose.rst","geometry/geometry.pose.pose_utils.rst","geometry/pose.rst","index.rst","loggers/loggers.rst","loggers/loggers.WandbLogger.rst","losses/losses.rst","losses/losses.loss_base.rst","losses/losses.multiview_photometric_loss.rst","losses/losses.supervised_loss.rst","models/models.rst","models/models.Checkpoint.rst","models/models.SelfSupModel.rst","models/models.SemiSupModel.rst","models/models.SfmModel.rst","models/models.Utilities.rst","models/models.Wrapper.rst","networks/depth/depth.rst","networks/depth/networks.depth.DepthResNet.rst","networks/depth/networks.depth.PackNet01.rst","networks/layers/layers.rst","networks/layers/packnet/layers01.rst","networks/layers/packnet/packnet.rst","networks/layers/resnet/depth_decoder.rst","networks/layers/resnet/layers.rst","networks/layers/resnet/pose_decoder.rst","networks/layers/resnet/resnet.rst","networks/layers/resnet/resnet_encoder.rst","networks/networks.rst","networks/pose/networks.pose.PoseNet.rst","networks/pose/networks.pose.PoseResNet.rst","networks/pose/pose.rst","scripts/scripts.rst","scripts/scripts.eval.rst","scripts/scripts.infer.rst","scripts/scripts.train.rst","trainers/trainers.rst","trainers/trainers.BaseTrainer.rst","trainers/trainers.HorovodTrainer.rst","utils/utils.rst","utils/utils.config.rst","utils/utils.depth.rst","utils/utils.horovod.rst","utils/utils.image.rst","utils/utils.load.rst","utils/utils.logging.rst","utils/utils.misc.rst","utils/utils.reduce.rst","utils/utils.save.rst","utils/utils.types.rst"],objects:{"packnet_sfm.datasets":{augmentations:[15,0,0,"-"],dgp_dataset:[11,0,0,"-"],image_dataset:[12,0,0,"-"],kitti_dataset:[13,0,0,"-"],kitti_dataset_utils:[14,0,0,"-"],transforms:[16,0,0,"-"]},"packnet_sfm.datasets.augmentations":{colorjitter_sample:[15,1,1,""],duplicate_sample:[15,1,1,""],resize_depth:[15,1,1,""],resize_image:[15,1,1,""],resize_sample:[15,1,1,""],resize_sample_image_and_intrinsics:[15,1,1,""],to_tensor:[15,1,1,""],to_tensor_sample:[15,1,1,""]},"packnet_sfm.datasets.dgp_dataset":{DGPDataset:[11,2,1,""],stack_sample:[11,1,1,""]},"packnet_sfm.datasets.dgp_dataset.DGPDataset":{generate_depth_map:[11,3,1,""],get_backward:[11,3,1,""],get_context:[11,3,1,""],get_current:[11,3,1,""],get_filename:[11,3,1,""],get_forward:[11,3,1,""]},"packnet_sfm.datasets.image_dataset":{ImageDataset:[12,2,1,""],dummy_calibration:[12,1,1,""],get_idx:[12,1,1,""],read_files:[12,1,1,""]},"packnet_sfm.datasets.kitti_dataset":{KITTIDataset:[13,2,1,""],read_npz_depth:[13,1,1,""],read_png_depth:[13,1,1,""]},"packnet_sfm.datasets.kitti_dataset_utils":{OxtsData:[14,2,1,""],OxtsPacket:[14,2,1,""],load_oxts_packets_and_poses:[14,1,1,""],pose_from_oxts_packet:[14,1,1,""],read_calib_file:[14,1,1,""],rotx:[14,1,1,""],roty:[14,1,1,""],rotz:[14,1,1,""],transform_from_rot_trans:[14,1,1,""]},"packnet_sfm.datasets.kitti_dataset_utils.Oxts
Data":{T_w_imu:[14,3,1,""],packet:[14,3,1,""]},"packnet_sfm.datasets.kitti_dataset_utils.OxtsPacket":{af:[14,3,1,""],al:[14,3,1,""],alt:[14,3,1,""],au:[14,3,1,""],ax:[14,3,1,""],ay:[14,3,1,""],az:[14,3,1,""],lat:[14,3,1,""],lon:[14,3,1,""],navstat:[14,3,1,""],numsats:[14,3,1,""],orimode:[14,3,1,""],pitch:[14,3,1,""],pos_accuracy:[14,3,1,""],posmode:[14,3,1,""],roll:[14,3,1,""],ve:[14,3,1,""],vel_accuracy:[14,3,1,""],velmode:[14,3,1,""],vf:[14,3,1,""],vl:[14,3,1,""],vn:[14,3,1,""],vu:[14,3,1,""],wf:[14,3,1,""],wl:[14,3,1,""],wu:[14,3,1,""],wx:[14,3,1,""],wy:[14,3,1,""],wz:[14,3,1,""],yaw:[14,3,1,""]},"packnet_sfm.datasets.transforms":{get_transforms:[16,1,1,""],test_transforms:[16,1,1,""],train_transforms:[16,1,1,""],validation_transforms:[16,1,1,""]},"packnet_sfm.geometry":{camera:[19,0,0,"-"],camera_utils:[20,0,0,"-"],pose:[21,0,0,"-"],pose_utils:[22,0,0,"-"]},"packnet_sfm.geometry.camera":{Camera:[19,2,1,""]},"packnet_sfm.geometry.camera.Camera":{Kinv:[19,3,1,""],Twc:[19,3,1,""],cx:[19,3,1,""],cy:[19,3,1,""],fx:[19,3,1,""],fy:[19,3,1,""],project:[19,3,1,""],reconstruct:[19,3,1,""],scaled:[19,3,1,""],to:[19,3,1,""]},"packnet_sfm.geometry.camera_utils":{construct_K:[20,1,1,""],scale_intrinsics:[20,1,1,""],view_synthesis:[20,1,1,""]},"packnet_sfm.geometry.pose":{Pose:[21,2,1,""]},"packnet_sfm.geometry.pose.Pose":{from_vec:[21,3,1,""],identity:[21,3,1,""],inverse:[21,3,1,""],item:[21,3,1,""],repeat:[21,3,1,""],shape:[21,3,1,""],to:[21,3,1,""],transform_points:[21,3,1,""],transform_pose:[21,3,1,""]},"packnet_sfm.geometry.pose_utils":{euler2mat:[22,1,1,""],invert_pose:[22,1,1,""],invert_pose_numpy:[22,1,1,""],pose_vec2mat:[22,1,1,""]},"packnet_sfm.loggers":{WandbLogger:[26,0,0,"-"]},"packnet_sfm.loggers.WandbLogger":{experiment:[26,4,1,""],name:[26,4,1,""],run_name:[26,4,1,""],run_url:[26,4,1,""],version:[26,4,1,""]},"packnet_sfm.losses":{loss_base:[28,0,0,"-"],multiview_photometric_loss:[29,0,0,"-"],supervised_loss:[30,0,0,"-"]},"packnet_sfm.losses.loss_base":{LossBase:[28,2,1,""],ProgressiveScaling:[28,2,1,""]},"packnet_sfm.losses.loss_base.LossBase":{add_metric:[28,3,1,""],logs:[28,3,1,""],metrics:[28,3,1,""]},"packnet_sfm.losses.multiview_photometric_loss":{MultiViewPhotometricLoss:[29,2,1,""],SSIM:[29,1,1,""]},"packnet_sfm.losses.multiview_photometric_loss.MultiViewPhotometricLoss":{SSIM:[29,3,1,""],calc_photometric_loss:[29,3,1,""],calc_smoothness_loss:[29,3,1,""],forward:[29,3,1,""],logs:[29,3,1,""],reduce_photometric_loss:[29,3,1,""],warp_ref_image:[29,3,1,""]},"packnet_sfm.losses.supervised_loss":{BerHuLoss:[30,2,1,""],SilogLoss:[30,2,1,""],SupervisedLoss:[30,2,1,""],get_loss_func:[30,1,1,""]},"packnet_sfm.losses.supervised_loss.BerHuLoss":{forward:[30,3,1,""]},"packnet_sfm.losses.supervised_loss.SilogLoss":{forward:[30,3,1,""]},"packnet_sfm.losses.supervised_loss.SupervisedLoss":{calculate_loss:[30,3,1,""],forward:[30,3,1,""],logs:[30,3,1,""]},"packnet_sfm.models":{SelfSupModel:[33,0,0,"-"],SemiSupModel:[34,0,0,"-"],SfmModel:[35,0,0,"-"],model_checkpoint:[32,0,0,"-"],model_utils:[36,0,0,"-"],model_wrapper:[37,0,0,"-"]},"packnet_sfm.models.SelfSupModel":{SelfSupModel:[33,2,1,""]},"packnet_sfm.models.SelfSupModel.SelfSupModel":{forward:[33,3,1,""],logs:[33,3,1,""],self_supervised_loss:[33,3,1,""]},"packnet_sfm.models.SemiSupModel":{SemiSupModel:[34,2,1,""]},"packnet_sfm.models.SemiSupModel.SemiSupModel":{forward:[34,3,1,""],logs:[34,3,1,""],supervised_loss:[34,3,1,""]},"packnet_sfm.models.SfmModel":{SfmModel:[35,2,1,""]},"packnet_sfm.models.SfmModel.SfmModel":{add_depth_net:[35,3,1,""],
add_loss:[35,3,1,""],add_pose_net:[35,3,1,""],compute_inv_depths:[35,3,1,""],compute_poses:[35,3,1,""],forward:[35,3,1,""],logs:[35,3,1,""],losses:[35,3,1,""],network_requirements:[35,3,1,""],train_requirements:[35,3,1,""]},"packnet_sfm.models.model_checkpoint":{ModelCheckpoint:[32,2,1,""],save_code:[32,1,1,""],sync_s3_data:[32,1,1,""]},"packnet_sfm.models.model_checkpoint.ModelCheckpoint":{check_and_save:[32,3,1,""],check_monitor_top_k:[32,3,1,""],format_checkpoint_name:[32,3,1,""]},"packnet_sfm.models.model_utils":{merge_outputs:[36,1,1,""],stack_batch:[36,1,1,""]},"packnet_sfm.models.model_wrapper":{ModelWrapper:[37,2,1,""],get_datasampler:[37,1,1,""],set_random_seed:[37,1,1,""],setup_dataloader:[37,1,1,""],setup_dataset:[37,1,1,""],setup_depth_net:[37,1,1,""],setup_model:[37,1,1,""],setup_pose_net:[37,1,1,""],worker_init_fn:[37,1,1,""]},"packnet_sfm.models.model_wrapper.ModelWrapper":{configure_optimizers:[37,3,1,""],depth:[37,3,1,""],depth_net:[37,3,1,""],evaluate_depth:[37,3,1,""],forward:[37,3,1,""],logs:[37,3,1,""],pose:[37,3,1,""],pose_net:[37,3,1,""],prepare_datasets:[37,3,1,""],prepare_model:[37,3,1,""],print_metrics:[37,3,1,""],progress:[37,3,1,""],test_dataloader:[37,3,1,""],test_epoch_end:[37,3,1,""],test_step:[37,3,1,""],train_dataloader:[37,3,1,""],training_epoch_end:[37,3,1,""],training_step:[37,3,1,""],val_dataloader:[37,3,1,""],validation_epoch_end:[37,3,1,""],validation_step:[37,3,1,""]},"packnet_sfm.networks.depth":{DepthResNet:[39,0,0,"-"],PackNet01:[40,0,0,"-"]},"packnet_sfm.networks.depth.DepthResNet":{DepthResNet:[39,2,1,""]},"packnet_sfm.networks.depth.DepthResNet.DepthResNet":{forward:[39,3,1,""]},"packnet_sfm.networks.depth.PackNet01":{PackNet01:[40,2,1,""]},"packnet_sfm.networks.depth.PackNet01.PackNet01":{forward:[40,3,1,""],init_weights:[40,3,1,""]},"packnet_sfm.networks.layers.packnet":{layers01:[42,0,0,"-"]},"packnet_sfm.networks.layers.packnet.layers01":{Conv2D:[42,2,1,""],InvDepth:[42,2,1,""],PackLayerConv2d:[42,2,1,""],PackLayerConv3d:[42,2,1,""],ResidualBlock:[42,1,1,""],ResidualConv:[42,2,1,""],UnpackLayerConv2d:[42,2,1,""],UnpackLayerConv3d:[42,2,1,""],packing:[42,1,1,""]},"packnet_sfm.networks.layers.packnet.layers01.Conv2D":{forward:[42,3,1,""]},"packnet_sfm.networks.layers.packnet.layers01.InvDepth":{forward:[42,3,1,""]},"packnet_sfm.networks.layers.packnet.layers01.PackLayerConv2d":{forward:[42,3,1,""]},"packnet_sfm.networks.layers.packnet.layers01.PackLayerConv3d":{forward:[42,3,1,""]},"packnet_sfm.networks.layers.packnet.layers01.ResidualConv":{forward:[42,3,1,""]},"packnet_sfm.networks.layers.packnet.layers01.UnpackLayerConv2d":{forward:[42,3,1,""]},"packnet_sfm.networks.layers.packnet.layers01.UnpackLayerConv3d":{forward:[42,3,1,""]},"packnet_sfm.networks.layers.resnet":{depth_decoder:[44,0,0,"-"],layers:[45,0,0,"-"],pose_decoder:[46,0,0,"-"],resnet_encoder:[48,0,0,"-"]},"packnet_sfm.networks.layers.resnet.depth_decoder":{DepthDecoder:[44,2,1,""]},"packnet_sfm.networks.layers.resnet.depth_decoder.DepthDecoder":{forward:[44,3,1,""]},"packnet_sfm.networks.layers.resnet.layers":{Conv3x3:[45,2,1,""],ConvBlock:[45,2,1,""],disp_to_depth:[45,1,1,""],upsample:[45,1,1,""]},"packnet_sfm.networks.layers.resnet.layers.Conv3x3":{forward:[45,3,1,""]},"packnet_sfm.networks.layers.resnet.layers.ConvBlock":{forward:[45,3,1,""]},"packnet_sfm.networks.layers.resnet.pose_decoder":{PoseDecoder:[46,2,1,""]},"packnet_sfm.networks.layers.resnet.pose_decoder.PoseDecoder":{forward:[46,3,1,""]},"packnet_sfm.networks.layers.resnet.resnet_encoder":{ResNetMultiImageInput:[4
8,2,1,""],ResnetEncoder:[48,2,1,""],resnet_multiimage_input:[48,1,1,""]},"packnet_sfm.networks.layers.resnet.resnet_encoder.ResnetEncoder":{forward:[48,3,1,""]},"packnet_sfm.networks.pose":{PoseNet:[50,0,0,"-"],PoseResNet:[51,0,0,"-"]},"packnet_sfm.networks.pose.PoseNet":{PoseNet:[50,2,1,""],conv_gn:[50,1,1,""]},"packnet_sfm.networks.pose.PoseNet.PoseNet":{forward:[50,3,1,""],init_weights:[50,3,1,""]},"packnet_sfm.networks.pose.PoseResNet":{PoseResNet:[51,2,1,""]},"packnet_sfm.networks.pose.PoseResNet.PoseResNet":{forward:[51,3,1,""]},"packnet_sfm.trainers":{HorovodTrainer:[59,0,0,"-"],base_trainer:[58,0,0,"-"]},"packnet_sfm.trainers.HorovodTrainer":{is_rank_0:[59,4,1,""],proc_rank:[59,4,1,""],world_size:[59,4,1,""]},"packnet_sfm.trainers.base_trainer":{BaseTrainer:[58,2,1,""],sample_to_cuda:[58,1,1,""]},"packnet_sfm.trainers.base_trainer.BaseTrainer":{check_and_save:[58,3,1,""],is_rank_0:[58,3,1,""],proc_rank:[58,3,1,""],test_progress_bar:[58,3,1,""],train_progress_bar:[58,3,1,""],val_progress_bar:[58,3,1,""],world_size:[58,3,1,""]},"packnet_sfm.utils":{config:[61,0,0,"-"],depth:[62,0,0,"-"],horovod:[63,0,0,"-"],image:[64,0,0,"-"],load:[65,0,0,"-"],logging:[66,0,0,"-"],misc:[67,0,0,"-"],reduce:[68,0,0,"-"],save:[69,0,0,"-"],types:[70,0,0,"-"]},"packnet_sfm.utils.config":{backwards_config:[61,1,1,""],get_default_config:[61,1,1,""],merge_cfg_file:[61,1,1,""],merge_cfgs:[61,1,1,""],parse_test_config:[61,1,1,""],parse_test_file:[61,1,1,""],parse_train_config:[61,1,1,""],parse_train_file:[61,1,1,""],prep_dataset:[61,1,1,""],prepare_test_config:[61,1,1,""],prepare_train_config:[61,1,1,""],set_checkpoint:[61,1,1,""],set_name:[61,1,1,""]},"packnet_sfm.utils.depth":{calc_smoothness:[62,1,1,""],compute_depth_metrics:[62,1,1,""],depth2inv:[62,1,1,""],fuse_inv_depth:[62,1,1,""],inv2depth:[62,1,1,""],inv_depths_normalize:[62,1,1,""],post_process_inv_depth:[62,1,1,""],viz_inv_depth:[62,1,1,""]},"packnet_sfm.utils.horovod":{hvd_init:[63,1,1,""],on_rank_0:[63,1,1,""],print0:[63,1,1,""],rank:[63,1,1,""],world_size:[63,1,1,""]},"packnet_sfm.utils.image":{flip_lr:[64,1,1,""],flip_model:[64,1,1,""],gradient_x:[64,1,1,""],gradient_y:[64,1,1,""],image_grid:[64,1,1,""],interpolate_image:[64,1,1,""],interpolate_scales:[64,1,1,""],load_image:[64,1,1,""],match_scales:[64,1,1,""],meshgrid:[64,1,1,""]},"packnet_sfm.utils.load":{backwards_state_dict:[65,1,1,""],filter_args:[65,1,1,""],filter_args_create:[65,1,1,""],load_class:[65,1,1,""],load_class_args_create:[65,1,1,""],load_network:[65,1,1,""],set_debug:[65,1,1,""]},"packnet_sfm.utils.logging":{AvgMeter:[66,2,1,""],pcolor:[66,1,1,""],prepare_dataset_prefix:[66,1,1,""],s3_url:[66,1,1,""]},"packnet_sfm.utils.logging.AvgMeter":{get:[66,3,1,""],get_and_reset:[66,3,1,""],reset:[66,3,1,""]},"packnet_sfm.utils.misc":{filter_dict:[67,1,1,""],make_list:[67,1,1,""],same_shape:[67,1,1,""]},"packnet_sfm.utils.reduce":{all_reduce_metrics:[68,1,1,""],average_key:[68,1,1,""],average_loss_and_metrics:[68,1,1,""],average_sub_key:[68,1,1,""],collate_metrics:[68,1,1,""],create_dict:[68,1,1,""],reduce_dict:[68,1,1,""],reduce_value:[68,1,1,""]},"packnet_sfm.utils.save":{save_depth:[69,1,1,""]},"packnet_sfm.utils.types":{is_cfg:[70,1,1,""],is_dict:[70,1,1,""],is_int:[70,1,1,""],is_list:[70,1,1,""],is_numpy:[70,1,1,""],is_seq:[70,1,1,""],is_str:[70,1,1,""],is_tensor:[70,1,1,""],is_tuple:[70,1,1,""]},"scripts.eval":{parse_args:[54,1,1,""],test:[54,1,1,""]},"scripts.infer":{imwrite:[55,1,1,""],infer_and_save_depth:[55,1,1,""],is_image:[55,1,1,""],main:[55,1,1,""],parse_args:[55,1,1,""]},"s
cripts.train":{parse_args:[56,1,1,""],train:[56,1,1,""]},scripts:{eval:[54,0,0,"-"],infer:[55,0,0,"-"],train:[56,0,0,"-"]}},objnames:{"0":["py","module","Python module"],"1":["py","function","Python function"],"2":["py","class","Python class"],"3":["py","method","Python method"],"4":["py","attribute","Python attribute"]},objtypes:{"0":"py:module","1":"py:function","2":"py:class","3":"py:method","4":"py:attribute"},terms:{"18pt":[5,6,39,51],"192x640":[0,24],"384x1280":[0,24],"384x640":[0,24],"6gb":[0,24],"byte":55,"case":[0,24,55],"class":[11,12,13,14,19,20,21,26,28,29,30,32,33,34,35,37,39,40,42,44,45,46,48,50,51,58,65,66],"default":[0,24,61],"export":[0,24],"float":[14,15,19,28,29,30,34,35,40,42,55,62,68],"function":[11,13,16,19,30,37,44,45,46,48,50,55,65],"int":[11,13,15,28,29,30,42,48,50,62,64,66,67],"long":[0,24],"new":[0,21,24,26,28,35],"public":[0,24],"return":[11,13,14,15,16,19,20,21,26,28,29,30,33,34,35,36,37,39,40,42,48,50,51,61,62,64,65,66,67,68],"static":[0,2,24],"super":[0,24],"true":[0,2,3,4,12,13,24,29,30,33,34,35,37,44,45,48,55,62,64,65,67],"try":[0,24],"var":67,"while":[30,44,45,46,48,50],AWS:[0,2,24],Abs:[0,24],For:[0,24],GPS:14,Has:[39,40,51],The:[0,24,45,55],These:[0,24],_origin:15,abl:[0,24],about:14,abov:[0,24],abs:40,abs_rel:62,access:[0,24],account:[0,24],accur:[0,24],accuraci:[0,24],adam:[2,5,6,7,8],adapt:[0,24,48],add:[28,35,61],add_depth_net:35,add_loss:35,add_metr:28,add_pose_net:35,added:[39,51],addit:[40,45],adrien:[0,24],after:[28,64],afterward:[30,44,45,46,48,50],alia:14,align:64,align_corn:64,all:[0,24,29,30,36,44,45,46,48,50,68],all_reduce_metr:68,allan:[0,24],alpha:55,also:[0,11,24,55],alt:14,although:[0,24,30,44,45,46,48,50],amazonaw:[0,24],ambru:[0,24],angl:[14,22],ani:[0,24,67],anonym:26,anoth:[20,21,61],append:26,appli:[11,13,42],applic:[0,24],april:[0,24],arch:[2,5,6],architectur:[0,24,39,51],arg:[19,21,37,55,63,65,69],argument:[54,56,65,69],around:37,arrai:[11,14,15,22,62,70],arxiv:40,aspect:[0,24],associ:[0,24],assum:[0,24],attr:66,attribut:66,augment:[0,2,3,4,5,6,7,8,10,16,24],author:[0,24],auto:[2,32],autom:[0,24],automask:[2,29],automask_loss:[2,29],automat:[2,62],avail:61,averag:[30,66,68],average_kei:68,average_loss_and_metr:68,average_sub_kei:68,avgmet:66,awar:[0,24],aws_access_key_id:[0,24],aws_default_region:[0,24],aws_secret_access_kei:[0,24],axi:14,back_context:[2,11,12,13],background:66,backward:[2,11,13,61,65],backwards_config:61,backwards_state_dict:65,base:[11,12,13,14,19,21,28,29,30,32,33,34,35,37,39,40,42,44,45,46,48,50,51,55,58,61,66],base_train:58,basetrain:[24,57],batch:[2,21,33,34,35,36,37,64,68,69],batch_list:68,batch_siz:[0,2,5,6,7,8,24],beam:[0,24],becom:36,been:37,befor:[37,55],belong:68,below:[0,24,55],benchmark:[0,24],berhu:30,berhuloss:30,best:2,better:[0,24],between:[29,33,64],bgr:55,bgra:55,bias:[0,24],bigger:[0,24],bilinear:[20,64],bit:55,blob:48,block:[0,24,42,48,50],booktitl:[0,24],bool:[11,13,26,29,30,33,34,35,37,48,55,62,64,65,67],both:[11,42,67],boundari:19,brief:55,bright:[15,16],bucket:[32,66],build:[0,24],cach:11,calc_photometric_loss:29,calc_smooth:62,calc_smoothness_loss:29,calcul:[29,30,33,34,62,64],calculate_loss:30,calib:14,calibr:[0,14,24],call:[30,44,45,46,48,50],cam:20,camera:[0,2,3,5,7,11,18,20,24,29,33,36],camera_01:[3,5,7],camera_util:[17,18,24],can:[0,24,55,56,61,65],car:[0,24],care:[30,44,45,46,48,50],cast:15,certain:[13,28,55],cfg_default:61,cfg_file:[54,61],cfgnode:[37,61,62,66,69],chang:19,channel:[42,50,55],check:[55,67,70],check_and_sav:[32,58],check_monitor_top_k:32,checkpoint:[0,2,24,31,54,56,5
8,61],checkpoint_path:[0,2,24],choic:[0,24],chosen:55,citat:[0,24],cityscap:[0,24],ckpt:[0,24,56,61],ckpt_file:[54,61],classmethod:21,clip:[2,29],clip_loss:[2,29],clone:[0,24],cmaera:19,code:[0,24,32,55],collat:68,collate_metr:68,color:[2,15,16,66],colorjitter_sampl:15,colormap:62,com:[0,24,48],combin:29,command:[0,24],compar:[0,24],compat:[61,65],compound:21,comprehens:[0,24],compress:[0,24,55],comput:[0,14,24,30,35,44,45,46,48,50,62],compute_depth_metr:62,compute_inv_depth:35,compute_pos:35,concat:65,concaten:[40,42,65,66],conclus:[0,24],conda:[0,24],confer:[0,24],config:[0,24,37,58,60,62,66],configur:[0,24,37,54,56,61,66,69,70],configure_optim:37,consid:[13,29,62,64,65],consider:45,constraint:[0,24],construct:[20,48],construct_k:20,contact:24,contain:[0,24,29,33,34,35,36,64,65,66,67,68],content:24,context:[2,11,13,15,29,33,35,50],continu:[0,24],contrast:[15,16],control:[0,24,40],contxt:2,conv2d:[42,50],conv3x3:45,conv_gn:50,convblock:45,conveni:[0,24],convers:45,convert:[22,45,55,62],convertto:55,convolut:[0,24,29,40,42,45,50],convolv:[42,45],coordin:14,copi:15,corl:[0,24],corner:64,correctli:[0,24],correspond:[0,24,37],could:[0,24],cpp:55,creat:[0,21,24,26,37,55,64,65,68],create_dict:68,creation:37,crop:[2,3,4,5,6,7,8],curl:[0,24],current:[0,11,19,24,32,37,66],custom:55,cv_16u:55,cv_32f:55,cv_32fc3:55,cvpr:[0,24,40],cvtcolor:55,data:[0,2,3,4,5,6,7,8,12,13,14,15,16,24,37,58,70],data_split:[4,8],data_transform:[11,12,13],dataload:[37,58,69],dataset:[2,3,4,5,6,7,8,11,12,13,14,15,16,37,61,66,68,69],date:61,datum:11,datum_idx:11,ddad:[3,7],ddad_tini:[0,5,24],debug:[2,65],decai:[0,2,24],decod:[0,24],decompress:[0,24],decreas:[2,28],default_config:[0,1,24,37],defin:[0,24,30,44,45,46,48,50],degre:[0,24],demonstr:55,densifi:[0,24],depend:[0,24],dept:2,depth2inv:62,depth:[2,5,6,7,8,11,13,15,19,20,29,30,33,34,35,37,39,40,42,45,49,54,55,56,60,64,68,69],depth_decod:[24,41,47,49],depth_net:[0,2,3,4,5,6,7,8,24,35,37],depth_typ:[2,3,4,5,6,7,8,11,12,13],depthdecod:44,depthresnet:[5,6,24,38,49],design:37,detach:[28,35],detail:[0,24],determin:[2,30],devic:[19,20,21,64],dgp:[3,5,7,11],dgp_dataset:11,dgpdataset:[10,24],dict:[14,15,16,29,30,33,34,35,36,37,39,40,51,61,65,67,68,69],dictionari:[14,15,28,29,30,33,34,35,36,65,67,68,70],differ:[0,24,29,33,34,36,37,55,64,65,66],differenti:19,dimens:[16,64,67],dir:[0,2,24,26],direct:[2,62],directli:[0,24],directori:12,disabl:[29,30,65],disp:45,disp_norm:[2,29],disp_to_depth:45,distanc:[0,24,29],distribut:[0,24,37],divers:[0,24],doc:[0,24],docker2:[0,24],docker:[0,24],dockerfil:[0,24],doesn:26,done:[0,24],download:[0,24],downstream:[33,34],driver:[0,24],dropout:[2,40,42],dry:[0,2,24],dry_run:[0,2,24],dtype:[20,21,58,64],dummy_calibr:12,duplic:15,duplicate_sampl:15,dure:[0,2,24,62,65],dynam:55,each:[16,37,51,65],east:14,effect:[0,24],ego:[0,24],eigen:[0,24],eigen_test:[0,24],eigen_test_fil:[0,4,8,24],eigen_train:[0,24],eigen_train_fil:[0,24],eigen_v:[0,24],eigen_val_fil:[0,8,24],eigen_zh:[0,24],eigen_zhou_fil:[0,8,24],either:[0,24,56,61],els:55,elu:[42,45],empti:26,enabl:[0,11,24,29,65],encapsul:[21,35],encod:[48,55],encount:[0,24],enforc:[0,24],entir:29,entiti:[0,2,24,26],entri:68,environ:[0,2,24],epoch:[2,32,37,68],equip:[0,24],error:[0,24],estim:[11,54,56],etc:[0,24,37],euler2mat:22,euler:[2,22,35,50],eval:[0,24,54],eval_ddad:[1,24],eval_kitti:[1,24],evalu:[2,37,53],evaluate_depth:37,everi:[0,24,30,44,45,46,48,50],exampl:[0,24,39,51],except:55,exist:[0,24,26],expand:61,experi:[0,24,26],ext:[12,55],extens:55,extra:[29,30,33,34,35,37,39,40,42,51],factor:[20,45],fals:
[2,11,13,29,30,33,34,35,48,62,64,68],fanci:26,featur:[0,24,40],few:[0,24],field:14,file:[0,13,14,24,54,55,56,61,65],file_list:13,filenam:[11,12,55,65],filepath:[0,2,14,24,32,56],filestorag:55,filter:[65,67],filter_arg:65,filter_args_cr:65,filter_dict:67,filter_zero:62,fine:[0,24],finish:37,first:[0,14,24,67],fix:[0,24],flag:[2,39,51,65,67],fleet:[0,24],flip:[35,62,64],flip_lr:64,flip_lr_prob:[2,35],flip_model:64,flipp:2,float32:[20,21],floattensor:15,focal:19,folder:[0,2,3,4,11,24,26,32,55,65],follow:[0,11,24,45],format:[39,40,51,55],format_checkpoint_nam:32,former:[30,44,45,46,48,50],formula:45,forward:[2,11,13,29,30,33,34,35,37,39,40,42,44,45,46,48,50,51],forward_context:[2,11,12,13],found:[0,24],fp16:55,frame:[0,13,19,20,21,24,48],from:[0,11,14,16,19,20,21,24,29,33,34,35,37,39,40,48,51,61,62,67,68,69],from_vec:21,full:[0,24],fulli:[0,24,55],func:[63,65],further:[0,24],furthermor:[0,24],fuse:62,fuse_inv_depth:62,fused_inv_depth:62,gaidon:[0,24],gamma:[2,5,6,7,8],garg:[4,6,8],gener:[0,11,14,24,55,62,66,67],generate_depth_map:11,geometri:[19,20,21,22,24],get:[0,2,11,16,24,42,61,66],get_and_reset:66,get_backward:11,get_context:11,get_curr:11,get_datasampl:37,get_default_config:61,get_filenam:11,get_forward:11,get_idx:12,get_loss_func:30,get_transform:16,git:[0,24],github:[0,24,48],given:[13,14,20,30,45],goe:55,gpu:[0,24,68],gradient:64,gradient_i:64,gradient_x:64,grid:64,ground:[0,11,13,14,24,30,34,35,62],groupnorm:[42,50],gt_depth:[35,37],gt_inv_depth:[30,34],gt_pose:[35,37],guid:[0,24],guizilini:[0,24],half:[54,55],handl:36,happen:64,has:37,have:[0,24,55],hdr:55,height:[16,64],helper:[14,28],here:[0,24],high:[0,24,37,55],higher:[0,24],highest:[2,35],highli:26,hook:[30,44,45,46,48,50],horizont:[2,64],horovod:[24,60],horovodtrain:[24,57],hou:[0,24],how:[0,2,24,55,67],http:[0,24,40,48],hue:[15,16],hvd_init:63,iclr:[0,24],icra:[0,24],ident:21,ieee:[0,24],ignor:[30,44,45,46,48,50],imag:[0,2,12,13,15,16,19,20,24,29,30,33,34,35,48,50,51,55,60,62],image_dataset:12,image_flip:64,image_grid:64,image_interpol:15,image_shap:[0,2,3,4,5,6,7,8,16,24,55],imagedataset:[10,24],imagenet:[0,24,39,48,51],img:55,imgcodecs_imwrit:55,implement:[0,19,24,30],improv:[0,2,24],imread:55,imwrit:55,imwriteflag:55,in_channel:[42,45],in_plan:50,incl:37,includ:[0,15,24,33,34,55],increas:2,index:[2,11,24,66],individu:[0,24],induc:[0,24],infer:[0,24,53],infer_and_save_depth:55,info:[0,24],inform:[0,11,24,26,35,61],inherit:[33,34],init_weight:[40,50],initi:[0,2,21,24,28,37,39,40,51],inject:[0,24],inproceed:[0,24],input:[0,15,24,29,33,34,35,37,42,45,48,50,55,64,67],input_featur:[44,46],input_fil:55,input_imag:48,insid:[0,24],instanc:[0,24,30,44,45,46,48,50,65],instead:[0,24,30,44,45,46,48,50,65],institut:[0,24],instruct:[0,24],integ:70,interact:[0,24],intern:[0,24],interpol:[15,20,64],interpolate_imag:64,interpolate_scal:64,intrins:[15,19,20,29,33],introduc:[0,24],inv2depth:62,inv_depth:[29,30,33,34,62,64],inv_depth_flip:62,inv_depth_hat:62,inv_depth_pp:62,inv_depths_norm:62,invdepth:42,invent:[0,24],invers:[2,19,21,29,30,33,34,35,39,40,42,62,64],invert:[22,62],invert_pos:22,invert_pose_numpi:22,is_cfg:70,is_dict:70,is_imag:55,is_int:70,is_list:70,is_numpi:70,is_rank_0:[58,59],is_seq:70,is_str:70,is_tensor:70,is_tupl:70,issu:[0,24],item:21,iter:2,its:65,itself:[0,24],jie:[0,24],jitter:[2,15,16],jointli:[0,24],jpeg:[12,55],jpg:[12,55],json:[3,5,7],kei:[11,15,28,35,36,65,67,68],kernel:[42,50],kernel_s:[29,42,50],keyword:67,kinv:19,kitti:[4,6,8,13,14],kitti_dataset:13,kitti_dataset_util:[9,10,24],kitti_raw:[0,4,8,24],kitti_tini:[
0,6,24],kittidataset:[10,24],kwarg:[12,16,19,21,29,30,33,34,35,37,39,40,50,51,55,58,63],land:[0,24],last:55,lat:14,latest:[0,24],latter:[30,44,45,46,48,50],layer:[0,24,39,42,44,46,47,48,49,50,51,65],layers01:[24,41,43,49],learn:[0,2,24],least:[0,24],length:[19,61,67],level:37,leverag:[0,24],lidar:[0,3,5,7,11,24],lift:19,link:[0,24],list:[0,11,13,14,24,26,29,30,33,37,55,62,64,65,66,67,68,70],live:26,load:[11,13,14,24,37,60,64],load_class:65,load_class_args_cr:65,load_dataset:37,load_imag:64,load_network:65,load_oxts_packets_and_pos:14,local:32,locat:[0,24,65],log:[0,2,24,26,28,29,30,33,34,35,36,37,60,65,68],log_model:26,logger:[24,26,37],logluv:55,lon:14,look:65,loss:[0,2,24,28,29,30,33,34,35,36,68],loss_bas:[28,29,30],lossbas:[24,27,29,30],losses_and_metr:[29,30],lower:[0,24],machin:[0,24],mai:[0,24],main:55,make:[0,24],make_list:67,manag:[0,24,28],map:[2,11,13,15,19,20,29,30,33,34,35,39,40,55,62,64],master:48,mat:[21,55],match:61,match_scal:64,matrix:[14,21,22],max:37,max_depth:[2,3,4,5,6,7,8,45],max_epoch:[2,5,6,58],maximum:2,mean:[29,62,68],median:62,memori:[0,24],merg:[36,61],merge_cfg:61,merge_cfg_fil:61,merge_output:36,meshgrid:64,meter:66,method:[0,2,14,24,29,30,62,65],metric:[0,2,24,28,32,33,34,35,36,37,62,66,68],metrics_data:68,metrics_kei:68,metrics_mod:68,min:2,min_depth:[2,3,4,5,6,7,8,42,45],min_epoch:[2,58],minim:[0,24],minimum:2,misc:[24,60],mit:[0,24],mix:[0,24],mode:[0,2,15,16,20,21,22,24,26,29,32,35,37,64],model:[2,3,4,5,6,7,8,19,26,32,33,34,35,36,37,48,54,55,61,64,65,66,69],model_checkpoint:32,model_util:36,model_wrapp:[37,55],modelcheckpoint:32,modelwrapp:37,modifi:[0,24,65],modul:[19,24,28,30,35,37,39,40,42,44,45,46,48,50,51,55,58,64,65],moment:40,monitor:[2,26,32],monitor_index:2,monocular:[54,56],month:[0,24],more:[0,24],motion:[0,24],move:[19,21],mpi:[0,24],much:[0,24,67],multi:[0,24,36],multipl:[11,21,29,66],multiview:29,multiview_photometric_loss:29,multiviewphotometricloss:[24,27],must:48,n_max:66,name:[0,2,3,4,5,6,7,8,24,26,55,61,65,68],navstat:14,nb_ref_img:50,ncol:58,need:[0,24,30,44,45,46,48,50,61],neighbor:42,network:[0,2,24,33,34,35,37,39,40,42,44,45,46,48,50,51,65],network_requir:35,node:[61,70],none:[11,12,13,19,20,21,32,35,37,39,40,42,46,51,58,61,62,64,66,67],norm_inv_depth:62,normal:[2,29,62,64],north:14,note:[0,24],novel:[0,24],now:61,npz:[2,3,4,13,55],num_block:42,num_ch_enc:[44,46],num_class:48,num_frames_to_predict_for:46,num_input_featur:46,num_input_imag:48,num_lay:48,num_log:2,num_output_channel:44,num_scal:[2,28,29,62,64],num_work:[2,7],number:[0,2,13,14,24,28,29,30,37,39,42,48,50,51,62,64],numpi:[2,70],numsat:14,nvidia:[0,24],object:[11,19,21,28,32,58,66],occ_reg_weight:[2,29],occlus:[2,29],octob:[0,24],offici:[0,24],often:[0,2,24],older:65,on_color:66,on_rank_0:63,one:[0,21,24,29,30,44,45,46,48,50,61,64],ones:[0,24],onli:[0,24,55,65,67],onto:19,opaqu:55,openexr:55,optim:[0,2,5,6,7,8,24,37],option:[0,24,39,51,65,67],oral:[0,24],order:55,org:40,origin:[0,14,20,24,29,30,33,34,61],orimod:14,other:[0,24,29,36],our:[0,24],out:[0,24,42],out_channel:[42,45],out_plan:50,outperform:[0,24],output:[0,15,24,29,30,33,34,35,36,37,42,45,50,55,58,64,68,69],output_batch:37,output_data_batch:[37,68],output_fil:55,overfit:[0,24],overfit_ddad:[1,24],overfit_kitti:[0,1,24],overrid:[0,24,61],overridden:[30,44,45,46,48,50],overwrit:[0,24],own:[0,24],oxt:14,oxts_fil:14,oxtsdata:14,oxtspacket:14,pack:42,packet:14,packlayerconv2d:42,packlayerconv3d:42,packnet01:[3,4,7,8,24,38,49],packnet:[40,41,42,49],packnet_sfm:[11,12,13,14,15,16,19,20,21,22,26,28,29,30,32,33,34,35,36,3
7,39,40,42,44,45,46,48,50,51,58,59,61,62,63,64,65,66,67,68,69,70],pad:[2,20,29,45],padding_mod:[2,20,29],page:24,pair:55,paper:[0,24,40,45],param:[2,3,4,5,6,7,8,26,29,48,55,64],paramet:[0,2,11,13,14,15,16,19,20,22,24,28,29,30,33,34,35,36,37,39,40,42,50,51,54,55,56,61,62,64,65,66,67,68,69],paramid_1:55,paramid_2:55,paramvalue_1:55,paramvalue_2:55,pars:[14,54,56,61],parse_arg:[54,55,56],parse_test_config:61,parse_test_fil:61,parse_train_config:61,parse_train_fil:61,part:67,partial:16,particular:[0,24],pass:[0,24,30,44,45,46,48,50],path:[0,2,3,4,5,6,7,8,11,13,14,24,54,64,65,66],pattern:[0,24],pcolor:66,per:[2,55],percentag:[2,28,29,30,33,34],percentil:62,perform:[0,24,30,44,45,46,48,50],period:32,perspect:29,pfm:55,photometr:[2,29,33],photometric_loss:29,photometric_reduce_op:[2,29],pil:[15,64],pillai:[0,24],pinhol:[19,20],pip:[0,24],pitch:14,pixel:[0,2,19,24,29,42,55],pixelshuffl:42,plane:[0,19,24],plasma:62,pleas:[0,24],plu:[20,34],png:[12,13,55],point:[19,21,66],polici:[0,24],pos_accuraci:14,pose:[0,2,5,6,7,8,11,13,14,18,19,22,24,29,33,34,35,37,49,50,51],pose_decod:[24,41,47,49],pose_from_oxts_packet:14,pose_net:[0,2,3,4,5,6,7,8,24,35,37],pose_util:[18,23,24],pose_vec2mat:22,posedecod:46,posenet:[3,4,7,8,24,49,52],poseresnet:[5,6,24,49,52],posit:14,posmod:14,possibl:[0,24],post:62,post_process_inv_depth:62,pre:[0,24,48,56,61],precis:55,pred:[30,62],predict:[29,30,33,34,35,45,51,62,64,69],prefix:[65,66,68],prep_dataset:61,prepar:[37,61],prepare_dataset:37,prepare_dataset_prefix:66,prepare_model:37,prepare_test_config:61,prepare_train_config:61,preprocess:[0,24],preserv:[0,15,24],pretrain:[39,48,51,54,61,65],previou:37,primaryclass:[0,24],princip:19,print0:63,print:66,print_metr:37,privaci:[0,24],prob:15,probabl:[2,15,35],proc_rank:[58,59],process:[33,34,35,37,55,62],produc:[29,37,42,55,62,64,66,68],progress:[28,29,30,33,34,37],progressive_sc:[2,28,29,30],progressivesc:28,project:[0,2,11,19,24,26],properti:[14,19,21,28,29,30,33,34,35,37,58],propos:[0,24],protocol:[0,24],provid:[0,14,24],python3:[0,24],pytorch:[0,2,24,48],radianc:55,random:2,rang:[0,24,44,55],rank:63,rare:[0,24],rate:[0,2,24],rather:[0,24],ratio2:30,ratio:[30,42],ravento:[0,24],raw:[0,24],raw_data:14,read:[13,14,64],read_calib_fil:14,read_fil:12,read_npz_depth:13,read_png_depth:13,real:[0,24],recalcul:11,receiv:65,recent:[0,24],recip:[30,44,45,46,48,50],recognit:[0,24],recommend:[0,24,26],reconstruct:[19,29],reduc:[2,24,29,60],reduce_dict:68,reduce_photometric_loss:29,reduce_valu:68,ref_cam:20,ref_imag:[20,29,33],ref_img:51,ref_k:29,ref_warp:[20,29],refer:[19,20,21,29,33,51],referenc:[0,24],regist:[30,44,45,46,48,50],regular:[0,2,24,29],rel:[0,24],relat:[0,24],releas:[0,24],reli:[0,24],relu:50,remov:[0,2,24],repeat:[2,5,6,7,8,21,67],report:[0,24],repositori:[0,24],represent:[0,24],reproduc:[0,24],reproject:[0,24],requir:[35,37],research:[0,24],reset:66,reshap:16,residu:[39,42,51],residualblock:42,residualconv:42,resiz:[2,15,19],resize_depth:15,resize_imag:15,resize_sampl:15,resize_sample_image_and_intrins:15,resnet18:[0,24,39,51],resnet34:[39,51],resnet:[24,39,41,44,45,46,48,49,51],resnet_encod:[24,41,47,49],resnet_multiimage_input:48,resnetencod:48,resnetmultiimageinput:48,resolut:[0,2,24,35,64],resolv:[0,24],respect:64,result:[0,24],resum:37,return_log:[29,30,33,34,35],retval:55,rgb:29,right:61,risk:[0,24],rmse:[0,24,62],rmse_log:62,rmselog:[0,24],robot:[0,24],robust:[0,24],roll:14,root_dir:[12,13],rotat:[2,14,22,35],rotation_mod:[2,35,50],roti:14,rotx:14,rotz:14,row:[0,24],rui:[0,24],run:[0,2,24,26,30,35,37,39,40,42,44,45,4
6,48,50,51,61],run_nam:26,run_url:26,s3_frequenc:[0,2,24,32],s3_path:[0,2,24,32],s3_url:66,same:[0,19,24,42,64,65,67,68],same_shap:67,sampl:[11,13,15,16,55],sample_idx:11,sample_to_cuda:58,sampler:37,satur:[15,16],save:[0,2,3,4,11,24,29,30,32,55,60,66],save_cod:32,save_depth:69,save_npz:55,save_top_k:[2,32],scalar:[33,34],scale:[0,2,14,19,20,24,28,29,30,35,39,40,44,62,64],scale_intrins:20,scalesto:29,scene:[0,24],schedul:[0,2,5,6,7,8,24,37],scratch:[0,24,39,51],script:[0,24,54,55,56],search:[24,65],second:67,section:45,see:[0,24,55],seed:[2,37],segment:[0,24],self:[21,29,33,34,37],self_supervised_loss:33,selfsupmodel:[3,4,5,6,7,8,24,31,34],semant:[0,11,24],semguid:[0,24],semi:[0,24,34],semisup:[0,24],semisupmodel:[24,31],sensor:[0,11,24],sensor_idx:11,sequenc:[35,50],sequenti:[42,50],session:[0,24],set:[0,24,55,61],set_checkpoint:61,set_debug:65,set_nam:61,set_random_se:37,setup:[0,24],setup_dataload:37,setup_dataset:37,setup_depth_net:37,setup_model:37,setup_pose_net:37,sfmmodel:[24,31,33,37],shape1:67,shape2:67,shape:[0,2,15,21,24,55,64,67],shell:[0,24],should:[0,2,24,30,42,44,45,46,48,50,55],show:55,sigmoid:45,silent:[30,44,45,46,48,50],silogloss:30,similar:[0,24,29],simpl:[0,24],sinc:[0,24,30,44,45,46,48,50],singl:[0,24,35,55],size:[2,11,42,50,64,67],skip_empti:12,slightli:[0,24],smooth:[2,29,62],smooth_loss_weight:[2,29],smoothness_i:62,smoothness_loss:29,smoothness_x:62,snippet:55,some:[0,24],someth:[0,24],sourc:[0,11,12,13,14,15,16,19,20,21,22,24,28,29,30,32,33,34,35,36,37,39,40,42,44,45,46,48,50,51,54,55,56,58,61,62,63,64,65,66,67,68,69,70],spars:[0,2,24,30],sparsiti:[0,24],spatial:42,specif:[19,21,55,64],specifi:55,split:[0,2,3,4,5,6,7,8,11,12,13,16,24,61,66],spotlight:[0,24],sq_rel:62,sqr:[0,24],ssim:[2,29],ssim_loss_weight:[2,29],stack:[11,36,40,48],stack_batch:36,stack_sampl:11,stage:35,standard:[0,24],start:[0,24],state:[0,24,37,61,65],state_dict:[61,65],step:[0,2,24,69],step_siz:[2,5,6,7,8],steplr:[2,5,6,7,8],store:[0,24,26,33,34,35],str:[11,13,14,15,16,20,26,29,30,35,37,39,40,51,54,55,56,61,62,64,65,66,67,68],stream:[0,24],stride:[12,13,29,42,46],string:[37,66,68,70],stronger:[0,24],structur:[11,29],sub:[0,24,68],sub_kei:68,subclass:[30,44,45,46,48,50],subkei:68,subset:[0,24],sudeep:[0,24],suffix:[39,51],superdepth:[0,24],superresolut:[0,24],supervis:[2,29,30,33,34],supervised_loss:[30,34],supervised_loss_weight:[2,34],supervised_method:[2,30],supervised_num_scal:[2,30],supervisedloss:[24,27],symbol:[0,24],symmetr:[0,24],sync:[0,2,24,32],sync_s3_data:32,synthes:20,synthesi:29,system:14,t_est:29,t_max:2,t_w_imu:14,tag:[0,2,24,26],take:[29,30,42,44,45,46,48,50],tar:[0,24],target:64,target_imag:51,tcw:19,team:[0,24],tensor:[15,19,20,22,29,30,33,34,42,45,62,64,68,70],tensor_typ:15,tensorrt:[0,24],termin:[0,24,65],test:[0,2,3,4,5,6,7,8,11,16,24,37,54,61],test_dataload:37,test_epoch_end:37,test_progress_bar:58,test_requir:37,test_step:37,test_transform:16,thank:[0,24],them:[29,30,44,45,46,48,50],theta:14,thi:[0,21,24,30,44,45,46,48,50,55],those:[0,24,65],threshold:[2,29,30],tiff:55,time:[0,2,21,24,35],timestep:11,titl:[0,24],to_item:68,to_tensor:15,to_tensor_sampl:15,togeth:[0,24],top:37,torch:[12,13,15,19,20,21,22,28,29,30,33,34,35,37,39,40,42,44,45,46,48,50,51,62,64,70],torchvis:48,toyota:[0,24],track:37,train:[2,5,6,7,8,11,13,16,26,28,29,30,33,34,35,37,39,40,48,53,54,61],train_dataload:37,train_ddad:[1,24],train_kitti:[1,24],train_progress_bar:58,train_requir:35,train_transform:16,trainer:[24,37,58,59],training_epoch_end:37,training_step:37,transform:[10,11,13,14,19,21,22,24,29]
,transform_from_rot_tran:14,transform_point:21,transform_pos:21,translat:14,transpar:55,tri:[0,24],truth:[0,11,13,14,24,30,34,35,62],tune:[0,24],tupl:[13,14,15,16,36,64,67,69,70],twc:19,two:[0,24,29,67],twostream:[0,24],txt:[4,6,8],type:[0,2,11,13,14,15,16,19,20,24,26,29,30,33,34,35,36,37,42,48,50,60,61,62,64,65,66,67,68],ubuntu:[0,24],unaug:15,under:[0,24],univers:55,unpack:[0,24,42],unpacklayerconv2d:42,unpacklayerconv3d:42,unsign:55,updat:[0,24,61,65],upsampl:[35,40,45],upsample_depth_map:[2,35],url:[26,66],usag:[33,34],use:[0,2,11,19,24,37,40,55,65,66],use_gt_scal:62,use_refl:45,use_skip:44,used:[0,2,11,13,24,30,35,40,55,61,62,64],uses:[0,24,26],using:[0,21,24,35,55,64],util:[12,13,24,31,61,62,63,64,65,66,67,68,69,70],val:[3,7,11,28,35],val_dataload:37,val_loss:32,val_progress_bar:58,valid:[2,5,6,7,8,16,37],validation_epoch_end:37,validation_requir:37,validation_step:37,validation_transform:16,valu:[2,14,15,40,42,61,62,68],var_list:67,vari:48,variabl:[0,24,67],varianc:2,variat:40,variou:[36,37,42,69],vec:[21,22],vector:[14,21],vel_accuraci:14,velmod:14,veloc:[0,24],velodyn:[4,6,8],verifi:[0,24],version:[0,2,3,4,5,6,7,8,15,19,24,26,39,40,51],via:[0,24],video:[0,24],view:29,view_synthesi:20,vision:[0,24,48],visual:[0,2,24,29,30,55,62],vitor:[0,24],viz:[2,3,4],viz_inv_depth:62,wai:[0,24,69],wandb:[0,2,24,26],wandb_api_kei:[0,24],wandb_ent:[0,2,24],wandb_project:[0,2,24],wandblogg:[24,25],want:[0,16,24,37],warp:[20,29],warp_ref_imag:29,weak:[0,24],websit:[0,24],weight:[0,2,24,29,34,40,61,65],weight_decai:2,well:[0,24],were:[0,24],when:[0,24,35,65],where:[0,2,24,26,28,39,40,51,55,66],whether:35,which:[0,2,11,13,16,24,30,37,65],whose:14,width:[16,64],wise:[19,29],with_pos:[11,13],with_semant:11,within:[0,19,24,30,44,45,46,48,50],work:[0,24],worker:[2,37],worker_id:37,worker_init_fn:37,world:19,world_siz:[58,59,63],would:[0,24],wrap:67,wrapper:[24,31,55],x_scale:[19,20],xml:55,xxx_transform:16,y_scale:[19,20],yac:[56,61],yaml:[0,24,55,56,61],yaw:14,year:[0,24],you:[0,24,42,65],your:[0,24],zero:[2,20,29,62],zhou:[0,24]},titles:["PackNet-SfM: 3D Packing for Self-Supervised Monocular Depth Estimation","Configs","default_config","eval_ddad","eval_kitti","overfit_ddad","overfit_kitti","train_ddad","train_kitti","KITTIDataset","Datasets","DGPDataset","ImageDataset","KITTIDataset","kitti_dataset_utils","Augmentations","Transforms","Camera","Geometry","Camera","Camera_utils","Pose","Pose_utils","Pose","PackNet-SfM: 3D Packing for Self-Supervised Monocular Depth Estimation","Loggers","WandbLogger","Losses","LossBase","MultiViewPhotometricLoss","SupervisedLoss","Models","Checkpoint","SelfSupModel","SemiSupModel","SfmModel","Utilities","Wrapper","Depth Networks","DepthResNet","PackNet01","Network Layers","layers01","PackNet","depth_decoder","layers","pose_decoder","ResNet","resnet_encoder","Networks","PoseNet","PoseResNet","Pose 
Networks","Scripts","Evaluation","Inference","Training","Trainers","BaseTrainer","HorovodTrainer","Utils","Config","Depth","Horovod","Image","Loading","Logging","Misc.","Reduce","Saving","Types"],titleterms:{augment:15,autonom:[0,24],basetrain:58,camera:[17,19],camera_util:20,checkpoint:32,config:[1,61],dataset:[0,10,24],ddad:[0,24],default_config:2,dens:[0,24],depth:[0,24,38,62],depth_decod:44,depthresnet:39,dgpdataset:11,drive:[0,24],estim:[0,24],eval_ddad:3,eval_kitti:4,evalu:[0,24,54],geometri:18,horovod:63,horovodtrain:59,imag:64,imagedataset:12,infer:55,instal:[0,24],kitti:[0,24],kitti_dataset_util:14,kittidataset:[9,13],layer:[41,45],layers01:42,licens:[0,24],load:65,log:66,logger:25,loss:27,lossbas:28,map:[0,24],misc:67,model:[0,24,31],monocular:[0,24],multiviewphotometricloss:29,network:[38,41,49,52],overfit_ddad:5,overfit_kitti:6,pack:[0,24],packnet01:40,packnet:[0,24,43],pose:[21,23,52],pose_decod:46,pose_util:22,posenet:50,poseresnet:51,precomput:[0,24],reduc:68,refer:[0,24],resnet:47,resnet_encod:48,save:69,script:53,self:[0,24],selfsupmodel:33,semisupmodel:34,sfm:[0,24],sfmmodel:35,supervis:[0,24],supervisedloss:30,tini:[0,24],train:[0,24,56],train_ddad:7,train_kitti:8,trainer:57,transform:16,type:70,util:[36,60],wandblogg:26,wrapper:37}}) \ No newline at end of file diff --git a/docs/trainers/trainers.BaseTrainer.html b/docs/trainers/trainers.BaseTrainer.html index 8cda2598..448ebbcc 100644 --- a/docs/trainers/trainers.BaseTrainer.html +++ b/docs/trainers/trainers.BaseTrainer.html @@ -217,7 +217,7 @@
          -packnet_sfm.trainers.base_trainer.sample_to_cuda(data)[source]
          +packnet_sfm.trainers.base_trainer.sample_to_cuda(data, dtype=None)[source]
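The regenerated docs above reflect the new dtype argument on sample_to_cuda, which is what allows inputs to be cast to half precision as they are moved to the GPU at inference time. As a rough illustration only (the recursion over dicts and lists and the floating-point check are assumptions about the implementation, not a copy of it):

    import torch

    def sample_to_cuda(data, dtype=None):
        """Recursively move a sample (dict / list / tensor) to CUDA, optionally
        casting floating-point tensors to dtype (e.g. torch.float16)."""
        if isinstance(data, dict):
            return {key: sample_to_cuda(val, dtype) for key, val in data.items()}
        if isinstance(data, (list, tuple)):
            return [sample_to_cuda(val, dtype) for val in data]
        if isinstance(data, torch.Tensor):
            tensor = data.to('cuda')
            # Only cast floating-point tensors, so integer indices are preserved
            if dtype is not None and tensor.is_floating_point():
                tensor = tensor.to(dtype)
            return tensor
        # Strings, ints, etc. stay untouched
        return data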
diff --git a/packnet_sfm/datasets/dgp_dataset.py b/packnet_sfm/datasets/dgp_dataset.py
index 47f90b94..c8356d24 100644
--- a/packnet_sfm/datasets/dgp_dataset.py
+++ b/packnet_sfm/datasets/dgp_dataset.py
@@ -1,9 +1,15 @@
 # Copyright 2020 Toyota Research Institute. All rights reserved.
+import os
 import torch
-from packnet_sfm.utils.misc import make_list
-from packnet_sfm.utils.types import is_tensor
+import numpy as np
+
 from dgp.datasets.synchronized_dataset import SynchronizedSceneDataset
+from dgp.utils.camera import Camera, generate_depth_map
+from dgp.utils.geometry import Pose
+
+from packnet_sfm.utils.misc import make_list
+from packnet_sfm.utils.types import is_tensor, is_numpy, is_list

 ########################################################################################################################
 #### FUNCTIONS
@@ -24,7 +30,24 @@ def stack_sample(sample):
         else:
             # Stack torch tensors
             if is_tensor(sample[0][key]):
-                stacked_sample[key] = torch.cat([s[key].unsqueeze(0) for s in sample], 0)
+                stacked_sample[key] = torch.stack([s[key] for s in sample], 0)
+            # Stack numpy arrays
+            elif is_numpy(sample[0][key]):
+                stacked_sample[key] = np.stack([s[key] for s in sample], 0)
+            # Stack list
+            elif is_list(sample[0][key]):
+                stacked_sample[key] = []
+                # Stack list of torch tensors
+                if is_tensor(sample[0][key][0]):
+                    for i in range(len(sample[0][key])):
+                        stacked_sample[key].append(
+                            torch.stack([s[key][i] for s in sample], 0))
+                # Stack list of numpy arrays
+                if is_numpy(sample[0][key][0]):
+                    for i in range(len(sample[0][key])):
+                        stacked_sample[key].append(
+                            np.stack([s[key][i] for s in sample], 0))
+    # Return stacked sample
     return stacked_sample
@@ -66,6 +89,7 @@ def __init__(self, path, split,
                  forward_context=0,
                  data_transform=None,
                  ):
+        self.path = path
         self.split = split
         self.dataset_idx = 0
@@ -76,6 +100,7 @@ def __init__(self, path, split,
         self.num_cameras = len(cameras)
         self.data_transform = data_transform

+        self.depth_type = depth_type
         self.with_depth = depth_type is not None
         self.with_pose = with_pose
         self.with_semantic = with_semantic
@@ -85,11 +110,57 @@ def __init__(self, path, split,
             datum_names=cameras,
             backward_context=back_context,
             forward_context=forward_context,
-            generate_depth_from_datum=depth_type,
             requested_annotations=None,
             only_annotated_datums=False,
         )

+    def generate_depth_map(self, sample_idx, datum_idx, filename):
+        """
+        Generates the depth map for a camera by projecting LiDAR information.
+        It also caches the depth map following the DGP folder structure, so it is not recalculated.
+
+        Parameters
+        ----------
+        sample_idx : int
+            Sample index
+        datum_idx : int
+            Datum index
+        filename : str
+            Filename used for loading / saving
+
+        Returns
+        -------
+        depth : np.array [H, W]
+            Depth map for that datum in that sample
+        """
+        # Generate depth filename
+        filename = '{}/{}.npz'.format(
+            os.path.dirname(self.path), filename.format('depth/{}'.format(self.depth_type)))
+        # Load and return if exists
+        if os.path.exists(filename):
+            return np.load(filename)['depth']
+        # Otherwise, create, save and return
+        else:
+            # Get pointcloud
+            scene_idx, sample_idx_in_scene, _ = self.dataset.dataset_item_index[sample_idx]
+            pc_datum_idx_in_sample = self.dataset.get_datum_index_for_datum_name(
+                scene_idx, sample_idx_in_scene, self.depth_type)
+            pc_datum_data = self.dataset.get_point_cloud_from_datum(
+                scene_idx, sample_idx_in_scene, pc_datum_idx_in_sample)
+            # Create camera
+            camera_rgb = self.get_current('rgb', datum_idx)
+            camera_pose = self.get_current('pose', datum_idx)
+            camera_intrinsics = self.get_current('intrinsics', datum_idx)
+            camera = Camera(K=camera_intrinsics, p_cw=camera_pose.inverse())
+            # Generate depth map
+            world_points = pc_datum_data['pose'] * pc_datum_data['point_cloud']
+            depth = generate_depth_map(camera, world_points, camera_rgb.size[::-1])
+            # Save depth map
+            os.makedirs(os.path.dirname(filename), exist_ok=True)
+            np.savez_compressed(filename, depth=depth)
+            # Return depth map
+            return depth
+
     def get_current(self, key, sensor_idx):
         """Return current timestep of a key from a sensor"""
         return self.sample_dgp[self.bwd][sensor_idx][key]
@@ -110,6 +181,29 @@ def get_context(self, key, sensor_idx):
         """Get both backward and forward contexts"""
         return self.get_backward(key, sensor_idx) + self.get_forward(key, sensor_idx)

+    def get_filename(self, sample_idx, datum_idx):
+        """
+        Returns the filename for an index, following DGP structure
+
+        Parameters
+        ----------
+        sample_idx : int
+            Sample index
+        datum_idx : int
+            Datum index
+
+        Returns
+        -------
+        filename : str
+            Filename for the datum in that sample
+        """
+        scene_idx, sample_idx_in_scene, datum_indices = self.dataset.dataset_item_index[sample_idx]
+        scene_dir = self.dataset.get_scene_directory(scene_idx)
+        filename = self.dataset.get_datum(
+            scene_idx, sample_idx_in_scene, datum_indices[datum_idx]).datum.image.filename
+        return os.path.splitext(os.path.join(os.path.basename(scene_dir),
+                                             filename.replace('rgb', '{}')))[0]
+
     def __len__(self):
         """Length of dataset"""
         return len(self.dataset)
@@ -127,27 +221,45 @@ def __getitem__(self, idx):
                 'idx': idx,
                 'dataset_idx': self.dataset_idx,
                 'sensor_name': self.get_current('datum_name', i),
-                'filename': '%s_%010d' % (self.split, idx),
+                #
+                'filename': self.get_filename(idx, i),
+                'splitname': '%s_%010d' % (self.split, idx),
                 #
                 'rgb': self.get_current('rgb', i),
                 'intrinsics': self.get_current('intrinsics', i),
             }

+            # If depth is returned
             if self.with_depth:
                 data.update({
-                    'depth': self.get_current('depth', i),
+                    'depth': self.generate_depth_map(idx, i, data['filename'])
                 })

+            # If pose is returned
             if self.with_pose:
                 data.update({
-                    'extrinsics': [pose.matrix for pose in self.get_current('extrinsics', i)],
-                    'pose': [pose.matrix for pose in self.get_current('pose', i)],
+                    'extrinsics': self.get_current('extrinsics', i).matrix,
+                    'pose': self.get_current('pose', i).matrix,
                 })

+            # If context is returned
             if self.has_context:
                 data.update({
                     'rgb_context': self.get_context('rgb', i),
                 })
+                # If context pose is returned
+                if self.with_pose:
+                    # Get original values to calculate relative motion
+                    orig_extrinsics = Pose.from_matrix(data['extrinsics'])
+                    orig_pose = Pose.from_matrix(data['pose'])
+                    data.update({
+                        'extrinsics_context':
+                            [(orig_extrinsics.inverse() * extrinsics).matrix
+                             for extrinsics in self.get_context('extrinsics', i)],
+                        'pose_context':
+                            [(orig_pose.inverse() * pose).matrix
+                             for pose in self.get_context('pose', i)],
+                    })

             sample.append(data)
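A note on the filename templating that ties get_filename and generate_depth_map together: get_filename replaces the 'rgb' folder in the datum path with a '{}' placeholder, so the same template can address either the image or its cached depth map. A sketch with a made-up scene, camera, and timestamp:

    # Hypothetical output of get_filename() for one datum:
    template = 'scene_0001/{}/camera_01/15616458296936490'
    rgb_name = template.format('rgb')                       # original image datum
    depth_name = template.format('depth/lidar') + '.npz'    # cached depth, for depth_type='lidar'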
diff --git a/packnet_sfm/datasets/kitti_dataset.py b/packnet_sfm/datasets/kitti_dataset.py
index dc6d6f9d..ad854fc8 100644
--- a/packnet_sfm/datasets/kitti_dataset.py
+++ b/packnet_sfm/datasets/kitti_dataset.py
@@ -58,8 +58,6 @@ class KITTIDataset(Dataset):
         Split file, with paths to the images to be used
     train : bool
         True if the dataset will be used for training
-    mode : str
-        Dataset mode (stereo or mono)
     data_transform : Function
         Transformations applied to the sample
     depth_type : str
@@ -73,7 +71,7 @@ class KITTIDataset(Dataset):
     strides : tuple
         List of context strides
     """
-    def __init__(self, root_dir, file_list, train=True, mode='mono',
+    def __init__(self, root_dir, file_list, train=True,
                  data_transform=None, depth_type=None, with_pose=False,
                  back_context=0, forward_context=0, strides=(1,)):
         # Assertions
@@ -294,9 +292,12 @@ def _get_imu2cam_transform(self, image_file):
     def _get_oxts_file(image_file):
         """Gets the oxts file from an image file."""
         # find oxts pose file
-        oxts_file = image_file.replace(IMAGE_FOLDER['left'], OXTS_POSE_DATA)
-        oxts_file = oxts_file.replace('png', 'txt')
-        return oxts_file
+        for cam in ['left', 'right']:
+            # Check for both cameras, if found replace and return file name
+            if IMAGE_FOLDER[cam] in image_file:
+                return image_file.replace(IMAGE_FOLDER[cam], OXTS_POSE_DATA).replace('.png', '.txt')
+        # Something went wrong (invalid image file)
+        raise ValueError('Invalid KITTI path for pose supervision.')

     def _get_oxts_data(self, image_file):
         """Gets the oxts data from an image file."""
diff --git a/packnet_sfm/datasets/transforms.py b/packnet_sfm/datasets/transforms.py
index 4bf7caae..1ae88bfa 100644
--- a/packnet_sfm/datasets/transforms.py
+++ b/packnet_sfm/datasets/transforms.py
@@ -24,7 +24,8 @@ def train_transforms(sample, image_shape, jittering):
     sample : dict
         Augmented sample
     """
-    sample = resize_sample(sample, image_shape)
+    if len(image_shape) > 0:
+        sample = resize_sample(sample, image_shape)
     sample = duplicate_sample(sample)
     if len(jittering) > 0:
         sample = colorjitter_sample(sample, jittering)
@@ -47,7 +48,8 @@ def validation_transforms(sample, image_shape):
     sample : dict
         Augmented sample
     """
-    sample['rgb'] = resize_image(sample['rgb'], image_shape)
+    if len(image_shape) > 0:
+        sample['rgb'] = resize_image(sample['rgb'], image_shape)
     sample = to_tensor_sample(sample)
     return sample
@@ -67,7 +69,8 @@ def test_transforms(sample, image_shape):
     sample : dict
         Augmented sample
     """
-    sample['rgb'] = resize_image(sample['rgb'], image_shape)
+    if len(image_shape) > 0:
+        sample['rgb'] = resize_image(sample['rgb'], image_shape)
     sample = to_tensor_sample(sample)
     return sample
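Since resize_sample / resize_image are now skipped whenever image_shape is empty, a config can request native-resolution samples by leaving the shape unset. A minimal sketch (the dummy image size is arbitrary; test_transforms comes from the module above, and the CHW tensor layout is assumed from the usual to-tensor conversion):

    from PIL import Image
    from packnet_sfm.datasets.transforms import test_transforms

    sample = {'rgb': Image.new('RGB', (1936, 1216))}  # dummy full-resolution image
    out = test_transforms(sample, image_shape=())     # empty shape: only tensor conversion
    print(out['rgb'].shape)                           # torch.Size([3, 1216, 1936])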
diff --git a/packnet_sfm/losses/multiview_photometric_loss.py b/packnet_sfm/losses/multiview_photometric_loss.py
index 500110bd..c38efab8 100644
--- a/packnet_sfm/losses/multiview_photometric_loss.py
+++ b/packnet_sfm/losses/multiview_photometric_loss.py
@@ -13,7 +13,7 @@ def SSIM(x, y, C1=1e-4, C2=9e-4, kernel_size=3, stride=1):
     """
-    Structural SIMlilarity (SSIM) distance between two images.
+    Structural SIMilarity (SSIM) distance between two images.

     Parameters
     ----------
diff --git a/packnet_sfm/losses/velocity_loss.py b/packnet_sfm/losses/velocity_loss.py
new file mode 100644
index 00000000..350fa1c5
--- /dev/null
+++ b/packnet_sfm/losses/velocity_loss.py
@@ -0,0 +1,42 @@
+# Copyright 2020 Toyota Research Institute. All rights reserved.
+
+import torch
+import torch.nn as nn
+
+from packnet_sfm.utils.image import match_scales
+from packnet_sfm.losses.loss_base import LossBase
+
+
+class VelocityLoss(LossBase):
+    """
+    Velocity loss for pose translation.
+    """
+    def __init__(self, **kwargs):
+        super().__init__()
+
+    def forward(self, pred_pose, gt_pose_context, **kwargs):
+        """
+        Calculates velocity loss.
+
+        Parameters
+        ----------
+        pred_pose : list of Pose
+            Predicted pose transformation between origin and reference
+        gt_pose_context : list of torch.Tensor [B,4,4]
+            Ground-truth pose transformation between origin and reference
+
+        Returns
+        -------
+        losses_and_metrics : dict
+            Output dictionary
+        """
+        pred_trans = [pose.mat[:, :3, -1].norm(dim=-1) for pose in pred_pose]
+        gt_trans = [pose[:, :3, -1].norm(dim=-1) for pose in gt_pose_context]
+        # Calculate velocity supervision loss
+        loss = sum([(pred - gt).abs().mean()
+                    for pred, gt in zip(pred_trans, gt_trans)]) / len(gt_trans)
+        self.add_metric('velocity_loss', loss)
+        return {
+            'loss': loss.unsqueeze(0),
+            'metrics': self.metrics,
+        }
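Because only the norm of the translation vector is supervised, this loss pins down the metric scale of otherwise scale-ambiguous monocular poses without constraining rotation. A rough usage sketch (the FakePose stand-in mimics the .mat attribute of packnet_sfm.geometry.pose.Pose; batch size and values are made up):

    import torch
    from packnet_sfm.losses.velocity_loss import VelocityLoss

    class FakePose:
        """Stand-in for the geometry Pose class: .mat is a [B,4,4] transformation."""
        def __init__(self, mat):
            self.mat = mat

    pred = [FakePose(torch.eye(4).repeat(2, 1, 1))]   # predicted poses with zero translation
    gt = [torch.eye(4).repeat(2, 1, 1)]               # ground-truth poses as [B,4,4] tensors
    gt[0][:, :3, -1] = torch.tensor([1.5, 0., 0.])    # ~1.5m of motion between frames
    output = VelocityLoss()(pred, gt)
    print(output['loss'])                             # tensor([1.5]): |0 - 1.5| averaged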
+ Structural SIMilarity (SSIM) distance between two images. Parameters ---------- diff --git a/packnet_sfm/losses/velocity_loss.py b/packnet_sfm/losses/velocity_loss.py new file mode 100644 index 00000000..350fa1c5 --- /dev/null +++ b/packnet_sfm/losses/velocity_loss.py @@ -0,0 +1,42 @@ +# Copyright 2020 Toyota Research Institute. All rights reserved. + +import torch +import torch.nn as nn + +from packnet_sfm.utils.image import match_scales +from packnet_sfm.losses.loss_base import LossBase + + +class VelocityLoss(LossBase): + """ + Velocity loss for pose translation. + """ + def __init__(self, **kwargs): + super().__init__() + + def forward(self, pred_pose, gt_pose_context, **kwargs): + """ + Calculates velocity loss. + + Parameters + ---------- + pred_pose : list of Pose + Predicted pose transformation between origin and reference + gt_pose_context : list of Pose + Ground-truth pose transformation between origin and reference + + Returns + ------- + losses_and_metrics : dict + Output dictionary + """ + pred_trans = [pose.mat[:, :3, -1].norm(dim=-1) for pose in pred_pose] + gt_trans = [pose[:, :3, -1].norm(dim=-1) for pose in gt_pose_context] + # Calculate velocity supervision loss + loss = sum([(pred - gt).abs().mean() + for pred, gt in zip(pred_trans, gt_trans)]) / len(gt_trans) + self.add_metric('velocity_loss', loss) + return { + 'loss': loss.unsqueeze(0), + 'metrics': self.metrics, + } diff --git a/packnet_sfm/models/SelfSupModel.py b/packnet_sfm/models/SelfSupModel.py index ecc81bd2..da40c88e 100644 --- a/packnet_sfm/models/SelfSupModel.py +++ b/packnet_sfm/models/SelfSupModel.py @@ -12,16 +12,12 @@ class SelfSupModel(SfmModel): Parameters ---------- - depth_net : nn.Module - Depth network to be used - pose_net : nn.Module - Pose network to be used kwargs : dict Extra parameters """ - def __init__(self, depth_net=None, pose_net=None, **kwargs): + def __init__(self, **kwargs): # Initializes SfmModel - super().__init__(depth_net, pose_net, **kwargs) + super().__init__(**kwargs) # Initializes the photometric loss self._photometric_loss = MultiViewPhotometricLoss(**kwargs) @@ -33,22 +29,6 @@ def logs(self): **self._photometric_loss.logs } - @property - def requires_depth_net(self): - return True - - @property - def requires_pose_net(self): - return True - - @property - def requires_gt_depth(self): - return False - - @property - def requires_gt_pose(self): - return False - def self_supervised_loss(self, image, ref_images, inv_depths, poses, intrinsics, return_logs=False, progress=0.0): """ diff --git a/packnet_sfm/models/SemiSupModel.py b/packnet_sfm/models/SemiSupModel.py index 99a28d4f..8941643c 100644 --- a/packnet_sfm/models/SemiSupModel.py +++ b/packnet_sfm/models/SemiSupModel.py @@ -21,6 +21,7 @@ class SemiSupModel(SelfSupModel): Extra parameters """ def __init__(self, supervised_loss_weight=0.9, **kwargs): + # Initializes SelfSupModel super().__init__(**kwargs) # If supervision weight is 0.0, use SelfSupModel directly assert 0. 
< supervised_loss_weight <= 1., "Model requires (0, 1] supervision"
@@ -28,6 +29,11 @@ def __init__(self, supervised_loss_weight=0.9, **kwargs):
         self.supervised_loss_weight = supervised_loss_weight
         self._supervised_loss = SupervisedLoss(**kwargs)
 
+        # Pose network is only required if there is self-supervision
+        self._network_requirements['pose_net'] = self.supervised_loss_weight < 1
+        # GT depth is only required if there is supervision
+        self._train_requirements['gt_depth'] = self.supervised_loss_weight > 0
+
     @property
     def logs(self):
         """Return logs."""
@@ -36,22 +42,6 @@ def logs(self):
             **self._supervised_loss.logs
         }
 
-    @property
-    def requires_depth_net(self):
-        return True
-
-    @property
-    def requires_pose_net(self):
-        return self.supervised_loss_weight < 1.
-
-    @property
-    def requires_gt_depth(self):
-        return self.supervised_loss_weight > 0.
-
-    @property
-    def requires_gt_pose(self):
-        return False
-
     def supervised_loss(self, inv_depths, gt_inv_depths,
                         return_logs=False, progress=0.0):
         """
diff --git a/packnet_sfm/models/SfmModel.py b/packnet_sfm/models/SfmModel.py
index 6c3cf7f8..d0bf494e 100644
--- a/packnet_sfm/models/SfmModel.py
+++ b/packnet_sfm/models/SfmModel.py
@@ -22,7 +22,7 @@ class SfmModel(nn.Module):
     flip_lr_prob : float
         Probability of flipping when using the depth network
     upsample_depth_maps : bool
-        True if detph map scales are upsampled to highest resolution
+        True if depth map scales are upsampled to highest resolution
     kwargs : dict
         Extra parameters
     """
@@ -38,6 +38,15 @@ def __init__(self, depth_net=None, pose_net=None,
         self._logs = {}
         self._losses = {}
 
+        self._network_requirements = {
+            'depth_net': True,  # Depth network required
+            'pose_net': True,   # Pose network required
+        }
+        self._train_requirements = {
+            'gt_depth': False,  # No ground-truth depth required
+            'gt_pose': False,   # No ground-truth pose required
+        }
+
     @property
     def logs(self):
         """Return logs."""
@@ -53,25 +62,41 @@ def add_loss(self, key, val):
         self._losses[key] = val.detach()
 
     @property
-    def requires_depth_net(self):
-        return True
+    def network_requirements(self):
+        """
+        Networks required to run the model
 
-    @property
-    def requires_pose_net(self):
-        return True
+        Returns
+        -------
+        requirements : dict
+            depth_net : bool
+                Whether a depth network is required by the model
+            pose_net : bool
+                Whether a pose network is required by the model
+        """
+        return self._network_requirements
 
     @property
-    def requires_gt_depth(self):
-        return False
+    def train_requirements(self):
+        """
+        Information required by the model at the training stage
 
-    @property
-    def requires_gt_pose(self):
-        return False
+        Returns
+        -------
+        requirements : dict
+            gt_depth : bool
+                Whether ground truth depth is required by the model at training time
+            gt_pose : bool
+                Whether ground truth pose is required by the model at training time
+        """
+        return self._train_requirements
 
     def add_depth_net(self, depth_net):
+        """Add a depth network to the model"""
         self.depth_net = depth_net
 
     def add_pose_net(self, pose_net):
+        """Add a pose network to the model"""
         self.pose_net = pose_net
 
     def compute_inv_depths(self, image):
diff --git a/packnet_sfm/models/VelSupModel.py b/packnet_sfm/models/VelSupModel.py
new file mode 100644
index 00000000..a55d07ae
--- /dev/null
+++ b/packnet_sfm/models/VelSupModel.py
@@ -0,0 +1,52 @@
+# Copyright 2020 Toyota Research Institute. All rights reserved.
+
+from packnet_sfm.models.SelfSupModel import SelfSupModel
+from packnet_sfm.losses.velocity_loss import VelocityLoss
+
+
+class VelSupModel(SelfSupModel):
+    """
+    Self-supervised model with additional velocity supervision loss.
+
+    Parameters
+    ----------
+    velocity_loss_weight : float
+        Weight for velocity supervision
+    kwargs : dict
+        Extra parameters
+    """
+    def __init__(self, velocity_loss_weight=0.1, **kwargs):
+        # Initializes SelfSupModel
+        super().__init__(**kwargs)
+        # Initializes the velocity loss and stores its weight
+        self._velocity_loss = VelocityLoss(**kwargs)
+        self.velocity_loss_weight = velocity_loss_weight
+
+        # GT pose is required
+        self._train_requirements['gt_pose'] = True
+
+    def forward(self, batch, return_logs=False, progress=0.0):
+        """
+        Processes a batch.
+
+        Parameters
+        ----------
+        batch : dict
+            Input batch
+        return_logs : bool
+            True if logs are stored
+        progress : float
+            Training progress percentage
+
+        Returns
+        -------
+        output : dict
+            Dictionary containing a "loss" scalar and different metrics and predictions
+            for logging and downstream usage.
+        """
+        output = super().forward(batch, return_logs, progress)
+        if self.training:
+            # Update self-supervised loss with velocity supervision
+            velocity_loss = self._velocity_loss(output['poses'], batch['pose_context'])
+            output['loss'] += self.velocity_loss_weight * velocity_loss['loss']
+        return output
diff --git a/packnet_sfm/models/model_utils.py b/packnet_sfm/models/model_utils.py
index f16bd18f..6e8043e4 100644
--- a/packnet_sfm/models/model_utils.py
+++ b/packnet_sfm/models/model_utils.py
@@ -1,5 +1,6 @@
 # Copyright 2020 Toyota Research Institute. All rights reserved.
 
+from packnet_sfm.utils.types import is_tensor, is_list, is_numpy
 
 def merge_outputs(*outputs):
     """
@@ -34,3 +35,32 @@ def merge_outputs(*outputs):
             'Adding duplicated key {}'.format(key)
         merge[key] = val
     return merge
+
+
+def stack_batch(batch):
+    """
+    Stack multi-camera batches (B,N,C,H,W becomes BN,C,H,W)
+
+    Parameters
+    ----------
+    batch : dict
+        Batch
+
+    Returns
+    -------
+    batch : dict
+        Stacked batch
+    """
+    # If there is multi-camera information
+    if len(batch['rgb'].shape) == 5:
+        assert batch['rgb'].shape[0] == 1, 'Only batch size 1 is supported for multi-cameras'
+        # Loop over all keys
+        for key in batch.keys():
+            # If list, stack every item
+            if is_list(batch[key]):
+                if is_tensor(batch[key][0]) or is_numpy(batch[key][0]):
+                    batch[key] = [sample[0] for sample in batch[key]]
+            # Else, stack single item
+            else:
+                batch[key] = batch[key][0]
+    return batch
diff --git a/packnet_sfm/models/model_wrapper.py b/packnet_sfm/models/model_wrapper.py
index c1c18aca..0eacd162 100644
--- a/packnet_sfm/models/model_wrapper.py
+++ b/packnet_sfm/models/model_wrapper.py
@@ -19,6 +19,7 @@
 from packnet_sfm.utils.reduce import all_reduce_metrics, reduce_dict, \
     create_dict, average_loss_and_metrics
 from packnet_sfm.utils.save import save_depth
+from packnet_sfm.models.model_utils import stack_batch
 
 
 class ModelWrapper(torch.nn.Module):
@@ -58,7 +59,10 @@ def __init__(self, config, resume=None, logger=None, load_datasets=True):
 
         # Prepare datasets
         if load_datasets:
-            self.prepare_datasets()
+            # Requirements for validation (we only evaluate depth for now)
+            validation_requirements = {'gt_depth': True, 'gt_pose': False}
+            test_requirements = validation_requirements
+            self.prepare_datasets(validation_requirements, test_requirements)
 
         # Preparations done
         self.config.prepared = True
@@ -76,20 +80,24 @@ def prepare_model(self, resume=None):
             if 'epoch' in resume:
self.current_epoch = resume['epoch'] - def prepare_datasets(self): + def prepare_datasets(self, validation_requirements, test_requirements): """Prepare datasets for training, validation and test.""" - # Prepare datasets print0(pcolor('### Preparing Datasets', 'green')) augmentation = self.config.datasets.augmentation + # Setup train dataset (requirements are given by the model itself) self.train_dataset = setup_dataset( self.config.datasets.train, 'train', - self.model.requires_gt_depth, **augmentation) + self.model.train_requirements, **augmentation) + # Setup validation dataset self.validation_dataset = setup_dataset( - self.config.datasets.validation, 'validation', **augmentation) + self.config.datasets.validation, 'validation', + validation_requirements, **augmentation) + # Setup test dataset self.test_dataset = setup_dataset( - self.config.datasets.test, 'test', **augmentation) + self.config.datasets.test, 'test', + test_requirements, **augmentation) @property def depth_net(self): @@ -107,12 +115,17 @@ def logs(self): params = OrderedDict() for param in self.optimizer.param_groups: params['{}_learning_rate'.format(param['name'].lower())] = param['lr'] - params['progress'] = self.current_epoch / self.config.arch.max_epochs + params['progress'] = self.progress return { **params, **self.model.logs, } + @property + def progress(self): + """Returns training progress (current epoch / max. number of epochs)""" + return self.current_epoch / self.config.arch.max_epochs + def configure_optimizers(self): """Configure depth and pose optimizers and the corresponding scheduler.""" @@ -170,8 +183,8 @@ def test_dataloader(self): def training_step(self, batch, *args): """Processes a training batch.""" - # loss = self.model(batch)[-1] - output = self.model(batch) + batch = stack_batch(batch) + output = self.model(batch, progress=self.progress) return { 'loss': output['loss'], 'metrics': output['metrics'] @@ -336,8 +349,11 @@ def wrap(string): print(met_line.format(*(('METRIC',) + self.metrics_keys))) for n, metrics in enumerate(metrics_data): print(hor_line) - print(wrap(pcolor('*** {:<87}'.format( - os.path.join(dataset.path[n], dataset.split[n])), 'magenta', attrs=['bold']))) + path_line = '{}'.format( + os.path.join(dataset.path[n], dataset.split[n])) + if len(dataset.cameras[n]) == 1: # only allows single cameras + path_line += ' ({})'.format(dataset.cameras[n][0]) + print(wrap(pcolor('*** {:<87}'.format(path_line), 'magenta', attrs=['bold']))) print(hor_line) for key, metric in metrics.items(): if self.metrics_name in key: @@ -442,10 +458,10 @@ def setup_model(config, prepared, **kwargs): model = load_class(config.name, paths=['packnet_sfm.models',])( **{**config.loss, **kwargs}) # Add depth network if required - if model.requires_depth_net: + if model.network_requirements['depth_net']: model.add_depth_net(setup_depth_net(config.depth_net, prepared)) # Add pose network if required - if model.requires_pose_net: + if model.network_requirements['pose_net']: model.add_pose_net(setup_pose_net(config.pose_net, prepared)) # If a checkpoint is provided, load pretrained model if not prepared and config.checkpoint_path is not '': @@ -454,7 +470,7 @@ def setup_model(config, prepared, **kwargs): return model -def setup_dataset(config, mode, requires_gt_depth=True, **kwargs): +def setup_dataset(config, mode, requirements, **kwargs): """ Create a dataset class @@ -464,8 +480,8 @@ def setup_dataset(config, mode, requires_gt_depth=True, **kwargs): Configuration (cf. 
configs/default_config.py) mode : str {'train', 'validation', 'test'} Mode from which we want the dataset - requires_gt_depth : bool - True if the model requires ground-truth depth maps at training time + requirements : dict (string -> bool) + Different requirements for dataset loading (gt_depth, gt_pose, etc) kwargs : dict Extra parameters for dataset creation @@ -494,7 +510,8 @@ def setup_dataset(config, mode, requires_gt_depth=True, **kwargs): # Individual shared dataset arguments dataset_args_i = { - 'depth_type': config.depth_type[i] if requires_gt_depth else None, + 'depth_type': config.depth_type[i] if requirements['gt_depth'] else None, + 'with_pose': requirements['gt_pose'], } # KITTI dataset @@ -502,14 +519,13 @@ def setup_dataset(config, mode, requires_gt_depth=True, **kwargs): dataset = KITTIDataset( config.path[i], path_split, **dataset_args, **dataset_args_i, - mode='mono', ) # DGP dataset elif config.dataset[i] == 'DGP': dataset = DGPDataset( config.path[i], config.split[i], **dataset_args, **dataset_args_i, - cameras=config.cameras, + cameras=config.cameras[i], ) # Image dataset elif config.dataset[i] == 'Image': diff --git a/packnet_sfm/networks/depth/PackNet01.py b/packnet_sfm/networks/depth/PackNet01.py index 3be5ae0d..adde8a65 100644 --- a/packnet_sfm/networks/depth/PackNet01.py +++ b/packnet_sfm/networks/depth/PackNet01.py @@ -5,8 +5,6 @@ from packnet_sfm.networks.layers.packnet.layers01 import \ PackLayerConv3d, UnpackLayerConv3d, Conv2D, ResidualBlock, InvDepth -######################################################################################################################## - class PackNet01(nn.Module): """ PackNet network with 3d convolutions (version 01, from the CVPR paper). @@ -40,13 +38,13 @@ def __init__(self, dropout=None, version=None, **kwargs): # Initial convolutional layer self.pre_calc = Conv2D(in_channels, ni, 5, 1) # Support for different versions - if self.version == 'A': + if self.version == 'A': # Channel concatenation n1o, n1i = n1, n1 + ni + no n2o, n2i = n2, n2 + n1 + no n3o, n3i = n3, n3 + n2 + no n4o, n4i = n4, n4 + n3 n5o, n5i = n5, n5 + n4 - elif self.version == 'B': + elif self.version == 'B': # Channel addition n1o, n1i = n1, n1 + no n2o, n2i = n2, n2 + no n3o, n3i = n3//2, n3//2 + no @@ -181,5 +179,3 @@ def forward(self, x): return [disp1, disp2, disp3, disp4] else: return disp1 - -######################################################################################################################## diff --git a/packnet_sfm/networks/depth/PackNetSlim01.py b/packnet_sfm/networks/depth/PackNetSlim01.py new file mode 100644 index 00000000..be5f009b --- /dev/null +++ b/packnet_sfm/networks/depth/PackNetSlim01.py @@ -0,0 +1,183 @@ +# Copyright 2020 Toyota Research Institute. All rights reserved. + +import torch +import torch.nn as nn +from packnet_sfm.networks.layers.packnet.layers01 import \ + PackLayerConv3d, UnpackLayerConv3d, Conv2D, ResidualBlock, InvDepth + +class PackNetSlim01(nn.Module): + """ + PackNet network with 3d convolutions (version 01, from the CVPR paper). + Slimmer version, with fewer feature channels + + https://arxiv.org/abs/1905.02693 + + Parameters + ---------- + dropout : float + Dropout value to use + version : str + Has a XY format, where: + X controls upsampling variations (not used at the moment). 
+ Y controls feature stacking (A for concatenation and B for addition) + kwargs : dict + Extra parameters + """ + def __init__(self, dropout=None, version=None, **kwargs): + super().__init__() + self.version = version[1:] + # Input/output channels + in_channels = 3 + out_channels = 1 + # Hyper-parameters + ni, no = 32, out_channels + n1, n2, n3, n4, n5 = 32, 64, 128, 256, 512 + num_blocks = [2, 2, 3, 3] + pack_kernel = [5, 3, 3, 3, 3] + unpack_kernel = [3, 3, 3, 3, 3] + iconv_kernel = [3, 3, 3, 3, 3] + num_3d_feat = 4 + # Initial convolutional layer + self.pre_calc = Conv2D(in_channels, ni, 5, 1) + # Support for different versions + if self.version == 'A': # Channel concatenation + n1o, n1i = n1, n1 + ni + no + n2o, n2i = n2, n2 + n1 + no + n3o, n3i = n3, n3 + n2 + no + n4o, n4i = n4, n4 + n3 + n5o, n5i = n5, n5 + n4 + elif self.version == 'B': # Channel addition + n1o, n1i = n1, n1 + no + n2o, n2i = n2, n2 + no + n3o, n3i = n3//2, n3//2 + no + n4o, n4i = n4//2, n4//2 + n5o, n5i = n5//2, n5//2 + else: + raise ValueError('Unknown PackNet version {}'.format(version)) + + # Encoder + + self.pack1 = PackLayerConv3d(n1, pack_kernel[0], d=num_3d_feat) + self.pack2 = PackLayerConv3d(n2, pack_kernel[1], d=num_3d_feat) + self.pack3 = PackLayerConv3d(n3, pack_kernel[2], d=num_3d_feat) + self.pack4 = PackLayerConv3d(n4, pack_kernel[3], d=num_3d_feat) + self.pack5 = PackLayerConv3d(n5, pack_kernel[4], d=num_3d_feat) + + self.conv1 = Conv2D(ni, n1, 7, 1) + self.conv2 = ResidualBlock(n1, n2, num_blocks[0], 1, dropout=dropout) + self.conv3 = ResidualBlock(n2, n3, num_blocks[1], 1, dropout=dropout) + self.conv4 = ResidualBlock(n3, n4, num_blocks[2], 1, dropout=dropout) + self.conv5 = ResidualBlock(n4, n5, num_blocks[3], 1, dropout=dropout) + + # Decoder + + self.unpack5 = UnpackLayerConv3d(n5, n5o, unpack_kernel[0], d=num_3d_feat) + self.unpack4 = UnpackLayerConv3d(n5, n4o, unpack_kernel[1], d=num_3d_feat) + self.unpack3 = UnpackLayerConv3d(n4, n3o, unpack_kernel[2], d=num_3d_feat) + self.unpack2 = UnpackLayerConv3d(n3, n2o, unpack_kernel[3], d=num_3d_feat) + self.unpack1 = UnpackLayerConv3d(n2, n1o, unpack_kernel[4], d=num_3d_feat) + + self.iconv5 = Conv2D(n5i, n5, iconv_kernel[0], 1) + self.iconv4 = Conv2D(n4i, n4, iconv_kernel[1], 1) + self.iconv3 = Conv2D(n3i, n3, iconv_kernel[2], 1) + self.iconv2 = Conv2D(n2i, n2, iconv_kernel[3], 1) + self.iconv1 = Conv2D(n1i, n1, iconv_kernel[4], 1) + + # Depth Layers + + self.unpack_disps = nn.PixelShuffle(2) + self.unpack_disp4 = nn.Upsample(scale_factor=2, mode='nearest', align_corners=None) + self.unpack_disp3 = nn.Upsample(scale_factor=2, mode='nearest', align_corners=None) + self.unpack_disp2 = nn.Upsample(scale_factor=2, mode='nearest', align_corners=None) + + self.disp4_layer = InvDepth(n4, out_channels=out_channels) + self.disp3_layer = InvDepth(n3, out_channels=out_channels) + self.disp2_layer = InvDepth(n2, out_channels=out_channels) + self.disp1_layer = InvDepth(n1, out_channels=out_channels) + + self.init_weights() + + def init_weights(self): + """Initializes network weights.""" + for m in self.modules(): + if isinstance(m, (nn.Conv2d, nn.Conv3d)): + nn.init.xavier_uniform_(m.weight) + if m.bias is not None: + m.bias.data.zero_() + + def forward(self, x): + """ + Runs the network and returns inverse depth maps + (4 scales if training and 1 if not). 
+ """ + x = self.pre_calc(x) + + # Encoder + + x1 = self.conv1(x) + x1p = self.pack1(x1) + x2 = self.conv2(x1p) + x2p = self.pack2(x2) + x3 = self.conv3(x2p) + x3p = self.pack3(x3) + x4 = self.conv4(x3p) + x4p = self.pack4(x4) + x5 = self.conv5(x4p) + x5p = self.pack5(x5) + + # Skips + + skip1 = x + skip2 = x1p + skip3 = x2p + skip4 = x3p + skip5 = x4p + + # Decoder + + unpack5 = self.unpack5(x5p) + if self.version == 'A': + concat5 = torch.cat((unpack5, skip5), 1) + else: + concat5 = unpack5 + skip5 + iconv5 = self.iconv5(concat5) + + unpack4 = self.unpack4(iconv5) + if self.version == 'A': + concat4 = torch.cat((unpack4, skip4), 1) + else: + concat4 = unpack4 + skip4 + iconv4 = self.iconv4(concat4) + disp4 = self.disp4_layer(iconv4) + udisp4 = self.unpack_disp4(disp4) + + unpack3 = self.unpack3(iconv4) + if self.version == 'A': + concat3 = torch.cat((unpack3, skip3, udisp4), 1) + else: + concat3 = torch.cat((unpack3 + skip3, udisp4), 1) + iconv3 = self.iconv3(concat3) + disp3 = self.disp3_layer(iconv3) + udisp3 = self.unpack_disp3(disp3) + + unpack2 = self.unpack2(iconv3) + if self.version == 'A': + concat2 = torch.cat((unpack2, skip2, udisp3), 1) + else: + concat2 = torch.cat((unpack2 + skip2, udisp3), 1) + iconv2 = self.iconv2(concat2) + disp2 = self.disp2_layer(iconv2) + udisp2 = self.unpack_disp2(disp2) + + unpack1 = self.unpack1(iconv2) + if self.version == 'A': + concat1 = torch.cat((unpack1, skip1, udisp2), 1) + else: + concat1 = torch.cat((unpack1 + skip1, udisp2), 1) + iconv1 = self.iconv1(concat1) + disp1 = self.disp1_layer(iconv1) + + if self.training: + return [disp1, disp2, disp3, disp4] + else: + return disp1 diff --git a/packnet_sfm/trainers/base_trainer.py b/packnet_sfm/trainers/base_trainer.py index 59474c86..4da9a952 100644 --- a/packnet_sfm/trainers/base_trainer.py +++ b/packnet_sfm/trainers/base_trainer.py @@ -1,18 +1,21 @@ # Copyright 2020 Toyota Research Institute. All rights reserved. 
+import torch
 from tqdm import tqdm
 
 from packnet_sfm.utils.logging import prepare_dataset_prefix
 
 
-def sample_to_cuda(data):
+def sample_to_cuda(data, dtype=None):
     if isinstance(data, str):
         return data
     elif isinstance(data, dict):
-        return {key: sample_to_cuda(data[key]) for key in data.keys()}
+        return {key: sample_to_cuda(data[key], dtype) for key in data.keys()}
     elif isinstance(data, list):
-        return [sample_to_cuda(key) for key in data]
+        return [sample_to_cuda(val, dtype) for val in data]
     else:
-        return data.to('cuda')
+        # only convert floats (e.g., to half), otherwise preserve (e.g., ints)
+        dtype = dtype if torch.is_floating_point(data) else None
+        return data.to('cuda', dtype=dtype)
 
 
 class BaseTrainer:
diff --git a/packnet_sfm/trainers/horovod_trainer.py b/packnet_sfm/trainers/horovod_trainer.py
index 6b4ebf3a..1b49d27e 100644
--- a/packnet_sfm/trainers/horovod_trainer.py
+++ b/packnet_sfm/trainers/horovod_trainer.py
@@ -19,6 +19,7 @@ def __init__(self, **kwargs):
         torch.backends.cudnn.benchmark = True
 
         self.avg_loss = AvgMeter(50)
+        self.dtype = kwargs.get("dtype", None)  # currently only used at test time
 
     @property
     def proc_rank(self):
@@ -120,12 +121,13 @@ def validate(self, dataloaders, module):
 
     def test(self, module):
         # Send module to GPU
-        module = module.to('cuda')
+        module = module.to('cuda', dtype=self.dtype)
         # Get test dataloaders
         test_dataloaders = module.test_dataloader()
         # Run evaluation
         self.evaluate(test_dataloaders, module)
 
+    @torch.no_grad()
     def evaluate(self, dataloaders, module):
         # Set module to eval
         module.eval()
@@ -140,7 +142,7 @@ def evaluate(self, dataloaders, module):
             # For all batches
             for i, batch in progress_bar:
                 # Send batch to GPU and take a test step
-                batch = sample_to_cuda(batch)
+                batch = sample_to_cuda(batch, self.dtype)
                 output = module.test_step(batch, i, n)
                 # Append output to list of outputs
                 outputs.append(output)
diff --git a/packnet_sfm/utils/config.py b/packnet_sfm/utils/config.py
index 08392bb6..1aa1facd 100644
--- a/packnet_sfm/utils/config.py
+++ b/packnet_sfm/utils/config.py
@@ -6,7 +6,7 @@
 
 from packnet_sfm.utils.logging import s3_url, prepare_dataset_prefix
 from packnet_sfm.utils.horovod import on_rank_0
-from packnet_sfm.utils.types import is_cfg
+from packnet_sfm.utils.types import is_cfg, is_list
 from packnet_sfm.utils.misc import make_list
 from packnet_sfm.utils.load import load_class, backwards_state_dict
 
@@ -28,11 +28,16 @@ def prep_dataset(config):
     # If there is no dataset, do nothing
     if len(config.path) == 0:
         return config
-    # Get split length and expand other arguments to the same length
-    n = len(config.split)
+    # If cameras is not a double list, make it so
+    if not config.cameras or not is_list(config.cameras[0]):
+        config.cameras = [config.cameras]
+    # Get maximum length and expand other arguments to the same length
+    n = max(len(config.split), len(config.cameras), len(config.depth_type))
     config.dataset = make_list(config.dataset, n)
     config.path = make_list(config.path, n)
+    config.split = make_list(config.split, n)
     config.depth_type = make_list(config.depth_type, n)
+    config.cameras = make_list(config.cameras, n)
     if 'repeat' in config:
         config.repeat = make_list(config.repeat, n)
     # Return updated configuration
diff --git a/packnet_sfm/utils/logging.py b/packnet_sfm/utils/logging.py
index 1d10d2ff..ed12876c 100644
--- a/packnet_sfm/utils/logging.py
+++ b/packnet_sfm/utils/logging.py
@@ -46,11 +46,14 @@ def prepare_dataset_prefix(config, n):
     prefix : str
         Dataset prefix for metrics logging
     """
-    return '{}-{}-{}'.format(
+    prefix = '{}-{}'.format(
        os.path.splitext(config.path[n].split('/')[-1])[0],
-        os.path.splitext(os.path.basename(config.split[n]))[0],
-        config.depth_type[n],
-    )
+        os.path.splitext(os.path.basename(config.split[n]))[0])
+    if config.depth_type[n] != '':
+        prefix += '-{}'.format(config.depth_type[n])
+    if len(config.cameras[n]) == 1:  # only allows single cameras
+        prefix += '-{}'.format(config.cameras[n][0])
+    return prefix
 
 
 def s3_url(config):
diff --git a/packnet_sfm/utils/save.py b/packnet_sfm/utils/save.py
index 61312942..9161ac31 100644
--- a/packnet_sfm/utils/save.py
+++ b/packnet_sfm/utils/save.py
@@ -7,7 +7,6 @@
 from packnet_sfm.utils.logging import prepare_dataset_prefix
 from packnet_sfm.utils.depth import inv2depth, viz_inv_depth
 
-########################################################################################################################
 
 def save_depth(batch, output, args, dataset, save):
     """
@@ -64,5 +63,3 @@ def save_depth(batch, output, args, dataset, save):
             # Write to disk
             cv2.imwrite('{}/{}.png'.format(
                 save_path, filename[i]), image[:, :, ::-1])
-
-########################################################################################################################
\ No newline at end of file
diff --git a/scripts/eval.py b/scripts/eval.py
index a28538ef..1f9df504 100644
--- a/scripts/eval.py
+++ b/scripts/eval.py
@@ -1,6 +1,7 @@
 # Copyright 2020 Toyota Research Institute. All rights reserved.
 
 import argparse
+import torch
 
 from packnet_sfm import ModelWrapper, HorovodTrainer
 from packnet_sfm.utils.config import parse_test_file
@@ -13,6 +14,7 @@ def parse_args():
     parser = argparse.ArgumentParser(description='PackNet-SfM evaluation script')
     parser.add_argument('--checkpoint', type=str, help='Checkpoint (.ckpt)')
     parser.add_argument('--config', type=str, default=None, help='Configuration (.yaml)')
+    parser.add_argument('--half', action="store_true", help='Use half precision (fp16)')
     args = parser.parse_args()
     assert args.checkpoint.endswith('.ckpt'), \
         'You need to provide a .ckpt file as checkpoint'
@@ -21,7 +23,7 @@
     return args
 
 
-def test(ckpt_file, cfg_file):
+def test(ckpt_file, cfg_file, half):
     """
     Monocular depth estimation test script.
@@ -46,6 +48,9 @@ def test(ckpt_file, cfg_file):
     # Restore model state
     model_wrapper.load_state_dict(state_dict)
 
+    # change to half precision for evaluation if requested
+    config.arch["dtype"] = torch.float16 if half else None
+
     # Create trainer with args.arch parameters
     trainer = HorovodTrainer(**config.arch)
 
@@ -55,4 +60,4 @@
 
 if __name__ == '__main__':
     args = parse_args()
-    test(args.checkpoint, args.config)
+    test(args.checkpoint, args.config, args.half)
diff --git a/scripts/infer.py b/scripts/infer.py
index d2fe393f..5934cc8b 100644
--- a/scripts/infer.py
+++ b/scripts/infer.py
@@ -13,7 +13,7 @@
 from packnet_sfm.utils.image import load_image
 from packnet_sfm.utils.config import parse_test_file
 from packnet_sfm.utils.load import set_debug
-from packnet_sfm.utils.depth import viz_inv_depth
+from packnet_sfm.utils.depth import inv2depth, viz_inv_depth
 from packnet_sfm.utils.logging import pcolor
 
 
@@ -23,14 +23,15 @@ def is_image(file, ext=('.png', '.jpg',)):
 
 
 def parse_args():
-    """Parse arguments for training script"""
-    parser = argparse.ArgumentParser(description='PackNet-SfM evaluation script')
+    parser = argparse.ArgumentParser(description='PackNet-SfM inference of depth maps from images')
     parser.add_argument('--checkpoint', type=str, help='Checkpoint (.ckpt)')
     parser.add_argument('--input', type=str, help='Input file or folder')
-    parser.add_argument('--output', type=str, help='Output file or foler')
+    parser.add_argument('--output', type=str, help='Output file or folder')
     parser.add_argument('--image_shape', type=tuple, default=None, help='Input and output image shape '
                         '(default: checkpoint\'s config.datasets.augmentation.image_shape)')
+    parser.add_argument('--half', action="store_true", help='Use half precision (fp16)')
+    parser.add_argument('--save_npz', action='store_true', help='Save depth maps in .npz format')
     args = parser.parse_args()
     assert args.checkpoint.endswith('.ckpt'), \
         'You need to provide a .ckpt file as checkpoint'
@@ -42,7 +43,8 @@
     return args
 
 
-def process(input_file, output_file, model_wrapper, image_shape):
+@torch.no_grad()
+def infer_and_save_depth(input_file, output_file, model_wrapper, image_shape, half, save_npz):
     """
     Process a single input file to produce and save visualization
 
@@ -56,11 +58,20 @@
         Model wrapper used for inference
     image_shape : Image shape
         Input image shape
-
-    Returns
-    -------
+    half : bool
+        Use half precision (fp16) if True
+    save_npz : bool
+        Save depth maps in .npz format if True, otherwise save as png
     """
+    if not is_image(output_file):
+        # If not an image, assume it's a folder and append the input name
+        os.makedirs(output_file, exist_ok=True)
+        output_file = os.path.join(output_file, os.path.basename(input_file))
+
+    # change to half precision for evaluation if requested
+    dtype = torch.float16 if half else None
+
     # Load image
     image = load_image(input_file)
     # Resize and to tensor
@@ -69,50 +80,43 @@
     # Send image to GPU if available
     if torch.cuda.is_available():
-        image = image.to('cuda:{}'.format(rank()))
-
-    # Depth inference
-    depth = model_wrapper.depth(image)[0]
-
-    # Prepare RGB image
-    rgb_i = image[0].permute(1, 2, 0).detach().cpu().numpy() * 255
-    # Prepare inverse depth
-    pred_inv_depth_i = viz_inv_depth(depth[0]) * 255
-    # Concatenate both vertically
-    image = np.concatenate([rgb_i, pred_inv_depth_i], 0)
-    if not is_image(output_file):
-        # If not an image, assume it's a
folder and append the input name - os.makedirs(output_file, exist_ok=True) - output_file = os.path.join(output_file, os.path.basename(input_file)) - # Save visualization - print('Saving {} to {}'.format( - pcolor(input_file, 'cyan', attrs=['bold']), - pcolor(output_file, 'magenta', attrs=['bold']))) - imwrite(output_file, image[:, :, ::-1]) + image = image.to('cuda:{}'.format(rank()), dtype=dtype) + + # Depth inference (returns predicted inverse depth) + pred_inv_depth = model_wrapper.depth(image)[0] + + if save_npz: + # Get depth from predicted depth map and save to .npz + depth = inv2depth(pred_inv_depth).squeeze().detach().cpu().numpy() + output_file = os.path.splitext(output_file)[0] + ".npz" + print('Saving {} to {}'.format( + pcolor(input_file, 'cyan', attrs=['bold']), + pcolor(output_file, 'magenta', attrs=['bold']))) + np.savez_compressed(output_file, depth=depth) + else: + # Prepare RGB image + rgb = image[0].permute(1, 2, 0).detach().cpu().numpy() * 255 + # Prepare inverse depth + viz_pred_inv_depth = viz_inv_depth(pred_inv_depth[0]) * 255 + # Concatenate both vertically + image = np.concatenate([rgb, viz_pred_inv_depth], 0) + # Save visualization + print('Saving {} to {}'.format( + pcolor(input_file, 'cyan', attrs=['bold']), + pcolor(output_file, 'magenta', attrs=['bold']))) + imwrite(output_file, image[:, :, ::-1]) -def infer(ckpt_file, input_file, output_file, image_shape): - """ - Monocular depth estimation test script. +def main(args): - Parameters - ---------- - ckpt_file : str - Checkpoint path for a pretrained model - input_file : str - File or folder with input images - output_file : str - File or folder with output images - image_shape : tuple - Input image shape (H,W) - """ # Initialize horovod hvd_init() # Parse arguments - config, state_dict = parse_test_file(ckpt_file) + config, state_dict = parse_test_file(args.checkpoint) # If no image shape is provided, use the checkpoint one + image_shape = args.image_shape if image_shape is None: image_shape = config.datasets.augmentation.image_shape @@ -124,26 +128,33 @@ def infer(ckpt_file, input_file, output_file, image_shape): # Restore monodepth_model state model_wrapper.load_state_dict(state_dict) + # change to half precision for evaluation if requested + dtype = torch.float16 if args.half else None + # Send model to GPU if available if torch.cuda.is_available(): - model_wrapper = model_wrapper.to('cuda:{}'.format(rank())) + model_wrapper = model_wrapper.to('cuda:{}'.format(rank()), dtype=dtype) + + # Set to eval mode + model_wrapper.eval() - if os.path.isdir(input_file): + if os.path.isdir(args.input): # If input file is a folder, search for image files files = [] for ext in ['png', 'jpg']: - files.extend(glob((os.path.join(input_file, '*.{}'.format(ext))))) + files.extend(glob((os.path.join(args.input, '*.{}'.format(ext))))) files.sort() print0('Found {} files'.format(len(files))) else: # Otherwise, use it as is - files = [input_file] + files = [args.input] # Process each file - for file in files[rank()::world_size()]: - process(file, output_file, model_wrapper, image_shape) + for fn in files[rank()::world_size()]: + infer_and_save_depth( + fn, args.output, model_wrapper, image_shape, args.half, args.save_npz) if __name__ == '__main__': args = parse_args() - infer(args.checkpoint, args.input, args.output, args.image_shape) + main(args)
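
With cameras now a "double list" (one sub-list per dataset), a multi-dataset setup can assign each dataset its own camera subset. A minimal sketch against the yacs configuration, assuming configs.default_config exposes the cfg node used throughout this patch; all paths and camera names below are illustrative, not taken from a real split:

    from configs.default_config import cfg

    # Two DGP datasets, each with its own camera sub-list (illustrative values).
    cfg.datasets.train.dataset = ['DGP', 'DGP']
    cfg.datasets.train.path = ['/data/sets/front.json', '/data/sets/sides.json']
    cfg.datasets.train.split = ['train', 'train']
    # One sub-list per dataset; a legacy flat list such as ['camera_01'] would be
    # wrapped into [['camera_01']] by prep_dataset and expanded to both datasets.
    cfg.datasets.train.cameras = [['camera_01'], ['camera_05', 'camera_06']]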
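generate_depth_map in DGPDataset projects the lidar point cloud into each camera once and caches the result, so later epochs only pay for an .npz read. A sketch of the read-back path, with a made-up cache location; the real path mirrors the datum's rgb path with 'rgb' replaced by 'depth/<depth_type>':

    import os
    import numpy as np

    # Illustrative cache file for depth_type='lidar'; actual paths depend on
    # the DGP scene layout.
    cache_file = '/data/sets/scene_000/depth/lidar/camera_01/000001.npz'
    if os.path.exists(cache_file):
        depth = np.load(cache_file)['depth']  # np.array [H, W], written by np.savez_compressed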
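The velocity term compares only translation magnitudes between predicted and ground-truth context poses, which is enough to pin down the metric scale of an otherwise scale-ambiguous self-supervised model. A worked numeric sketch with made-up 4x4 pose matrices (the real loss receives Pose objects for the predictions and raw matrices for the ground truth):

    import torch

    pred_mat = torch.eye(4).repeat(1, 1, 1)            # [B,4,4], B=1
    gt_mat = torch.eye(4).repeat(1, 1, 1)
    pred_mat[:, :3, -1] = torch.tensor([0., 0., 1.5])  # made-up translations
    gt_mat[:, :3, -1] = torch.tensor([0., 0., 1.2])

    pred_trans = pred_mat[:, :3, -1].norm(dim=-1)      # ||t_pred|| = 1.5
    gt_trans = gt_mat[:, :3, -1].norm(dim=-1)          # ||t_gt||   = 1.2
    loss = (pred_trans - gt_trans).abs().mean()        # L1 on magnitudes = 0.3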
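stack_batch lets the rest of the pipeline stay single-camera: a [1, N, C, H, W] multi-camera tensor is reduced to [N, C, H, W], so the N cameras act as the batch. A shape-only illustration with random data (the real function also unwraps lists of tensors and leaves non-tensor entries intact):

    import torch

    batch = {'rgb': torch.rand(1, 6, 3, 384, 640),       # 6 cameras, made-up resolution
             'intrinsics': torch.rand(1, 6, 3, 3)}
    if len(batch['rgb'].shape) == 5:
        assert batch['rgb'].shape[0] == 1                # only batch size 1 is supported
        batch = {key: val[0] for key, val in batch.items()}
    print(batch['rgb'].shape)                            # torch.Size([6, 3, 384, 640])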
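A quick smoke test for the slimmer network: the version string follows the existing PackNet01 convention (second character selects concatenation 'A' or addition 'B' skips), and the input height and width must be divisible by 32 to survive the five packing stages. This assumes the packnet_sfm package is importable:

    import torch
    from packnet_sfm.networks.depth.PackNetSlim01 import PackNetSlim01

    net = PackNetSlim01(version='1A').eval()
    with torch.no_grad():
        inv_depth = net(torch.rand(1, 3, 192, 640))  # H and W divisible by 2**5
    print(inv_depth.shape)  # expected: torch.Size([1, 1, 192, 640]), single scale in eval mode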
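For fp16 evaluation, both eval.py and infer.py thread the --half flag down as a dtype: the model is moved with .to('cuda', dtype=torch.float16), and sample_to_cuda casts only floating-point tensors, leaving integer fields such as indices untouched. The core rule, as a standalone sketch with an illustrative helper name (running it requires a CUDA device):

    import torch

    def cast_and_move(data, dtype=torch.float16):
        # Cast floats to the requested dtype; keep non-float tensors unchanged
        dtype = dtype if torch.is_floating_point(data) else None
        return data.to('cuda', dtype=dtype)

    # cast_and_move(torch.rand(2, 3)).dtype      -> torch.float16
    # cast_and_move(torch.tensor([1, 2])).dtype  -> torch.int64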