Commit fa5a2470 authored by huangzhuofei

add save vis

No related merge requests
Showing 2760 additions and 1054 deletions
......@@ -17,18 +17,32 @@ import torch.nn as nn
import torch.utils.checkpoint
from torch.nn.init import trunc_normal_
from .dinov2_layers import Mlp, PatchEmbed, SwiGLUFFNFused, MemEffAttention, NestedTensorBlock as Block
from .dinov2_layers import (
Mlp,
PatchEmbed,
SwiGLUFFNFused,
MemEffAttention,
NestedTensorBlock as Block,
)
logger = logging.getLogger("dinov2")
def named_apply(fn: Callable, module: nn.Module, name="", depth_first=True, include_root=False) -> nn.Module:
def named_apply(
fn: Callable, module: nn.Module, name="", depth_first=True, include_root=False
) -> nn.Module:
if not depth_first and include_root:
fn(module=module, name=name)
for child_name, child_module in module.named_children():
child_name = ".".join((name, child_name)) if name else child_name
named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True)
named_apply(
fn=fn,
module=child_module,
name=child_name,
depth_first=depth_first,
include_root=True,
)
if depth_first and include_root:
fn(module=module, name=name)
return module
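# Hedged usage sketch (the initializer below is illustrative, not part of this
# commit): named_apply walks the module tree and invokes fn with keyword
# arguments (module=..., name=...), so it is a convenient hook for per-module
# weight initialization.
def _init_linear(module: nn.Module, name: str = ""):
    if isinstance(module, nn.Linear):
        trunc_normal_(module.weight, std=0.02)
        if module.bias is not None:
            nn.init.zeros_(module.bias)

# named_apply(_init_linear, model)  # model: any nn.Module instance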
......@@ -94,7 +108,9 @@ class DinoVisionTransformer(nn.Module):
super().__init__()
norm_layer = partial(nn.LayerNorm, eps=1e-6)
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.num_features = (
self.embed_dim
) = embed_dim # num_features for consistency with other models
self.num_tokens = 1
self.n_blocks = depth
self.num_heads = num_heads
......@@ -103,20 +119,31 @@ class DinoVisionTransformer(nn.Module):
self.interpolate_antialias = interpolate_antialias
self.interpolate_offset = interpolate_offset
self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
self.patch_embed = embed_layer(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
self.pos_embed = nn.Parameter(
torch.zeros(1, num_patches + self.num_tokens, embed_dim)
)
assert num_register_tokens >= 0
self.register_tokens = (
nn.Parameter(torch.zeros(1, num_register_tokens, embed_dim)) if num_register_tokens else None
nn.Parameter(torch.zeros(1, num_register_tokens, embed_dim))
if num_register_tokens
else None
)
if drop_path_uniform is True:
dpr = [drop_path_rate] * depth
else:
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, depth)
] # stochastic depth decay rule
if ffn_layer == "mlp":
logger.info("using MLP layer as FFN")
......@@ -156,7 +183,9 @@ class DinoVisionTransformer(nn.Module):
chunksize = depth // block_chunks
for i in range(0, depth, chunksize):
# this is to keep the block index consistent if we chunk the block list
chunked_blocks.append([nn.Identity()] * i + blocks_list[i : i + chunksize])
chunked_blocks.append(
[nn.Identity()] * i + blocks_list[i : i + chunksize]
)
self.blocks = nn.ModuleList([BlockChunk(p) for p in chunked_blocks])
else:
self.chunked_blocks = False
......@@ -208,13 +237,17 @@ class DinoVisionTransformer(nn.Module):
)
assert (w0, h0) == patch_pos_embed.shape[-2:]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1).to(previous_dtype)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1).to(
previous_dtype
)
def prepare_tokens_with_masks(self, x, masks=None):
B, nc, w, h = x.shape
x = self.patch_embed(x)
if masks is not None:
x = torch.where(masks.unsqueeze(-1), self.mask_token.to(x.dtype).unsqueeze(0), x)
x = torch.where(
masks.unsqueeze(-1), self.mask_token.to(x.dtype).unsqueeze(0), x
)
x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
x = x + self.interpolate_pos_encoding(x, w, h)
......@@ -232,7 +265,10 @@ class DinoVisionTransformer(nn.Module):
return x
def forward_features_list(self, x_list, masks_list):
x = [self.prepare_tokens_with_masks(x, masks) for x, masks in zip(x_list, masks_list)]
x = [
self.prepare_tokens_with_masks(x, masks)
for x, masks in zip(x_list, masks_list)
]
for blk in self.blocks:
x = blk(x)
......@@ -273,26 +309,34 @@ class DinoVisionTransformer(nn.Module):
x = self.prepare_tokens_with_masks(x)
# If n is an int, take the last n blocks. If it's a list, take the blocks at those indices
output, total_block_len = [], len(self.blocks)
blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n
blocks_to_take = (
range(total_block_len - n, total_block_len) if isinstance(n, int) else n
)
for i, blk in enumerate(self.blocks):
x = blk(x)
if i in blocks_to_take:
output.append(x)
assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found"
assert len(output) == len(
blocks_to_take
), f"only {len(output)} / {len(blocks_to_take)} blocks found"
return output
def _get_intermediate_layers_chunked(self, x, n=1):
x = self.prepare_tokens_with_masks(x)
output, i, total_block_len = [], 0, len(self.blocks[-1])
# If n is an int, take the last n blocks. If it's a list, take the blocks at those indices
blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n
blocks_to_take = (
range(total_block_len - n, total_block_len) if isinstance(n, int) else n
)
for block_chunk in self.blocks:
for blk in block_chunk[i:]: # skip the leading nn.Identity() placeholders
x = blk(x)
if i in blocks_to_take:
output.append(x)
i += 1
assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found"
assert len(output) == len(
blocks_to_take
), f"only {len(output)} / {len(blocks_to_take)} blocks found"
return output
def get_intermediate_layers(
......
......@@ -7,34 +7,36 @@ import math
class ResidualBlock(nn.Module):
def __init__(self, in_planes, planes, norm_fn='group', stride=1):
def __init__(self, in_planes, planes, norm_fn="group", stride=1):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride)
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, padding=1, stride=stride
)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
self.relu = nn.ReLU(inplace=True)
num_groups = planes // 8
if norm_fn == 'group':
if norm_fn == "group":
self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
if not (stride == 1 and in_planes == planes):
self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
elif norm_fn == 'batch':
elif norm_fn == "batch":
self.norm1 = nn.BatchNorm2d(planes)
self.norm2 = nn.BatchNorm2d(planes)
if not (stride == 1 and in_planes == planes):
self.norm3 = nn.BatchNorm2d(planes)
elif norm_fn == 'instance':
elif norm_fn == "instance":
self.norm1 = nn.InstanceNorm2d(planes)
self.norm2 = nn.InstanceNorm2d(planes)
if not (stride == 1 and in_planes == planes):
self.norm3 = nn.InstanceNorm2d(planes)
elif norm_fn == 'none':
elif norm_fn == "none":
self.norm1 = nn.Sequential()
self.norm2 = nn.Sequential()
if not (stride == 1 and in_planes == planes):
......@@ -42,11 +44,11 @@ class ResidualBlock(nn.Module):
if stride == 1 and in_planes == planes:
self.downsample = None
else:
self.downsample = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3)
else:
self.downsample = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3
)
def forward(self, x):
y = x
......@@ -60,43 +62,44 @@ class ResidualBlock(nn.Module):
if self.downsample is not None:
x = self.downsample(x)
return self.relu(x+y)
return self.relu(x + y)
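# Hedged shape sketch (tensor sizes are illustrative): when stride != 1 or
# in_planes != planes, the 1x1 downsample branch above keeps the skip
# connection shape-compatible with the main path.
block = ResidualBlock(64, 96, norm_fn="instance", stride=2)
x = torch.randn(1, 64, 80, 80)
y = block(x)  # -> (1, 96, 40, 40); the identity path is downsampled too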
class BottleneckBlock(nn.Module):
def __init__(self, in_planes, planes, norm_fn='group', stride=1):
def __init__(self, in_planes, planes, norm_fn="group", stride=1):
super(BottleneckBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes//4, kernel_size=1, padding=0)
self.conv2 = nn.Conv2d(planes//4, planes//4, kernel_size=3, padding=1, stride=stride)
self.conv3 = nn.Conv2d(planes//4, planes, kernel_size=1, padding=0)
self.conv1 = nn.Conv2d(in_planes, planes // 4, kernel_size=1, padding=0)
self.conv2 = nn.Conv2d(
planes // 4, planes // 4, kernel_size=3, padding=1, stride=stride
)
self.conv3 = nn.Conv2d(planes // 4, planes, kernel_size=1, padding=0)
self.relu = nn.ReLU(inplace=True)
num_groups = planes // 8
if norm_fn == 'group':
self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4)
self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4)
if norm_fn == "group":
self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes // 4)
self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes // 4)
self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
if not stride == 1:
self.norm4 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
elif norm_fn == 'batch':
self.norm1 = nn.BatchNorm2d(planes//4)
self.norm2 = nn.BatchNorm2d(planes//4)
elif norm_fn == "batch":
self.norm1 = nn.BatchNorm2d(planes // 4)
self.norm2 = nn.BatchNorm2d(planes // 4)
self.norm3 = nn.BatchNorm2d(planes)
if not stride == 1:
self.norm4 = nn.BatchNorm2d(planes)
elif norm_fn == 'instance':
self.norm1 = nn.InstanceNorm2d(planes//4)
self.norm2 = nn.InstanceNorm2d(planes//4)
elif norm_fn == "instance":
self.norm1 = nn.InstanceNorm2d(planes // 4)
self.norm2 = nn.InstanceNorm2d(planes // 4)
self.norm3 = nn.InstanceNorm2d(planes)
if not stride == 1:
self.norm4 = nn.InstanceNorm2d(planes)
elif norm_fn == 'none':
elif norm_fn == "none":
self.norm1 = nn.Sequential()
self.norm2 = nn.Sequential()
self.norm3 = nn.Sequential()
......@@ -105,11 +108,11 @@ class BottleneckBlock(nn.Module):
if stride == 1:
self.downsample = None
else:
self.downsample = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm4)
else:
self.downsample = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm4
)
def forward(self, x):
y = x
......@@ -120,31 +123,34 @@ class BottleneckBlock(nn.Module):
if self.downsample is not None:
x = self.downsample(x)
return self.relu(x+y)
return self.relu(x + y)
class BasicEncoder(nn.Module):
def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0, downsample=3):
def __init__(self, output_dim=128, norm_fn="batch", dropout=0.0, downsample=3):
super(BasicEncoder, self).__init__()
self.norm_fn = norm_fn
self.downsample = downsample
if self.norm_fn == 'group':
if self.norm_fn == "group":
self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64)
elif self.norm_fn == 'batch':
elif self.norm_fn == "batch":
self.norm1 = nn.BatchNorm2d(64)
elif self.norm_fn == 'instance':
elif self.norm_fn == "instance":
self.norm1 = nn.InstanceNorm2d(64)
elif self.norm_fn == 'none':
elif self.norm_fn == "none":
self.norm1 = nn.Sequential()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=1 + (downsample > 2), padding=3)
self.conv1 = nn.Conv2d(
3, 64, kernel_size=7, stride=1 + (downsample > 2), padding=3
)
self.relu1 = nn.ReLU(inplace=True)
self.in_planes = 64
self.layer1 = self._make_layer(64, stride=1)
self.layer1 = self._make_layer(64, stride=1)
self.layer2 = self._make_layer(96, stride=1 + (downsample > 1))
self.layer3 = self._make_layer(128, stride=1 + (downsample > 0))
......@@ -157,7 +163,7 @@ class BasicEncoder(nn.Module):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
......@@ -168,11 +174,10 @@ class BasicEncoder(nn.Module):
layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)
layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1)
layers = (layer1, layer2)
self.in_planes = dim
return nn.Sequential(*layers)
def forward(self, x, dual_inp=False):
# if input is list, combine batch dimension
......@@ -196,8 +201,9 @@ class BasicEncoder(nn.Module):
return x
class MultiBasicEncoder(nn.Module):
def __init__(self, output_dim=[128], norm_fn='batch', dropout=0.0, downsample=3):
def __init__(self, output_dim=[128], norm_fn="batch", dropout=0.0, downsample=3):
super(MultiBasicEncoder, self).__init__()
self.norm_fn = norm_fn
self.downsample = downsample
......@@ -205,19 +211,21 @@ class MultiBasicEncoder(nn.Module):
# self.norm_111 = nn.BatchNorm2d(128, affine=False, track_running_stats=False)
# self.norm_222 = nn.BatchNorm2d(128, affine=False, track_running_stats=False)
if self.norm_fn == 'group':
if self.norm_fn == "group":
self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64)
elif self.norm_fn == 'batch':
elif self.norm_fn == "batch":
self.norm1 = nn.BatchNorm2d(64)
elif self.norm_fn == 'instance':
elif self.norm_fn == "instance":
self.norm1 = nn.InstanceNorm2d(64)
elif self.norm_fn == 'none':
elif self.norm_fn == "none":
self.norm1 = nn.Sequential()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=1 + (downsample > 2), padding=3)
self.conv1 = nn.Conv2d(
3, 64, kernel_size=7, stride=1 + (downsample > 2), padding=3
)
self.relu1 = nn.ReLU(inplace=True)
self.in_planes = 64
......@@ -228,11 +236,12 @@ class MultiBasicEncoder(nn.Module):
self.layer5 = self._make_layer(128, stride=2)
output_list = []
for dim in output_dim:
conv_out = nn.Sequential(
ResidualBlock(128, 128, self.norm_fn, stride=1),
nn.Conv2d(128, dim[2], 3, padding=1))
nn.Conv2d(128, dim[2], 3, padding=1),
)
output_list.append(conv_out)
self.outputs04 = nn.ModuleList(output_list)
......@@ -241,7 +250,8 @@ class MultiBasicEncoder(nn.Module):
for dim in output_dim:
conv_out = nn.Sequential(
ResidualBlock(128, 128, self.norm_fn, stride=1),
nn.Conv2d(128, dim[1], 3, padding=1))
nn.Conv2d(128, dim[1], 3, padding=1),
)
output_list.append(conv_out)
self.outputs08 = nn.ModuleList(output_list)
......@@ -260,7 +270,7 @@ class MultiBasicEncoder(nn.Module):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
......@@ -285,7 +295,7 @@ class MultiBasicEncoder(nn.Module):
x = self.layer3(x)
if dual_inp:
v = x
x = x[:(x.shape[0]//2)]
x = x[: (x.shape[0] // 2)]
outputs04 = [f(x) for f in self.outputs04]
if num_layers == 1:
......@@ -300,7 +310,12 @@ class MultiBasicEncoder(nn.Module):
z = self.layer5(y)
outputs16 = [f(z) for f in self.outputs16]
return (outputs04, outputs08, outputs16, v) if dual_inp else (outputs04, outputs08, outputs16)
return (
(outputs04, outputs08, outputs16, v)
if dual_inp
else (outputs04, outputs08, outputs16)
)
class SubModule(nn.Module):
def __init__(self):
......@@ -310,10 +325,15 @@ class SubModule(nn.Module):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.Conv3d):
n = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
n = (
m.kernel_size[0]
* m.kernel_size[1]
* m.kernel_size[2]
* m.out_channels
)
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
......@@ -325,24 +345,33 @@ class SubModule(nn.Module):
class Feature(SubModule):
def __init__(self):
super(Feature, self).__init__()
pretrained = True
checkpoint_path="/root/.cache/torch/hub/checkpoints/mobilenetv2_100_ra-b33bc2c4.pth"
model = timm.create_model('mobilenetv2_100', pretrained=pretrained, features_only=True, pretrained_cfg_overlay=dict(file=checkpoint_path))
layers = [1,2,3,5,6]
pretrained = True
checkpoint_path = (
"/root/.cache/torch/hub/checkpoints/mobilenetv2_100_ra-b33bc2c4.pth"
)
model = timm.create_model(
"mobilenetv2_100",
pretrained=pretrained,
features_only=True,
pretrained_cfg_overlay=dict(file=checkpoint_path),
)
layers = [1, 2, 3, 5, 6]
chans = [16, 24, 32, 96, 160]
self.conv_stem = model.conv_stem
self.bn1 = model.bn1
self.block0 = torch.nn.Sequential(*model.blocks[0:layers[0]])
self.block1 = torch.nn.Sequential(*model.blocks[layers[0]:layers[1]])
self.block2 = torch.nn.Sequential(*model.blocks[layers[1]:layers[2]])
self.block3 = torch.nn.Sequential(*model.blocks[layers[2]:layers[3]])
self.block4 = torch.nn.Sequential(*model.blocks[layers[3]:layers[4]])
self.block0 = torch.nn.Sequential(*model.blocks[0 : layers[0]])
self.block1 = torch.nn.Sequential(*model.blocks[layers[0] : layers[1]])
self.block2 = torch.nn.Sequential(*model.blocks[layers[1] : layers[2]])
self.block3 = torch.nn.Sequential(*model.blocks[layers[2] : layers[3]])
self.block4 = torch.nn.Sequential(*model.blocks[layers[3] : layers[4]])
self.deconv32_16 = Conv2x_IN(chans[4], chans[3], deconv=True, concat=True)
self.deconv16_8 = Conv2x_IN(chans[3]*2, chans[2], deconv=True, concat=True)
self.deconv8_4 = Conv2x_IN(chans[2]*2, chans[1], deconv=True, concat=True)
self.conv4 = BasicConv_IN(chans[1]*2, chans[1]*2, kernel_size=3, stride=1, padding=1)
self.deconv16_8 = Conv2x_IN(chans[3] * 2, chans[2], deconv=True, concat=True)
self.deconv8_4 = Conv2x_IN(chans[2] * 2, chans[1], deconv=True, concat=True)
self.conv4 = BasicConv_IN(
chans[1] * 2, chans[1] * 2, kernel_size=3, stride=1, padding=1
)
def forward(self, x):
x = self.bn1(self.conv_stem(x))
......@@ -357,4 +386,3 @@ class Feature(SubModule):
x4 = self.deconv8_4(x8, x4)
x4 = self.conv4(x4)
return [x4, x8, x16, x32]
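# Hedged shape sketch (spatial sizes assume an input divisible by 32; channel
# counts follow chans = [16, 24, 32, 96, 160] and the concat deconvs above;
# constructing Feature() needs the timm MobileNetV2 checkpoint referenced in
# __init__):
feat = Feature()
left = torch.randn(1, 3, 256, 320)
x4, x8, x16, x32 = feat(left)
# x4:  (1, 48, 64, 80)   1/4 resolution, chans[1] * 2
# x8:  (1, 64, 32, 40)   1/8 resolution, chans[2] * 2
# x16: (1, 192, 16, 20)  1/16 resolution, chans[3] * 2
# x32: (1, 160, 8, 10)   1/32 resolution, chans[4]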
......@@ -15,40 +15,41 @@ class Combined_Geo_Encoding_Volume:
b, h, w, _, w2 = init_corr.shape
b, c, d, h, w = geo_volume.shape
geo_volume = geo_volume.permute(0, 3, 4, 1, 2).reshape(b*h*w, c, 1, d)
geo_volume = geo_volume.permute(0, 3, 4, 1, 2).reshape(b * h * w, c, 1, d)
init_corr = init_corr.reshape(b*h*w, 1, 1, w2)
init_corr = init_corr.reshape(b * h * w, 1, 1, w2)
self.geo_volume_pyramid.append(geo_volume)
self.init_corr_pyramid.append(init_corr)
for i in range(self.num_levels-1):
geo_volume = F.avg_pool2d(geo_volume, [1,2], stride=[1,2])
for i in range(self.num_levels - 1):
geo_volume = F.avg_pool2d(geo_volume, [1, 2], stride=[1, 2])
self.geo_volume_pyramid.append(geo_volume)
for i in range(self.num_levels-1):
init_corr = F.avg_pool2d(init_corr, [1,2], stride=[1,2])
for i in range(self.num_levels - 1):
init_corr = F.avg_pool2d(init_corr, [1, 2], stride=[1, 2])
self.init_corr_pyramid.append(init_corr)
def __call__(self, disp, coords):
r = self.radius
b, _, h, w = disp.shape
out_pyramid = []
for i in range(self.num_levels):
geo_volume = self.geo_volume_pyramid[i]
dx = torch.linspace(-r, r, 2*r+1)
dx = dx.view(1, 1, 2*r+1, 1).to(disp.device)
x0 = dx + disp.reshape(b*h*w, 1, 1, 1) / 2**i
dx = torch.linspace(-r, r, 2 * r + 1)
dx = dx.view(1, 1, 2 * r + 1, 1).to(disp.device)
x0 = dx + disp.reshape(b * h * w, 1, 1, 1) / 2**i
y0 = torch.zeros_like(x0)
disp_lvl = torch.cat([x0,y0], dim=-1)
disp_lvl = torch.cat([x0, y0], dim=-1)
geo_volume = bilinear_sampler(geo_volume, disp_lvl)
geo_volume = geo_volume.view(b, h, w, -1)
init_corr = self.init_corr_pyramid[i]
init_x0 = coords.reshape(b*h*w, 1, 1, 1)/2**i - disp.reshape(b*h*w, 1, 1, 1) / 2**i + dx
init_coords_lvl = torch.cat([init_x0,y0], dim=-1)
init_x0 = (
coords.reshape(b * h * w, 1, 1, 1) / 2**i
- disp.reshape(b * h * w, 1, 1, 1) / 2**i
+ dx
)
init_coords_lvl = torch.cat([init_x0, y0], dim=-1)
init_corr = bilinear_sampler(init_corr, init_coords_lvl)
init_corr = init_corr.view(b, h, w, -1)
......@@ -57,13 +58,12 @@ class Combined_Geo_Encoding_Volume:
out = torch.cat(out_pyramid, dim=-1)
return out.permute(0, 3, 1, 2).contiguous().float()
@staticmethod
def corr(fmap1, fmap2):
B, D, H, W1 = fmap1.shape
_, _, _, W2 = fmap2.shape
fmap1 = fmap1.view(B, D, H, W1)
fmap2 = fmap2.view(B, D, H, W2)
corr = torch.einsum('aijk,aijh->ajkh', fmap1, fmap2)
corr = torch.einsum("aijk,aijh->ajkh", fmap1, fmap2)
corr = corr.reshape(B, H, W1, 1, W2).contiguous()
return corr
\ No newline at end of file
return corr
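# Worked shape example (tensors are illustrative): the einsum "aijk,aijh->ajkh"
# computes, per batch element and per image row, the channel-wise dot product
# between every column of fmap1 and every column of fmap2.
fmap1 = torch.randn(2, 32, 40, 64)  # (B, D, H, W1)
fmap2 = torch.randn(2, 32, 40, 64)  # (B, D, H, W2)
c = Combined_Geo_Encoding_Volume.corr(fmap1, fmap2)
# c: (2, 40, 64, 1, 64) == (B, H, W1, 1, W2); c[b, h, x1, 0, x2] correlates
# pixel (h, x1) of fmap1 with pixel (h, x2) of fmap2.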
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -4,24 +4,34 @@ import torch.nn.functional as F
import numpy as np
class BasicConv(nn.Module):
def __init__(self, in_channels, out_channels, deconv=False, is_3d=False, bn=True, relu=True, **kwargs):
def __init__(
self,
in_channels,
out_channels,
deconv=False,
is_3d=False,
bn=True,
relu=True,
**kwargs
):
super(BasicConv, self).__init__()
self.relu = relu
self.use_bn = bn
if is_3d:
if deconv:
self.conv = nn.ConvTranspose3d(in_channels, out_channels, bias=False, **kwargs)
self.conv = nn.ConvTranspose3d(
in_channels, out_channels, bias=False, **kwargs
)
else:
self.conv = nn.Conv3d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm3d(out_channels)
else:
if deconv:
self.conv = nn.ConvTranspose2d(in_channels, out_channels, bias=False, **kwargs)
self.conv = nn.ConvTranspose2d(
in_channels, out_channels, bias=False, **kwargs
)
else:
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels)
......@@ -31,17 +41,27 @@ class BasicConv(nn.Module):
if self.use_bn:
x = self.bn(x)
if self.relu:
x = nn.LeakyReLU()(x)#, inplace=True)
x = nn.LeakyReLU()(x) # , inplace=True)
return x
class Conv2x(nn.Module):
def __init__(self, in_channels, out_channels, deconv=False, is_3d=False, concat=True, keep_concat=True, bn=True, relu=True, keep_dispc=False):
def __init__(
self,
in_channels,
out_channels,
deconv=False,
is_3d=False,
concat=True,
keep_concat=True,
bn=True,
relu=True,
keep_dispc=False,
):
super(Conv2x, self).__init__()
self.concat = concat
self.is_3d = is_3d
if deconv and is_3d:
self.is_3d = is_3d
if deconv and is_3d:
kernel = (4, 4, 4)
elif deconv:
kernel = 4
......@@ -52,47 +72,96 @@ class Conv2x(nn.Module):
kernel = (1, 4, 4)
stride = (1, 2, 2)
padding = (0, 1, 1)
self.conv1 = BasicConv(in_channels, out_channels, deconv, is_3d, bn=True, relu=True, kernel_size=kernel, stride=stride, padding=padding)
self.conv1 = BasicConv(
in_channels,
out_channels,
deconv,
is_3d,
bn=True,
relu=True,
kernel_size=kernel,
stride=stride,
padding=padding,
)
else:
self.conv1 = BasicConv(in_channels, out_channels, deconv, is_3d, bn=True, relu=True, kernel_size=kernel, stride=2, padding=1)
self.conv1 = BasicConv(
in_channels,
out_channels,
deconv,
is_3d,
bn=True,
relu=True,
kernel_size=kernel,
stride=2,
padding=1,
)
if self.concat:
if self.concat:
mul = 2 if keep_concat else 1
self.conv2 = BasicConv(out_channels*2, out_channels*mul, False, is_3d, bn, relu, kernel_size=3, stride=1, padding=1)
self.conv2 = BasicConv(
out_channels * 2,
out_channels * mul,
False,
is_3d,
bn,
relu,
kernel_size=3,
stride=1,
padding=1,
)
else:
self.conv2 = BasicConv(out_channels, out_channels, False, is_3d, bn, relu, kernel_size=3, stride=1, padding=1)
self.conv2 = BasicConv(
out_channels,
out_channels,
False,
is_3d,
bn,
relu,
kernel_size=3,
stride=1,
padding=1,
)
def forward(self, x, rem):
x = self.conv1(x)
if x.shape != rem.shape:
x = F.interpolate(
x,
size=(rem.shape[-2], rem.shape[-1]),
mode='nearest')
x = F.interpolate(x, size=(rem.shape[-2], rem.shape[-1]), mode="nearest")
if self.concat:
x = torch.cat((x, rem), 1)
else:
else:
x = x + rem
x = self.conv2(x)
return x
class BasicConv_IN(nn.Module):
def __init__(self, in_channels, out_channels, deconv=False, is_3d=False, IN=True, relu=True, **kwargs):
def __init__(
self,
in_channels,
out_channels,
deconv=False,
is_3d=False,
IN=True,
relu=True,
**kwargs
):
super(BasicConv_IN, self).__init__()
self.relu = relu
self.use_in = IN
if is_3d:
if deconv:
self.conv = nn.ConvTranspose3d(in_channels, out_channels, bias=False, **kwargs)
self.conv = nn.ConvTranspose3d(
in_channels, out_channels, bias=False, **kwargs
)
else:
self.conv = nn.Conv3d(in_channels, out_channels, bias=False, **kwargs)
self.IN = nn.InstanceNorm3d(out_channels)
else:
if deconv:
self.conv = nn.ConvTranspose2d(in_channels, out_channels, bias=False, **kwargs)
self.conv = nn.ConvTranspose2d(
in_channels, out_channels, bias=False, **kwargs
)
else:
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.IN = nn.InstanceNorm2d(out_channels)
......@@ -102,17 +171,27 @@ class BasicConv_IN(nn.Module):
if self.use_in:
x = self.IN(x)
if self.relu:
x = nn.LeakyReLU()(x)#, inplace=True)
x = nn.LeakyReLU()(x) # , inplace=True)
return x
class Conv2x_IN(nn.Module):
def __init__(self, in_channels, out_channels, deconv=False, is_3d=False, concat=True, keep_concat=True, IN=True, relu=True, keep_dispc=False):
def __init__(
self,
in_channels,
out_channels,
deconv=False,
is_3d=False,
concat=True,
keep_concat=True,
IN=True,
relu=True,
keep_dispc=False,
):
super(Conv2x_IN, self).__init__()
self.concat = concat
self.is_3d = is_3d
if deconv and is_3d:
self.is_3d = is_3d
if deconv and is_3d:
kernel = (4, 4, 4)
elif deconv:
kernel = 4
......@@ -123,26 +202,63 @@ class Conv2x_IN(nn.Module):
kernel = (1, 4, 4)
stride = (1, 2, 2)
padding = (0, 1, 1)
self.conv1 = BasicConv_IN(in_channels, out_channels, deconv, is_3d, IN=True, relu=True, kernel_size=kernel, stride=stride, padding=padding)
self.conv1 = BasicConv_IN(
in_channels,
out_channels,
deconv,
is_3d,
IN=True,
relu=True,
kernel_size=kernel,
stride=stride,
padding=padding,
)
else:
self.conv1 = BasicConv_IN(in_channels, out_channels, deconv, is_3d, IN=True, relu=True, kernel_size=kernel, stride=2, padding=1)
self.conv1 = BasicConv_IN(
in_channels,
out_channels,
deconv,
is_3d,
IN=True,
relu=True,
kernel_size=kernel,
stride=2,
padding=1,
)
if self.concat:
if self.concat:
mul = 2 if keep_concat else 1
self.conv2 = BasicConv_IN(out_channels*2, out_channels*mul, False, is_3d, IN, relu, kernel_size=3, stride=1, padding=1)
self.conv2 = BasicConv_IN(
out_channels * 2,
out_channels * mul,
False,
is_3d,
IN,
relu,
kernel_size=3,
stride=1,
padding=1,
)
else:
self.conv2 = BasicConv_IN(out_channels, out_channels, False, is_3d, IN, relu, kernel_size=3, stride=1, padding=1)
self.conv2 = BasicConv_IN(
out_channels,
out_channels,
False,
is_3d,
IN,
relu,
kernel_size=3,
stride=1,
padding=1,
)
def forward(self, x, rem):
x = self.conv1(x)
if x.shape != rem.shape:
x = F.interpolate(
x,
size=(rem.shape[-2], rem.shape[-1]),
mode='nearest')
x = F.interpolate(x, size=(rem.shape[-2], rem.shape[-1]), mode="nearest")
if self.concat:
x = torch.cat((x, rem), 1)
else:
else:
x = x + rem
x = self.conv2(x)
return x
......@@ -156,53 +272,68 @@ def groupwise_correlation(fea1, fea2, num_groups):
assert cost.shape == (B, num_groups, H, W)
return cost
def build_gwc_volume(refimg_fea, targetimg_fea, maxdisp, num_groups):
B, C, H, W = refimg_fea.shape
volume = refimg_fea.new_zeros([B, num_groups, maxdisp, H, W])
for i in range(maxdisp):
if i > 0:
volume[:, :, i, :, i:] = groupwise_correlation(refimg_fea[:, :, :, i:], targetimg_fea[:, :, :, :-i],
num_groups)
volume[:, :, i, :, i:] = groupwise_correlation(
refimg_fea[:, :, :, i:], targetimg_fea[:, :, :, :-i], num_groups
)
else:
volume[:, :, i, :, :] = groupwise_correlation(refimg_fea, targetimg_fea, num_groups)
volume[:, :, i, :, :] = groupwise_correlation(
refimg_fea, targetimg_fea, num_groups
)
volume = volume.contiguous()
return volume
def norm_correlation(fea1, fea2):
cost = torch.mean(((fea1/(torch.norm(fea1, 2, 1, True)+1e-05)) * (fea2/(torch.norm(fea2, 2, 1, True)+1e-05))), dim=1, keepdim=True)
cost = torch.mean(
(
(fea1 / (torch.norm(fea1, 2, 1, True) + 1e-05))
* (fea2 / (torch.norm(fea2, 2, 1, True) + 1e-05))
),
dim=1,
keepdim=True,
)
return cost
def build_norm_correlation_volume(refimg_fea, targetimg_fea, maxdisp):
B, C, H, W = refimg_fea.shape
volume = refimg_fea.new_zeros([B, 1, maxdisp, H, W])
for i in range(maxdisp):
if i > 0:
volume[:, :, i, :, i:] = norm_correlation(refimg_fea[:, :, :, i:], targetimg_fea[:, :, :, :-i])
volume[:, :, i, :, i:] = norm_correlation(
refimg_fea[:, :, :, i:], targetimg_fea[:, :, :, :-i]
)
else:
volume[:, :, i, :, :] = norm_correlation(refimg_fea, targetimg_fea)
volume = volume.contiguous()
return volume
def correlation(fea1, fea2):
cost = torch.sum((fea1 * fea2), dim=1, keepdim=True)
return cost
def build_correlation_volume(refimg_fea, targetimg_fea, maxdisp):
B, C, H, W = refimg_fea.shape
volume = refimg_fea.new_zeros([B, 1, maxdisp, H, W])
for i in range(maxdisp):
if i > 0:
volume[:, :, i, :, i:] = correlation(refimg_fea[:, :, :, i:], targetimg_fea[:, :, :, :-i])
volume[:, :, i, :, i:] = correlation(
refimg_fea[:, :, :, i:], targetimg_fea[:, :, :, :-i]
)
else:
volume[:, :, i, :, :] = correlation(refimg_fea, targetimg_fea)
volume = volume.contiguous()
return volume
def build_concat_volume(refimg_fea, targetimg_fea, maxdisp):
B, C, H, W = refimg_fea.shape
volume = refimg_fea.new_zeros([B, 2 * C, maxdisp, H, W])
......@@ -216,6 +347,7 @@ def build_concat_volume(refimg_fea, targetimg_fea, maxdisp):
volume = volume.contiguous()
return volume
def disparity_regression(x, maxdisp):
assert len(x.shape) == 4
disp_values = torch.arange(0, maxdisp, dtype=x.dtype, device=x.device)
......@@ -228,26 +360,29 @@ class FeatureAtt(nn.Module):
super(FeatureAtt, self).__init__()
self.feat_att = nn.Sequential(
BasicConv(feat_chan, feat_chan//2, kernel_size=1, stride=1, padding=0),
nn.Conv2d(feat_chan//2, cv_chan, 1))
BasicConv(feat_chan, feat_chan // 2, kernel_size=1, stride=1, padding=0),
nn.Conv2d(feat_chan // 2, cv_chan, 1),
)
def forward(self, cv, feat):
'''
'''
""" """
feat_att = self.feat_att(feat).unsqueeze(2)
cv = torch.sigmoid(feat_att)*cv
cv = torch.sigmoid(feat_att) * cv
return cv
def context_upsample(disp_low, up_weights):
# disp_low:   (b, 1, h, w) low-resolution disparity
# up_weights: (b, 9, 4*h, 4*w) weights over each pixel's 3x3 neighborhood
b, c, h, w = disp_low.shape
disp_unfold = F.unfold(disp_low.reshape(b,c,h,w),3,1,1).reshape(b,-1,h,w)
disp_unfold = F.interpolate(disp_unfold,(h*4,w*4),mode='nearest').reshape(b,9,h*4,w*4)
disp = (disp_unfold*up_weights).sum(1)
return disp
\ No newline at end of file
disp_unfold = F.unfold(disp_low.reshape(b, c, h, w), 3, 1, 1).reshape(b, -1, h, w)
disp_unfold = F.interpolate(disp_unfold, (h * 4, w * 4), mode="nearest").reshape(
b, 9, h * 4, w * 4
)
disp = (disp_unfold * up_weights).sum(1)
return disp
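# Hedged usage sketch: up_weights is expected to be a convex combination over
# each pixel's 3x3 neighborhood (e.g. a softmax over dim=1, as produced by the
# update block); note the output drops the channel dimension.
disp_low = torch.rand(1, 1, 64, 80)
up_weights = torch.softmax(torch.randn(1, 9, 256, 320), dim=1)
disp = context_upsample(disp_low, up_weights)  # -> (1, 256, 320)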
......@@ -13,6 +13,7 @@ class FlowHead(nn.Module):
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class DispHead(nn.Module):
def __init__(self, input_dim=128, hidden_dim=256, output_dim=1):
super(DispHead, self).__init__()
......@@ -23,12 +24,19 @@ class DispHead(nn.Module):
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class ConvGRU(nn.Module):
def __init__(self, hidden_dim, input_dim, kernel_size=3):
super(ConvGRU, self).__init__()
self.convz = nn.Conv2d(hidden_dim+input_dim, hidden_dim, kernel_size, padding=kernel_size//2)
self.convr = nn.Conv2d(hidden_dim+input_dim, hidden_dim, kernel_size, padding=kernel_size//2)
self.convq = nn.Conv2d(hidden_dim+input_dim, hidden_dim, kernel_size, padding=kernel_size//2)
self.convz = nn.Conv2d(
hidden_dim + input_dim, hidden_dim, kernel_size, padding=kernel_size // 2
)
self.convr = nn.Conv2d(
hidden_dim + input_dim, hidden_dim, kernel_size, padding=kernel_size // 2
)
self.convq = nn.Conv2d(
hidden_dim + input_dim, hidden_dim, kernel_size, padding=kernel_size // 2
)
def forward(self, h, cz, cr, cq, *x_list):
......@@ -36,21 +44,33 @@ class ConvGRU(nn.Module):
hx = torch.cat([h, x], dim=1)
z = torch.sigmoid(self.convz(hx) + cz)
r = torch.sigmoid(self.convr(hx) + cr)
q = torch.tanh(self.convq(torch.cat([r*h, x], dim=1)) + cq)
h = (1-z) * h + z * q
q = torch.tanh(self.convq(torch.cat([r * h, x], dim=1)) + cq)
h = (1 - z) * h + z * q
return h
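# The gate algebra implemented above, written out (cz, cr, cq are per-pixel
# context biases supplied by the caller):
#     z  = sigmoid(Conv_z([h, x]) + cz)    update gate
#     r  = sigmoid(Conv_r([h, x]) + cr)    reset gate
#     q  = tanh(Conv_q([r * h, x]) + cq)   candidate state
#     h' = (1 - z) * h + z * q
# where [., .] denotes channel-wise concatenation.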
class SepConvGRU(nn.Module):
def __init__(self, hidden_dim=128, input_dim=192+128):
def __init__(self, hidden_dim=128, input_dim=192 + 128):
super(SepConvGRU, self).__init__()
self.convz1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
self.convr1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
self.convq1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
self.convz2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
self.convr2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
self.convq2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
self.convz1 = nn.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convr1 = nn.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convq1 = nn.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convz2 = nn.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convr2 = nn.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convq2 = nn.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
def forward(self, h, *x):
# horizontal
......@@ -58,28 +78,29 @@ class SepConvGRU(nn.Module):
hx = torch.cat([h, x], dim=1)
z = torch.sigmoid(self.convz1(hx))
r = torch.sigmoid(self.convr1(hx))
q = torch.tanh(self.convq1(torch.cat([r*h, x], dim=1)))
h = (1-z) * h + z * q
q = torch.tanh(self.convq1(torch.cat([r * h, x], dim=1)))
h = (1 - z) * h + z * q
# vertical
hx = torch.cat([h, x], dim=1)
z = torch.sigmoid(self.convz2(hx))
r = torch.sigmoid(self.convr2(hx))
q = torch.tanh(self.convq2(torch.cat([r*h, x], dim=1)))
h = (1-z) * h + z * q
q = torch.tanh(self.convq2(torch.cat([r * h, x], dim=1)))
h = (1 - z) * h + z * q
return h
class BasicMotionEncoder(nn.Module):
def __init__(self, args):
super(BasicMotionEncoder, self).__init__()
self.args = args
cor_planes = args.corr_levels * (2*args.corr_radius + 1) * (8+1)
cor_planes = args.corr_levels * (2 * args.corr_radius + 1) * (8 + 1)
self.convc1 = nn.Conv2d(cor_planes, 64, 1, padding=0)
self.convc2 = nn.Conv2d(64, 64, 3, padding=1)
self.convd1 = nn.Conv2d(1, 64, 7, padding=3)
self.convd2 = nn.Conv2d(64, 64, 3, padding=1)
self.conv = nn.Conv2d(64+64, 128-1, 3, padding=1)
self.conv = nn.Conv2d(64 + 64, 128 - 1, 3, padding=1)
def forward(self, disp, corr):
cor = F.relu(self.convc1(corr))
......@@ -91,16 +112,20 @@ class BasicMotionEncoder(nn.Module):
out = F.relu(self.conv(cor_disp))
return torch.cat([out, disp], dim=1)
def pool2x(x):
return F.avg_pool2d(x, 3, stride=2, padding=1)
def pool4x(x):
return F.avg_pool2d(x, 5, stride=4, padding=1)
def interp(x, dest):
interp_args = {'mode': 'bilinear', 'align_corners': True}
interp_args = {"mode": "bilinear", "align_corners": True}
return F.interpolate(x, dest.shape[2:], **interp_args)
class BasicMultiUpdateBlock(nn.Module):
def __init__(self, args, hidden_dims=[]):
super().__init__()
......@@ -108,35 +133,61 @@ class BasicMultiUpdateBlock(nn.Module):
self.encoder = BasicMotionEncoder(args)
encoder_output_dim = 128
self.gru04 = ConvGRU(hidden_dims[2], encoder_output_dim + hidden_dims[1] * (args.n_gru_layers > 1))
self.gru04_2 = ConvGRU(hidden_dims[2], 3 + hidden_dims[1] * (args.n_gru_layers > 1))
self.gru08 = ConvGRU(hidden_dims[1], hidden_dims[0] * (args.n_gru_layers == 3) + hidden_dims[2])
self.gru04 = ConvGRU(
hidden_dims[2],
encoder_output_dim + hidden_dims[1] * (args.n_gru_layers > 1),
)
self.gru04_2 = ConvGRU(
hidden_dims[2], 3 + hidden_dims[1] * (args.n_gru_layers > 1)
)
self.gru08 = ConvGRU(
hidden_dims[1], hidden_dims[0] * (args.n_gru_layers == 3) + hidden_dims[2]
)
self.gru16 = ConvGRU(hidden_dims[0], hidden_dims[1])
self.disp_head = DispHead(hidden_dims[2], hidden_dim=256, output_dim=1)
self.normal_head = DispHead(hidden_dims[2], hidden_dim=256, output_dim=3)
factor = 2**self.args.n_downsample
self.mask_feat_4 = nn.Sequential(
nn.Conv2d(hidden_dims[2], 32, 3, padding=1),
nn.ReLU(inplace=True))
nn.Conv2d(hidden_dims[2], 32, 3, padding=1), nn.ReLU(inplace=True)
)
self.mask = nn.Sequential(
nn.Conv2d(hidden_dims[2], (factor**2)*9, 3, padding=1),
nn.ReLU(inplace=True))
def forward(self, net, net_normal, inp, corr=None, disp=None, normal=None, iter04=True, iter08=True, iter16=True, update=True):
nn.Conv2d(hidden_dims[2], (factor**2) * 9, 3, padding=1),
nn.ReLU(inplace=True),
)
def forward(
self,
net,
net_normal,
inp,
corr=None,
disp=None,
normal=None,
iter04=True,
iter08=True,
iter16=True,
update=True,
):
if iter16:
net[2] = self.gru16(net[2], *(inp[2]), pool2x(net[1]))
if iter08:
if self.args.n_gru_layers > 2:
net[1] = self.gru08(net[1], *(inp[1]), pool2x(net[0]), interp(net[2], net[1]))
net[1] = self.gru08(
net[1], *(inp[1]), pool2x(net[0]), interp(net[2], net[1])
)
else:
net[1] = self.gru08(net[1], *(inp[1]), pool2x(net[0]))
if iter04:
motion_features = self.encoder(disp, corr)
if self.args.n_gru_layers > 1:
net[0] = self.gru04(net[0], *(inp[0]), motion_features, interp(net[1], net[0]))
net_normal = self.gru04_2(net_normal, *(inp[0]), normal, interp(net[1], net_normal))
net[0] = self.gru04(
net[0], *(inp[0]), motion_features, interp(net[1], net[0])
)
net_normal = self.gru04_2(
net_normal, *(inp[0]), normal, interp(net[1], net_normal)
)
else:
net[0] = self.gru04(net[0], *(inp[0]), motion_features)
......
......@@ -8,6 +8,7 @@ from skimage import color, io
from PIL import Image
import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
......@@ -15,39 +16,50 @@ import torch
from torchvision.transforms import ColorJitter, functional, Compose
import torch.nn.functional as F
def get_middlebury_images():
root = "datasets/Middlebury/MiddEval3"
with open(os.path.join(root, "official_train.txt"), 'r') as f:
with open(os.path.join(root, "official_train.txt"), "r") as f:
lines = f.read().splitlines()
return sorted([os.path.join(root, 'trainingQ', f'{name}/im0.png') for name in lines])
return sorted(
[os.path.join(root, "trainingQ", f"{name}/im0.png") for name in lines]
)
def get_eth3d_images():
return sorted(glob('datasets/ETH3D/two_view_training/*/im0.png'))
return sorted(glob("datasets/ETH3D/two_view_training/*/im0.png"))
def get_kitti_images():
return sorted(glob('datasets/KITTI/training/image_2/*_10.png'))
return sorted(glob("datasets/KITTI/training/image_2/*_10.png"))
def transfer_color(image, style_mean, style_stddev):
reference_image_lab = color.rgb2lab(image)
reference_stddev = np.std(reference_image_lab, axis=(0,1), keepdims=True)# + 1
reference_mean = np.mean(reference_image_lab, axis=(0,1), keepdims=True)
reference_stddev = np.std(reference_image_lab, axis=(0, 1), keepdims=True) # + 1
reference_mean = np.mean(reference_image_lab, axis=(0, 1), keepdims=True)
reference_image_lab = reference_image_lab - reference_mean
lamb = style_stddev/reference_stddev
lamb = style_stddev / reference_stddev
style_image_lab = lamb * reference_image_lab
output_image_lab = style_image_lab + style_mean
l, a, b = np.split(output_image_lab, 3, axis=2)
l = l.clip(0, 100)
output_image_lab = np.concatenate((l,a,b), axis=2)
output_image_lab = np.concatenate((l, a, b), axis=2)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
output_image_rgb = color.lab2rgb(output_image_lab) * 255
return output_image_rgb
class AdjustGamma(object):
class AdjustGamma(object):
def __init__(self, gamma_min, gamma_max, gain_min=1.0, gain_max=1.0):
self.gamma_min, self.gamma_max, self.gain_min, self.gain_max = gamma_min, gamma_max, gain_min, gain_max
self.gamma_min, self.gamma_max, self.gain_min, self.gain_max = (
gamma_min,
gamma_max,
gain_min,
gain_max,
)
def __call__(self, sample):
gain = random.uniform(self.gain_min, self.gain_max)
......@@ -57,8 +69,18 @@ class AdjustGamma(object):
def __repr__(self):
return f"Adjust Gamma {self.gamma_min}, ({self.gamma_max}) and Gain ({self.gain_min}, {self.gain_max})"
class FlowAugmentor:
def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=True, yjitter=False, saturation_range=[0.6,1.4], gamma=[1,1,1,1]):
def __init__(
self,
crop_size,
min_scale=-0.2,
max_scale=0.5,
do_flip=True,
yjitter=False,
saturation_range=[0.6, 1.4],
gamma=[1, 1, 1, 1],
):
# spatial augmentation params
self.crop_size = crop_size
......@@ -75,12 +97,22 @@ class FlowAugmentor:
self.v_flip_prob = 0.1
# photometric augmentation params
self.photo_aug = Compose([ColorJitter(brightness=0.4, contrast=0.4, saturation=saturation_range, hue=0.5/3.14), AdjustGamma(*gamma)])
self.photo_aug = Compose(
[
ColorJitter(
brightness=0.4,
contrast=0.4,
saturation=saturation_range,
hue=0.5 / 3.14,
),
AdjustGamma(*gamma),
]
)
self.asymmetric_color_aug_prob = 0.2
self.eraser_aug_prob = 0.5
def color_transform(self, img1, img2):
""" Photometric augmentation """
"""Photometric augmentation"""
# asymmetric
if np.random.rand() < self.asymmetric_color_aug_prob:
......@@ -90,13 +122,15 @@ class FlowAugmentor:
# symmetric
else:
image_stack = np.concatenate([img1, img2], axis=0)
image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8)
image_stack = np.array(
self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8
)
img1, img2 = np.split(image_stack, 2, axis=0)
return img1, img2
def eraser_transform(self, img1, img2, bounds=[50, 100]):
""" Occlusion augmentation """
"""Occlusion augmentation"""
ht, wd = img1.shape[:2]
if np.random.rand() < self.eraser_aug_prob:
......@@ -106,7 +140,7 @@ class FlowAugmentor:
y0 = np.random.randint(0, ht)
dx = np.random.randint(bounds[0], bounds[1])
dy = np.random.randint(bounds[0], bounds[1])
img2[y0:y0+dy, x0:x0+dx, :] = mean_color
img2[y0 : y0 + dy, x0 : x0 + dx, :] = mean_color
return img1, img2
......@@ -114,8 +148,8 @@ class FlowAugmentor:
# randomly sample scale
ht, wd = img1.shape[:2]
min_scale = np.maximum(
(self.crop_size[0] + 8) / float(ht),
(self.crop_size[1] + 8) / float(wd))
(self.crop_size[0] + 8) / float(ht), (self.crop_size[1] + 8) / float(wd)
)
scale = 2 ** np.random.uniform(self.min_scale, self.max_scale)
scale_x = scale
......@@ -123,30 +157,38 @@ class FlowAugmentor:
if np.random.rand() < self.stretch_prob:
scale_x *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch)
scale_y *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch)
scale_x = np.clip(scale_x, min_scale, None)
scale_y = np.clip(scale_y, min_scale, None)
if np.random.rand() < self.spatial_aug_prob:
# rescale the images
img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
flow = cv2.resize(flow, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
img1 = cv2.resize(
img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR
)
img2 = cv2.resize(
img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR
)
flow = cv2.resize(
flow, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR
)
flow = flow * [scale_x, scale_y]
if self.do_flip:
if np.random.rand() < self.h_flip_prob and self.do_flip == 'hf': # h-flip
if np.random.rand() < self.h_flip_prob and self.do_flip == "hf": # h-flip
img1 = img1[:, ::-1]
img2 = img2[:, ::-1]
flow = flow[:, ::-1] * [-1.0, 1.0]
if np.random.rand() < self.h_flip_prob and self.do_flip == 'h': # h-flip for stereo
if (
np.random.rand() < self.h_flip_prob and self.do_flip == "h"
): # h-flip for stereo
tmp = img1[:, ::-1]
img1 = img2[:, ::-1]
img2 = tmp
if np.random.rand() < self.v_flip_prob and self.do_flip == 'v': # v-flip
if np.random.rand() < self.v_flip_prob and self.do_flip == "v": # v-flip
img1 = img1[::-1, :]
img2 = img2[::-1, :]
flow = flow[::-1, :] * [1.0, -1.0]
......@@ -156,20 +198,19 @@ class FlowAugmentor:
x0 = np.random.randint(2, img1.shape[1] - self.crop_size[1] - 2)
y1 = y0 + np.random.randint(-2, 2 + 1)
img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
img2 = img2[y1:y1+self.crop_size[0], x0:x0+self.crop_size[1]]
flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
img1 = img1[y0 : y0 + self.crop_size[0], x0 : x0 + self.crop_size[1]]
img2 = img2[y1 : y1 + self.crop_size[0], x0 : x0 + self.crop_size[1]]
flow = flow[y0 : y0 + self.crop_size[0], x0 : x0 + self.crop_size[1]]
else:
y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0])
x0 = np.random.randint(0, img1.shape[1] - self.crop_size[1])
img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
return img1, img2, flow
img1 = img1[y0 : y0 + self.crop_size[0], x0 : x0 + self.crop_size[1]]
img2 = img2[y0 : y0 + self.crop_size[0], x0 : x0 + self.crop_size[1]]
flow = flow[y0 : y0 + self.crop_size[0], x0 : x0 + self.crop_size[1]]
return img1, img2, flow
def __call__(self, img1, img2, flow):
img1, img2 = self.color_transform(img1, img2)
......@@ -182,8 +223,18 @@ class FlowAugmentor:
return img1, img2, flow
class SparseFlowAugmentor:
def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=False, yjitter=False, saturation_range=[0.7,1.3], gamma=[1,1,1,1]):
def __init__(
self,
crop_size,
min_scale=-0.2,
max_scale=0.5,
do_flip=False,
yjitter=False,
saturation_range=[0.7, 1.3],
gamma=[1, 1, 1, 1],
):
# spatial augmentation params
self.crop_size = crop_size
self.min_scale = min_scale
......@@ -198,13 +249,25 @@ class SparseFlowAugmentor:
self.v_flip_prob = 0.1
# photometric augmentation params
self.photo_aug = Compose([ColorJitter(brightness=0.3, contrast=0.3, saturation=saturation_range, hue=0.3/3.14), AdjustGamma(*gamma)])
self.photo_aug = Compose(
[
ColorJitter(
brightness=0.3,
contrast=0.3,
saturation=saturation_range,
hue=0.3 / 3.14,
),
AdjustGamma(*gamma),
]
)
self.asymmetric_color_aug_prob = 0.2
self.eraser_aug_prob = 0.5
def color_transform(self, img1, img2):
image_stack = np.concatenate([img1, img2], axis=0)
image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8)
image_stack = np.array(
self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8
)
img1, img2 = np.split(image_stack, 2, axis=0)
return img1, img2
......@@ -217,7 +280,7 @@ class SparseFlowAugmentor:
y0 = np.random.randint(0, ht)
dx = np.random.randint(50, 100)
dy = np.random.randint(50, 100)
img2[y0:y0+dy, x0:x0+dx, :] = mean_color
img2[y0 : y0 + dy, x0 : x0 + dx, :] = mean_color
return img1, img2
......@@ -230,8 +293,8 @@ class SparseFlowAugmentor:
flow = flow.reshape(-1, 2).astype(np.float32)
valid = valid.reshape(-1).astype(np.float32)
coords0 = coords[valid>=1]
flow0 = flow[valid>=1]
coords0 = coords[valid >= 1]
flow0 = flow[valid >= 1]
ht1 = int(round(ht * fy))
wd1 = int(round(wd * fx))
......@@ -239,8 +302,8 @@ class SparseFlowAugmentor:
coords1 = coords0 * [fx, fy]
flow1 = flow0 * [fx, fy]
xx = np.round(coords1[:,0]).astype(np.int32)
yy = np.round(coords1[:,1]).astype(np.int32)
xx = np.round(coords1[:, 0]).astype(np.int32)
yy = np.round(coords1[:, 1]).astype(np.int32)
v = (xx > 0) & (xx < wd1) & (yy > 0) & (yy < ht1)
xx = xx[v]
......@@ -260,8 +323,8 @@ class SparseFlowAugmentor:
ht, wd = img1.shape[:2]
min_scale = np.maximum(
(self.crop_size[0] + 1) / float(ht),
(self.crop_size[1] + 1) / float(wd))
(self.crop_size[0] + 1) / float(ht), (self.crop_size[1] + 1) / float(wd)
)
scale = 2 ** np.random.uniform(self.min_scale, self.max_scale)
scale_x = np.clip(scale, min_scale, None)
......@@ -269,22 +332,30 @@ class SparseFlowAugmentor:
if np.random.rand() < self.spatial_aug_prob:
# rescale the images
img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
flow, valid = self.resize_sparse_flow_map(flow, valid, fx=scale_x, fy=scale_y)
img1 = cv2.resize(
img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR
)
img2 = cv2.resize(
img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR
)
flow, valid = self.resize_sparse_flow_map(
flow, valid, fx=scale_x, fy=scale_y
)
if self.do_flip:
if np.random.rand() < self.h_flip_prob and self.do_flip == 'hf': # h-flip
if np.random.rand() < self.h_flip_prob and self.do_flip == "hf": # h-flip
img1 = img1[:, ::-1]
img2 = img2[:, ::-1]
flow = flow[:, ::-1] * [-1.0, 1.0]
if np.random.rand() < self.h_flip_prob and self.do_flip == 'h': # h-flip for stereo
if (
np.random.rand() < self.h_flip_prob and self.do_flip == "h"
): # h-flip for stereo
tmp = img1[:, ::-1]
img1 = img2[:, ::-1]
img2 = tmp
if np.random.rand() < self.v_flip_prob and self.do_flip == 'v': # v-flip
if np.random.rand() < self.v_flip_prob and self.do_flip == "v": # v-flip
img1 = img1[::-1, :]
img2 = img2[::-1, :]
flow = flow[::-1, :] * [1.0, -1.0]
......@@ -298,13 +369,12 @@ class SparseFlowAugmentor:
y0 = np.clip(y0, 0, img1.shape[0] - self.crop_size[0])
x0 = np.clip(x0, 0, img1.shape[1] - self.crop_size[1])
img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
valid = valid[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
return img1, img2, flow, valid
img1 = img1[y0 : y0 + self.crop_size[0], x0 : x0 + self.crop_size[1]]
img2 = img2[y0 : y0 + self.crop_size[0], x0 : x0 + self.crop_size[1]]
flow = flow[y0 : y0 + self.crop_size[0], x0 : x0 + self.crop_size[1]]
valid = valid[y0 : y0 + self.crop_size[0], x0 : x0 + self.crop_size[1]]
return img1, img2, flow, valid
def __call__(self, img1, img2, flow, valid):
img1, img2 = self.color_transform(img1, img2)
......
......@@ -5,34 +5,37 @@ import re
import json
import imageio
import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
TAG_CHAR = np.array([202021.25], np.float32)
def readFlow(fn):
""" Read .flo file in Middlebury format"""
"""Read .flo file in Middlebury format"""
# Code adapted from:
# http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy
# WARNING: this will work on little-endian architectures (eg Intel x86) only!
# print 'fn = %s'%(fn)
with open(fn, 'rb') as f:
with open(fn, "rb") as f:
magic = np.fromfile(f, np.float32, count=1)
if 202021.25 != magic:
print('Magic number incorrect. Invalid .flo file')
print("Magic number incorrect. Invalid .flo file")
return None
else:
w = np.fromfile(f, np.int32, count=1)
h = np.fromfile(f, np.int32, count=1)
# print 'Reading %d x %d flo file\n' % (w, h)
data = np.fromfile(f, np.float32, count=2*int(w)*int(h))
data = np.fromfile(f, np.float32, count=2 * int(w) * int(h))
# Reshape data into 3D array (columns, rows, bands)
# The reshape here is for visualization, the original code is (w,h,2)
return np.resize(data, (int(h), int(w), 2))
def readPFM(file):
file = open(file, 'rb')
file = open(file, "rb")
color = None
width = None
......@@ -41,38 +44,43 @@ def readPFM(file):
endian = None
header = file.readline().rstrip()
if header == b'PF':
if header == b"PF":
color = True
elif header == b'Pf':
elif header == b"Pf":
color = False
else:
raise Exception('Not a PFM file.')
raise Exception("Not a PFM file.")
dim_match = re.match(rb'^(\d+)\s(\d+)\s$', file.readline())
dim_match = re.match(rb"^(\d+)\s(\d+)\s$", file.readline())
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception('Malformed PFM header.')
raise Exception("Malformed PFM header.")
scale = float(file.readline().rstrip())
if scale < 0: # little-endian
endian = '<'
if scale < 0: # little-endian
endian = "<"
scale = -scale
else:
endian = '>' # big-endian
endian = ">" # big-endian
data = np.fromfile(file, endian + 'f')
data = np.fromfile(file, endian + "f")
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
return data
def writePFM(file, array):
import os
assert type(file) is str and type(array) is np.ndarray and \
os.path.splitext(file)[1] == ".pfm"
with open(file, 'wb') as f:
assert (
type(file) is str
and type(array) is np.ndarray
and os.path.splitext(file)[1] == ".pfm"
)
with open(file, "wb") as f:
H, W = array.shape
headers = ["Pf\n", f"{W} {H}\n", "-1\n"]
for header in headers:
......@@ -81,10 +89,9 @@ def writePFM(file, array):
f.write(array.tobytes())
def writeFlow(filename, uv, v=None):
"""Write optical flow to file.
def writeFlow(filename,uv,v=None):
""" Write optical flow to file.
If v is None, uv is assumed to contain both u and v channels,
stacked in depth.
Original code by Deqing Sun, adapted from Daniel Scharstein.
......@@ -92,59 +99,65 @@ def writeFlow(filename,uv,v=None):
nBands = 2
if v is None:
assert(uv.ndim == 3)
assert(uv.shape[2] == 2)
u = uv[:,:,0]
v = uv[:,:,1]
assert uv.ndim == 3
assert uv.shape[2] == 2
u = uv[:, :, 0]
v = uv[:, :, 1]
else:
u = uv
assert(u.shape == v.shape)
height,width = u.shape
f = open(filename,'wb')
assert u.shape == v.shape
height, width = u.shape
f = open(filename, "wb")
# write the header
f.write(TAG_CHAR)
np.array(width).astype(np.int32).tofile(f)
np.array(height).astype(np.int32).tofile(f)
# arrange into matrix form
tmp = np.zeros((height, width*nBands))
tmp[:,np.arange(width)*2] = u
tmp[:,np.arange(width)*2 + 1] = v
tmp = np.zeros((height, width * nBands))
tmp[:, np.arange(width) * 2] = u
tmp[:, np.arange(width) * 2 + 1] = v
tmp.astype(np.float32).tofile(f)
f.close()
def readFlowKITTI(filename):
flow = cv2.imread(filename, cv2.IMREAD_ANYDEPTH|cv2.IMREAD_COLOR)
flow = flow[:,:,::-1].astype(np.float32)
flow = cv2.imread(filename, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR)
flow = flow[:, :, ::-1].astype(np.float32)
flow, valid = flow[:, :, :2], flow[:, :, 2]
flow = (flow - 2**15) / 64.0
return flow, valid
def readDispKITTI(filename):
disp = cv2.imread(filename, cv2.IMREAD_ANYDEPTH) / 256.0
valid = disp > 0.0
return disp, valid
# Method taken from /n/fs/raft-depth/RAFT-Stereo/datasets/SintelStereo/sdk/python/sintel_io.py
def readDispSintelStereo(file_name):
a = np.array(Image.open(file_name))
d_r, d_g, d_b = np.split(a, axis=2, indices_or_sections=3)
disp = (d_r * 4 + d_g / (2**6) + d_b / (2**14))[..., 0]
mask = np.array(Image.open(file_name.replace('disparities', 'occlusions')))
valid = ((mask == 0) & (disp > 0))
mask = np.array(Image.open(file_name.replace("disparities", "occlusions")))
valid = (mask == 0) & (disp > 0)
return disp, valid
# Method taken from https://research.nvidia.com/sites/default/files/pubs/2018-06_Falling-Things/readme_0.txt
def readDispFallingThings(file_name):
a = np.array(Image.open(file_name))
with open('/'.join(file_name.split('/')[:-1] + ['_camera_settings.json']), 'r') as f:
with open(
"/".join(file_name.split("/")[:-1] + ["_camera_settings.json"]), "r"
) as f:
intrinsics = json.load(f)
fx = intrinsics['camera_settings'][0]['intrinsic_settings']['fx']
fx = intrinsics["camera_settings"][0]["intrinsic_settings"]["fx"]
disp = (fx * 6.0 * 100) / a.astype(np.float32)
valid = disp > 0
return disp, valid
# Method taken from https://github.com/castacks/tartanair_tools/blob/master/data_type.md
def readDispTartanAir(file_name):
depth = np.load(file_name)
......@@ -154,38 +167,41 @@ def readDispTartanAir(file_name):
def readDispMiddlebury(file_name):
assert basename(file_name) == 'disp0GT.pfm'
assert basename(file_name) == "disp0GT.pfm"
disp = readPFM(file_name).astype(np.float32)
assert len(disp.shape) == 2
nocc_pix = file_name.replace('disp0GT.pfm', 'mask0nocc.png')
nocc_pix = file_name.replace("disp0GT.pfm", "mask0nocc.png")
assert exists(nocc_pix)
nocc_pix = imageio.imread(nocc_pix) == 255
assert np.any(nocc_pix)
return disp, nocc_pix
def writeFlowKITTI(filename, uv):
uv = 64.0 * uv + 2**15
valid = np.ones([uv.shape[0], uv.shape[1], 1])
uv = np.concatenate([uv, valid], axis=-1).astype(np.uint16)
cv2.imwrite(filename, uv[..., ::-1])
def readDispCREStereo(filename):
disp = cv2.imread(filename, -1)
valid = disp > 0
return disp.astype(np.float32) / 32.0, valid
def read_gen(file_name, pil=False):
ext = splitext(file_name)[-1]
if ext == '.png' or ext == '.jpeg' or ext == '.ppm' or ext == '.jpg':
if ext == ".png" or ext == ".jpeg" or ext == ".ppm" or ext == ".jpg":
return Image.open(file_name)
elif ext == '.bin' or ext == '.raw':
elif ext == ".bin" or ext == ".raw":
return np.load(file_name)
elif ext == '.flo':
elif ext == ".flo":
return readFlow(file_name).astype(np.float32)
elif ext == '.pfm':
elif ext == ".pfm":
flow = readPFM(file_name).astype(np.float32)
if len(flow.shape) == 2:
return flow
else:
return flow[:, :, :-1]
return []
\ No newline at end of file
return []
......@@ -16,25 +16,32 @@ def get_normal(target_disp: torch.Tensor):
return target_normal
class InputPadder:
""" Pads images such that dimensions are divisible by 8 """
def __init__(self, dims, mode='sintel', divis_by=8):
"""Pads images such that dimensions are divisible by 8"""
def __init__(self, dims, mode="sintel", divis_by=8):
self.ht, self.wd = dims[-2:]
pad_ht = (((self.ht // divis_by) + 1) * divis_by - self.ht) % divis_by
pad_wd = (((self.wd // divis_by) + 1) * divis_by - self.wd) % divis_by
if mode == 'sintel':
self._pad = [pad_wd//2, pad_wd - pad_wd//2, pad_ht//2, pad_ht - pad_ht//2]
if mode == "sintel":
self._pad = [
pad_wd // 2,
pad_wd - pad_wd // 2,
pad_ht // 2,
pad_ht - pad_ht // 2,
]
else:
self._pad = [pad_wd//2, pad_wd - pad_wd//2, 0, pad_ht]
self._pad = [pad_wd // 2, pad_wd - pad_wd // 2, 0, pad_ht]
def pad(self, *inputs):
assert all((x.ndim == 4) for x in inputs)
return [F.pad(x, self._pad, mode='replicate') for x in inputs]
return [F.pad(x, self._pad, mode="replicate") for x in inputs]
def unpad(self, x):
assert x.ndim == 4
ht, wd = x.shape[-2:]
c = [self._pad[2], ht-self._pad[3], self._pad[0], wd-self._pad[1]]
return x[..., c[0]:c[1], c[2]:c[3]]
c = [self._pad[2], ht - self._pad[3], self._pad[0], wd - self._pad[1]]
return x[..., c[0] : c[1], c[2] : c[3]]
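A typical use of InputPadder: pad a stereo pair before inference so the spatial dimensions match the network's downsampling factor, then crop the prediction back. A minimal sketch with dummy tensors (model is a stand-in for an actual network, not defined here):
left = torch.randn(1, 3, 375, 1242)        # KITTI-sized dummy images
right = torch.randn(1, 3, 375, 1242)
padder = InputPadder(left.shape, divis_by=32)
left_p, right_p = padder.pad(left, right)  # padded to 384 x 1248
# disp = model(left_p, right_p, test_mode=True)
# disp = padder.unpad(disp)                # cropped back to 375 x 1242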
def forward_interpolate(flow):
flow = flow.detach().cpu().numpy()
......@@ -45,7 +52,7 @@ def forward_interpolate(flow):
x1 = x0 + dx
y1 = y0 + dy
x1 = x1.reshape(-1)
y1 = y1.reshape(-1)
dx = dx.reshape(-1)
......@@ -58,21 +65,23 @@ def forward_interpolate(flow):
dy = dy[valid]
flow_x = interpolate.griddata(
(x1, y1), dx, (x0, y0), method='nearest', fill_value=0)
(x1, y1), dx, (x0, y0), method="nearest", fill_value=0
)
flow_y = interpolate.griddata(
(x1, y1), dy, (x0, y0), method='nearest', fill_value=0)
(x1, y1), dy, (x0, y0), method="nearest", fill_value=0
)
flow = np.stack([flow_x, flow_y], axis=0)
return torch.from_numpy(flow).float()
def bilinear_sampler(img, coords, mode='bilinear', mask=False):
""" Wrapper for grid_sample, uses pixel coordinates """
def bilinear_sampler(img, coords, mode="bilinear", mask=False):
"""Wrapper for grid_sample, uses pixel coordinates"""
H, W = img.shape[-2:]
xgrid, ygrid = coords.split([1,1], dim=-1)
xgrid = 2*xgrid/(W-1) - 1
assert torch.unique(ygrid).numel() == 1 and H == 1 # This is a stereo problem
xgrid, ygrid = coords.split([1, 1], dim=-1)
xgrid = 2 * xgrid / (W - 1) - 1
assert torch.unique(ygrid).numel() == 1 and H == 1  # stereo case: sampling along a single row, so y is constant
grid = torch.cat([xgrid, ygrid], dim=-1)
img = F.grid_sample(img, grid, align_corners=True)
if mask:
......@@ -87,15 +96,18 @@ def coords_grid(batch, ht, wd):
return coords[None].repeat(batch, 1, 1, 1)
def upflow8(flow, mode='bilinear'):
def upflow8(flow, mode="bilinear"):
new_size = (8 * flow.shape[2], 8 * flow.shape[3])
return 8 * F.interpolate(flow, size=new_size, mode=mode, align_corners=True)
return 8 * F.interpolate(flow, size=new_size, mode=mode, align_corners=True)
def gauss_blur(input, N=5, std=1):
B, D, H, W = input.shape
x, y = torch.meshgrid(torch.arange(N).float() - N//2, torch.arange(N).float() - N//2)
unnormalized_gaussian = torch.exp(-(x.pow(2) + y.pow(2)) / (2 * std ** 2))
x, y = torch.meshgrid(
torch.arange(N).float() - N // 2,
torch.arange(N).float() - N // 2,
indexing="ij",  # explicit indexing avoids the PyTorch deprecation warning
)
unnormalized_gaussian = torch.exp(-(x.pow(2) + y.pow(2)) / (2 * std**2))
weights = unnormalized_gaussian / unnormalized_gaussian.sum().clamp(min=1e-4)
weights = weights.view(1,1,N,N).to(input)
output = F.conv2d(input.reshape(B*D,1,H,W), weights, padding=N//2)
weights = weights.view(1, 1, N, N).to(input)
output = F.conv2d(input.reshape(B * D, 1, H, W), weights, padding=N // 2)
return output.view(B, D, H, W)
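gauss_blur builds one unnormalized N x N Gaussian, normalizes it, and convolves each of the D channels independently by folding D into the batch dimension; with padding=N//2 the spatial size is preserved. A quick shape check on a dummy volume:
vol = torch.rand(2, 48, 64, 128)   # e.g. a small disparity probability volume
out = gauss_blur(vol, N=5, std=1)
assert out.shape == vol.shape      # same-size output thanks to padding=N//2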
import sys
sys.path.append('core')
DEVICE = 'cuda'
sys.path.append("core")
DEVICE = "cuda"
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import argparse
import glob
import numpy as np
......@@ -16,11 +18,13 @@ from matplotlib import pyplot as plt
import os
import cv2
def load_image(imfile):
img = np.array(Image.open(imfile)).astype(np.uint8)
img = torch.from_numpy(img).permute(2, 0, 1).float()
return img[None].to(DEVICE)
def demo(args):
model = torch.nn.DataParallel(IGEVStereo(args), device_ids=[0])
model.load_state_dict(torch.load(args.restore_ckpt))
......@@ -47,40 +51,102 @@ def demo(args):
disp = model(image1, image2, iters=args.valid_iters, test_mode=True)
disp = disp.cpu().numpy()
disp = padder.unpad(disp)
file_stem = imfile1.split('/')[-2]
file_stem = imfile1.split("/")[-2]
filename = os.path.join(output_directory, f"{file_stem}.png")
plt.imsave(output_directory / f"{file_stem}.png", disp.squeeze(), cmap='jet')
plt.imsave(
output_directory / f"{file_stem}.png", disp.squeeze(), cmap="jet"
)
# disp = np.round(disp * 256).astype(np.uint16)
# cv2.imwrite(filename, cv2.applyColorMap(cv2.convertScaleAbs(disp.squeeze(), alpha=0.01),cv2.COLORMAP_JET), [int(cv2.IMWRITE_PNG_COMPRESSION), 0])
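The script also declares a --save_numpy flag, but the elided body above does not show its branch. A plausible sketch, writing the raw disparity alongside the color visualization (illustrative only, the actual elided code may differ):
if args.save_numpy:
    # Hypothetical branch: dump the unpadded disparity as a .npy next to the PNG.
    np.save(output_directory / f"{file_stem}.npy", disp.squeeze())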
if __name__ == '__main__':
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--restore_ckpt', help="restore checkpoint", default='./pretrained_models/sceneflow/sceneflow.pth')
parser.add_argument('--save_numpy', action='store_true', help='save output as numpy arrays')
parser.add_argument(
"--restore_ckpt",
help="restore checkpoint",
default="./pretrained_models/sceneflow/sceneflow.pth",
)
parser.add_argument(
"--save_numpy", action="store_true", help="save output as numpy arrays"
)
parser.add_argument('-l', '--left_imgs', help="path to all first (left) frames", default="./demo-imgs/*/im0.png")
parser.add_argument('-r', '--right_imgs', help="path to all second (right) frames", default="./demo-imgs/*/im1.png")
parser.add_argument(
"-l",
"--left_imgs",
help="path to all first (left) frames",
default="./demo-imgs/*/im0.png",
)
parser.add_argument(
"-r",
"--right_imgs",
help="path to all second (right) frames",
default="./demo-imgs/*/im1.png",
)
# parser.add_argument('-l', '--left_imgs', help="path to all first (left) frames", default="/data/Middlebury/trainingH/*/im0.png")
# parser.add_argument('-r', '--right_imgs', help="path to all second (right) frames", default="/data/Middlebury/trainingH/*/im1.png")
# parser.add_argument('-l', '--left_imgs', help="path to all first (left) frames", default="/data/ETH3D/two_view_training/*/im0.png")
# parser.add_argument('-r', '--right_imgs', help="path to all second (right) frames", default="/data/ETH3D/two_view_training/*/im1.png")
parser.add_argument('--output_directory', help="directory to save output", default="./demo-output/")
parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
parser.add_argument('--valid_iters', type=int, default=32, help='number of flow-field updates during forward pass')
parser.add_argument(
"--output_directory", help="directory to save output", default="./demo-output/"
)
parser.add_argument(
"--mixed_precision", action="store_true", help="use mixed precision"
)
parser.add_argument(
"--valid_iters",
type=int,
default=32,
help="number of flow-field updates during forward pass",
)
# Architecture choices
parser.add_argument('--hidden_dims', nargs='+', type=int, default=[128]*3, help="hidden state and context dimensions")
parser.add_argument('--corr_implementation', choices=["reg", "alt", "reg_cuda", "alt_cuda"], default="reg", help="correlation volume implementation")
parser.add_argument('--shared_backbone', action='store_true', help="use a single backbone for the context and feature encoders")
parser.add_argument('--corr_levels', type=int, default=2, help="number of levels in the correlation pyramid")
parser.add_argument('--corr_radius', type=int, default=4, help="width of the correlation pyramid")
parser.add_argument('--n_downsample', type=int, default=2, help="resolution of the disparity field (1/2^K)")
parser.add_argument('--slow_fast_gru', action='store_true', help="iterate the low-res GRUs more frequently")
parser.add_argument('--n_gru_layers', type=int, default=3, help="number of hidden GRU levels")
parser.add_argument('--max_disp', type=int, default=192, help="max disp of geometry encoding volume")
parser.add_argument(
"--hidden_dims",
nargs="+",
type=int,
default=[128] * 3,
help="hidden state and context dimensions",
)
parser.add_argument(
"--corr_implementation",
choices=["reg", "alt", "reg_cuda", "alt_cuda"],
default="reg",
help="correlation volume implementation",
)
parser.add_argument(
"--shared_backbone",
action="store_true",
help="use a single backbone for the context and feature encoders",
)
parser.add_argument(
"--corr_levels",
type=int,
default=2,
help="number of levels in the correlation pyramid",
)
parser.add_argument(
"--corr_radius", type=int, default=4, help="width of the correlation pyramid"
)
parser.add_argument(
"--n_downsample",
type=int,
default=2,
help="resolution of the disparity field (1/2^K)",
)
parser.add_argument(
"--slow_fast_gru",
action="store_true",
help="iterate the low-res GRUs more frequently",
)
parser.add_argument(
"--n_gru_layers", type=int, default=3, help="number of hidden GRU levels"
)
parser.add_argument(
"--max_disp", type=int, default=192, help="max disp of geometry encoding volume"
)
args = parser.parse_args()
Path(args.output_directory).mkdir(exist_ok=True, parents=True)
......
import sys
sys.path.append('core')
sys.path.append("core")
import cv2
import numpy as np
import glob
......@@ -11,29 +12,90 @@ from igev_stereo import IGEVStereo
import os
import argparse
from utils.utils import InputPadder
torch.backends.cudnn.benchmark = True
half_precision = True
DEVICE = 'cuda'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
DEVICE = "cuda"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
parser = argparse.ArgumentParser(description='Iterative Geometry Encoding Volume for Stereo Matching and Multi-View Stereo (IGEV-Stereo)')
parser.add_argument('--restore_ckpt', help="restore checkpoint", default='./pretrained_models/kitti/kitti15.pth')
parser.add_argument('--save_numpy', action='store_true', help='save output as numpy arrays')
parser.add_argument('-l', '--left_imgs', help="path to all first (left) frames", default="/data/KITTI_raw/2011_09_26/2011_09_26_drive_0005_sync/image_02/data/*.png")
parser.add_argument('-r', '--right_imgs', help="path to all second (right) frames", default="/data/KITTI_raw/2011_09_26/2011_09_26_drive_0005_sync/image_03/data/*.png")
parser.add_argument('--mixed_precision', default=True, action='store_true', help='use mixed precision')
parser.add_argument('--valid_iters', type=int, default=16, help='number of flow-field updates during forward pass')
parser.add_argument('--hidden_dims', nargs='+', type=int, default=[128]*3, help="hidden state and context dimensions")
parser.add_argument('--corr_implementation', choices=["reg", "alt", "reg_cuda", "alt_cuda"], default="reg", help="correlation volume implementation")
parser.add_argument('--shared_backbone', action='store_true', help="use a single backbone for the context and feature encoders")
parser.add_argument('--corr_levels', type=int, default=2, help="number of levels in the correlation pyramid")
parser.add_argument('--corr_radius', type=int, default=4, help="width of the correlation pyramid")
parser.add_argument('--n_downsample', type=int, default=2, help="resolution of the disparity field (1/2^K)")
parser.add_argument('--slow_fast_gru', action='store_true', help="iterate the low-res GRUs more frequently")
parser.add_argument('--n_gru_layers', type=int, default=3, help="number of hidden GRU levels")
parser.add_argument('--max_disp', type=int, default=192, help="max disp of geometry encoding volume")
parser = argparse.ArgumentParser(
description="Iterative Geometry Encoding Volume for Stereo Matching and Multi-View Stereo (IGEV-Stereo)"
)
parser.add_argument(
"--restore_ckpt",
help="restore checkpoint",
default="./pretrained_models/kitti/kitti15.pth",
)
parser.add_argument(
"--save_numpy", action="store_true", help="save output as numpy arrays"
)
parser.add_argument(
"-l",
"--left_imgs",
help="path to all first (left) frames",
default="/data/KITTI_raw/2011_09_26/2011_09_26_drive_0005_sync/image_02/data/*.png",
)
parser.add_argument(
"-r",
"--right_imgs",
help="path to all second (right) frames",
default="/data/KITTI_raw/2011_09_26/2011_09_26_drive_0005_sync/image_03/data/*.png",
)
parser.add_argument(
"--mixed_precision", default=True, action="store_true", help="use mixed precision"
)
parser.add_argument(
"--valid_iters",
type=int,
default=16,
help="number of flow-field updates during forward pass",
)
parser.add_argument(
"--hidden_dims",
nargs="+",
type=int,
default=[128] * 3,
help="hidden state and context dimensions",
)
parser.add_argument(
"--corr_implementation",
choices=["reg", "alt", "reg_cuda", "alt_cuda"],
default="reg",
help="correlation volume implementation",
)
parser.add_argument(
"--shared_backbone",
action="store_true",
help="use a single backbone for the context and feature encoders",
)
parser.add_argument(
"--corr_levels",
type=int,
default=2,
help="number of levels in the correlation pyramid",
)
parser.add_argument(
"--corr_radius", type=int, default=4, help="width of the correlation pyramid"
)
parser.add_argument(
"--n_downsample",
type=int,
default=2,
help="resolution of the disparity field (1/2^K)",
)
parser.add_argument(
"--slow_fast_gru",
action="store_true",
help="iterate the low-res GRUs more frequently",
)
parser.add_argument(
"--n_gru_layers", type=int, default=3, help="number of hidden GRU levels"
)
parser.add_argument(
"--max_disp", type=int, default=192, help="max disp of geometry encoding volume"
)
args = parser.parse_args()
model = torch.nn.DataParallel(IGEVStereo(args), device_ids=[0])
......@@ -52,10 +114,13 @@ def load_image(imfile):
img = torch.from_numpy(img).permute(2, 0, 1).float()
return img[None].to(DEVICE)
if __name__ == '__main__':
if __name__ == "__main__":
fps_list = np.array([])
videoWrite = cv2.VideoWriter('./IGEV_Stereo.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 10, (1242, 750))
videoWrite = cv2.VideoWriter(
"./IGEV_Stereo.mp4", cv2.VideoWriter_fourcc(*"mp4v"), 10, (1242, 750)
)
for (imfile1, imfile2) in tqdm(list(zip(left_images, right_images))):
image1 = load_image(imfile1)
image2 = load_image(imfile2)
......@@ -73,23 +138,28 @@ if __name__ == '__main__':
end.record()
torch.cuda.synchronize()
runtime = start.elapsed_time(end)
fps = 1000/runtime
fps = 1000 / runtime
fps_list = np.append(fps_list, fps)
if len(fps_list) > 5:
fps_list = fps_list[-5:]
avg_fps = np.mean(fps_list)
print('Stereo runtime: {:.3f}'.format(1000/avg_fps))
print("Stereo runtime: {:.3f}".format(1000 / avg_fps))
disp_np = (2*disp).data.cpu().numpy().squeeze().astype(np.uint8)
disp_np = (2 * disp).data.cpu().numpy().squeeze().astype(np.uint8)
disp_np = cv2.applyColorMap(disp_np, cv2.COLORMAP_PLASMA)
image_np = np.array(Image.open(imfile1)).astype(np.uint8)
out_img = np.concatenate((image_np, disp_np), 0)
cv2.putText(
out_img,
"%.1f fps" % (avg_fps),
(10, image_np.shape[0]+30),
cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
cv2.imshow('img', out_img)
(10, image_np.shape[0] + 30),
cv2.FONT_HERSHEY_SIMPLEX,
1,
(255, 255, 255),
2,
cv2.LINE_AA,
)
cv2.imshow("img", out_img)
cv2.waitKey(1)
videoWrite.write(out_img)
videoWrite.release()
......@@ -9,23 +9,25 @@ from tqdm import tqdm
class CREStereoDataset(data.Dataset):
def __init__(self, root='/workspace/datasets/CREStereo/stereo_trainset/crestereo/'):
def __init__(self, root="/workspace/datasets/CREStereo/stereo_trainset/crestereo/"):
assert os.path.exists(root)
image1_list = sorted(glob(os.path.join(root, '*/*_left.jpg')))
image2_list = sorted(glob(os.path.join(root, '*/*_right.jpg')))
disp_list = sorted(glob(os.path.join(root, '*/*_left.disp.png')))
image1_list = sorted(glob(os.path.join(root, "*/*_left.jpg")))
image2_list = sorted(glob(os.path.join(root, "*/*_right.jpg")))
disp_list = sorted(glob(os.path.join(root, "*/*_left.disp.png")))
self.image_list = []
self.disparity_list = []
for idx, (img1, img2, disp) in enumerate(zip(image1_list, image2_list, disp_list)):
self.image_list += [ [img1, img2] ]
self.disparity_list += [ disp ]
for idx, (img1, img2, disp) in enumerate(
zip(image1_list, image2_list, disp_list)
):
self.image_list += [[img1, img2]]
self.disparity_list += [disp]
def __len__(self):
return len(self.image_list)
def __getitem__(self, index):
# img1 = cv2.imread(self.image_list[index][0])
# img2 = cv2.imread(self.image_list[index][1])
......@@ -36,15 +38,22 @@ class CREStereoDataset(data.Dataset):
# return img1, img2, disp
return disp
if __name__ == '__main__':
if __name__ == "__main__":
cre_dataset = CREStereoDataset()
cre_loader = data.DataLoader(cre_dataset, batch_size=16,
pin_memory=True, shuffle=False, num_workers=16, drop_last=False)
f = open('test.txt', 'w')
cre_loader = data.DataLoader(
cre_dataset,
batch_size=16,
pin_memory=True,
shuffle=False,
num_workers=16,
drop_last=False,
)
f = open("test.txt", "w")
for i_batch, disp_gt in enumerate(tqdm(cre_loader)):
# import ipdb;ipdb.set_trace()
f.write("min_disp: %f, max_disp: %f\n" % (disp_gt.min(), disp_gt.max()))
# if i_batch > 20:
# break
f.close()
\ No newline at end of file
f.close()
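The __getitem__ shown above returns only the ground-truth disparity; given the CREStereo convention used by readDispCREStereo earlier in this commit (16-bit PNG scaled by 32), a hedged sketch of the decoding it presumably performs:
def __getitem__(self, index):
    # Illustrative decode; the elided implementation may differ.
    disp = cv2.imread(self.disparity_list[index], cv2.IMREAD_UNCHANGED)
    return disp.astype(np.float32) / 32.0  # default collate yields a FloatTensor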