""" Code adapted from https://github.com/ikostrikov/pytorch-a3c"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import torch
import torch.nn as nn


def normc_initializer(std=1.0):
    """Returns an initializer that mimics the TF `normc` initializer:
    sample from N(0, 1), then rescale so that each row of the weight
    tensor has L2 norm `std`."""

    def initializer(tensor):
        tensor.data.normal_(0, 1)
        tensor.data *= std / torch.sqrt(
            tensor.data.pow(2).sum(1, keepdim=True))

    return initializer
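
# Usage sketch (illustrative, not part of the original file): initialize
# a linear layer's weight matrix with row-normalized noise.
#
#   fc = nn.Linear(64, 32)
#   normc_initializer(0.01)(fc.weight)  # each of the 32 rows gets L2 norm 0.01
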
def valid_padding(in_size, filter_size, stride_size):
    """Note: Padding is added to match TF conv2d `same` padding. See
    www.tensorflow.org/versions/r0.12/api_docs/python/nn/convolution

    Params:
        in_size (tuple): Rows (height), columns (width) of the input
        filter_size (tuple): Rows (height), columns (width) of the filter
        stride_size (tuple): Rows (height), columns (width) of the stride

    Output:
        padding (tuple): For input into torch.nn.ZeroPad2d
        output (tuple): Output shape after padding and convolution
    """
    in_height, in_width = in_size
    filter_height, filter_width = filter_size
    stride_height, stride_width = stride_size

    out_height = np.ceil(float(in_height) / float(stride_height))
    out_width = np.ceil(float(in_width) / float(stride_width))

    # Total padding needed for `same`-style output; clamped at 0 to match
    # TF's formula for the case where the filter is smaller than the stride.
    pad_along_height = int(
        max((out_height - 1) * stride_height + filter_height - in_height, 0))
    pad_along_width = int(
        max((out_width - 1) * stride_width + filter_width - in_width, 0))
    # Split the padding as evenly as possible, with the extra pixel (if
    # any) going to the bottom/right, as TF does.
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    output = (out_height, out_width)
    return padding, output
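
# Usage sketch (illustrative, not part of the original file): `same`
# padding for an 84x84 Atari-style frame with an 8x8 filter and stride 4.
#
#   padding, out_shape = valid_padding((84, 84), (8, 8), (4, 4))
#   # padding == (2, 2, 2, 2); out_shape == (21.0, 21.0)
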
def _get_activation_fn(name):
    """Returns the torch activation module class for the given name."""
    activation = None
    if name == "tanh":
        activation = nn.Tanh
    elif name == "relu":
        activation = nn.ReLU
    else:
        raise ValueError("Unknown activation: {}".format(name))
    return activation
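
# Usage sketch (illustrative, not part of the original file): the helper
# returns the module class, not an instance, so call it to construct one.
#
#   act = _get_activation_fn("relu")()  # an nn.ReLU module
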
class SlimConv2d(nn.Module):
    """Simple mock of tf.slim Conv2d"""

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel,
                 stride,
                 padding,
                 initializer=nn.init.xavier_uniform_,
                 activation_fn=nn.ReLU,
                 bias_init=0):
        super(SlimConv2d, self).__init__()
        layers = []
        # `padding` is a (left, right, top, bottom) tuple, e.g. as
        # produced by valid_padding() above.
        if padding:
            layers.append(nn.ZeroPad2d(padding))
        conv = nn.Conv2d(in_channels, out_channels, kernel, stride)
        if initializer:
            initializer(conv.weight)
        # The bias is always initialized to a constant, regardless of
        # whether a weight initializer was given.
        nn.init.constant_(conv.bias, bias_init)

        layers.append(conv)
        if activation_fn:
            layers.append(activation_fn())
        self._model = nn.Sequential(*layers)

    def forward(self, x):
        return self._model(x)
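
# Usage sketch (illustrative, not part of the original file): a padded
# conv layer over a stacked 84x84 frame, reusing valid_padding() above.
#
#   padding, _ = valid_padding((84, 84), (8, 8), (4, 4))
#   conv = SlimConv2d(4, 16, kernel=8, stride=4, padding=padding)
#   out = conv(torch.zeros(1, 4, 84, 84))  # -> shape (1, 16, 21, 21)
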
class SlimFC(nn.Module):
    """Simple PyTorch version of `linear` function"""

    def __init__(self,
                 in_size,
                 out_size,
                 initializer=None,
                 activation_fn=None,
                 bias_init=0):
        super(SlimFC, self).__init__()
        layers = []
        linear = nn.Linear(in_size, out_size)
        if initializer:
            initializer(linear.weight)
        nn.init.constant_(linear.bias, bias_init)
        layers.append(linear)
        if activation_fn:
            layers.append(activation_fn())
        self._model = nn.Sequential(*layers)

    def forward(self, x):
        return self._model(x)
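
# Usage sketch (illustrative, not part of the original file): a hidden
# layer with normc-initialized weights and a tanh nonlinearity.
#
#   fc = SlimFC(256, 64, initializer=normc_initializer(1.0),
#               activation_fn=_get_activation_fn("tanh"))
#   out = fc(torch.zeros(8, 256))  # -> shape (8, 64)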