PyTorch Environment
Default environment for PyTorch
This notebook describes and creates the default PyTorch machine learning environment in Nextjournal. Check out the showcase if you want to see what the environment contains. To see how it’s built, see setup.
Nextjournal's PyTorch environment runs PyTorch v1.3.1, and is configured to use Nvidia CUDA v10.2.
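To confirm the versions from inside a running cell, here is a minimal check (the exact output depends on the wheels actually installed):

import torch
print(torch.__version__)   # PyTorch release
print(torch.version.cuda)  # CUDA version the wheels were built against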
Showcase
Training
Adapted from the PyTorch MNIST example. Imports:
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms

Define network.
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4*4*50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)
        x = x.view(-1, 4*4*50)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)

Train and test functions.
def train(args, model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))

def test(args, model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # sum up batch loss
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            # get the index of the max log-probability
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)

    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))

Set options and train.
args = type('', (), {})()
args.batch_size = 64
args.test_batch_size = 1000
args.epochs = 10
args.lr = 0.01
args.momentum = 0.5
args.no_cuda = False
args.seed = 1
args.log_interval = 10
args.save_model = True

use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=args.test_batch_size, shuffle=True, **kwargs)

model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)

for epoch in range(1, args.epochs + 1):
    train(args, model, device, train_loader, optimizer, epoch)
    test(args, model, device, test_loader)

if (args.save_model):
    torch.save(model.state_dict(), "/results/mnist_cnn.pt")
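The saved checkpoint can be loaded back for inference. Here is a minimal sketch, reusing the Net class, device, and transforms defined above, and assuming the training run produced /results/mnist_cnn.pt:

# Reload the trained weights and classify a single test digit.
model = Net().to(device)
model.load_state_dict(torch.load("/results/mnist_cnn.pt", map_location=device))
model.eval()

test_set = datasets.MNIST('../data', train=False,
                          transform=transforms.Compose([
                              transforms.ToTensor(),
                              transforms.Normalize((0.1307,), (0.3081,))
                          ]))
image, label = test_set[0]
with torch.no_grad():
    pred = model(image.unsqueeze(0).to(device)).argmax(dim=1).item()
print("predicted: {}, actual: {}".format(pred, label))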
Using a Pre-trained Model
Adapted from the fast-neural-style example.
Download the PyTorch examples repo to get the relevant module files for the 'fast-neural-style' example. PYTHONPATH is set to the example's directory in the Runtime Settings, so the imports below work.
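Outside Nextjournal, the same effect can be approximated by cloning the pytorch/examples repository and putting the example's directory on sys.path before importing. This is only a sketch; the clone location and subdirectory layout below are assumptions, not part of the original notebook.

# Sketch (assumed paths): make the fast-neural-style modules importable
# when PYTHONPATH has not been set in the Runtime Settings.
import sys
sys.path.insert(0, "/examples/fast_neural_style/neural_style")  # assumed clone location

import utils                                  # helper functions from the example
from transformer_net import TransformerNet    # the style-transfer network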
The pre-trained models are downloaded, saved to /results, and locked. Unzip them.
wget --progress=dot:giga -O /results/saved_models.zip \
  https://www.dropbox.com/s/lrvwfehqdcxoza8/saved_models.zip?dl=1
unzip saved_models.zip -d /

Imports.
import argparse
import os
import sys
import time
import re

import numpy as np
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
import torch.onnx

import utils
from transformer_net import TransformerNet
from vgg import Vgg16

The main styling function.
def stylize(args):
    device = torch.device("cuda" if args.cuda else "cpu")

    content_image = utils.load_image(args.content_image, scale=args.content_scale)
    content_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)

    if args.model.endswith(".onnx"):
        output = stylize_onnx_caffe2(content_image, args)
    else:
        with torch.no_grad():
            style_model = TransformerNet()
            state_dict = torch.load(args.model)
            # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
            for k in list(state_dict.keys()):
                if re.search(r'in\d+\.running_(mean|var)$', k):
                    del state_dict[k]
            style_model.load_state_dict(state_dict)
            style_model.to(device)
            if args.export_onnx:
                assert args.export_onnx.endswith(".onnx"), "Export model file should end with .onnx"
                output = torch.onnx._export(style_model, content_image, args.export_onnx).cpu()
            else:
                output = style_model(content_image).cpu()
    utils.save_image(args.output_image, output[0])

Helper function, used with ONNX model files.
def stylize_onnx_caffe2(content_image, args):
    """
    Read ONNX model and run it using Caffe2
    """
    assert not args.export_onnx

    import onnx
    import onnx_caffe2.backend

    model = onnx.load(args.model)

    prepared_backend = onnx_caffe2.backend.prepare(model, device='CUDA' if args.cuda else 'CPU')
    inp = {model.graph.input[0].name: content_image.numpy()}
    c2_out = prepared_backend.run(inp)[0]

    return torch.from_numpy(c2_out)

Let's make a Euphonium look fancy.
[Content image: Euphonium_Boosey_and_hawkes.jpg]
It's fast, so we might as well run through all four models.
args = type('', (), {})()
args.content_scale = None
args.cuda = 1
args.export_onnx = ""
args.content_image = "Euphonium_Boosey_and_hawkes.jpg"  # the uploaded content image

for style in ["candy", "udnie", "rain_princess", "mosaic"]:
    args.output_image = "/results/styled-{}.jpg".format(style)
    args.model = "/saved_models/{}.pth".format(style)
    stylize(args)
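To sanity-check the outputs without leaving Python, here is a small sketch (Pillow is pulled in as a torchvision dependency) that opens each styled file and reports its size:

# Sketch: verify the four styled images were written and are readable.
from PIL import Image

for style in ["candy", "udnie", "rain_princess", "mosaic"]:
    img = Image.open("/results/styled-{}.jpg".format(style))
    print(style, img.size)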
Setup
Build the PyTorch Environment
Install the dependencies via conda. Nvidia drivers and libraries are loaded by setting the NEXTJOURNAL_MOUNT_CUDA environment variable.
conda install -c defaults -c intel \
  mkl-include mkl-dnn \
  pyyaml typing
conda install -c pytorch cuda102 magma-cuda102
conda clean -qtipy
ldconfig

Install torch, torchvision, and the companion packages via pip. This ensures conda doesn't force redundant installs of cudatoolkit and cudnn.
pip install \
  -f https://download.pytorch.org/whl/cu102/torch_stable.html \
  torch torchvision torchcsprng torchaudio torchtext \
  torch_model_archiver torchserve

Testing
import torch
print(torch.cuda.current_device())
print(torch.cuda.device(0))
print(torch.cuda.device_count())
print(torch.cuda.get_device_name(0))
print(torch.cuda.is_available())
print(torch.rand(100,100).cuda())

import torchvision.ops

conda list torch
pip list
nvidia-smi
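As an extra smoke test (not part of the original notebook), the companion packages installed above should import cleanly and report their versions:

# Sketch: import the companion packages and print their versions.
import torch, torchvision, torchaudio, torchtext

for mod in (torch, torchvision, torchaudio, torchtext):
    print(mod.__name__, mod.__version__)
print("cuDNN:", torch.backends.cudnn.version())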