Deployment Examples
XGBoost
from sklearn.datasets import load_iris
from xgboost import XGBClassifier

# Train a simple classifier on the Iris dataset
iris = load_iris()
X, y = iris.data, iris.target
xgb = XGBClassifier()
xgb.fit(X, y)

# Save to XGBoost's JSON format
xgb.save_model("iris.json")
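As a quick sanity check (not part of the original example), the saved JSON can be loaded back into a fresh classifier to confirm the model round-trips before deploying it:

loaded = XGBClassifier()
loaded.load_model("iris.json")
# The reloaded model should reproduce the original predictions exactly
assert (loaded.predict(X) == xgb.predict(X)).all()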
from cerebrium import deploy, model_api_request, model_type

# Deploy the saved model, then call the returned endpoint with a single sample
endpoint = deploy((model_type.XGBOOST_CLASSIFIER, "iris.json"), "xgb-test-model", "<API_KEY>")
response = model_api_request(endpoint, X[0].tolist(), "<API_KEY>")
print(response["data"])
SKLearn
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
import pickle

# Train a simple classifier on the Iris dataset
iris = load_iris()
X, y = iris.data, iris.target
rf = RandomForestClassifier()
rf.fit(X, y)

# Save to pickle
with open("iris.pkl", "wb") as f:
    pickle.dump(rf, f)
from cerebrium import deploy, model_api_request, model_type

# Deploy the pickled model, then call the returned endpoint with a single sample
endpoint = deploy((model_type.SKLEARN, "iris.pkl"), "sk-test-model", "<API_KEY>")
response = model_api_request(endpoint, X[0].tolist(), "<API_KEY>")
print(response["data"])
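To confirm the deployed model matches the local one, you can compare the endpoint's output against rf.predict. This is a sketch; it assumes response["data"] carries the predicted class as returned above:

local_prediction = rf.predict(X[:1])
print(local_prediction, response["data"])  # the two predictions should agree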
PyTorch
pytorch.ipynb
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms

# Hyperparameters
batch_size = 64
num_classes = 10
learning_rate = 0.001
num_epochs = 20

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Use transforms.Compose to reformat images for modeling,
# and save to variable all_transforms for later use
all_transforms = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
                         std=[0.2023, 0.1994, 0.2010])
])

# Create training dataset
train_dataset = torchvision.datasets.CIFAR10(root='./data',
                                             train=True,
                                             transform=all_transforms,
                                             download=True)

# Create testing dataset
test_dataset = torchvision.datasets.CIFAR10(root='./data',
                                            train=False,
                                            transform=all_transforms,
                                            download=True)

# Instantiate loader objects to facilitate processing
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)  # no need to shuffle evaluation data
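As a quick sanity check (not in the original notebook), you can pull one batch from the loader and confirm the tensor shapes the network will see:

images, labels = next(iter(train_loader))
print(images.shape)  # torch.Size([64, 3, 32, 32])
print(labels.shape)  # torch.Size([64])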
# Create neural network
class ConvNeuralNet(nn.Module):
    def __init__(self, num_classes):
        super(ConvNeuralNet, self).__init__()
        # Input: 3 x 32 x 32. Each unpadded 3x3 conv shrinks both spatial dims by 2.
        self.conv_layer1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3)   # -> 32 x 30 x 30
        self.conv_layer2 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3)  # -> 32 x 28 x 28
        self.max_pool1 = nn.MaxPool2d(kernel_size=2, stride=2)                        # -> 32 x 14 x 14
        self.conv_layer3 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3)  # -> 64 x 12 x 12
        self.conv_layer4 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3)  # -> 64 x 10 x 10
        self.max_pool2 = nn.MaxPool2d(kernel_size=2, stride=2)                        # -> 64 x 5 x 5
        self.fc1 = nn.Linear(1600, 128)  # 64 * 5 * 5 = 1600 flattened features
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(128, num_classes)

    def forward(self, x):
        out = self.conv_layer1(x)
        out = self.conv_layer2(out)
        out = self.max_pool1(out)
        out = self.conv_layer3(out)
        out = self.conv_layer4(out)
        out = self.max_pool2(out)
        out = out.reshape(out.size(0), -1)
        out = self.fc1(out)
        out = self.relu1(out)
        out = self.fc2(out)
        return out
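If you change any layer, the 1600 in fc1 must change with it. A minimal sketch (a throwaway instance on CPU, not in the original notebook) that fails loudly if the flattened conv output no longer matches:

# Dummy forward pass: raises a shape error if fc1's input size is wrong
dummy = torch.randn(1, 3, 32, 32)
print(ConvNeuralNet(num_classes)(dummy).shape)  # torch.Size([1, 10])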
model = ConvNeuralNet(num_classes).to(device)  # move the model to the same device as the data

# Create loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=0.005, momentum=0.9)

total_step = len(train_loader)

# Train our model
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        loss = criterion(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, loss.item()))

# Measure accuracy on the training set
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in train_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the {} train images: {} %'.format(50000, 100 * correct / total))
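The loop above measures accuracy on the training data; pointing the same loop at test_loader (built earlier but otherwise unused) gives held-out accuracy:

# Evaluate on the test set for a held-out accuracy figure
model.eval()
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the {} test images: {} %'.format(10000, 100 * correct / total))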
# This is where the magic happens!
from cerebrium import deploy, model_type

## With Cloudpickle
import cloudpickle

with open("torch_model.pkl", "wb") as f:
    cloudpickle.dump(model, f)

deploy((model_type.TORCH, "torch_model.pkl"), "torch-model-cp", "<API_KEY>")

## With TorchScript
scripted_model = torch.jit.script(model)
scripted_model.save("torch_model.pt")

deploy((model_type.TORCH, "torch_model.pt"), "torch-model-ts", "<API_KEY>")