Coding Flashcards

1
Q

Backpropagation (PyTorch Code)

A

import torch
import torch.nn as nn
import torch.optim as optim

# --- 1. Model and Data ---
# Simple linear model with 2 inputs and 1 output
model = nn.Linear(2, 1)

# Sample input and target output
input_data = torch.tensor([[1.0, 2.0]])
target_output = torch.tensor([[5.0]])

# --- 2. Loss Function and Optimizer ---
loss_fn = nn.MSELoss()  # Mean squared error loss
optimizer = optim.SGD(model.parameters(), lr=0.01)  # Stochastic gradient descent

# --- 3. Training Loop ---
for epoch in range(100):  # Example: train for 100 epochs
    # 3.1 Forward pass
    output = model(input_data)
    loss = loss_fn(output, target_output)

    # 3.2 Backpropagation
    optimizer.zero_grad()  # Reset gradients from the previous step
    loss.backward()        # Compute gradients
    optimizer.step()       # Update model parameters

    # Print status occasionally
    if epoch % 10 == 0:
        print(f'Epoch {epoch}, Loss: {loss.item():.4f}')
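
To see what loss.backward() actually produces before optimizer.step() consumes it, here is a minimal sketch that inspects the gradients directly (the inspection lines are illustrative additions, reusing the model above):

# Run one more forward/backward pass and look at the raw gradients
output = model(input_data)
loss = loss_fn(output, target_output)
model.zero_grad()
loss.backward()
print(model.weight.grad)  # dLoss/dWeight, shape (1, 2)
print(model.bias.grad)    # dLoss/dBias, shape (1,)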
2
Q

Data Loader (PyTorch Code)

A

import torch
from torch.utils.data import Dataset, DataLoader

# --- 1. Custom Dataset ---
class MyDataset(Dataset):
    def __init__(self, data, labels):
        self.data = data
        self.labels = labels

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index], self.labels[index]

# --- 2. Sample Data ---
data = torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])  # Example features
labels = torch.tensor([0, 1, 0])  # Example labels

# --- 3. Create Dataset and DataLoader ---
dataset = MyDataset(data, labels)
dataloader = DataLoader(dataset, batch_size=2, shuffle=True)

# --- 4. Using the DataLoader ---
for batch_idx, (batch_data, batch_labels) in enumerate(dataloader):
    print(f"Batch {batch_idx}:")
    print("Features:", batch_data)
    print("Labels:", batch_labels)
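
For plain tensor data like this, torch.utils.data.TensorDataset can stand in for the custom class; a minimal sketch using the same data and labels as above:

from torch.utils.data import TensorDataset

# Wraps the tensors directly; indexing returns (data[i], labels[i]) tuples
tensor_dataset = TensorDataset(data, labels)
tensor_loader = DataLoader(tensor_dataset, batch_size=2, shuffle=True)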

3
Q

Evaluation mode (PyTorch Code)

A

import torch
import torch.nn as nn
import torch.nn.functional as F

# Define a sample model
class MyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 32, 3)
        self.dropout = nn.Dropout(0.5)  # Dropout layer example
        self.fc = nn.Linear(128, 10)

    def forward(self, x):
        # One possible wiring of the layers above (the original left this elided)
        x = torch.relu(self.conv1(x))          # (N, 32, 26, 26) for 28x28 input
        x = F.adaptive_avg_pool2d(x, 2)        # Pool to (N, 32, 2, 2)
        x = self.dropout(torch.flatten(x, 1))  # Flatten to (N, 128) to match fc
        return self.fc(x)

# Create a model instance
model = MyModel()

# Evaluation mode
model.eval()  # Key line: switches dropout/batch-norm layers to inference behavior

# Perform inference (example)
with torch.no_grad():  # Optionally disable gradient tracking to save memory
    input_data = torch.randn(1, 1, 28, 28)  # Sample input
    output = model(input_data)
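
The practical effect of eval() is easiest to see on a dropout layer in isolation; a small sketch (illustrative, not part of the original card):

drop = nn.Dropout(0.5)
x = torch.ones(5)

drop.train()    # Training mode: dropout is active
print(drop(x))  # Some elements zeroed, survivors scaled by 1/(1 - p)

drop.eval()     # Evaluation mode: dropout is a no-op
print(drop(x))  # Unchanged input

Call model.train() to switch the whole model back before resuming training.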

4
Q

Forward propagation (PyTorch Code)

A

import torch
import torch.nn as nn

# --- 1. Define the Model ---
class MyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(2, 5)  # Input features: 2, output features: 5
        self.relu = nn.ReLU()           # Activation function
        self.linear2 = nn.Linear(5, 1)  # Output layer

    def forward(self, x):
        x = self.linear1(x)
        x = self.relu(x)
        x = self.linear2(x)
        return x

# --- 2. Create Model and Input ---
model = MyModel()
input_data = torch.tensor([1.0, 2.0])  # Sample input

# --- 3. Perform Forward Propagation ---
output = model(input_data)
print(output)
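
The same forward pass handles a batch with no code changes, since nn.Linear treats the leading dimension as the batch; a quick sketch reusing the model above:

batch = torch.tensor([[1.0, 2.0],
                      [3.0, 4.0],
                      [5.0, 6.0]])
print(model(batch).shape)  # torch.Size([3, 1]): one output per sample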

5
Q

NN Class (PyTorch Code)

A

import torch
import torch.nn as nn

# --- 1. Define the Neural Network Class ---
class MyNeuralNetwork(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.linear1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.linear2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x = self.linear1(x)
        x = self.relu(x)
        x = self.linear2(x)
        return x

# --- 2. Create a Neural Network Instance ---
input_size = 2   # Number of input features
hidden_size = 5  # Number of neurons in the hidden layer
output_size = 1  # Number of output neurons

model = MyNeuralNetwork(input_size, hidden_size, output_size)

# --- 3. Use the Model (Example) ---
input_data = torch.tensor([1.0, 2.0])
output = model(input_data)
print(output)
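
Because MyNeuralNetwork subclasses nn.Module, the layers assigned in __init__ are registered automatically; a short sketch of inspecting the learnable parameters (illustrative addition):

for name, param in model.named_parameters():
    print(name, tuple(param.shape))
# Prints: linear1.weight (5, 2), linear1.bias (5,),
#         linear2.weight (1, 5), linear2.bias (1,)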

6
Q

Train Loop (PyTorch Code)

A

import torch
import torch.nn as nn
import torch.optim as optim

# Define model architecture
model = nn.Sequential(
    nn.Linear(in_features=784, out_features=128),
    nn.ReLU(),
    nn.Linear(in_features=128, out_features=10)
)

# Define loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)

# Training loop (train_loader is assumed to yield (inputs, targets) batches)
num_epochs = 10  # Example value; left undefined in the original snippet
for epoch in range(num_epochs):
    for inputs, targets in train_loader:
        optimizer.zero_grad()               # Zero gradients
        outputs = model(inputs)             # Forward pass
        loss = criterion(outputs, targets)  # Compute loss
        loss.backward()                     # Backward pass
        optimizer.step()                    # Update parameters
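
The loop assumes train_loader already exists; a minimal sketch of building one from random stand-in data (shapes chosen to match the 784-input model above, values purely illustrative):

from torch.utils.data import TensorDataset, DataLoader

# 256 fake samples with 784 features each, integer labels in [0, 10)
inputs = torch.randn(256, 784)
targets = torch.randint(0, 10, (256,))
train_loader = DataLoader(TensorDataset(inputs, targets), batch_size=32, shuffle=True)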

7
Q

Custom Dataset (PyTorch Code)

A

import torch
from torch.utils.data import Dataset

class CustomDataset(Dataset):
    """A template for creating custom PyTorch datasets."""

    def __init__(self, data, labels, transform=None):
        """
        Initializes the dataset.

        Args:
            data: Your data points (e.g., images, text). The format depends on
                the nature of your dataset.
            labels: Corresponding labels for each data point.
            transform: Optional callable applied to each sample (e.g., image
                resizing, data augmentation).
        """
        self.data = data
        self.labels = labels
        self.transform = transform

    def __len__(self):
        """Returns the number of samples in the dataset."""
        return len(self.data)

    def __getitem__(self, index):
        """
        Retrieves a single data sample and its label at the given index.

        Args:
            index: The index of the data sample to retrieve.

        Returns:
            A tuple (data_sample, label).
        """
        data_sample = self.data[index]
        label = self.labels[index]

        if self.transform:
            data_sample = self.transform(data_sample)

        return data_sample, label

# Example usage (assuming image data):
from torchvision import transforms

my_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# image_paths and image_labels are assumed to be defined elsewhere
dataset = CustomDataset(data=image_paths, labels=image_labels, transform=my_transform)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=True)
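
Since the transform pipeline above expects PIL images, a dataset holding file paths would normally load each image inside __getitem__; a minimal sketch of that variant (the loading step assumes Pillow and on-disk image files, which the original snippet leaves unspecified):

from PIL import Image

class ImagePathDataset(CustomDataset):
    def __getitem__(self, index):
        # Open the path as a PIL image so transforms like Resize work
        image = Image.open(self.data[index]).convert("RGB")
        if self.transform:
            image = self.transform(image)
        return image, self.labels[index]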

8
Q

Updating weights and biases (Code)

A

import torch
import torch.nn as nn
import torch.optim as optim

# --- Sample model and data ---
model = nn.Linear(2, 1)  # Simple linear model
input_data = torch.tensor([[1.0, 2.0]])
target_output = torch.tensor([[4.0]])

# --- Optimizer and loss function ---
optimizer = optim.SGD(model.parameters(), lr=0.05)  # Stochastic gradient descent
loss_fn = nn.MSELoss()  # Mean squared error

# --- Training loop (single iteration) ---
optimizer.zero_grad()  # Clear previous gradients
output = model(input_data)
loss = loss_fn(output, target_output)
loss.backward()   # Compute gradients
optimizer.step()  # Update parameters based on gradients
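
For plain SGD, optimizer.step() is equivalent to the hand-written update p <- p - lr * p.grad; a minimal sketch of doing it manually (illustrative, using the gradients computed by loss.backward() above):

lr = 0.05  # Same learning rate as the optimizer
with torch.no_grad():  # Parameter updates must not be tracked by autograd
    for param in model.parameters():
        param -= lr * param.grad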

9
Q

Sequential layers (Keras Code)

A

from tensorflow.keras import models, layers

# Create a Sequential model
model = models.Sequential()

# Add input and convolutional layers for image processing
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))

# Flatten the output for classification
model.add(layers.Flatten())

# Add dense layers for classification
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))  # 10 classes for output
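
A typical next step is compiling the model and checking the shape flow; a short usage sketch (the optimizer and loss choices here are illustrative, not from the original card):

# sparse_categorical_crossentropy matches integer class labels
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.summary()  # Prints each layer's output shape and parameter count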
