International_Deer27 OP t1_j6x0tpy wrote

I've simplified my model a lot: it now only takes 2000x1 tensors as input for X, and the prediction is either 0 or 1 as before. I've built it with nn.Sequential using only a few layers so it's easier to follow:

import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt

# Convert the labels and the first element of each X entry to numpy arrays, then tensors
df_Y_MACE = np.array(df_Y_MACE)

df_X_MACE1 = []
for i in range(len(df_X_MACE)):
    df_X_MACE1.append(df_X_MACE[i][0])
df_X_MACE1 = np.array(df_X_MACE1)

X = torch.from_numpy(df_X_MACE1).float()
Y = torch.from_numpy(df_Y_MACE).float()

# Define the dataset
class ECGDataset(Dataset):
    def __init__(self, data, labels):
        self.data = data
        self.labels = labels

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx], self.labels[idx]

# Split the data into training and testing sets (test_size=0.8 holds out 80% of the data for testing)
train_data, test_data, train_labels, test_labels = train_test_split(X, Y, test_size=0.8)

# Create the datasets and data loaders
train_dataset = ECGDataset(train_data, train_labels)
test_dataset = ECGDataset(test_data, test_labels)
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)
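
# Optional sanity check: one batch should give data of shape (32, 2000) and labels
# of shape (32,), assuming each input is a flat length-2000 vector (which the
# unsqueeze(1) in the model below expects).
sample_x, sample_y = next(iter(train_loader))
print(sample_x.shape, sample_y.shape)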

# Define the CNN
class ECGClassifier(nn.Module):
    def __init__(self):
        super(ECGClassifier, self).__init__()
        # Conv1d: length 2000 -> (2000 - 50) // 5 + 1 = 391 samples, 32 channels
        # MaxPool1d(7, 2): 391 -> (391 - 7) // 2 + 1 = 193 samples
        # Linear(193, 1): collapses the length dimension to 1 per channel
        self.ECG_seq = nn.Sequential(
            nn.Conv1d(1, 32, kernel_size=50, stride=5),
            nn.ReLU(),
            nn.MaxPool1d(7, 2),
            nn.Linear(193, 1),
        )
        self.fc = nn.Linear(32, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = x.unsqueeze(1)                # (batch, 2000) -> (batch, 1, 2000)
        out = self.ECG_seq(x)             # (batch, 32, 1)
        out = self.fc(out.view(-1, 32))   # (batch, 1)
        out = self.sigmoid(out)
        return out
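
# Optional: a quick check that the shapes above work out -- a dummy batch of
# 4 length-2000 traces should come out as a (4, 1) prediction tensor.
assert ECGClassifier()(torch.randn(4, 2000)).shape == (4, 1)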

# Define the model and move it to the device
device = torch.device('cpu')
model = ECGClassifier()
model = model.to(device)
model = model.float()

# Define the loss function and optimizer
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=0.01)

total_loss = []

# Train the model
for epoch in range(5):
    for i, (data, labels) in enumerate(train_loader):
        data, labels = data.to(device), labels.to(device)

        # Forward pass
        with torch.set_grad_enabled(True):
            outputs = model(data)
            labels = labels.unsqueeze(1)       # (batch,) -> (batch, 1) to match outputs
            loss = criterion(outputs, labels)
            total_loss.append(loss.item())     # store the value, not the graph-holding tensor

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, 5, loss.item()))
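
To make use of the test split and the loss history, something like this should work (a sketch, assuming a 0.5 decision threshold on the sigmoid output):

# Evaluate on the held-out set
model.eval()
correct, total = 0, 0
with torch.no_grad():
    for data, labels in test_loader:
        data, labels = data.to(device), labels.to(device)
        preds = (model(data) >= 0.5).float().squeeze(1)   # threshold the sigmoid output
        correct += (preds == labels).sum().item()
        total += labels.size(0)
print('Test accuracy: {:.4f}'.format(correct / total))

# Plot the per-batch training loss
plt.plot(total_loss)
plt.xlabel('batch')
plt.ylabel('BCE loss')
plt.show()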
