This repository was archived by the owner on Apr 14, 2022. It is now read-only.

Python Language Server takes a huge amount of RAM (more than 10 GB) #1426

Closed
@Coderx7

Description


Today I faced this issue! It took more than 10 GB of my system RAM. I can't reproduce it right now (but see the update below). It seems that when this happens, IntelliSense also stops working and the analysis goes on forever!

It seemed to me that this happens when I try to code (use IntelliSense) while the language server is still analyzing something, and that just kills IntelliSense for good! Each time I write new code while the analysis continues, more RAM is consumed. I'm not sure about this, but that is my impression.
I attached some log files; hopefully they show something.
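
For anyone reproducing this, trace-level language server logs like the attached ones can be enabled in settings.json. This is a minimal sketch assuming the 2019-era Python extension options (python.jediEnabled and python.analysis.logLevel); the option names may differ in other versions:

```json
{
    // use the Microsoft Python Language Server instead of Jedi
    "python.jediEnabled": false,

    // write verbose ("Trace") language server output to the Python output channel
    "python.analysis.logLevel": "Trace"
}
```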

I'm using the latest versions:

Python Language Server:
Microsoft Python Language Server version 0.3.46.0
Python Extension:
2019.8.29288 (6 August 2019)
VS Code info:

Version: 1.37.0 (user setup)
Commit: 036a6b1d3ac84e5ca96a17a44e63a87971f8fcc8
Date: 2019-08-08T02:33:50.993Z
Electron: 4.2.7
Chrome: 69.0.3497.128
Node.js: 10.11.0
V8: 6.9.427.31-electron.0
OS: Windows_NT x64 10.0.17763

UPDATE:

OK, this happened again and I could save the log and record it before VS Code crashed!
Here is the console log (attached screen recording: languagesever_ipython_vscode3).

Here are the second VS Code log and the language server log (copied from the Output tab):
download the 2 log files separately.
And this is the sample test code I used:

```python
#%%
# in the name of God

import torch
import torch.nn as nn
import torchvision
#import torchvision.transforms as transforms
from torchvision import transforms
from torchvision import datasets
from torch import cuda

# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

print(f'training happens on : {device}')

# Hyper parameters

num_epochs = 5
num_classes = 2
batch_size = 100
learning_rate = 0.001
# number of worker processes for faster IO
num_workers = 2

# MY dataset
transformations = transforms.Compose([transforms.Resize(256),
                                      transforms.ToTensor(),
                                      transforms.Normalize(mean=(0.5, 0.5, 0.5),
                                                           std=(0.5, 0.5, 0.5))
                                      ])

train_dataset = datasets.ImageFolder(root=r'C:\Users\Mariane\Desktop\Testpy\train_data',
                                    transform=transformations)
# you didn't provide test data, so we are using the training data for the sake of
# our experiment!
test_dataset = datasets.ImageFolder(root=r'C:\Users\Mariane\Desktop\Testpy\train_data',
                                    transform=transformations)

# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           num_workers=num_workers)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False,
                                          num_workers=num_workers)


# Convolutional neural network (two convolutional layers)
def conv_bn_relu(in_channel, out_channel, kernel_size=3, stride=1, padding=1,
                 bias=False, batchnorm=True):

    layers = nn.ModuleList()
    conv_layer = nn.Conv2d(in_channels=in_channel, out_channels=out_channel,
                           kernel_size=kernel_size, stride=stride, padding=padding, bias=bias)
    layers.append(conv_layer)
    if batchnorm:
        layers.append(nn.BatchNorm2d(out_channel))
    layers.append(nn.ReLU())
    return nn.Sequential(*layers)

# for using functional form of classes and methods
import torch.nn.functional as F
class ConvNet(nn.Module):
    def __init__(self, num_classes=10):
        super(ConvNet, self).__init__()
        self.net = nn.Sequential(conv_bn_relu(3, 16, 5, 1, 2),
                                 nn.MaxPool2d(kernel_size=2, stride=2),
                                 conv_bn_relu(16, 32, 5, 1, 2),
                                 nn.MaxPool2d(kernel_size=2, stride=2)
                                 )
        # method 2 
        # self.layer1 = conv_bn_relu(3, 16, 5, 1, 2)
        # self.layer2 = conv_bn_relu(16, 32, 5, 1, 2)
        # self.maxpool = nn.MaxPool2d(2, 2)
        self.fc = nn.Linear(32, num_classes)
        
    def forward(self, x):
        # first method 
        out = self.net(x)
        # second method : 
        # out = self.layer1(x)
        #print(f'layer1: output.shape : {out.shape}')
        # out = self.maxpool(out)
        #print(f'maxpool: output.shape : {out.shape}')
        # out = self.layer2(out)
        #print(f'layer2: output.shape : {out.shape}')
        # out = self.maxpool(out)
        #print(f'maxpool: output.shape : {out.shape}')
        out = F.avg_pool2d(out, out.size()[2:])
        out = out.view(x.size(0), -1)
        # print(out.shape)
        out = self.fc(out)
        return out


model = ConvNet(num_classes).to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):

    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i+1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch+1, num_epochs, i+1, total_step, loss.item()))


# Test the model
model.eval()  # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)

        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)

        total += labels.size(0)
        correct += (predicted == labels).float().sum().item()

    print('Test Accuracy of the model on the {} test images: {} %'.format(
        total, 100 * correct / total))


# Save the model checkpoint

torch.save(model.state_dict(), 'model.ckpt')
```
