import time
from tqdm import tqdm
with tqdm(total=200) as pbar:
    for i in range(20):
        pbar.update(10)
        time.sleep(.1)
# Output below; the bar was updated 20 times in total
0%| | 0/200 [00:00<?, ?it/s]
10%|█ | 20/200 [00:00<00:00, 199.48it/s]
15%|█▌ | 30/200 [00:00<00:01, 150.95it/s]
20%|██ | 40/200 [00:00<00:01, 128.76it/s]
25%|██▌ | 50/200 [00:00<00:01, 115.72it/s]
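update() accepts any increment, which is useful when progress is measured in units other than loop iterations (bytes, records, etc.). A minimal sketch, assuming hypothetical chunk sizes:

import time
from tqdm import tqdm

chunks = [30, 50, 20, 60, 40]  # hypothetical work sizes, total = 200
with tqdm(total=sum(chunks)) as pbar:
    for size in chunks:
        time.sleep(0.1)    # stand-in for real work
        pbar.update(size)  # advance by however much was just processed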
from tqdm import tqdm
import time
# The total parameter sets the overall length of the progress bar
with tqdm(total=100) as pbar:
    for i in range(100):
        time.sleep(0.1)
        pbar.update(1)  # advance the bar by 1 on each iteration
# Output
0%| | 0/100 [00:00<?, ?it/s]
1%| | 1/100 [00:00<00:09, 9.98it/s]
2%|▏ | 2/100 [00:00<00:09, 9.83it/s]
3%|▎ | 3/100 [00:00<00:10, 9.65it/s]
4%|▍ | 4/100 [00:00<00:10, 9.53it/s]
5%|▌ | 5/100 [00:00<00:09, 9.55it/s]
...
100%|██████████| 100/100 [00:10<00:00, 9.45it/s]
from tqdm import tqdm
import time
# The total parameter sets the overall length of the progress bar
pbar = tqdm(total=100)
for i in range(100):
    time.sleep(0.05)
    # advance the bar by 1 on each iteration
    pbar.update(1)
# don't forget to close the bar and release its resources
pbar.close()
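If the loop body can raise an exception, a try/finally guarantees close() still runs; a defensive variant of the snippet above (my addition, not from the source):

from tqdm import tqdm
import time

pbar = tqdm(total=100)
try:
    for i in range(100):
        time.sleep(0.05)
        pbar.update(1)
finally:
    pbar.close()  # release the bar even if the loop fails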
from tqdm import trange
from random import random, randint
import time
with trange(10) as t:
    for i in t:
        # set the text shown on the left of the bar
        t.set_description("GEN %i" % i)
        # set the info shown on the right of the bar
        t.set_postfix(loss=random(), gen=randint(1, 999), str="h", lst=[1, 2])
        time.sleep(0.1)
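For reference, trange(10) is just shorthand for tqdm(range(10)), so set_description and set_postfix work on any bar created directly with tqdm as well. A minimal sketch (the metric here is a placeholder, not from a real model):

from tqdm import tqdm

pbar = tqdm(range(10))
for i in pbar:
    pbar.set_description("step %i" % i)
    pbar.set_postfix(metric=1.0 / (i + 1))  # placeholder value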
from tqdm import tqdm
import time
# ncols pins the whole bar to a width of 10 characters
for i in tqdm(range(1000), ncols=10):
    time.sleep(0.001)
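Besides ncols, other display keywords follow the same pattern; a short sketch using desc (a prefix label) and unit (the label shown in the rate), both standard tqdm parameters:

from tqdm import tqdm
import time

# desc prefixes the bar, unit replaces the default "it" in the rate display
for i in tqdm(range(1000), desc="processing", unit="rec"):
    time.sleep(0.001)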
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from tqdm import tqdm
class CNN(nn.Module):
    def __init__(self, in_channels=1, num_classes=10):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=8, kernel_size=(3,3), stride=(1,1), padding=(1,1))
        self.pool = nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
        self.conv2 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=(3,3), stride=(1,1), padding=(1,1))
        self.fc1 = nn.Linear(16*7*7, num_classes)  # two 2x2 poolings shrink 28x28 to 7x7

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        x = F.relu(self.conv2(x))
        x = self.pool(x)
        x = x.reshape(x.shape[0], -1)  # flatten for the linear layer
        x = self.fc1(x)
        return x
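As a quick sanity check (not part of the original listing), a random MNIST-shaped batch confirms the 16*7*7 input size of fc1: two 2x2 poolings shrink 28x28 to 7x7.

model = CNN()
x = torch.randn(64, 1, 28, 28)  # fake batch of MNIST-sized images
print(model(x).shape)           # torch.Size([64, 10])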
# Set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
# Hyperparameters
in_channels = 1
num_classes = 10
learning_rate = 0.001
batch_size = 64
num_epochs = 5
# Load Data
train_dataset = datasets.MNIST(root="dataset/", train=True, transform=transforms.ToTensor(), download=True)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_dataset = datasets.MNIST(root="dataset/", train=False, transform=transforms.ToTensor(), download=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)  # was train_dataset with shuffle=True; the test split should be evaluated as-is
# Initialize network
model = CNN().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(),lr=learning_rate)
# Train Network
for epoch in range(num_epochs):
    # for data, targets in tqdm(train_loader, leave=False):  # keep the bar on a single line
    for data, targets in tqdm(train_loader):
        # Get data to cuda if possible
        data = data.to(device=device)
        targets = targets.to(device=device)
        # forward
        scores = model(data)
        loss = criterion(scores, targets)
        # backward
        optimizer.zero_grad()
        loss.backward()
        # gradient descent or adam step
        optimizer.step()
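The snippet above only trains; an evaluation pass over test_loader follows the same tqdm pattern. A minimal sketch (my addition, not in the source):

model.eval()
num_correct = 0
num_samples = 0
with torch.no_grad():
    for data, targets in tqdm(test_loader, desc="eval"):
        data = data.to(device=device)
        targets = targets.to(device=device)
        scores = model(data)
        _, predictions = scores.max(1)
        num_correct += (predictions == targets).sum().item()
        num_samples += predictions.size(0)
print(f"test accuracy: {num_correct / num_samples:.4f}")
model.train()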
# Alternative loop headers:
# for data, targets in tqdm(train_loader, leave=False):  # keep the bar on a single line
# for index, (data, targets) in tqdm(enumerate(train_loader), total=len(train_loader), leave=True):
for epoch in range(num_epochs):
    losses = []
    accuracy = []
    # for data, targets in tqdm(train_loader, leave=False):  # keep the bar on a single line
    loop = tqdm(train_loader, total=len(train_loader))
    for data, targets in loop:
        # Get data to cuda if possible
        data = data.to(device=device)
        targets = targets.to(device=device)
        # forward
        scores = model(data)
        loss = criterion(scores, targets)
        losses.append(loss.item())  # store a plain float, not the graph-holding tensor
        # backward
        optimizer.zero_grad()
        loss.backward()
        _, predictions = scores.max(1)
        num_correct = (predictions == targets).sum()
        running_train_acc = float(num_correct) / float(data.shape[0])
        accuracy.append(running_train_acc)
        # gradient descent or adam step
        optimizer.step()
        loop.set_description(f"Epoch [{epoch}/{num_epochs}]")
        loop.set_postfix(loss=loss.item(), acc=running_train_acc)
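Because losses and accuracy now hold plain floats per batch, a per-epoch summary can be printed at the end of the epoch loop (a small addition, not in the source):

    # still inside the `for epoch` loop, after all batches finish
    mean_loss = sum(losses) / len(losses)
    mean_acc = sum(accuracy) / len(accuracy)
    print(f"epoch {epoch}: mean loss {mean_loss:.4f}, mean acc {mean_acc:.4f}")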