时间:2021-05-22
我就废话不多说了,直接上代码吧!
# Example: iterating a paired-image dataset (pix2pix-style a/b folder layout)
# in batches with a console progress bar.
#
# NOTE(review): `is_image_file` and `load_img` are referenced below but never
# defined in this snippet — presumably imported from a project util module in
# the original source; confirm before running.

from os import listdir
import os
from time import time

import torch.utils.data as data
import torchvision.transforms as transforms
from torch.utils.data import DataLoader


def printProgressBar(iteration, total, prefix='', suffix='', decimals=1,
                     length=100, fill='=', empty=' ', tip='>', begin='[',
                     end=']', done="[DONE]", clear=True):
    """Render a one-line, carriage-return-updated console progress bar.

    Args:
        iteration: current step (1-based); completion is `iteration == total`.
        total: total number of steps.
        prefix: text placed before the bar.
        suffix: text placed after the percentage.
        decimals: decimal places shown in the percentage.
        length: bar width in characters.
        fill: character for the completed portion.
        empty: character for the remaining portion.
        tip: character drawn at the head of the bar while incomplete.
        begin: opening delimiter of the bar.
        end: closing delimiter of the bar.
        done: message that replaces the bar on completion when `clear` is True.
        clear: on completion, overwrite the bar with `done` padded with spaces
            to 'erase' the previous bar; otherwise just emit a newline.
    """
    percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
    filledLength = int(length * iteration // total)
    bar = fill * filledLength
    if iteration != total:
        # Draw the moving tip only while the bar is incomplete.
        bar = bar + tip
    bar = bar + empty * (length - filledLength - len(tip))
    display = '\r{prefix}{begin}{bar}{end} {percent}%{suffix}' \
        .format(prefix=prefix, begin=begin, bar=bar, end=end,
                percent=percent, suffix=suffix)
    print(display, end=''),  # comma after print() required for python 2
    if iteration == total:  # print with newline on complete
        if clear:  # display given complete message with spaces to 'erase' previous progress bar
            finish = '\r{prefix}{done}'.format(prefix=prefix, done=done)
            if hasattr(str, 'decode'):  # handle python 2 non-unicode strings for proper length measure
                finish = finish.decode('utf-8')
                display = display.decode('utf-8')
            clear = ' ' * max(len(display) - len(finish), 0)
            print(finish + clear)
        else:
            print('')


class DatasetFromFolder(data.Dataset):
    """Paired image dataset: inputs in `<image_dir>/a`, targets in `<image_dir>/b`.

    Corresponding input/target images share the same filename in the two
    sub-folders. Each sample is returned as a `(input, target)` tensor pair
    normalized to [-1, 1] per channel.
    """

    def __init__(self, image_dir):
        super(DatasetFromFolder, self).__init__()
        # photo_path holds inputs ("a"), sketch_path holds targets ("b").
        self.photo_path = os.path.join(image_dir, "a")
        self.sketch_path = os.path.join(image_dir, "b")
        # NOTE(review): `is_image_file` is assumed to filter filenames by
        # image extension — confirm against the project's util module.
        self.image_filenames = [x for x in listdir(self.photo_path) if is_image_file(x)]
        # ToTensor scales to [0, 1]; Normalize with mean/std 0.5 maps to [-1, 1].
        transform_list = [transforms.ToTensor(),
                          transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
        self.transform = transforms.Compose(transform_list)

    def __getitem__(self, index):
        # Load the input/target pair that share the same filename.
        input = load_img(os.path.join(self.photo_path, self.image_filenames[index]))
        input = self.transform(input)
        target = load_img(os.path.join(self.sketch_path, self.image_filenames[index]))
        target = self.transform(target)
        return input, target

    def __len__(self):
        return len(self.image_filenames)


if __name__ == '__main__':
    dataset = DatasetFromFolder("./dataset/facades/train")
    dataloader = DataLoader(dataset=dataset, num_workers=8, batch_size=1, shuffle=True)
    total = len(dataloader)
    for epoch in range(20):
        t0 = time()
        for i, batch in enumerate(dataloader):
            real_a, real_b = batch[0], batch[1]
            # `total + 1` keeps the bar visibly incomplete during the epoch;
            # the call after the loop finalizes it.
            # BUG FIX: original hard-coded `str(1)` here, so every epoch was
            # labelled "Epoch 1" — use the actual epoch counter.
            printProgressBar(i + 1, total + 1, length=20,
                             prefix='Epoch %s ' % str(epoch),
                             suffix=', d_loss: %d' % 1)
        printProgressBar(total, total,
                         done='Epoch [%s] ' % str(epoch) + ', time: %.2f s' % (time() - t0))
声明:本页内容来源网络,仅供用户参考;我单位不保证亦不表示资料全面及准确无误,也不保证亦不表示这些资料为最新信息,如因任何原因,本网内容或者用户因倚赖本网内容造成任何损失或损害,我单位将不会负任何法律责任。如涉及版权问题,请提交至online#300.cn邮箱联系删除。
本文实例分析了JS遍历数组及打印数组的方法。分享给大家供大家参考,具体如下:一直头痛与JS的打印数组,也没看到什么比较好的方法,写一个简单的方法备用。//遍历数
pytorch官网给出的例子中都是使用了已经定义好的特殊数据集接口来加载数据,而且其使用的数据都是官方给出的数据。如果我们有自己收集的数据集,如何用来训练网络呢
在使用pytorch训练模型,经常需要加载大量图片数据,因此pytorch提供了好用的数据加载工具Dataloader。为了实现小批量循环读取大型数据集,在Da
场景提供一种可以遍历聚合对象的方式。又称为:游标cursor模式聚合对象:存储数据迭代器:遍历数据JDK内置的迭代器(List/Set)packagecom.q
BarTender调用数据库批量打印标签时,如果每条数据的打印数量通常是一个定量,用户便可以将打印的数量事先在数据库中与数据对应好,然后直接设置BarTende