*My post explains Places365.
Places365() can load the Places365 dataset as shown below:
*Memos:

- root (Required - Type: str): the folder the dataset files are stored in or downloaded to.
- split (Optional - Default: "train-standard" - Type: str): "train-standard", "train-challenge" or "val" can be set.
- small (Optional - Default: False - Type: bool): if True, the 256x256 images are used instead of the high-resolution ones.
- download (Optional - Default: False - Type: bool): if True, the devkit and image archives are downloaded to root.
- transform (Optional - Default: None - Type: callable): applied to each image.
- target_transform (Optional - Default: None - Type: callable): applied to each label.
- loader (Optional - Default: default_loader - Type: callable): the function used to load an image from its path.
from torchvision.datasets import Places365
from torchvision.datasets.folder import default_loader

trainstd_large_data = Places365(
    root="data"
)

trainstd_large_data = Places365(
    root="data",
    split="train-standard",
    small=False,
    download=False,
    transform=None,
    target_transform=None,
    loader=default_loader
)

trainstd_small_data = Places365(
    root="data",
    split="train-standard",
    small=True
)

trainchal_large_data = Places365(
    root="data",
    split="train-challenge",
    small=False
)

trainchal_small_data = Places365(
    root="data",
    split="train-challenge",
    small=True
)

val_large_data = Places365(
    root="data",
    split="val",
    small=False
)

val_small_data = Places365(
    root="data",
    split="val",
    small=True
)

len(trainstd_large_data), len(trainstd_small_data)
# (1803460, 1803460)

len(trainchal_large_data), len(trainchal_small_data)
# (8026628, 8026628)

len(val_large_data), len(val_small_data)
# (36500, 36500)

trainstd_large_data
# Dataset Places365
#     Number of datapoints: 1803460
#     Root location: data
#     Split: train-standard
#     Small: False

trainstd_large_data.root
# 'data'

trainstd_large_data.split
# 'train-standard'

trainstd_large_data.small
# False

trainstd_large_data.download_devkit
trainstd_large_data.download_images
# <bound method Places365.download_devkit of Dataset Places365
#     Number of datapoints: 1803460
#     Root location: data
#     Split: train-standard
#     Small: False>

print(trainstd_large_data.transform)
# None

print(trainstd_large_data.target_transform)
# None

trainstd_large_data.loader
# <function torchvision.datasets.folder.default_loader(path: str) -> Any>

len(trainstd_large_data.classes), trainstd_large_data.classes
# (365,
#  ['/a/airfield', '/a/airplane_cabin', '/a/airport_terminal',
#   '/a/alcove', '/a/alley', '/a/amphitheater', '/a/amusement_arcade',
#   '/a/amusement_park', '/a/apartment_building/outdoor',
#   '/a/aquarium', '/a/aqueduct', '/a/arcade', '/a/arch',
#   '/a/archaelogical_excavation', ..., '/y/youth_hostel', '/z/zen_garden'])

trainstd_large_data[0]
# (<PIL.Image.Image image mode=RGB size=683x512>, 0)

trainstd_large_data[1]
# (<PIL.Image.Image image mode=RGB size=768x512>, 0)

trainstd_large_data[2]
# (<PIL.Image.Image image mode=RGB size=718x512>, 0)

trainstd_large_data[5000]
# (<PIL.Image.Image image mode=RGB size=512x683 at 0x1E7834F4770>, 1)

trainstd_large_data[10000]
# (<PIL.Image.Image image mode=RGB size=683x512 at 0x1E7834A8110>, 2)

trainstd_small_data[0]
# (<PIL.Image.Image image mode=RGB size=256x256>, 0)

trainstd_small_data[1]
# (<PIL.Image.Image image mode=RGB size=256x256>, 0)

trainstd_small_data[2]
# (<PIL.Image.Image image mode=RGB size=256x256>, 0)

trainstd_small_data[5000]
# (<PIL.Image.Image image mode=RGB size=256x256>, 1)

trainstd_small_data[10000]
# (<PIL.Image.Image image mode=RGB size=256x256>, 2)

trainchal_large_data[0]
# (<PIL.Image.Image image mode=RGB size=683x512 at 0x156E22BB680>, 0)

trainchal_large_data[1]
# (<PIL.Image.Image image mode=RGB size=768x512 at 0x156DF8213D0>, 0)

trainchal_large_data[2]
# (<PIL.Image.Image image mode=RGB size=718x512 at 0x156DF8213D0>, 0)

trainchal_large_data[38567]
# (<PIL.Image.Image image mode=RGB size=512x683 at 0x156DF8213D0>, 1)

trainchal_large_data[47891]
# (<PIL.Image.Image image mode=RGB size=683x512 at 0x156DF8213D0>, 2)

trainchal_small_data[0]
# (<PIL.Image.Image image mode=RGB size=256x256 at 0x2955B625CA0>, 0)

trainchal_small_data[1]
# (<PIL.Image.Image image mode=RGB size=256x256 at 0x2950D2A8350>, 0)

trainchal_small_data[2]
# (<PIL.Image.Image image mode=RGB size=256x256 at 0x2950D2A82C0>, 0)

trainchal_small_data[38567]
# (<PIL.Image.Image image mode=RGB size=256x256 at 0x2955B3BF6B0>, 1)

trainchal_small_data[47891]
# (<PIL.Image.Image image mode=RGB size=256x256 at 0x2955B3DD4F0>, 2)

val_large_data[0]
# (<PIL.Image.Image image mode=RGB size=512x772 at 0x295408DA750>, 165)

val_large_data[1]
# (<PIL.Image.Image image mode=RGB size=600x493 at 0x29561D468D0>, 358)

val_large_data[2]
# (<PIL.Image.Image image mode=RGB size=763x512 at 0x2955E09DD60>, 93)

val_large_data[3]
# (<PIL.Image.Image image mode=RGB size=827x512 at 0x29540938A70>, 164)

val_large_data[4]
# (<PIL.Image.Image image mode=RGB size=772x512 at 0x29562600650>, 289)

val_small_data[0]
# (<PIL.Image.Image image mode=RGB size=256x256 at 0x2950D34C500>, 165)

val_small_data[1]
# (<PIL.Image.Image image mode=RGB size=256x256 at 0x29540892870>, 358)

val_small_data[2]
# (<PIL.Image.Image image mode=RGB size=256x256 at 0x2954085DBB0>, 93)

val_small_data[3]
# (<PIL.Image.Image image mode=RGB size=256x256 at 0x29561E348C0>, 164)

val_small_data[4]
# (<PIL.Image.Image image mode=RGB size=256x256 at 0x29560A415B0>, 289)

import matplotlib.pyplot as plt

def show_images(data, ims, main_title=None):
    plt.figure(figsize=(12, 6))
    plt.suptitle(t=main_title, y=1.0, fontsize=14)
    for i, j in enumerate(iterable=ims, start=1):
        plt.subplot(2, 5, i)
        im, lab = data[j]
        plt.imshow(X=im)
        plt.title(label=lab)
    plt.tight_layout(h_pad=3.0)
    plt.show()

trainstd_ims = (0, 1, 2, 5000, 10000, 15000, 20000, 25000, 30000, 35000)
trainchal_ims = (0, 1, 2, 38567, 47891, 74902, 98483, 137663, 150035, 161052)
val_ims = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)

show_images(data=trainstd_large_data, ims=trainstd_ims, main_title="trainstd_large_data")
show_images(data=trainstd_small_data, ims=trainstd_ims, main_title="trainstd_small_data")
show_images(data=trainchal_large_data, ims=trainchal_ims, main_title="trainchal_large_data")
show_images(data=trainchal_small_data, ims=trainchal_ims, main_title="trainchal_small_data")
show_images(data=val_large_data, ims=val_ims, main_title="val_large_data")
show_images(data=val_small_data, ims=val_ims, main_title="val_small_data")
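The code above assumes the dataset archives already sit under data/. If they do not, download=True fetches them first. A minimal sketch, assuming network access and enough disk space; the small "val" split is the smallest download, and already-downloaded archives are not fetched again:

from torchvision.datasets import Places365

# Download the devkit and the 256x256 validation images into "data",
# then load the split.
val_small_data = Places365(
    root="data",
    split="val",
    small=True,
    download=True
)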
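Places365 returns PIL images, so for training they still need to become equally sized tensors. A minimal sketch of the usual next step, assuming the small "val" split is already under data/; the 224x224 input size and the batch size of 32 are arbitrary example values, not requirements:

from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import Places365

# Convert each PIL image to a fixed-size float tensor so images can be batched.
transform = transforms.Compose([
    transforms.Resize(size=(224, 224)),  # example size, not a requirement
    transforms.ToTensor(),               # PIL image -> float tensor in [0, 1]
])

val_small_data = Places365(root="data", split="val", small=True, transform=transform)
val_loader = DataLoader(dataset=val_small_data, batch_size=32, shuffle=False)

images, labels = next(iter(val_loader))
print(images.shape)  # torch.Size([32, 3, 224, 224])
print(labels.shape)  # torch.Size([32])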