Commit 92deea6

Upgraded to SD v1.5. Choose your model with a single define. (Maybe this should be a config parameter...)
1 parent cf11945 commit 92deea6

File tree

1 file changed: +8, -5 lines


sdd.py

Lines changed: 8 additions & 5 deletions
@@ -30,21 +30,24 @@
     "0": {"name": "RTX 2080 Ti", "lock": Lock()},
 }
 
+# MODEL = "CompVis/stable-diffusion-v1-4"
+MODEL = "runwayml/stable-diffusion-v1-5"
+
 # Supress some unnecessary warnings when loading the CLIPTextModel
 logging.set_verbosity_error()
 
 if not torch.cuda.is_available():
     raise RuntimeError('No CUDA device available, exiting.')
 
 # Load the autoencoder model which will be used to decode the latents into image space.
-vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae", use_auth_token=True)
+vae = AutoencoderKL.from_pretrained(MODEL, subfolder="vae", use_auth_token=True)
 
 # Load the tokenizer and text encoder to tokenize and encode the text.
 tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
 text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")
 
 # The UNet model for generating the latents.
-unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet", use_auth_token=True)
+unet = UNet2DConditionModel.from_pretrained(MODEL, subfolder="unet", use_auth_token=True)
 
 # The noise scheduler
 scheduler = LMSDiscreteScheduler(
@@ -132,9 +135,9 @@ def generate_image(prompt, seed, steps, width=512, height=512, guidance=7.5):
 
     # Set the EXIF data. See PIL.ExifTags.TAGS to map numbers to names.
     exif = out.getexif()
-    exif[271] = prompt                          # Make
-    exif[272] = 'Stable Diffusion v1.4'         # Model
-    exif[305] = f'seed={seed}, steps={steps}'   # Software
+    exif[271] = prompt                          # exif: Make
+    exif[272] = MODEL                           # exif: Model
+    exif[305] = f'seed={seed}, steps={steps}'   # exif: Software
 
     buf = BytesIO()
     out.save(buf, format="JPEG", quality=85, exif=exif)
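Editor's note: the commit message floats turning the MODEL define into a config parameter. A minimal sketch of one way that could look, assuming an argparse-based CLI; the --model flag and its default are hypothetical and not part of this commit:

import argparse

# Hypothetical: expose the model id as a command-line parameter instead of
# the in-file MODEL define added in this commit.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--model",
    default="runwayml/stable-diffusion-v1-5",
    help="Hugging Face model id to load (e.g. CompVis/stable-diffusion-v1-4)",
)
args = parser.parse_args()
MODEL = args.model  # the rest of sdd.py can keep using MODEL unchanged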

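As a usage note (not part of the commit): the EXIF tags written in generate_image can be read back with Pillow. A minimal sketch, assuming a hypothetical output.jpg saved from the returned JPEG buffer:

from PIL import Image
from PIL.ExifTags import TAGS

# Hypothetical file saved from the buffer produced by generate_image().
img = Image.open("output.jpg")
exif = img.getexif()
for tag_id, value in exif.items():
    # Map numeric EXIF tag ids (271, 272, 305) to their names (Make, Model, Software).
    print(f"{TAGS.get(tag_id, tag_id)}: {value}")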