gstaff committed on
Commit
6ae9204
·
1 Parent(s): b25f794

Reduce text and image size to reduce compute load.

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -76,7 +76,7 @@ def gen_image(prompt):
76
  pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5",
77
  custom_pipeline="stable_diffusion_mega", torch_dtype=torch.float32,
78
  use_auth_token=AUTH_TOKEN)
79
- images = pipeline.text2img(prompt, width=512, height=512).images
80
  # images = model.generate_images(
81
  # text=prompt,
82
  # seed=-1,
@@ -117,7 +117,7 @@ def gen_monster_text(name):
117
  inp = tensor(prompt_ids)[None].cuda() # Use .cuda() for torch GPU
118
  else:
119
  inp = tensor(prompt_ids)[None]
120
- preds = learner.model.generate(inp, max_length=1024, num_beams=5, temperature=1.5, do_sample=True,
121
  repetition_penalty=1.2)
122
  result = tokenizer.decode(preds[0].cpu().numpy())
123
  result = result.split('###')[0].replace(r'\r\n', '\n').replace('\r', '').replace(r'\r', '')
 
76
  pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5",
77
  custom_pipeline="stable_diffusion_mega", torch_dtype=torch.float32,
78
  use_auth_token=AUTH_TOKEN)
79
+ images = pipeline.text2img(prompt, width=256, height=256).images
80
  # images = model.generate_images(
81
  # text=prompt,
82
  # seed=-1,
 
117
  inp = tensor(prompt_ids)[None].cuda() # Use .cuda() for torch GPU
118
  else:
119
  inp = tensor(prompt_ids)[None]
120
+ preds = learner.model.generate(inp, max_length=512, num_beams=5, temperature=1.5, do_sample=True,
121
  repetition_penalty=1.2)
122
  result = tokenizer.decode(preds[0].cpu().numpy())
123
  result = result.split('###')[0].replace(r'\r\n', '\n').replace('\r', '').replace(r'\r', '')