fix preprompt
sketch.js CHANGED
@@ -7,7 +7,7 @@ env.allowLocalModels = false;
 
 // GLOBAL VARIABLES
 let blankArray = ["woman", "man", "non-binary person"]
-let PREPROMPT = `Please
+let PREPROMPT = `Please continue the story, and fill any [MASK] with your own words:`
 // let PREPROMPT = `Please complete the phrase and fill in any [MASK]: `
 let PROMPT_INPUT = `The [BLANK] has a job as a [MASK] but...` // a field for writing or changing a text value
 let pField

@@ -24,7 +24,7 @@ async function textGenTask(pre, prompt, blanks){
   let promptArray = []
   blanks.forEach(b => {
     let p = prompt.replace('[BLANK]', b) // replace the string segment with an item from the blankArray
-    promptArray.push(
+    promptArray.push(pre + p) // add the new prompt to the list we created
   })
   console.log(promptArray)
   // let INPUT = pre + prompt // simple concatenated input

@@ -41,21 +41,21 @@ async function textGenTask(pre, prompt, blanks){
   // - Xenova/llama2.c-stories15M // only fairy tales
   // - webml/TinyLlama-1.1B-Chat-v1.0
   // - Xenova/TinyLlama-1.1B-Chat-v1.0
-
   // - Xenova/flan-alpaca-large //text2text
 
 
-  // const pipe = await pipeline('text-generation', MODEL)
+  // const pipe = await pipeline('text-generation', MODEL) //different task type, also for text generation
   const pipe = await pipeline('text2text-generation', MODEL)
 
   var hyperparameters = { max_new_tokens: 60, top_k: 90, repetition_penalty: 1.5 }
+  // setting hyperparameters
+  // max_new_tokens: 256, top_k: 50, temperature: 0.7, do_sample: true, no_repeat_ngram_size: 2,
+  // , num_return_sequences: 2 (must be 1?)
 
+  // change model run to iterative for each prompt generated locally — will be more expensive??
   promptArray.forEach(async i => {
     // RUN INPUT THROUGH MODEL,
     var out = await pipe(i, hyperparameters)
-    // setting hyperparameters
-    // max_new_tokens: 256, top_k: 50, temperature: 0.7, do_sample: true, no_repeat_ngram_size: 2,
-    // , num_return_sequences: 2 (must be 1?)
 
     console.log(await out)
     console.log('text-gen task completed')
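
The first two hunks turn one template into several prompts: each entry of blankArray is substituted for [BLANK] and the fixed PREPROMPT is prepended. A minimal standalone sketch of that fan-out, using the names copied from the diff above:

```js
// Sketch of the [BLANK] fan-out; blankArray, PREPROMPT, and PROMPT_INPUT
// are taken verbatim from sketch.js.
const blankArray = ["woman", "man", "non-binary person"]
const PREPROMPT = `Please continue the story, and fill any [MASK] with your own words:`
const PROMPT_INPUT = `The [BLANK] has a job as a [MASK] but...`

// one prompt per blank: replace [BLANK], then prepend the preprompt
const promptArray = blankArray.map(b => PREPROMPT + PROMPT_INPUT.replace('[BLANK]', b))
console.log(promptArray)
// e.g. "Please continue the story, and fill any [MASK] with your own words:The woman has a job as a [MASK] but..."
```

Note that `pre + p` concatenates with no separator, so the prompt runs directly after the colon; the [MASK] tokens are left in place for the model to fill.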
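The pipeline call in the third hunk is the transformers.js API. A hedged sketch of the setup, reusing the `env.allowLocalModels = false;` line shown in the first hunk header; the MODEL constant is an assumption here, picked from the text2text candidates in the commented model list, since the Space's actual assignment sits outside these hunks:

```js
import { pipeline, env } from '@xenova/transformers'

env.allowLocalModels = false // matches the hunk header in the diff

// Assumption: one of the text2text models named in the comments above
const MODEL = 'Xenova/flan-alpaca-large'
const pipe = await pipeline('text2text-generation', MODEL)

// the same generation options as in the diff
const hyperparameters = { max_new_tokens: 60, top_k: 90, repetition_penalty: 1.5 }
const out = await pipe('The woman has a job as a [MASK] but...', hyperparameters)
console.log(out) // expect an array like [{ generated_text: '...' }]
```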
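One caveat in the final hunk: Array.prototype.forEach does not await an async callback, so all the pipe() calls start at once and 'text-gen task completed' can log before any output arrives. If the run really should be iterative, as the added "change model run to iterative" comment suggests, a for...of loop awaits each prompt in turn; a sketch under the same names:

```js
// Sequential variant of promptArray.forEach(async i => ...):
// for...of awaits each generation before starting the next one.
async function runAll(promptArray, pipe, hyperparameters) {
  const outputs = []
  for (const prompt of promptArray) {
    const out = await pipe(prompt, hyperparameters) // one model call per prompt
    console.log(out)
    outputs.push(out)
  }
  console.log('text-gen task completed') // now logs only after every prompt finishes
  return outputs
}
```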