fix: Argument passing in dsp

Leni Aniva 2024-10-03 12:03:33 -07:00
parent c91024f201
commit f1e996baae
Signed by: aniva
GPG Key ID: 4D9B1C8D10EA4C50
3 changed files with 30 additions and 192 deletions


@@ -1,162 +0,0 @@
"""
Core fields of the data records used to build DSP prompts:
"nl_problem": ..., # x*_nl
"nl_solution": ..., # y*_nl = draft*
"fl_problem": ..., # x*_fl
"fl_partial_sketch": ..., # z_fl example = sketch
"src_header_fl_problem": ..., #src_header_x*_fl
"fl_header_sketch": ..., # hz_fl suggested header
"""
import json
import sys
from pathlib import Path
from typing import Optional
# just an example of stop tokens from the MATH eval code
# STOP_TOKENS: list[str] = ["Solution:", "Problem:", "Question:", "USER:", "USER:", "USER", "ASSISTANT:", "ASSISTANT", "Instruction:", "Instruction", "Response:", "Response"]
default_path_2_examples = '~/gold-ai-olympiad/data/debug/toy_example1_dsp/dsp_debug5_sf/dsp_debug5_sf_train.json'
# -- Prompt draft (P_draft) for Lean 4
"""
Draft an informal solution similar to the one below.
The informal solution will be used to sketch a formal proof in the Lean 4 Proof Assistant.
Here are some examples:
Informal:
(*### Problem\n\n
[...nl/i problem text...]\n\n
### Solution\n\n
[...nl/i solution/draft text...]\n\n
*)\n\n
Informal:
(*### Problem\n\n
{nl_problem}
### Solution\n\n
[...Model Completion...]
"""
SYSTEM_PROMPT_DRAFT_V0 = 'You are an expert mathematician and an expert in the Lean 4 Proof Assistant.'
STOP_TOKENS_DRAFT_V0: list[str] = ['Informal:', '(*### Problem']
prompt_draft_template_lean4_v0 = ("Draft an informal solution similar to the one below. "
"The informal solution will be used to sketch a formal proof in the Lean 4 Proof Assistant. "
"Here are some examples of informal problem solutions pairs:\n")
def get_prompt_draft_template_4_lean_v0(
    path_2_examples: str = default_path_2_examples,
    start: int = 0,
    end: int = sys.maxsize,
    prompt_draft_template_4_lean: Optional[str] = prompt_draft_template_lean4_v0,
    verbose: bool = False,
):
    path_2_examples = Path(path_2_examples).expanduser()
    # load the list of example dicts from the json file
    with open(path_2_examples, 'r') as f:
        examples: list[dict] = json.load(f)
    print(f'{len(examples)=}') if verbose else None
    examples = examples[start:end]
    # -- Create prompt by appending few shot examples
    for example in examples:
        nl_problem = example['nl_problem']
        new_few_shot_example = "\nInformal:\n(*### Problem\n\n" + ' '.join(nl_problem)
        nl_solution_sketch = example['nl_solution_sketch']
        new_few_shot_example += "\n\n### Solution\n\n" + ' '.join(nl_solution_sketch) + "*)\n"
        prompt_draft_template_4_lean += new_few_shot_example
    # Add part to elicit model to do task
    prompt_draft_template_4_lean += "\nInformal: \n(*### Problem\n\n{nl_problem}\n\n### Solution\n"
    # Return
    print(prompt_draft_template_4_lean) if verbose else None
    return prompt_draft_template_4_lean

prompt_draft_template_lean4_v0 = get_prompt_draft_template_4_lean_v0()
# -- Prompt sketch (P_sketch) for Lean 4
"""
[... Translate informal draft to a formal sketch in Lean 4. Here are some examples: ...]
Informal:\n
(*### Problem\n\n
[...nl/i problem text...]\n\n
### Solution\n\n
[...nl/i solution/draft text...]\n\n
*)\n\n
Formal:\n
[...fl/i problem text...]
[...fl/i partial sketch text...]
\n\n
Informal:\n
(*### Problem\n\n
{nl_problem}
### Solution\n\n
{nl_solution}
*)\n\n
Formal:\n
{fl_problem}
[...Model Completion...]
"""
# The task is mostly writing Lean, but perhaps making the model think it is also good at maths helps; we could later test a system prompt focused solely on Lean 4.
SYSTEM_PROMPT_SKETCH_V0 = 'You are an expert mathematician and an expert in the Lean 4 Proof Assistant.'
STOP_TOKENS_SKETCH_V0: list[str] = ['Informal:', '(*### Problem', '###Solution', 'Formal:']
prompt_sketch_template_lean4_v0 = ("Translate the informal solution into a sketch in the "
"formal Lean 4 proof. Add <TODO_PROOF_OR_HAMMER> in the formal sketch whenever possible. "
"<TODO_PROOF_OR_HAMMER> will be used to call a automated theorem prover or tactic in Lean 4. "
"Here are some examples:\n"
)
def get_prompt_sketch_template_4_lean_v0(
    path_2_examples: str = default_path_2_examples,
    start: int = 0,
    end: int = sys.maxsize,
    prompt_sketch_template_4_lean: Optional[str] = prompt_sketch_template_lean4_v0,
    autoformalize_prob_in_prompt: Optional[bool] = False,
    verbose: bool = False,
):
    path_2_examples = Path(path_2_examples).expanduser()
    # load the list of example dicts from the json file
    with open(path_2_examples, 'r') as f:
        examples: list[dict] = json.load(f)
    print(f'{len(examples)=}') if verbose else None
    examples = examples[start:end]
    # -- Create prompt by appending few shot examples
    for example in examples:
        # TODO: might need to figure out the header thing
        nl_problem = example['nl_problem']
        new_few_shot_example = "\nInformal:\n(*### Problem\n\n" + ' '.join(nl_problem)
        nl_solution_sketch = example['nl_solution_sketch']
        new_few_shot_example += "\n\n### Solution\n\n" + ' '.join(nl_solution_sketch) + "*)\n"
        fl_problem = example['fl_problem']
        fl_header_sketch = example['fl_header_sketch']
        fl_header_sketch = '\n'.join(fl_header_sketch) + '\n\n'
        new_few_shot_example += "\nFormal:\n" + fl_header_sketch + ' '.join(fl_problem)
        fl_partial_sketch = example['fl_partial_sketch']
        new_few_shot_example += ' '.join(fl_partial_sketch)
        prompt_sketch_template_4_lean += new_few_shot_example
    # Add part to elicit model to do task
    if autoformalize_prob_in_prompt:
        prompt_sketch_template_4_lean += "\nInformal:\n(*### Problem\n\n{nl_problem}\n\n### Solution\n\n{nl_solution}*)\n\nFormal:\n"
    else:
        prompt_sketch_template_4_lean += "\nInformal:\n(*### Problem\n\n{nl_problem}\n\n### Solution\n\n{nl_solution}*)\n\nFormal:\n{fl_problem}"
    # Return
    print(prompt_sketch_template_4_lean) if verbose else None
    return prompt_sketch_template_4_lean

prompt_sketch_template_lean4_v0 = get_prompt_sketch_template_4_lean_v0()
# -- Main
def main(
    verbose: bool = True,
):
    # -- Print Prompt Draft
    # print('-- Prompt Draft --')
    # print(prompt_draft_template_lean4_v0)
    # -- Print Prompt Sketch
    print('-- Prompt Sketch --')
    sketch_prompt: str = get_prompt_sketch_template_4_lean_v0(verbose=verbose)
    # print(prompt_sketch_template_lean4_v0)
    print(sketch_prompt)

if __name__ == '__main__':
    import time
    start = time.time()
    # fire.Fire()
    main()
    end = time.time()
    print(f'Time elapsed: {end - start} seconds, or {(end - start) / 60} minutes, or {(end - start) / 3600} hours.')
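
Note: the prompt builders above read example['nl_problem'], example['nl_solution_sketch'], example['fl_problem'], example['fl_header_sketch'], and example['fl_partial_sketch'], each a list of string fragments joined with spaces or newlines. A record in dsp_debug5_sf_train.json could therefore look like the sketch below; the keys and the list-of-strings shape come from the code above, while the field values are invented for illustration.

# Hypothetical record shape (values invented); each field is a list of fragments.
example_record = {
    "nl_problem": ["Show that for any natural number n,", "n + 0 = n."],
    "nl_solution_sketch": ["This follows by reflexivity,", "since n + 0 reduces to n."],
    "fl_problem": ["theorem n_add_zero (n : Nat) : n + 0 = n :="],
    "fl_header_sketch": ["import Mathlib"],
    "fl_partial_sketch": ["by", "<TODO_PROOF_OR_HAMMER>"],
}

The {nl_problem}, {nl_solution}, and {fl_problem} placeholders left at the end of each template are presumably filled in per problem downstream, e.g. via str.format.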


@@ -1,4 +1,4 @@
-import sys, os, json, argparse
+import sys, os, json
 from dataclasses import dataclass
 from pathlib import Path
 from typing import Union, Any
@@ -218,25 +218,10 @@ experiment_dir = Path(__file__).resolve().parent
 # -- Main
-def main(
-    path_2_eval_dataset: str = experiment_dir / 'debug/toy_example1_dsp/dsp_debug5_sf/dsp_debug5_sf_train.json',
-    # model: str = 'deepseek-ai/deepseek-math-7b-instruct',
-    # model: str = 'gpt2',
-    # model: str = 'gpt-3.5-turbo',
-    model: str = 'gpt-4o',
-    start: int = 0,
-    end: int = sys.maxsize,
-    # end: int = 10, # do 10 so enough boxed qs are there
-    batch_size: int = 10, # putnam has 348
-    n_samples: int = 1, # num seqs to return for given prompt
-    max_tokens: int = 2048,
-    top_p: float = 0.95,
-    temperature: float = 0.8,
-    **kwargs,
-):
+def main(args):
     import time
     start_time = time.time()
-    path_2_eval_dataset = Path(path_2_eval_dataset).expanduser()
+    path_2_eval_dataset = Path(args.eval_dataset).expanduser()
     print(f'{path_2_eval_dataset=}')
 
     server = Server()
@@ -255,10 +240,22 @@ def main(
     # - Run DSP for Lean
     api_key = os.environ['OPENAI_API_KEY']
-    draft_sampling_params = SamplingParams(n=n_samples, max_tokens=max_tokens, top_p=top_p, temperature=temperature, stop=STOP_TOKENS_DRAFT_V0)
-    sketch_sampling_params = SamplingParams(n=n_samples, max_tokens=max_tokens, top_p=top_p, temperature=temperature, stop=STOP_TOKENS_SKETCH_V0)
+    draft_sampling_params = SamplingParams(
+        n=args.n_samples,
+        max_tokens=args.max_tokens,
+        top_p=args.top_p,
+        temperature=args.temperature,
+        stop=STOP_TOKENS_DRAFT_V0,
+    )
+    sketch_sampling_params = SamplingParams(
+        n=args.n_samples,
+        max_tokens=args.max_tokens,
+        top_p=args.top_p,
+        temperature=args.temperature,
+        stop=STOP_TOKENS_SKETCH_V0,
+    )
 
     eng: OpenAI_DSP_Engine = OpenAI_DSP_Engine(
-        model=model,
+        model=args.model,
         api_key=api_key,
         verbose_init=True,
         draft_sampling_params=draft_sampling_params,
@@ -276,9 +273,12 @@ def main(
     # run.finish()
 
 if __name__ == "__main__":
+    import argparse
+
     parser = argparse.ArgumentParser(
         prog='DSP',
-        description="Draft-Sketch-Prove on Lean code"
+        description="Draft-Sketch-Prove on Lean code",
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
     )
     parser.add_argument(
         'mode',
@@ -290,19 +290,19 @@ if __name__ == "__main__":
         help="Evaluation dataset path",
         default=experiment_dir / 'debug/toy_example1_dsp/dsp_debug5_sf/dsp_debug5_sf_train.json',
     )
-    parser.add_argument("--model", help="Model", default="gpt-4o")
+    parser.add_argument("--model", help="Model", default="gpt-4o", choices=["gpt2", "gpt-3.5-turbo", "gpt-4o", "deepseek-ai/deepseek-math-7b-instruct"])
     parser.add_argument("--start", default=0)
     parser.add_argument("--end", default=sys.maxsize)
-    parser.add_argument("--batchsize", default=10)
+    parser.add_argument("--batchsize", default=10, help="putnam has 348")
-    parser.add_argument("--n-samples", default=1)
+    parser.add_argument("--n-samples", default=1, help="num seqs to return for given prompt")
-    parser.add_argument("--max-tokens", default=2048)
+    parser.add_argument("--max-tokens", default=2048, help="Maximum number of tokens in one sample")
-    parser.add_argument("--top-p", default=0.95)
+    parser.add_argument("--top-p", default=0.95, help="Sampling top p")
-    parser.add_argument("--temperature", default=0.8)
+    parser.add_argument("--temperature", default=0.8, help="Sampling temperature")
     parser.add_argument("--verbose", action='store_true')
     args = parser.parse_args()
 
     if args.mode == "eval":
-        main(**args)
+        main(args)
     elif args.mode == "prompts":
         prompt = get_prompt_sketch_template_4_lean_v0(verbose=args.verbose)
         print(prompt)
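
Note: the crash this commit fixes is the old main(**args) call. argparse.Namespace is not a mapping, so unpacking it with ** raises a TypeError; the fix passes the namespace itself. A minimal sketch of the failure (the argument name mirrors the parser above, the rest is illustrative):

import argparse

def main(args):
    print(args.model)

parser = argparse.ArgumentParser()
parser.add_argument("--model", default="gpt-4o")
args = parser.parse_args([])

# main(**args)        # TypeError: argument after ** must be a mapping
# main(**vars(args))  # still fails: main() expects one positional argument
main(args)            # the fix applied in this commit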

src

@@ -1 +1 @@
-Subproject commit 143cd289bbc4ea93f0889cf05737c3b6f90a51df
+Subproject commit 10cb32e03f43e9306203d1c4a3852573ec55c4f2