23 changes: 12 additions & 11 deletions lm-hackers.ipynb
@@ -424,7 +424,8 @@
"metadata": {},
"outputs": [],
"source": [
"from openai import ChatCompletion,Completion"
"from openai import OpenAI # New OpenAI api\n",
"client = OpenAI() # Uses default api key"
]
},
{
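The v1 SDK replaces the module-level ChatCompletion and Completion classes with methods on a client object. A minimal sketch of constructing that client, assuming openai>=1.0 is installed; OpenAI() with no arguments reads OPENAI_API_KEY from the environment, and an explicit key can be passed instead:

    import os
    from openai import OpenAI

    client = OpenAI()                                      # picks up OPENAI_API_KEY
    client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])  # explicit, equivalent form
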
@@ -436,7 +437,7 @@
"source": [
"aussie_sys = \"You are an Aussie LLM that uses Aussie slang and analogies whenever possible.\"\n",
"\n",
"c = ChatCompletion.create(\n",
"c = client.chat.completions.create(\n",
" model=\"gpt-3.5-turbo\",\n",
" messages=[{\"role\": \"system\", \"content\": aussie_sys},\n",
" {\"role\": \"user\", \"content\": \"What is money?\"}])"
@@ -468,7 +469,7 @@
}
],
"source": [
"c['choices'][0]['message']['content']"
"c.choices[0].message.content"
]
},
{
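The v1 client returns typed (pydantic) objects rather than dicts, which is why the bracket indexing above becomes attribute access; it is also why the fastcore nested_idx helper in the next hunk is no longer needed. If a plain dict is ever wanted, the object converts back; a short sketch, assuming `c` holds the completion from the cell above:

    content = c.choices[0].message.content  # attribute access on the typed response
    as_dict = c.model_dump()                # plain-dict view, close to the old shape
    assert as_dict["choices"][0]["message"]["content"] == content
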
@@ -478,7 +479,7 @@
"metadata": {},
"outputs": [],
"source": [
"from fastcore.utils import nested_idx"
"#from fastcore.utils import nested_idx"
]
},
{
@@ -488,7 +489,7 @@
"metadata": {},
"outputs": [],
"source": [
"def response(compl): print(nested_idx(compl, 'choices', 0, 'message', 'content'))"
"def response(compl): print(c.choices[0].message.content)"
]
},
{
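With the parameter actually used (compl rather than the global c), the helper works on any chat completion object:

    response(c)  # prints the assistant's reply from the completion above
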
@@ -580,7 +581,7 @@
"metadata": {},
"outputs": [],
"source": [
"c = ChatCompletion.create(\n",
"c = client.chat.completions.create( \n",
" model=\"gpt-3.5-turbo\",\n",
" messages=[{\"role\": \"system\", \"content\": aussie_sys},\n",
" {\"role\": \"user\", \"content\": \"What is money?\"},\n",
@@ -617,7 +618,7 @@
" msgs = []\n",
" if system: msgs.append({\"role\": \"system\", \"content\": system})\n",
" msgs.append({\"role\": \"user\", \"content\": user})\n",
" return ChatCompletion.create(model=model, messages=msgs, **kwargs)"
" return client.chat.completions.create(model=model, messages=msgs, **kwargs)"
]
},
{
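Because the wrapper forwards **kwargs, sampling options reach the new endpoint unchanged. A usage sketch, assuming the askgpt definition above; the prompt and parameter values here are only illustrative:

    c = askgpt("What is the meaning of life?",
               system=aussie_sys, temperature=0.7, max_tokens=200)
    response(c)
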
@@ -657,7 +658,7 @@
"source": [
"def call_api(prompt, model=\"gpt-3.5-turbo\"):\n",
" msgs = [{\"role\": \"user\", \"content\": prompt}]\n",
" try: return ChatCompletion.create(model=model, messages=msgs)\n",
" try: return client.chat.completions.create(model=model, messages=msgs)\n",
" except openai.error.RateLimitError as e:\n",
" retry_after = int(e.headers.get(\"retry-after\", 60))\n",
" print(f\"Rate limit exceeded, waiting for {retry_after} seconds...\")\n",
@@ -713,8 +714,8 @@
"metadata": {},
"outputs": [],
"source": [
"c = Completion.create(prompt=\"Australian Jeremy Howard is \",\n",
" model=\"gpt-3.5-turbo-instruct\", echo=True, logprobs=5)"
"c = client.completions.create(prompt=\"Australian Jeremy Howard is \",\n",
" model=\"gpt-3.5-turbo-instruct\", echo=False, logprobs=5)"
]
},
{
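On the legacy completions endpoint the logprobs come back on the choice object. A minimal sketch of reading them, assuming the `c` from this cell; with echo=False the prompt tokens are not included, only the sampled completion:

    lp = c.choices[0].logprobs
    for tok, logprob in zip(lp.tokens, lp.token_logprobs):
        print(f"{tok!r}: {logprob:.3f}")  # per-token log-probability of the completion
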
@@ -1015,7 +1016,7 @@
"metadata": {},
"outputs": [],
"source": [
"c = ChatCompletion.create(\n",
"c = client.chat.completions.create(\n",
" model=\"gpt-3.5-turbo\",\n",
" functions=[schema(python)],\n",
" messages=[{\"role\": \"user\", \"content\": \"What is 12 factorial?\"},\n",
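When the model chooses to call the function, the v1 response carries it under message.function_call, with arguments serialized as a JSON string. A sketch of dispatching it, assuming the completion `c` from this cell and that the python function defined earlier in the notebook takes the parameters named in its schema:

    import json

    fc = c.choices[0].message.function_call
    if fc is not None and fc.name == "python":
        args = json.loads(fc.arguments)  # e.g. {"code": "..."}
        print(python(**args))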