Skip to content

Commit 0f5715a

Browse files
committed
Update Codespace with latest changes.
1 parent 2268ec2 commit 0f5715a

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

57 files changed

+248
-135
lines changed

.devcontainer/Dockerfile

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
ARG VARIANT="focal"
2+
FROM buildpack-deps:${VARIANT}-curl
3+
4+
LABEL dev.containers.features="common"
5+
6+
COPY first-run-notice.txt /tmp/scripts/
7+
8+
# Move first run notice to right spot
9+
RUN mkdir -p "/usr/local/etc/vscode-dev-containers/" \
10+
&& mv -f /tmp/scripts/first-run-notice.txt /usr/local/etc/vscode-dev-containers/
11+
12+
# Remove scripts now that we're done with them
13+
RUN rm -rf /tmp/scripts

.devcontainer/bootstrap

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
#!/bin/bash
2+
3+
ROOT_DIR=/workspaces/codespaces-models
4+
5+
npm install ${ROOT_DIR}
6+
7+
pip install -r ${ROOT_DIR}/requirements.txt
8+

.devcontainer/devcontainer.json

Lines changed: 50 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,44 @@
11
{
2-
"image": "mcr.microsoft.com/devcontainers/universal:2",
3-
"updateContentCommand": "npm install; pip install -r requirements.txt",
4-
"postStartCommand": "npm install; pip install -r requirements.txt",
2+
"build": {
3+
"dockerfile": "./Dockerfile",
4+
"context": "."
5+
},
6+
"features": {
7+
"ghcr.io/devcontainers/features/common-utils:2": {
8+
"username": "codespace",
9+
"userUid": "1000",
10+
"userGid": "1000"
11+
},
12+
"ghcr.io/devcontainers/features/node:1": {
13+
"version": "20"
14+
},
15+
"ghcr.io/devcontainers/features/python:1": {
16+
"version": "3.11.9",
17+
"installJupyterLab": "false"
18+
},
19+
"ghcr.io/devcontainers/features/git:1": {
20+
"version": "latest",
21+
"ppa": "false"
22+
},
23+
"ghcr.io/devcontainers/features/git-lfs:1": {
24+
"version": "latest"
25+
},
26+
"ghcr.io/devcontainers/features/github-cli:1": {
27+
"version": "latest"
28+
}
29+
},
30+
"overrideFeatureInstallOrder": [
31+
"ghcr.io/devcontainers/features/common-utils",
32+
"ghcr.io/devcontainers/features/git",
33+
"ghcr.io/devcontainers/features/node",
34+
"ghcr.io/devcontainers/features/python",
35+
"ghcr.io/devcontainers/features/git-lfs",
36+
"ghcr.io/devcontainers/features/github-cli"
37+
],
38+
"remoteUser": "codespace",
39+
"containerUser": "codespace",
40+
"updateContentCommand": "${containerWorkspaceFolder}/.devcontainer/bootstrap",
41+
"postStartCommand": "${containerWorkspaceFolder}/.devcontainer/bootstrap",
542
"customizations": {
643
"codespaces": {
744
"disableAutomaticConfiguration": true,
@@ -16,10 +53,19 @@
1653
"ms-toolsai.prompty"
1754
],
1855
"settings": {
56+
/*
57+
NOTE: excluding these Python environments causes Jupyter to select the remaining environment by default
58+
The default environment will be: /usr/local/python/current/bin/python
59+
*/
60+
"jupyter.kernels.excludePythonEnvironments": [
61+
"/usr/local/python/current/bin/python3",
62+
"/usr/bin/python3",
63+
"/bin/python3"
64+
],
1965
"workbench.editorAssociations": {
2066
"*.md": "vscode.markdown.preview.editor"
2167
}
2268
}
2369
}
2470
}
25-
}
71+
}

.devcontainer/first-run-notice.txt

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
👋 Welcome to your shiny new Codespace for interacting with GitHub Models! We've got everything fired up and ready for you to explore AI Models hosted on Azure AI.
2+
3+
Take a look at the README to find all of the information you need to get started.
4+

.gitignore

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,6 @@
1-
.vscode/
1+
.vscode/*
2+
!.vscode/extensions.json
3+
!.vscode/launch.json
24
__pycache__/
35
.env
46
.DS_Store

.vscode/extensions.json

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
{
2+
"unwantedRecommendations": [
3+
"ms-azuretools.vscode-docker"
4+
]
5+
}

.vscode/launch.json

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
{
2+
"configurations": [
3+
{
4+
"name": "Run JavaScript Sample",
5+
"program": "${file}",
6+
"cwd": "${fileDirname}",
7+
"envFile": "${workspaceFolder}/.env",
8+
"outputCapture": "std",
9+
"request": "launch",
10+
"skipFiles": [
11+
"<node_internals>/**"
12+
],
13+
"type": "node"
14+
},
15+
{
16+
"name": "Run Python Sample",
17+
"program": "${file}",
18+
"cwd": "${fileDirname}",
19+
"envFile": "${workspaceFolder}/.env",
20+
"redirectOutput": false,
21+
"request": "launch",
22+
"type": "debugpy"
23+
}
24+
]
25+
}

cookbooks/python/langchain/lc_openai_getting_started.ipynb

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@
5252
"os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"GITHUB_TOKEN\")\n",
5353
"os.environ[\"OPENAI_BASE_URL\"] = \"https://models.inference.ai.azure.com/\"\n",
5454
"\n",
55-
"GPT_MODEL = \"gpt-4o\"\n",
55+
"GPT_MODEL = \"gpt-4o-mini\"\n",
5656
"\n",
5757
"llm = ChatOpenAI(model=GPT_MODEL)"
5858
]
@@ -373,7 +373,7 @@
373373
"name": "python",
374374
"nbconvert_exporter": "python",
375375
"pygments_lexer": "ipython3",
376-
"version": "3.10.14"
376+
"version": "3.11.9"
377377
}
378378
},
379379
"nbformat": 4,

cookbooks/python/llamaindex/rag_getting_started.ipynb

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@
3838
"\n",
3939
"To run RAG, you need 2 models: a chat model, and an embedding model. The GitHub Model service offers different options.\n",
4040
"\n",
41-
"For instance you could use an Azure OpenAI chat model (`gpt-4o`) and embedding model (`text-embedding-3-small`), or a Cohere chat model (`Cohere-command-r-plus`) and embedding model (`Cohere-embed-v3-multilingual`).\n",
41+
"For instance you could use an Azure OpenAI chat model (`gpt-4o-mini`) and embedding model (`text-embedding-3-small`), or a Cohere chat model (`Cohere-command-r-plus`) and embedding model (`Cohere-embed-v3-multilingual`).\n",
4242
"\n",
4343
"We'll proceed using some of the Azure OpenAI models below. You can find [how to leverage Cohere models in the LlamaIndex documentation](https://docs.llamaindex.ai/en/stable/examples/llm/cohere/).\n",
4444
"\n",
@@ -89,7 +89,7 @@
8989
"logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))\n",
9090
"\n",
9191
"llm = OpenAI(\n",
92-
" model=\"gpt-4o\",\n",
92+
" model=\"gpt-4o-mini\",\n",
9393
" api_key=os.getenv(\"OPENAI_API_KEY\"),\n",
9494
" api_base=os.getenv(\"OPENAI_BASE_URL\"),\n",
9595
")\n",
@@ -258,7 +258,7 @@
258258
"name": "python",
259259
"nbconvert_exporter": "python",
260260
"pygments_lexer": "ipython3",
261-
"version": "3.10.14"
261+
"version": "3.11.9"
262262
}
263263
},
264264
"nbformat": 4,

cookbooks/python/mistralai/evaluation.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -525,7 +525,7 @@
525525
"name": "python",
526526
"nbconvert_exporter": "python",
527527
"pygments_lexer": "ipython3",
528-
"version": "3.10.13"
528+
"version": "3.11.9"
529529
}
530530
},
531531
"nbformat": 4,

cookbooks/python/mistralai/function_calling.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -349,7 +349,7 @@
349349
"name": "python",
350350
"nbconvert_exporter": "python",
351351
"pygments_lexer": "ipython3",
352-
"version": "3.10.13"
352+
"version": "3.11.9"
353353
}
354354
},
355355
"nbformat": 4,

cookbooks/python/mistralai/prefix_use_cases.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -868,7 +868,7 @@
868868
"name": "python",
869869
"nbconvert_exporter": "python",
870870
"pygments_lexer": "ipython3",
871-
"version": "3.10.13"
871+
"version": "3.11.9"
872872
}
873873
},
874874
"nbformat": 4,

cookbooks/python/mistralai/prompting_capabilities.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -505,7 +505,7 @@
505505
"name": "python",
506506
"nbconvert_exporter": "python",
507507
"pygments_lexer": "ipython3",
508-
"version": "3.10.13"
508+
"version": "3.11.9"
509509
}
510510
},
511511
"nbformat": 4,

cookbooks/python/openai/Data_extraction_transformation.ipynb

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -100,7 +100,7 @@
100100
"os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"GITHUB_TOKEN\")\n",
101101
"os.environ[\"OPENAI_BASE_URL\"] = \"https://models.inference.ai.azure.com/\"\n",
102102
"\n",
103-
"GPT_MODEL = \"gpt-4o\"\n",
103+
"GPT_MODEL = \"gpt-4o-mini\"\n",
104104
"\n",
105105
"client = OpenAI()"
106106
]
@@ -193,7 +193,7 @@
193193
" \"\"\"\n",
194194
" \n",
195195
" response = client.chat.completions.create(\n",
196-
" model=\"gpt-4o\",\n",
196+
" model=\"gpt-4o-mini\",\n",
197197
" response_format={ \"type\": \"json_object\" },\n",
198198
" messages=[\n",
199199
" {\n",
@@ -491,7 +491,7 @@
491491
" \"\"\"\n",
492492
" \n",
493493
" response = client.chat.completions.create(\n",
494-
" model=\"gpt-4o\",\n",
494+
" model=\"gpt-4o-mini\",\n",
495495
" response_format={ \"type\": \"json_object\" },\n",
496496
" messages=[\n",
497497
" {\n",
@@ -813,7 +813,7 @@
813813
"name": "python",
814814
"nbconvert_exporter": "python",
815815
"pygments_lexer": "ipython3",
816-
"version": "3.10.13"
816+
"version": "3.11.9"
817817
}
818818
},
819819
"nbformat": 4,

cookbooks/python/openai/Developing_hallucination_guardrails.ipynb

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@
5454
"os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"GITHUB_TOKEN\")\n",
5555
"os.environ[\"OPENAI_BASE_URL\"] = \"https://models.inference.ai.azure.com/\"\n",
5656
"\n",
57-
"GPT_MODEL = \"gpt-4o\"\n",
57+
"GPT_MODEL = \"gpt-4o-mini\"\n",
5858
"\n",
5959
"client = OpenAI()"
6060
]
@@ -179,7 +179,7 @@
179179
" {\"role\": \"assistant\", \"content\": assistant_policy_example_1},\n",
180180
" {\"role\": \"user\", \"content\": input_message},\n",
181181
" ],\n",
182-
" model=\"gpt-4o\"\n",
182+
" model=\"gpt-4o-mini\"\n",
183183
" )\n",
184184
" \n",
185185
" return response.choices[0].message.content\n",
@@ -378,7 +378,7 @@
378378
" ]\n",
379379
"\n",
380380
" response = client.chat.completions.create(\n",
381-
" model=\"gpt-4o\",\n",
381+
" model=\"gpt-4o-mini\",\n",
382382
" messages=messages,\n",
383383
" temperature=0.7,\n",
384384
" n=10,\n",
@@ -695,7 +695,7 @@
695695
" ]\n",
696696
"\n",
697697
" response = client.chat.completions.create(\n",
698-
" model=\"gpt-4o\",\n",
698+
" model=\"gpt-4o-mini\",\n",
699699
" messages=messages,\n",
700700
" temperature=0.7,\n",
701701
" n=10\n",
@@ -844,7 +844,7 @@
844844
"name": "python",
845845
"nbconvert_exporter": "python",
846846
"pygments_lexer": "ipython3",
847-
"version": "3.10.13"
847+
"version": "3.11.9"
848848
}
849849
},
850850
"nbformat": 4,

cookbooks/python/openai/How_to_call_functions_with_chat_models.ipynb

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -78,7 +78,7 @@
7878
"os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"GITHUB_TOKEN\")\n",
7979
"os.environ[\"OPENAI_BASE_URL\"] = \"https://models.inference.ai.azure.com/\"\n",
8080
"\n",
81-
"GPT_MODEL = \"gpt-4o\"\n",
81+
"GPT_MODEL = \"gpt-4o-mini\"\n",
8282
"\n",
8383
"client = OpenAI()"
8484
]
@@ -451,7 +451,7 @@
451451
"source": [
452452
"### Parallel Function Calling\n",
453453
"\n",
454-
"Newer models such as gpt-4o or gpt-3.5-turbo can call multiple functions in one turn."
454+
"Newer models such as gpt-4o-mini or gpt-3.5-turbo can call multiple functions in one turn."
455455
]
456456
},
457457
{
@@ -700,7 +700,7 @@
700700
"}]\n",
701701
"\n",
702702
"response = client.chat.completions.create(\n",
703-
" model='gpt-4o', \n",
703+
" model='gpt-4o-mini', \n",
704704
" messages=messages, \n",
705705
" tools= tools, \n",
706706
" tool_choice=\"auto\"\n",
@@ -747,7 +747,7 @@
747747
" # Step 4: Invoke the chat completions API with the function response appended to the messages list\n",
748748
" # Note that messages with role 'tool' must be a response to a preceding message with 'tool_calls'\n",
749749
" model_response_with_function_call = client.chat.completions.create(\n",
750-
" model=\"gpt-4o\",\n",
750+
" model=\"gpt-4o-mini\",\n",
751751
" messages=messages,\n",
752752
" ) # get a new response from the model where it can see the function response\n",
753753
" print(model_response_with_function_call.choices[0].message.content)\n",
@@ -766,7 +766,7 @@
766766
"source": [
767767
"## Next Steps\n",
768768
"\n",
769-
"See our other notebook [Data extraction and transformation](Data_extraction_transformation.ipynb) which shows how to extract data from documents using gpt-4o."
769+
"See our other notebook [Data extraction and transformation](Data_extraction_transformation.ipynb) which shows how to extract data from documents using gpt-4o-mini."
770770
]
771771
}
772772
],
@@ -786,7 +786,7 @@
786786
"name": "python",
787787
"nbconvert_exporter": "python",
788788
"pygments_lexer": "ipython3",
789-
"version": "3.10.13"
789+
"version": "3.11.9"
790790
}
791791
},
792792
"nbformat": 4,

cookbooks/python/openai/How_to_stream_completions.ipynb

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@
4949
"os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"GITHUB_TOKEN\")\n",
5050
"os.environ[\"OPENAI_BASE_URL\"] = \"https://models.inference.ai.azure.com/\"\n",
5151
"\n",
52-
"GPT_MODEL = \"gpt-4o\"\n",
52+
"GPT_MODEL = \"gpt-4o-mini\"\n",
5353
"\n",
5454
"client = OpenAI()"
5555
]
@@ -169,7 +169,7 @@
169169
"source": [
170170
"### 3. How much time is saved by streaming a chat completion\n",
171171
"\n",
172-
"Now let's ask `gpt-4o` to count to 100 again, and see how long it takes."
172+
"Now let's ask `gpt-4o-mini` to count to 100 again, and see how long it takes."
173173
]
174174
},
175175
{
@@ -255,7 +255,7 @@
255255
"name": "python",
256256
"nbconvert_exporter": "python",
257257
"pygments_lexer": "ipython3",
258-
"version": "3.10.13"
258+
"version": "3.11.9"
259259
},
260260
"orig_nbformat": 4,
261261
"vscode": {

cookbooks/python/openai/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,6 @@ The samples were modified slightly to better run with the GitHub Models service.
77

88
- [How to process image and video with GPT-4](how_to_process_image_and_video_with_gpt4o.ipynb): This notebook shows how to process images and videos with GPT-4.
99
- [How to call functions with chat models](How_to_call_functions_with_chat_models.ipynb): This notebook shows how to get GPT-4o to determine which of a set of functions to call to answer a user's question.
10-
- [Data extraction and transformation](Data_extraction_transformation.ipynb): This notebook shows how to extract data from documents using gpt-4o.
10+
- [Data extraction and transformation](Data_extraction_transformation.ipynb): This notebook shows how to extract data from documents using gpt-4o-mini.
1111
- [How to stream completions](How_to_stream_completions.ipynb): This notebook shows detailed instructions on how to stream chat completions.
1212
- [Developing Hallucination Guardrails](Developing_hallucination_guardrails.ipynb): Develop an output guardrail that specifically checks model outputs for hallucinations

0 commit comments

Comments
 (0)