Skip to content

feat: Add support for tools from github enterprise. #534

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
65 changes: 33 additions & 32 deletions docs/docs/04-command-line-reference/gptscript.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,38 +12,39 @@ gptscript [flags] PROGRAM_FILE [INPUT...]
### Options

```
--cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR)
--chat-state string The chat state to continue, or null to start a new chat and return the state ($GPTSCRIPT_CHAT_STATE)
-C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR)
--color Use color in output (default true) ($GPTSCRIPT_COLOR)
--config string Path to GPTScript config file ($GPTSCRIPT_CONFIG)
--confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM)
--credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default")
--credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE)
--debug Enable debug logging ($GPTSCRIPT_DEBUG)
--debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES)
--default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o")
--default-model-provider string Default LLM model provider to use, this will override OpenAI settings ($GPTSCRIPT_DEFAULT_MODEL_PROVIDER)
--disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE)
--disable-tui Don't use chat TUI but instead verbose output ($GPTSCRIPT_DISABLE_TUI)
--dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE)
--events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO)
--force-chat Force an interactive chat session if even the top level tool is not a chat tool ($GPTSCRIPT_FORCE_CHAT)
--force-sequential Force parallel calls to run sequentially ($GPTSCRIPT_FORCE_SEQUENTIAL)
-h, --help help for gptscript
-f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE)
--list-models List the models available and exit ($GPTSCRIPT_LIST_MODELS)
--list-tools List built-in tools and exit ($GPTSCRIPT_LIST_TOOLS)
--no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC)
--openai-api-key string OpenAI API KEY ($OPENAI_API_KEY)
--openai-base-url string OpenAI base URL ($OPENAI_BASE_URL)
--openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID)
-o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT)
-q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET)
--save-chat-state-file string A file to save the chat state to so that a conversation can be resumed with --chat-state ($GPTSCRIPT_SAVE_CHAT_STATE_FILE)
--sub-tool string Use tool of this name, not the first tool in file ($GPTSCRIPT_SUB_TOOL)
--ui Launch the UI ($GPTSCRIPT_UI)
--workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE)
--cache-dir string Directory to store cache (default: $XDG_CACHE_HOME/gptscript) ($GPTSCRIPT_CACHE_DIR)
--chat-state string The chat state to continue, or null to start a new chat and return the state ($GPTSCRIPT_CHAT_STATE)
-C, --chdir string Change current working directory ($GPTSCRIPT_CHDIR)
--color Use color in output (default true) ($GPTSCRIPT_COLOR)
--config string Path to GPTScript config file ($GPTSCRIPT_CONFIG)
--confirm Prompt before running potentially dangerous commands ($GPTSCRIPT_CONFIRM)
--credential-context string Context name in which to store credentials ($GPTSCRIPT_CREDENTIAL_CONTEXT) (default "default")
--credential-override strings Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234) ($GPTSCRIPT_CREDENTIAL_OVERRIDE)
--debug Enable debug logging ($GPTSCRIPT_DEBUG)
--debug-messages Enable logging of chat completion calls ($GPTSCRIPT_DEBUG_MESSAGES)
--default-model string Default LLM model to use ($GPTSCRIPT_DEFAULT_MODEL) (default "gpt-4o")
--default-model-provider string Default LLM model provider to use, this will override OpenAI settings ($GPTSCRIPT_DEFAULT_MODEL_PROVIDER)
--disable-cache Disable caching of LLM API responses ($GPTSCRIPT_DISABLE_CACHE)
--disable-tui Don't use chat TUI but instead verbose output ($GPTSCRIPT_DISABLE_TUI)
--dump-state string Dump the internal execution state to a file ($GPTSCRIPT_DUMP_STATE)
--events-stream-to string Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\.\pipe\my-pipe) ($GPTSCRIPT_EVENTS_STREAM_TO)
      --force-chat                          Force an interactive chat session even if the top-level tool is not a chat tool ($GPTSCRIPT_FORCE_CHAT)
--force-sequential Force parallel calls to run sequentially ($GPTSCRIPT_FORCE_SEQUENTIAL)
      --github-enterprise-hostname string   The host name for a GitHub Enterprise instance to enable for remote loading ($GPTSCRIPT_GITHUB_ENTERPRISE_HOSTNAME)
-h, --help help for gptscript
-f, --input string Read input from a file ("-" for stdin) ($GPTSCRIPT_INPUT_FILE)
--list-models List the models available and exit ($GPTSCRIPT_LIST_MODELS)
--list-tools List built-in tools and exit ($GPTSCRIPT_LIST_TOOLS)
--no-trunc Do not truncate long log messages ($GPTSCRIPT_NO_TRUNC)
--openai-api-key string OpenAI API KEY ($OPENAI_API_KEY)
--openai-base-url string OpenAI base URL ($OPENAI_BASE_URL)
--openai-org-id string OpenAI organization ID ($OPENAI_ORG_ID)
-o, --output string Save output to a file, or - for stdout ($GPTSCRIPT_OUTPUT)
-q, --quiet No output logging (set --quiet=false to force on even when there is no TTY) ($GPTSCRIPT_QUIET)
--save-chat-state-file string A file to save the chat state to so that a conversation can be resumed with --chat-state ($GPTSCRIPT_SAVE_CHAT_STATE_FILE)
--sub-tool string Use tool of this name, not the first tool in file ($GPTSCRIPT_SUB_TOOL)
--ui Launch the UI ($GPTSCRIPT_UI)
--workspace string Directory to use for the workspace, if specified it will not be deleted on exit ($GPTSCRIPT_WORKSPACE)
```

### SEE ALSO
Expand Down
44 changes: 25 additions & 19 deletions pkg/cli/gptscript.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ import (
"github.com/gptscript-ai/gptscript/pkg/gptscript"
"github.com/gptscript-ai/gptscript/pkg/input"
"github.com/gptscript-ai/gptscript/pkg/loader"
"github.com/gptscript-ai/gptscript/pkg/loader/github"
"github.com/gptscript-ai/gptscript/pkg/monitor"
"github.com/gptscript-ai/gptscript/pkg/mvl"
"github.com/gptscript-ai/gptscript/pkg/openai"
Expand Down Expand Up @@ -54,25 +55,26 @@ type GPTScript struct {
Output string `usage:"Save output to a file, or - for stdout" short:"o"`
EventsStreamTo string `usage:"Stream events to this location, could be a file descriptor/handle (e.g. fd://2), filename, or named pipe (e.g. \\\\.\\pipe\\my-pipe)" name:"events-stream-to"`
// Input should not be using GPTSCRIPT_INPUT env var because that is the same value that is set in tool executions
Input string `usage:"Read input from a file (\"-\" for stdin)" short:"f" env:"GPTSCRIPT_INPUT_FILE"`
SubTool string `usage:"Use tool of this name, not the first tool in file" local:"true"`
Assemble bool `usage:"Assemble tool to a single artifact, saved to --output" hidden:"true" local:"true"`
ListModels bool `usage:"List the models available and exit" local:"true"`
ListTools bool `usage:"List built-in tools and exit" local:"true"`
ListenAddress string `usage:"Server listen address" default:"127.0.0.1:0" hidden:"true"`
Chdir string `usage:"Change current working directory" short:"C"`
Daemon bool `usage:"Run tool as a daemon" local:"true" hidden:"true"`
Ports string `usage:"The port range to use for ephemeral daemon ports (ex: 11000-12000)" hidden:"true"`
CredentialContext string `usage:"Context name in which to store credentials" default:"default"`
CredentialOverride []string `usage:"Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234)"`
ChatState string `usage:"The chat state to continue, or null to start a new chat and return the state" local:"true"`
ForceChat bool `usage:"Force an interactive chat session if even the top level tool is not a chat tool" local:"true"`
ForceSequential bool `usage:"Force parallel calls to run sequentially" local:"true"`
Workspace string `usage:"Directory to use for the workspace, if specified it will not be deleted on exit"`
UI bool `usage:"Launch the UI" local:"true" name:"ui"`
DisableTUI bool `usage:"Don't use chat TUI but instead verbose output" local:"true" name:"disable-tui"`
SaveChatStateFile string `usage:"A file to save the chat state to so that a conversation can be resumed with --chat-state" local:"true"`
DefaultModelProvider string `usage:"Default LLM model provider to use, this will override OpenAI settings"`
Input string `usage:"Read input from a file (\"-\" for stdin)" short:"f" env:"GPTSCRIPT_INPUT_FILE"`
SubTool string `usage:"Use tool of this name, not the first tool in file" local:"true"`
Assemble bool `usage:"Assemble tool to a single artifact, saved to --output" hidden:"true" local:"true"`
ListModels bool `usage:"List the models available and exit" local:"true"`
ListTools bool `usage:"List built-in tools and exit" local:"true"`
ListenAddress string `usage:"Server listen address" default:"127.0.0.1:0" hidden:"true"`
Chdir string `usage:"Change current working directory" short:"C"`
Daemon bool `usage:"Run tool as a daemon" local:"true" hidden:"true"`
Ports string `usage:"The port range to use for ephemeral daemon ports (ex: 11000-12000)" hidden:"true"`
CredentialContext string `usage:"Context name in which to store credentials" default:"default"`
CredentialOverride []string `usage:"Credentials to override (ex: --credential-override github.com/example/cred-tool:API_TOKEN=1234)"`
ChatState string `usage:"The chat state to continue, or null to start a new chat and return the state" local:"true"`
ForceChat bool `usage:"Force an interactive chat session if even the top level tool is not a chat tool" local:"true"`
ForceSequential bool `usage:"Force parallel calls to run sequentially" local:"true"`
Workspace string `usage:"Directory to use for the workspace, if specified it will not be deleted on exit"`
UI bool `usage:"Launch the UI" local:"true" name:"ui"`
DisableTUI bool `usage:"Don't use chat TUI but instead verbose output" local:"true" name:"disable-tui"`
SaveChatStateFile string `usage:"A file to save the chat state to so that a conversation can be resumed with --chat-state" local:"true"`
DefaultModelProvider string `usage:"Default LLM model provider to use, this will override OpenAI settings"`
GithubEnterpriseHostname string `usage:"The host name for a Github Enterprise instance to enable for remote loading" local:"true"`

readData []byte
}
Expand Down Expand Up @@ -334,6 +336,10 @@ func (r *GPTScript) Run(cmd *cobra.Command, args []string) (retErr error) {
return err
}

if r.GithubEnterpriseHostname != "" {
loader.AddVSC(github.LoaderForPrefix(r.GithubEnterpriseHostname))
}

// If the user is trying to launch the chat-builder UI, then set up the tool and options here.
if r.UI {
if os.Getenv(system.BinEnvVar) == "" {
Expand Down
87 changes: 63 additions & 24 deletions pkg/loader/github/github.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package github

import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
Expand All @@ -18,52 +19,63 @@ import (
"github.com/gptscript-ai/gptscript/pkg/types"
)

const (
GithubPrefix = "github.com/"
githubRepoURL = "https://github.com/%s/%s.git"
githubDownloadURL = "https://raw.githubusercontent.com/%s/%s/%s/%s"
githubCommitURL = "https://api.github.com/repos/%s/%s/commits/%s"
)
// Config holds the URL templates and auth token used to resolve and
// download tools from a GitHub (or GitHub Enterprise) instance.
type Config struct {
	Prefix      string // tool-reference prefix that activates this loader (e.g. "github.com/")
	RepoURL     string // fmt template for the git clone URL; verbs: account, repo
	DownloadURL string // fmt template for raw file downloads; verbs: account, repo, ref, path
	CommitURL   string // fmt template for the commit-resolution API; verbs: account, repo, ref
	AuthToken   string // bearer token added to API requests; empty means unauthenticated
}

var (
githubAuthToken = os.Getenv("GITHUB_AUTH_TOKEN")
log = mvl.Package()
log = mvl.Package()
defaultGithubConfig = &Config{
Prefix: "github.com/",
RepoURL: "https://github.com/%s/%s.git",
DownloadURL: "https://raw.githubusercontent.com/%s/%s/%s/%s",
CommitURL: "https://api.github.com/repos/%s/%s/commits/%s",
AuthToken: os.Getenv("GITHUB_AUTH_TOKEN"),
}
)

// init registers the default public-GitHub loader with the VCS loader
// chain so github.com/... tool references resolve out of the box.
func init() {
	loader.AddVSC(Load)
}

func getCommitLsRemote(ctx context.Context, account, repo, ref string) (string, error) {
url := fmt.Sprintf(githubRepoURL, account, repo)
// getCommitLsRemote resolves ref to a commit SHA by querying the remote
// git repository directly (git ls-remote), using the repo URL template
// from config. It serves as a fallback when the commits API is unavailable.
func getCommitLsRemote(ctx context.Context, account, repo, ref string, config *Config) (string, error) {
	repoURL := fmt.Sprintf(config.RepoURL, account, repo)
	return git.LsRemote(ctx, repoURL, ref)
}

// commitRegexp matches a full 40-character lowercase-hex git commit SHA;
// refs that already match it skip the remote commit lookup entirely.
var commitRegexp = regexp.MustCompile("^[a-f0-9]{40}$")

func getCommit(ctx context.Context, account, repo, ref string) (string, error) {
func getCommit(ctx context.Context, account, repo, ref string, config *Config) (string, error) {
if commitRegexp.MatchString(ref) {
return ref, nil
}

url := fmt.Sprintf(githubCommitURL, account, repo, ref)
url := fmt.Sprintf(config.CommitURL, account, repo, ref)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
return "", fmt.Errorf("failed to create request of %s/%s at %s: %w", account, repo, url, err)
}

if githubAuthToken != "" {
req.Header.Add("Authorization", "Bearer "+githubAuthToken)
if config.AuthToken != "" {
req.Header.Add("Authorization", "Bearer "+config.AuthToken)
}

resp, err := http.DefaultClient.Do(req)
client := http.DefaultClient
if req.Host == config.Prefix && strings.ToLower(os.Getenv("GH_ENTERPRISE_SKIP_VERIFY")) == "true" {
client = &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}}
}
resp, err := client.Do(req)
if err != nil {
return "", err
} else if resp.StatusCode != http.StatusOK {
c, _ := io.ReadAll(resp.Body)
resp.Body.Close()
commit, fallBackErr := getCommitLsRemote(ctx, account, repo, ref)
commit, fallBackErr := getCommitLsRemote(ctx, account, repo, ref, config)
if fallBackErr == nil {
return commit, nil
}
Expand All @@ -88,8 +100,28 @@ func getCommit(ctx context.Context, account, repo, ref string) (string, error) {
return commit.SHA, nil
}

func Load(ctx context.Context, _ *cache.Client, urlName string) (string, string, *types.Repo, bool, error) {
if !strings.HasPrefix(urlName, GithubPrefix) {
// LoaderForPrefix returns a VCS loader function that resolves tool
// references beginning with prefix against a GitHub Enterprise instance
// hosted at that prefix.
func LoaderForPrefix(prefix string) func(context.Context, *cache.Client, string) (string, string, *types.Repo, bool, error) {
	load := func(ctx context.Context, c *cache.Client, urlName string) (string, string, *types.Repo, bool, error) {
		// Build the enterprise config per call so the auth token env var
		// is read at load time, matching the original behavior.
		cfg := NewGithubEnterpriseConfig(prefix)
		return LoadWithConfig(ctx, c, urlName, cfg)
	}
	return load
}

// Load resolves a github.com/... tool reference using the default
// public-GitHub configuration. It is registered as a VCS loader in init.
func Load(ctx context.Context, c *cache.Client, urlName string) (string, string, *types.Repo, bool, error) {
	return LoadWithConfig(ctx, c, urlName, defaultGithubConfig)
}

// NewGithubEnterpriseConfig builds a Config whose URL templates point at
// the GitHub Enterprise host named by prefix. The auth token is read from
// the GH_ENTERPRISE_TOKEN environment variable. The %% escapes leave
// literal %s verbs in the templates for later fmt.Sprintf calls.
func NewGithubEnterpriseConfig(prefix string) *Config {
	cfg := Config{
		Prefix:    prefix,
		AuthToken: os.Getenv("GH_ENTERPRISE_TOKEN"),
	}
	cfg.RepoURL = fmt.Sprintf("https://%s/%%s/%%s.git", prefix)
	cfg.DownloadURL = fmt.Sprintf("https://raw.%s/%%s/%%s/%%s/%%s", prefix)
	cfg.CommitURL = fmt.Sprintf("https://%s/api/v3/repos/%%s/%%s/commits/%%s", prefix)
	return &cfg
}

func LoadWithConfig(ctx context.Context, _ *cache.Client, urlName string, config *Config) (string, string, *types.Repo, bool, error) {
if !strings.HasPrefix(urlName, config.Prefix) {
return "", "", nil, false, nil
}

Expand All @@ -107,12 +139,12 @@ func Load(ctx context.Context, _ *cache.Client, urlName string) (string, string,
account, repo := parts[1], parts[2]
path := strings.Join(parts[3:], "/")

ref, err := getCommit(ctx, account, repo, ref)
ref, err := getCommit(ctx, account, repo, ref, config)
if err != nil {
return "", "", nil, false, err
}

downloadURL := fmt.Sprintf(githubDownloadURL, account, repo, ref, path)
downloadURL := fmt.Sprintf(config.DownloadURL, account, repo, ref, path)
if path == "" || path == "/" || !strings.Contains(parts[len(parts)-1], ".") {
var (
testPath string
Expand All @@ -124,13 +156,20 @@ func Load(ctx context.Context, _ *cache.Client, urlName string) (string, string,
} else {
testPath = path + "/" + ext
}
testURL = fmt.Sprintf(githubDownloadURL, account, repo, ref, testPath)
testURL = fmt.Sprintf(config.DownloadURL, account, repo, ref, testPath)
if i == len(types.DefaultFiles)-1 {
// no reason to test the last one, we are just going to use it. Being that the default list is only
// two elements this loop could have been one check, but hey over-engineered code ftw.
break
}
if resp, err := http.Head(testURL); err == nil {
headReq, err := http.NewRequest("HEAD", testURL, nil)
if err != nil {
break
}
if config.AuthToken != "" {
headReq.Header.Add("Authorization", "Bearer "+config.AuthToken)
}
if resp, err := http.DefaultClient.Do(headReq); err == nil {
_ = resp.Body.Close()
if resp.StatusCode == 200 {
break
Expand All @@ -141,9 +180,9 @@ func Load(ctx context.Context, _ *cache.Client, urlName string) (string, string,
path = testPath
}

return downloadURL, githubAuthToken, &types.Repo{
return downloadURL, config.AuthToken, &types.Repo{
VCS: "git",
Root: fmt.Sprintf(githubRepoURL, account, repo),
Root: fmt.Sprintf(config.RepoURL, account, repo),
Path: gpath.Dir(path),
Name: gpath.Base(path),
Revision: ref,
Expand Down
Loading