diff --git a/cmd/dagger/cmd/doc.go b/cmd/dagger/cmd/doc.go
index 48025d42..38ae93bb 100644
--- a/cmd/dagger/cmd/doc.go
+++ b/cmd/dagger/cmd/doc.go
@@ -20,7 +20,6 @@ import (
 	"go.dagger.io/dagger/cmd/dagger/cmd/common"
 	"go.dagger.io/dagger/cmd/dagger/logger"
 	"go.dagger.io/dagger/compiler"
-	"go.dagger.io/dagger/environment"
 	"go.dagger.io/dagger/pkg"
 	"golang.org/x/term"
 )
@@ -55,23 +54,6 @@ type Package struct {
 func Parse(ctx context.Context, packageName string, val *compiler.Value) *Package {
 	lg := log.Ctx(ctx)
 
-	parseValues := func(field string, values []*compiler.Value) []Value {
-		val := []Value{}
-
-		for _, i := range values {
-			v := Value{}
-			v.Name = strings.TrimPrefix(
-				i.Path().String(),
-				field+".",
-			)
-			v.Type = common.FormatValue(i)
-			v.Description = common.ValueDocOneLine(i)
-			val = append(val, v)
-		}
-
-		return val
-	}
-
 	fields, err := val.Fields(cue.Definitions(true))
 	if err != nil {
 		lg.Fatal().Err(err).Msg("cannot get fields")
@@ -104,14 +86,6 @@ func Parse(ctx context.Context, packageName string, val *compiler.Value) *Packag
 		field.Name = name
 		field.Description = common.ValueDocOneLine(v)
 
-		// Inputs
-		inp := environment.ScanInputs(ctx, v)
-		field.Inputs = parseValues(field.Name, inp)
-
-		// Outputs
-		out := environment.ScanOutputs(ctx, v)
-		field.Outputs = parseValues(field.Name, out)
-
 		pkg.Fields = append(pkg.Fields, field)
 	}
 
diff --git a/cmd/dagger/cmd/up.go b/cmd/dagger/cmd/up.go
index bf7c15c8..f1123507 100644
--- a/cmd/dagger/cmd/up.go
+++ b/cmd/dagger/cmd/up.go
@@ -2,19 +2,14 @@ package cmd
 
 import (
 	"context"
-	"errors"
 	"fmt"
 	"os"
 
-	"cuelang.org/go/cue"
 	"go.dagger.io/dagger/client"
 	"go.dagger.io/dagger/cmd/dagger/cmd/common"
 	"go.dagger.io/dagger/cmd/dagger/logger"
-	"go.dagger.io/dagger/compiler"
-	"go.dagger.io/dagger/environment"
 	"go.dagger.io/dagger/mod"
 	"go.dagger.io/dagger/plan"
-	"go.dagger.io/dagger/plancontext"
 	"go.dagger.io/dagger/solver"
 	"golang.org/x/term"
 
@@ -68,58 +63,6 @@ var upCmd = &cobra.Command{
 			return
 		}
 
-		project := common.CurrentProject(ctx)
-		st := common.CurrentEnvironmentState(ctx, project)
-
-		lg = lg.With().
-			Str("environment", st.Name).
-			Logger()
-
-		universeUpdateCh := make(chan bool)
-		go func() {
-			universeUpdateCh <- checkUniverseVersion(ctx, project.Path)
-		}()
-
-		doneCh := common.TrackProjectCommand(ctx, cmd, project, st)
-
-		env, err := environment.New(st)
-		if err != nil {
-			lg.Fatal().Err(err).Msg("unable to create environment")
-		}
-
-		err = cl.Do(ctx, env.Context(), func(ctx context.Context, s solver.Solver) error {
-			// check that all inputs are set
-			if err := checkInputs(ctx, env); err != nil {
-				return err
-			}
-
-			if err := env.Up(ctx, s); err != nil {
-				return err
-			}
-
-			st.Computed = env.Computed().JSON().PrettyString()
-			if err := project.Save(ctx, st); err != nil {
-				return err
-			}
-
-			// FIXME: `ListOutput` is printing to Stdout directly which messes
-			// up the TTY logger.
-			if tty != nil {
-				tty.Stop()
-			}
-			return nil
-		})
-
-		<-doneCh
-
-		if err != nil {
-			lg.Fatal().Err(err).Msg("failed to up environment")
-		}
-
-		// Warn universe version if out of date
-		if update := <-universeUpdateCh; update {
-			fmt.Println("A new version of universe is available, please run 'dagger mod get alpha.dagger.io'")
-		}
 	},
 }
 
@@ -173,57 +116,8 @@ func europaUp(ctx context.Context, cl *client.Client, args ...string) error {
 	})
 }
 
-func checkInputs(ctx context.Context, env *environment.Environment) error {
-	lg := log.Ctx(ctx)
-	warnOnly := viper.GetBool("force")
-
-	notConcreteInputs := []*compiler.Value{}
-	inputs, err := env.ScanInputs(ctx, true)
-	if err != nil {
-		lg.Error().Err(err).Msg("failed to scan inputs")
-		return err
-	}
-
-	for _, i := range inputs {
-		isConcrete := (i.IsConcreteR(cue.Optional(true)) == nil)
-		switch {
-		case plancontext.IsSecretValue(i):
-			if _, err := env.Context().Secrets.FromValue(i); err != nil {
-				isConcrete = false
-			}
-		case plancontext.IsFSValue(i):
-			if _, err := env.Context().FS.FromValue(i); err != nil {
-				isConcrete = false
-			}
-		case plancontext.IsServiceValue(i):
-			if _, err := env.Context().Services.FromValue(i); err != nil {
-				isConcrete = false
-			}
-		}
-
-		if !isConcrete {
-			notConcreteInputs = append(notConcreteInputs, i)
-		}
-	}
-
-	for _, i := range notConcreteInputs {
-		if warnOnly {
-			lg.Warn().Str("input", i.Path().String()).Msg("required input is missing")
-		} else {
-			lg.Error().Str("input", i.Path().String()).Msg("required input is missing")
-		}
-	}
-
-	if !warnOnly && len(notConcreteInputs) > 0 {
-		return errors.New("some required inputs are not set, please re-run with `--force` if you think it's a mistake")
-	}
-
-	return nil
-}
-
 func init() {
 	upCmd.Flags().BoolP("force", "f", false, "Force up, disable inputs check")
-	upCmd.Flags().String("output", "", "Write computed output. Prints on stdout if set to-")
 	upCmd.Flags().StringArrayP("with", "w", []string{}, "")
 	upCmd.Flags().StringP("target", "t", "", "Run a single target of the DAG (for debugging only)")
 	upCmd.Flags().Bool("no-vendor", false, "Force up, disable inputs check")
diff --git a/cmd/dagger/logger/tty.go b/cmd/dagger/logger/tty.go
index 6fa22a41..a1bb958a 100644
--- a/cmd/dagger/logger/tty.go
+++ b/cmd/dagger/logger/tty.go
@@ -14,14 +14,14 @@ import (
 	"github.com/containerd/console"
 	"github.com/morikuni/aec"
 	"github.com/tonistiigi/vt100"
-	"go.dagger.io/dagger/environment"
+	"go.dagger.io/dagger/plan/task"
 )
 
 type Event map[string]interface{}
 
 type Group struct {
 	Name      string
-	State     environment.State
+	State     task.State
 	Events    []Event
 	Started   *time.Time
 	Completed *time.Time
@@ -43,7 +43,7 @@ func (l *Logs) Add(event Event) error {
 	l.l.Lock()
 	defer l.l.Unlock()
 
-	task, ok := event["task"].(string)
+	taskPath, ok := event["task"].(string)
 	if !ok {
 		l.Messages = append(l.Messages, Message{
 			Event: event,
@@ -52,12 +52,8 @@ func (l *Logs) Add(event Event) error {
 		return nil
 	}
 
-	// Hide `#up.*` from log group names
-	// FIXME: remove in Europa
-	groupKey := strings.Split(task, ".#up")[0]
-
 	// Hide hidden fields (e.g. `._*`) from log group names
-	groupKey = strings.Split(groupKey, "._")[0]
+	groupKey := strings.Split(taskPath, "._")[0]
 
 	group := l.groups[groupKey]
 
@@ -82,8 +78,8 @@ func (l *Logs) Add(event Event) error {
 	// For each task in a group, the status will transition from computing to complete, then back to computing and so on.
 	// The transition is fast enough not to cause a problem.
 	if st, ok := event["state"].(string); ok {
-		group.State = environment.State(st)
-		if group.State == environment.StateComputing {
+		group.State = task.State(st)
+		if group.State == task.StateComputing {
 			group.Completed = nil
 		} else {
 			now := time.Now()
@@ -228,7 +224,7 @@ func (c *TTYOutput) linesPerGroup(width, height int) int {
 	runningGroups := 0
 
 	for _, message := range c.logs.Messages {
-		if group := message.Group; group != nil && group.State == environment.StateComputing {
+		if group := message.Group; group != nil && group.State == task.StateComputing {
 			runningGroups++
 		}
 	}
@@ -268,13 +264,13 @@ func (c *TTYOutput) printGroup(group *Group, width, maxLines int) int {
 	prefix := ""
 	switch group.State {
-	case environment.StateComputing:
+	case task.StateComputing:
 		prefix = "[+]"
-	case environment.StateCanceled:
+	case task.StateCanceled:
 		prefix = "[✗]"
-	case environment.StateFailed:
+	case task.StateFailed:
 		prefix = "[✗]"
-	case environment.StateCompleted:
+	case task.StateCompleted:
 		prefix = "[✔]"
 	}
 
@@ -298,13 +294,13 @@ func (c *TTYOutput) printGroup(group *Group, width, maxLines int) int {
 	// color
 	switch group.State {
-	case environment.StateComputing:
+	case task.StateComputing:
 		out = aec.Apply(out, aec.LightBlueF)
-	case environment.StateCanceled:
+	case task.StateCanceled:
 		out = aec.Apply(out, aec.LightYellowF)
-	case environment.StateFailed:
+	case task.StateFailed:
 		out = aec.Apply(out, aec.LightRedF)
-	case environment.StateCompleted:
+	case task.StateCompleted:
 		out = aec.Apply(out, aec.LightGreenF)
 	}
 
@@ -314,19 +310,19 @@
 	printEvents := []Event{}
 	switch group.State {
-	case environment.StateComputing:
+	case task.StateComputing:
 		printEvents = group.Events
 		// for computing tasks, show only last N
 		if len(printEvents) > maxLines {
 			printEvents = printEvents[len(printEvents)-maxLines:]
 		}
-	case environment.StateCanceled:
+	case task.StateCanceled:
 		// for completed tasks, don't show any logs
 		printEvents = []Event{}
-	case environment.StateFailed:
+	case task.StateFailed:
 		// for failed, show all logs
 		printEvents = group.Events
-	case environment.StateCompleted:
+	case task.StateCompleted:
 		// for completed tasks, don't show any logs
 		printEvents = []Event{}
diff --git a/environment/environment.go b/environment/environment.go
deleted file mode 100644
index fe4b4609..00000000
--- a/environment/environment.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package environment
-
-import (
-	"context"
-	"fmt"
-
-	"cuelang.org/go/cue"
-	cueflow "cuelang.org/go/tools/flow"
-	"go.dagger.io/dagger/compiler"
-	"go.dagger.io/dagger/plancontext"
-	"go.dagger.io/dagger/solver"
-	"go.dagger.io/dagger/state"
-
-	"go.opentelemetry.io/otel"
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/trace"
-
-	"github.com/rs/zerolog/log"
-)
-
-type Environment struct {
-	state *state.State
-
-	// Layer 1: plan configuration
-	plan *compiler.Value
-
-	// Layer 2: user inputs
-	input *compiler.Value
-
-	// plan + inputs
-	src *compiler.Value
-
-	// Layer 3: computed values
-	computed *compiler.Value
-}
-
-func New(st *state.State) (*Environment, error) {
-	var err error
-
-	e := &Environment{
-		state: st,
-	}
-
-	e.plan, err = st.CompilePlan(context.TODO())
-	if err != nil {
-		return nil, err
-	}
-
-	e.input, err = st.CompileInputs()
-	if err != nil {
-		return nil, err
-	}
-
-	e.computed = compiler.NewValue()
-
-	e.src = compiler.NewValue()
-	if err := e.src.FillPath(cue.MakePath(), e.plan); err != nil {
-		return nil, err
-	}
-	if err :=
e.src.FillPath(cue.MakePath(), e.input); err != nil { - return nil, err - } - - return e, nil -} - -func (e *Environment) Name() string { - return e.state.Name -} - -func (e *Environment) Computed() *compiler.Value { - return e.computed -} - -func (e *Environment) Context() *plancontext.Context { - return e.state.Context -} - -// Up missing values in environment configuration, and write them to state. -func (e *Environment) Up(ctx context.Context, s solver.Solver) error { - ctx, span := otel.Tracer("dagger").Start(ctx, "environment.Up") - defer span.End() - - // Orchestrate execution with cueflow - flow := cueflow.New( - &cueflow.Config{}, - e.src.Cue(), - newTaskFunc(newPipelineRunner(e.computed, s, e.state.Context)), - ) - if err := flow.Run(ctx); err != nil { - return err - } - - // FIXME: canceling the context makes flow return `nil` - // Check explicitly if the context is canceled. - select { - case <-ctx.Done(): - return ctx.Err() - default: - return nil - } -} - -type DownOpts struct{} - -func (e *Environment) Down(ctx context.Context, _ *DownOpts) error { - panic("NOT IMPLEMENTED") -} - -type QueryOpts struct{} - -func newTaskFunc(runner cueflow.RunnerFunc) cueflow.TaskFunc { - return func(flowVal cue.Value) (cueflow.Runner, error) { - v := compiler.Wrap(flowVal) - if !IsComponent(v) { - // No compute script - return nil, nil - } - return runner, nil - } -} - -func newPipelineRunner(computed *compiler.Value, s solver.Solver, pctx *plancontext.Context) cueflow.RunnerFunc { - return cueflow.RunnerFunc(func(t *cueflow.Task) error { - ctx := t.Context() - lg := log. - Ctx(ctx). - With(). - Str("task", t.Path().String()). - Logger() - ctx = lg.WithContext(ctx) - - ctx, span := otel.Tracer("dagger").Start(ctx, fmt.Sprintf("compute: %s", t.Path().String())) - defer span.End() - - for _, dep := range t.Dependencies() { - lg. - Debug(). - Str("dependency", dep.Path().String()). - Msg("dependency detected") - } - v := compiler.Wrap(t.Value()) - - p := NewPipeline(v, s, pctx) - err := p.Run(ctx) - if err != nil { - // Record the error - span.AddEvent("command", trace.WithAttributes( - attribute.String("error", err.Error()), - )) - - return err - } - - // Mirror the computed values in both `Task` and `Result` - if !p.Computed().IsConcrete() { - return nil - } - - if err := t.Fill(p.Computed().Cue()); err != nil { - lg. - Error(). - Err(err). - Msg("failed to fill task") - return err - } - - // Merge task value into output - if err := computed.FillPath(t.Path(), p.Computed()); err != nil { - lg. - Error(). - Err(err). 
- Msg("failed to fill task result") - return err - } - - return nil - }) -} - -func (e *Environment) ScanInputs(ctx context.Context, mergeUserInputs bool) ([]*compiler.Value, error) { - src := e.plan - - if mergeUserInputs { - src = e.src - } - - return ScanInputs(ctx, src), nil -} - -func (e *Environment) ScanOutputs(ctx context.Context) ([]*compiler.Value, error) { - src := compiler.NewValue() - if err := src.FillPath(cue.MakePath(), e.plan); err != nil { - return nil, err - } - if err := src.FillPath(cue.MakePath(), e.input); err != nil { - return nil, err - } - - if e.state.Computed != "" { - computed, err := compiler.DecodeJSON("", []byte(e.state.Computed)) - if err != nil { - return nil, err - } - - if err := src.FillPath(cue.MakePath(), computed); err != nil { - return nil, err - } - } - - return ScanOutputs(ctx, src), nil -} diff --git a/environment/inputs_scan.go b/environment/inputs_scan.go deleted file mode 100644 index ddb6d91d..00000000 --- a/environment/inputs_scan.go +++ /dev/null @@ -1,82 +0,0 @@ -package environment - -import ( - "context" - - "cuelang.org/go/cue" - "go.dagger.io/dagger/compiler" -) - -func isReference(val cue.Value) bool { - isRef := func(v cue.Value) bool { - _, ref := v.ReferencePath() - - if ref.String() == "" || v.Path().String() == ref.String() { - // not a reference - return false - } - - for _, s := range ref.Selectors() { - if s.IsDefinition() { - // if we reference to a definition, we skip the check - return false - } - } - - return true - } - - op, vals := val.Expr() - if op == cue.NoOp { - return isRef(val) - } - - for _, v := range vals { - // if the expr has an op (& or |, etc...), check the expr values, recursively - if isReference(v) { - return true - } - } - - return isRef(val) -} - -func ScanInputs(ctx context.Context, value *compiler.Value) []*compiler.Value { - inputs := []*compiler.Value{} - - value.Walk( - func(val *compiler.Value) bool { - if isReference(val.Cue()) { - return false - } - - if !val.HasAttr("input") { - return true - } - - inputs = append(inputs, val) - - return true - }, nil, - ) - - return inputs -} - -func ScanOutputs(ctx context.Context, value *compiler.Value) []*compiler.Value { - inputs := []*compiler.Value{} - - value.Walk( - func(val *compiler.Value) bool { - if !val.HasAttr("output") { - return true - } - - inputs = append(inputs, val) - - return true - }, nil, - ) - - return inputs -} diff --git a/environment/pipeline.go b/environment/pipeline.go deleted file mode 100644 index 6e5f08f2..00000000 --- a/environment/pipeline.go +++ /dev/null @@ -1,1248 +0,0 @@ -package environment - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/fs" - "net" - "net/url" - "strings" - "time" - - "cuelang.org/go/cue" - bkplatforms "github.com/containerd/containerd/platforms" - "github.com/docker/distribution/reference" - bk "github.com/moby/buildkit/client" - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/exporter/containerimage/exptypes" - dockerfilebuilder "github.com/moby/buildkit/frontend/dockerfile/builder" - "github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb" - bkgw "github.com/moby/buildkit/frontend/gateway/client" - bkpb "github.com/moby/buildkit/solver/pb" - digest "github.com/opencontainers/go-digest" - "github.com/rs/zerolog/log" - "gopkg.in/yaml.v3" - - "go.dagger.io/dagger/compiler" - "go.dagger.io/dagger/plancontext" - "go.dagger.io/dagger/solver" -) - -type State string - -const ( - StateComputing = State("computing") - StateCanceled = State("canceled") - 
StateFailed = State("failed") - StateCompleted = State("completed") -) - -// An execution pipeline -type Pipeline struct { - code *compiler.Value - name string - s solver.Solver - state llb.State - pctx *plancontext.Context - result bkgw.Reference - image dockerfile2llb.Image - computed *compiler.Value -} - -func NewPipeline(code *compiler.Value, s solver.Solver, pctx *plancontext.Context) *Pipeline { - return &Pipeline{ - code: code, - name: code.Path().String(), - pctx: pctx, - s: s, - state: llb.Scratch(), - computed: compiler.NewValue(), - } -} - -func (p *Pipeline) WithCustomName(name string) *Pipeline { - p.name = name - return p -} - -func (p *Pipeline) State() llb.State { - return p.state -} - -func (p *Pipeline) Result() (llb.State, error) { - if p.result == nil { - return llb.Scratch(), nil - } - return p.result.ToState() -} - -func (p *Pipeline) FS() fs.FS { - return solver.NewBuildkitFS(p.result) -} - -func (p *Pipeline) ImageConfig() dockerfile2llb.Image { - return p.image -} - -func (p *Pipeline) Computed() *compiler.Value { - return p.computed -} - -func IsComponent(v *compiler.Value) bool { - return v.Lookup("#up").Exists() -} - -func (p *Pipeline) ops() ([]*compiler.Value, error) { - ops := []*compiler.Value{} - - // dagger.#FS forward compat - // FIXME: remove this - if plancontext.IsFSValue(p.code) { - ops = append(ops, p.code) - } - - // 1. attachment array - if IsComponent(p.code) { - xops, err := p.code.Lookup("#up").List() - if err != nil { - return nil, err - } - // 'from' has an executable attached - ops = append(ops, xops...) - // 2. individual op - } else if _, err := p.code.Lookup("do").String(); err == nil { - ops = append(ops, p.code) - // 3. op array - } else if xops, err := p.code.List(); err == nil { - ops = append(ops, xops...) - } else { - // 4. error - source, err := p.code.Source() - if err != nil { - panic(err) - } - return nil, fmt.Errorf("not executable: %s (%s)", source, p.code.Path().String()) - } - return ops, nil -} - -func (p *Pipeline) Run(ctx context.Context) error { - lg := log. - Ctx(ctx). - With(). - Str("task", p.name). - Logger() - - start := time.Now() - - lg. - Info(). - Str("state", string(StateComputing)). - Msg(string(StateComputing)) - - err := p.run(ctx) - - if err != nil { - // FIXME: this should use errdefs.IsCanceled(err) - if strings.Contains(err.Error(), "context canceled") { - lg. - Error(). - Dur("duration", time.Since(start)). - Str("state", string(StateCanceled)). - Msg(string(StateCanceled)) - } else { - lg. - Error(). - Dur("duration", time.Since(start)). - Err(err). - Str("state", string(StateFailed)). - Msg(string(StateFailed)) - } - return err - } - - lg. - Info(). - Dur("duration", time.Since(start)). - Str("state", string(StateCompleted)). - Msg(string(StateCompleted)) - return nil -} - -func (p *Pipeline) run(ctx context.Context) error { - ops, err := p.ops() - if err != nil { - return err - } - - // Execute each operation in sequence - for idx, op := range ops { - // If op not concrete, interrupt without error. - // This allows gradual resolution: - // compute what you can compute.. leave the rest incomplete. - if err := op.IsConcreteR(); err != nil { - log. - Ctx(ctx). - Warn(). - Str("original_cue_error", err.Error()). - Int("op", idx). - Msg("pipeline was partially executed because of missing inputs") - return nil - } - p.state, err = p.doOp(ctx, op, p.state) - if err != nil { - return err - } - // Force a buildkit solve request at each operation, - // so that errors map to the correct cue path. 
- // FIXME: might as well change FS to make every operation - // synchronous. - p.result, err = p.s.Solve(ctx, p.state, p.pctx.Platform.Get()) - if err != nil { - return err - } - } - - return nil -} - -func (p *Pipeline) doOp(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) { - // dagger.#FS forward compat - // FIXME: remove this - if plancontext.IsFSValue(op) { - fs, err := p.pctx.FS.FromValue(op) - if err != nil { - return st, nil - } - return fs.State() - } - - do, err := op.Lookup("do").String() - if err != nil { - return st, err - } - // FIXME: make this more readable then promote to INFO - // we need a readable trace of what operations are executed. - log. - Ctx(ctx). - Debug(). - Str("pipeline", p.name). - Str("do", do). - Msg("executing operation") - switch do { - case "copy": - return p.Copy(ctx, op, st) - case "exec": - return p.Exec(ctx, op, st) - case "export": - return p.Export(ctx, op, st) - case "docker-login": - return p.DockerLogin(ctx, op, st) - case "fetch-container": - return p.FetchContainer(ctx, op, st) - case "push-container": - return p.PushContainer(ctx, op, st) - case "save-image": - return p.SaveImage(ctx, op, st) - case "fetch-git": - return p.FetchGit(ctx, op, st) - case "fetch-http": - return p.FetchHTTP(ctx, op, st) - case "local": - return p.Local(ctx, op, st) - case "load": - return p.Load(ctx, op, st) - case "workdir": - return p.Workdir(ctx, op, st) - case "subdir": - return p.Subdir(ctx, op, st) - case "docker-build": - return p.DockerBuild(ctx, op, st) - case "write-file": - return p.WriteFile(ctx, op, st) - case "mkdir": - return p.Mkdir(ctx, op, st) - default: - return st, fmt.Errorf("invalid operation: %s", op.JSON()) - } -} - -func (p *Pipeline) vertexNamef(format string, a ...interface{}) string { - prefix := fmt.Sprintf("@%s@", p.name) - name := fmt.Sprintf(format, a...) - return prefix + " " + name -} - -func (p *Pipeline) Workdir(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) { - path, err := op.Lookup("path").String() - if err != nil { - return st, err - } - return st.Dir(path), nil -} - -func (p *Pipeline) Subdir(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) { - // FIXME: this could be more optimized by carrying subdir path as metadata, - // and using it in copy, load or mount. 
- - dir, err := op.Lookup("dir").String() - if err != nil { - return st, err - } - return llb.Scratch().File( - llb.Copy( - st, - dir, - "/", - &llb.CopyInfo{ - CopyDirContentsOnly: true, - }, - ), - llb.WithCustomName(p.vertexNamef("Subdir %s", dir)), - ), nil -} - -func (p *Pipeline) Copy(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) { - // Decode copy options - src, err := op.Lookup("src").String() - if err != nil { - return st, err - } - dest, err := op.Lookup("dest").String() - if err != nil { - return st, err - } - // Execute 'from' in a tmp pipeline, and use the resulting fs - from := NewPipeline(op.Lookup("from"), p.s, p.pctx) - if err := from.Run(ctx); err != nil { - return st, err - } - return st.File( - llb.Copy( - from.State(), - src, - dest, - // FIXME: allow more configurable llb options - // For now we define the following convenience presets: - &llb.CopyInfo{ - CopyDirContentsOnly: true, - CreateDestPath: true, - AllowWildcard: true, - }, - ), - llb.WithCustomName(p.vertexNamef("Copy %s %s", src, dest)), - ), nil -} - -func (p *Pipeline) Local(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) { - dir, err := op.Lookup("dir").String() - if err != nil { - return st, err - } - - opts := []llb.LocalOption{ - llb.WithCustomName(p.vertexNamef("Local %s", dir)), - // Without hint, multiple `llb.Local` operations on the - // same path get a different digest. - llb.SessionID(p.s.SessionID()), - llb.SharedKeyHint(dir), - } - - includes, err := op.Lookup("include").List() - if err != nil { - return st, err - } - if len(includes) > 0 { - includePatterns := []string{} - for _, i := range includes { - pattern, err := i.String() - if err != nil { - return st, err - } - includePatterns = append(includePatterns, pattern) - } - opts = append(opts, llb.IncludePatterns(includePatterns)) - } - - excludes, err := op.Lookup("exclude").List() - if err != nil { - return st, err - } - // Excludes .dagger directory by default - excludePatterns := []string{"**/.dagger/"} - if len(excludes) > 0 { - for _, i := range excludes { - pattern, err := i.String() - if err != nil { - return st, err - } - excludePatterns = append(excludePatterns, pattern) - } - } - opts = append(opts, llb.ExcludePatterns(excludePatterns)) - - // FIXME: Remove the `Copy` and use `Local` directly. - // - // Copy'ing is a costly operation which should be unnecessary. - // However, using llb.Local directly breaks caching sometimes for unknown reasons. - return st.File( - llb.Copy( - llb.Local( - dir, - opts..., - ), - "/", - "/", - ), - llb.WithCustomName(p.vertexNamef("Local %s [copy]", dir)), - ), nil -} - -func (p *Pipeline) Exec(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) { - opts := []llb.RunOption{} - var cmd struct { - Args []string - Dir string - Always bool - } - - if err := op.Decode(&cmd); err != nil { - return st, err - } - // args - opts = append(opts, llb.Args(cmd.Args)) - // dir - opts = append(opts, llb.Dir(cmd.Dir)) - - // env - if env := op.Lookup("env"); env.Exists() { - envs, err := op.Lookup("env").Fields() - if err != nil { - return st, err - } - for _, env := range envs { - v, err := env.Value.String() - if err != nil { - return st, err - } - opts = append(opts, llb.AddEnv(env.Label(), v)) - } - } - - // always? 
- if cmd.Always { - // FIXME: also disables persistent cache directories - // There's an ongoing proposal that would fix this: https://github.com/moby/buildkit/issues/1213 - opts = append(opts, llb.IgnoreCache) - } - - if hosts := op.Lookup("hosts"); hosts.Exists() { - fields, err := hosts.Fields() - if err != nil { - return st, err - } - for _, host := range fields { - s, err := host.Value.String() - if err != nil { - return st, err - } - - if err != nil { - return st, err - } - opts = append(opts, llb.AddExtraHost(host.Label(), net.ParseIP(s))) - } - } - - if user := op.Lookup("user"); user.Exists() { - u, err := user.String() - if err != nil { - return st, err - } - opts = append(opts, llb.User(u)) - } - - // mounts - if mounts := op.Lookup("mount"); mounts.Exists() { - mntOpts, err := p.mountAll(ctx, mounts) - if err != nil { - return st, err - } - opts = append(opts, mntOpts...) - } - - // marker for status events - // FIXME - args := make([]string, 0, len(cmd.Args)) - for _, a := range cmd.Args { - args = append(args, fmt.Sprintf("%q", a)) - } - opts = append(opts, llb.WithCustomName(p.vertexNamef("Exec [%s]", strings.Join(args, ", ")))) - - // --> Execute - return st.Run(opts...).Root(), nil -} - -func (p *Pipeline) mountAll(ctx context.Context, mounts *compiler.Value) ([]llb.RunOption, error) { - opts := []llb.RunOption{} - fields, err := mounts.Fields() - if err != nil { - return nil, err - } - for _, mnt := range fields { - o, err := p.mount(ctx, mnt.Label(), mnt.Value) - if err != nil { - return nil, err - } - opts = append(opts, o) - } - return opts, err -} - -func (p *Pipeline) mount(ctx context.Context, dest string, mnt *compiler.Value) (llb.RunOption, error) { - if s, err := mnt.String(); err == nil { - // eg. mount: "/foo": "cache" - switch s { - case "cache": - return llb.AddMount( - dest, - llb.Scratch().File( - llb.Mkdir("/cache", fs.FileMode(0755)), - llb.WithCustomName(p.vertexNamef("Mkdir /cache (cache mount %s)", dest)), - ), - // FIXME: disabled persistent cache mount (gh issue #495) - // llb.Scratch(), - // llb.AsPersistentCacheDir( - // p.canonicalPath(mnt), - // llb.CacheMountShared, - // ), - ), nil - case "tmpfs": - return llb.AddMount( - dest, - llb.Scratch(), - llb.Tmpfs(), - ), nil - default: - return nil, fmt.Errorf("invalid mount source: %q", s) - } - } - - // eg. mount: "/foo": secret: mysecret - if secret := mnt.Lookup("secret"); secret.Exists() { - s, err := p.pctx.Secrets.FromValue(secret) - if err != nil { - return nil, err - } - - return llb.AddSecret(dest, - llb.SecretID(s.ID()), - llb.SecretFileOpt(0, 0, 0400), // uid, gid, mask) - ), nil - } - - // eg. mount: "/var/run/docker.sock": stream: mystream - if stream := mnt.Lookup("stream"); stream.Exists() { - s, err := p.pctx.Services.FromValue(stream) - if err != nil { - return nil, err - } - - return llb.AddSSHSocket( - llb.SSHID(s.ID()), - llb.SSHSocketTarget(dest), - ), nil - } - - // eg. 
mount: "/foo": { from: www.source } - if !mnt.Lookup("from").Exists() { - return nil, fmt.Errorf("invalid mount: should have %s structure", - "{from: _, path: string | *\"/\"}") - } - from := NewPipeline(mnt.Lookup("from"), p.s, p.pctx) - if err := from.Run(ctx); err != nil { - return nil, err - } - // possibly construct mount options for LLB from - var mo []llb.MountOption - // handle "path" option - if mp := mnt.Lookup("path"); mp.Exists() { - mps, err := mp.String() - if err != nil { - return nil, err - } - mo = append(mo, llb.SourcePath(mps)) - } - return llb.AddMount(dest, from.State(), mo...), nil -} - -// canonicalPath returns the canonical path of `v` -// If the pipeline is a reference to another pipeline, `canonicalPath()` will -// return the path of the reference of `v`. -// FIXME: this doesn't work with references of references. -func (p *Pipeline) canonicalPath(v *compiler.Value) string { - // value path - vPath := v.Path().Selectors() - - // pipeline path - pipelinePath := p.code.Path().Selectors() - - // check if the pipeline is a reference - _, ref := p.code.ReferencePath() - if len(ref.Selectors()) == 0 { - return v.Path().String() - } - canonicalPipelinePath := ref.Selectors() - - // replace the pipeline path with the canonical pipeline path - // 1. strip the pipeline path from the value path - vPath = vPath[len(pipelinePath):] - // 2. inject the canonical pipeline path - vPath = append(canonicalPipelinePath, vPath...) - - return cue.MakePath(vPath...).String() -} - -func (p *Pipeline) Export(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) { - source, err := op.Lookup("source").String() - if err != nil { - return st, err - } - format, err := op.Lookup("format").String() - if err != nil { - return st, err - } - contents, err := fs.ReadFile(p.FS(), source) - if err != nil { - return st, fmt.Errorf("export %s: %w", source, err) - } - switch format { - case "string": - log. - Ctx(ctx). - Debug(). - Bytes("contents", contents). - Msg("exporting string") - - if err := p.computed.FillPath(cue.MakePath(), string(contents)); err != nil { - return st, err - } - case "json": - var o interface{} - o, err := unmarshalAnything(contents, json.Unmarshal) - if err != nil { - return st, err - } - - log. - Ctx(ctx). - Debug(). - Interface("contents", o). - Msg("exporting json") - - if err := p.computed.FillPath(cue.MakePath(), o); err != nil { - return st, err - } - case "yaml": - var o interface{} - o, err := unmarshalAnything(contents, yaml.Unmarshal) - if err != nil { - return st, err - } - - log. - Ctx(ctx). - Debug(). - Interface("contents", o). - Msg("exporting yaml") - - if err := p.computed.FillPath(cue.MakePath(), o); err != nil { - return st, err - } - default: - return st, fmt.Errorf("unsupported export format: %q", format) - } - return st, nil -} - -type unmarshaller func([]byte, interface{}) error - -func unmarshalAnything(data []byte, fn unmarshaller) (interface{}, error) { - // unmarshalling a map into interface{} yields an error: - // "unsupported Go type for map key (interface {})" - // we want to attempt to unmarshal to a map[string]interface{} first - var oMap map[string]interface{} - if err := fn(data, &oMap); err == nil { - return oMap, nil - } - - // If the previous attempt didn't work, we might be facing a scalar (e.g. - // bool). - // Try to unmarshal to interface{} directly. 
- var o interface{} - err := fn(data, &o) - return o, err -} - -// parseStringOrSecret retrieve secret as plain text or retrieve string -func parseStringOrSecret(pctx *plancontext.Context, v *compiler.Value) (string, error) { - // Check if the value is a string, return as is - if value, err := v.String(); err == nil { - return value, nil - } - - // If we get here, it's a secret - secret, err := pctx.Secrets.FromValue(v) - if err != nil { - return "", err - } - return secret.PlainText(), nil -} - -func (p *Pipeline) Load(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) { - // Execute 'from' in a tmp pipeline, and use the resulting fs - from := NewPipeline(op.Lookup("from"), p.s, p.pctx) - if err := from.Run(ctx); err != nil { - return st, err - } - p.image = from.ImageConfig() - return from.State(), nil -} - -func (p *Pipeline) DockerLogin(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) { - username, err := op.Lookup("username").String() - if err != nil { - return st, err - } - - // FIXME If we could create secret directly in `cue`, we could avoid using - // that function - // But currently it's not possible because ECR secret's is a string - // so we need to handle both options (string & secret) - secretValue, err := parseStringOrSecret(p.pctx, op.Lookup("secret")) - if err != nil { - return st, err - } - - target, err := op.Lookup("target").String() - if err != nil { - return st, err - } - - p.s.AddCredentials(target, username, secretValue) - log. - Ctx(ctx). - Debug(). - Str("target", target). - Msg("docker login to registry") - - return st, nil -} - -func (p *Pipeline) FetchContainer(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) { - rawRef, err := op.Lookup("ref").String() - if err != nil { - return st, err - } - - ref, err := reference.ParseNormalizedNamed(rawRef) - if err != nil { - return st, fmt.Errorf("failed to parse ref %s: %w", rawRef, err) - } - // Add the default tag "latest" to a reference if it only has a repo name. - ref = reference.TagNameOnly(ref) - - st = llb.Image( - ref.String(), - llb.WithCustomName(p.vertexNamef("FetchContainer %s", rawRef)), - ) - - // Load image metadata and convert to to LLB. 
- platform := p.pctx.Platform.Get() - p.image, _, err = p.s.ResolveImageConfig(ctx, ref.String(), llb.ResolveImageConfigOpt{ - LogName: p.vertexNamef("load metadata for %s", ref.String()), - Platform: &platform, - }) - if err != nil { - return st, err - } - - return applyImageToState(p.image, st), nil -} - -// applyImageToState converts an image config into LLB instructions -func applyImageToState(image dockerfile2llb.Image, st llb.State) llb.State { - // FIXME: there are unhandled sections of the image config - for _, env := range image.Config.Env { - k, v := parseKeyValue(env) - st = st.AddEnv(k, v) - } - if image.Config.WorkingDir != "" { - st = st.Dir(image.Config.WorkingDir) - } - if image.Config.User != "" { - st = st.User(image.Config.User) - } - return st -} - -func parseKeyValue(env string) (string, string) { - parts := strings.SplitN(env, "=", 2) - v := "" - if len(parts) > 1 { - v = parts[1] - } - - return parts[0], v -} - -func (p *Pipeline) PushContainer(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) { - rawRef, err := op.Lookup("ref").String() - if err != nil { - return st, err - } - - ref, err := reference.ParseNormalizedNamed(rawRef) - if err != nil { - return st, fmt.Errorf("failed to parse ref %s: %w", rawRef, err) - } - // Add the default tag "latest" to a reference if it only has a repo name. - ref = reference.TagNameOnly(ref) - - resp, err := p.s.Export(ctx, p.State(), &p.image, bk.ExportEntry{ - Type: bk.ExporterImage, - Attrs: map[string]string{ - "name": ref.String(), - "push": "true", - }, - }, p.pctx.Platform.Get()) - - if err != nil { - return st, err - } - - if dgst, ok := resp.ExporterResponse["containerimage.digest"]; ok { - imageRef := fmt.Sprintf( - "%s@%s", - resp.ExporterResponse["image.name"], - dgst, - ) - - return st.File( - llb.Mkdir("/dagger", fs.FileMode(0755)), - llb.WithCustomName(p.vertexNamef("Mkdir /dagger")), - ).File( - llb.Mkfile("/dagger/image_digest", fs.FileMode(0644), []byte(dgst)), - llb.WithCustomName(p.vertexNamef("Storing image digest to /dagger/image_digest")), - ).File( - llb.Mkfile("/dagger/image_ref", fs.FileMode(0644), []byte(imageRef)), - llb.WithCustomName(p.vertexNamef("Storing image ref to /dagger/image_ref")), - ), nil - } - - return st, err -} - -func (p *Pipeline) SaveImage(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) { - tag, err := op.Lookup("tag").String() - if err != nil { - return st, err - } - - dest, err := op.Lookup("dest").String() - if err != nil { - return st, err - } - - pipeR, pipeW := io.Pipe() - var ( - errCh = make(chan error) - image []byte - ) - go func() { - var err error - - image, err = io.ReadAll(pipeR) - errCh <- err - }() - - resp, err := p.s.Export(ctx, p.State(), &p.image, bk.ExportEntry{ - Type: bk.ExporterDocker, - Attrs: map[string]string{ - "name": tag, - }, - Output: func(_ map[string]string) (io.WriteCloser, error) { - return pipeW, nil - }, - }, p.pctx.Platform.Get()) - - if err != nil { - return st, err - } - - if err := <-errCh; err != nil { - return st, err - } - - if id, ok := resp.ExporterResponse["containerimage.config.digest"]; ok { - st = st.File( - llb.Mkdir("/dagger", fs.FileMode(0755)), - llb.WithCustomName(p.vertexNamef("Mkdir /dagger")), - ).File( - llb.Mkfile("/dagger/image_id", fs.FileMode(0644), []byte(id)), - llb.WithCustomName(p.vertexNamef("Storing image id to /dagger/image_id")), - ) - } - - return st.File( - llb.Mkfile(dest, 0644, image), - llb.WithCustomName(p.vertexNamef("SaveImage %s", dest)), - ), nil -} - 
-func (p *Pipeline) FetchGit(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) { - remote, err := op.Lookup("remote").String() - if err != nil { - return st, err - } - ref, err := op.Lookup("ref").String() - if err != nil { - return st, err - } - - remoteRedacted := remote - if u, err := url.Parse(remote); err == nil { - remoteRedacted = u.Redacted() - } - - gitOpts := []llb.GitOption{} - var opts struct { - KeepGitDir bool - } - - if err := op.Decode(&opts); err != nil { - return st, err - } - - if opts.KeepGitDir { - gitOpts = append(gitOpts, llb.KeepGitDir()) - } - // Secret - if authToken := op.Lookup("authToken"); authToken.Exists() { - authTokenSecret, err := p.pctx.Secrets.FromValue(authToken) - if err != nil { - return st, err - } - gitOpts = append(gitOpts, llb.AuthTokenSecret(authTokenSecret.ID())) - } - if authHeader := op.Lookup("authHeader"); authHeader.Exists() { - authHeaderSecret, err := p.pctx.Secrets.FromValue(authHeader) - if err != nil { - return st, err - } - gitOpts = append(gitOpts, llb.AuthHeaderSecret(authHeaderSecret.ID())) - } - - gitOpts = append(gitOpts, llb.WithCustomName(p.vertexNamef("FetchGit %s@%s", remoteRedacted, ref))) - - return llb.Git( - remote, - ref, - gitOpts..., - ), nil -} - -func (p *Pipeline) FetchHTTP(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) { - link, err := op.Lookup("url").String() - if err != nil { - return st, err - } - - linkRedacted := link - if u, err := url.Parse(link); err == nil { - linkRedacted = u.Redacted() - } - - httpOpts := []llb.HTTPOption{} - var opts struct { - Checksum string - Filename string - Mode int64 - UID int - GID int - } - - if err := op.Decode(&opts); err != nil { - return st, err - } - - if opts.Checksum != "" { - dgst, err := digest.Parse(opts.Checksum) - if err != nil { - return st, err - } - httpOpts = append(httpOpts, llb.Checksum(dgst)) - } - if opts.Filename != "" { - httpOpts = append(httpOpts, llb.Filename(opts.Filename)) - } - if opts.Mode != 0 { - httpOpts = append(httpOpts, llb.Chmod(fs.FileMode(opts.Mode))) - } - if opts.UID != 0 && opts.GID != 0 { - httpOpts = append(httpOpts, llb.Chown(opts.UID, opts.GID)) - } - - httpOpts = append(httpOpts, llb.WithCustomName(p.vertexNamef("FetchHTTP %s", linkRedacted))) - - return llb.HTTP( - link, - httpOpts..., - ), nil -} - -func (p *Pipeline) DockerBuild(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) { - var ( - dockerContext = op.Lookup("context") - dockerfile = op.Lookup("dockerfile") - - contextDef *bkpb.Definition - dockerfileDef *bkpb.Definition - - err error - ) - - if !dockerContext.Exists() && !dockerfile.Exists() { - return st, errors.New("context or dockerfile required") - } - - // docker build context. This can come from another component, so we need to - // compute it first. 
- if dockerContext.Exists() { - from := NewPipeline(op.Lookup("context"), p.s, p.pctx) - if err := from.Run(ctx); err != nil { - return st, err - } - contextDef, err = p.s.Marshal(ctx, from.State()) - if err != nil { - return st, err - } - dockerfileDef = contextDef - } - - // Inlined dockerfile: need to be converted to LLB - if dockerfile.Exists() { - content, err := dockerfile.String() - if err != nil { - return st, err - } - dockerfileDef, err = p.s.Marshal(ctx, - llb.Scratch().File( - llb.Mkfile("/Dockerfile", 0644, []byte(content)), - ), - ) - if err != nil { - return st, err - } - if contextDef == nil { - contextDef = dockerfileDef - } - } - - opts, err := dockerBuildOpts(op, p.pctx) - if err != nil { - return st, err - } - - if p.s.NoCache() { - opts["no-cache"] = "" - } - - // Set platform to configured one if no one is defined - if opts["platform"] == "" { - opts["platform"] = bkplatforms.Format(p.pctx.Platform.Get()) - } - - req := bkgw.SolveRequest{ - Frontend: "dockerfile.v0", - FrontendOpt: opts, - FrontendInputs: map[string]*bkpb.Definition{ - dockerfilebuilder.DefaultLocalNameContext: contextDef, - dockerfilebuilder.DefaultLocalNameDockerfile: dockerfileDef, - }, - } - - res, err := p.s.SolveRequest(ctx, req) - if err != nil { - return st, err - } - if meta, ok := res.Metadata[exptypes.ExporterImageConfigKey]; ok { - if err := json.Unmarshal(meta, &p.image); err != nil { - return st, fmt.Errorf("failed to unmarshal image config: %w", err) - } - } - - ref, err := res.SingleRef() - if err != nil { - return st, err - } - st, err = ref.ToState() - if err != nil { - return st, err - } - return applyImageToState(p.image, st), nil -} - -func dockerBuildOpts(op *compiler.Value, pctx *plancontext.Context) (map[string]string, error) { - opts := map[string]string{} - - if dockerfilePath := op.Lookup("dockerfilePath"); dockerfilePath.Exists() { - filename, err := dockerfilePath.String() - if err != nil { - return nil, err - } - opts["filename"] = filename - } - - if target := op.Lookup("target"); target.Exists() { - tgr, err := target.String() - if err != nil { - return nil, err - } - opts["target"] = tgr - } - - if hosts := op.Lookup("hosts"); hosts.Exists() { - p := []string{} - fields, err := hosts.Fields() - if err != nil { - return nil, err - } - for _, host := range fields { - s, err := host.Value.String() - if err != nil { - return nil, err - } - p = append(p, host.Label()+"="+s) - } - if len(p) > 0 { - opts["add-hosts"] = strings.Join(p, ",") - } - } - - if buildArgs := op.Lookup("buildArg"); buildArgs.Exists() { - fields, err := buildArgs.Fields() - if err != nil { - return nil, err - } - for _, buildArg := range fields { - v, err := parseStringOrSecret(pctx, buildArg.Value) - if err != nil { - return nil, err - } - opts["build-arg:"+buildArg.Label()] = v - } - } - - if labels := op.Lookup("label"); labels.Exists() { - fields, err := labels.Fields() - if err != nil { - return nil, err - } - for _, label := range fields { - s, err := label.Value.String() - if err != nil { - return nil, err - } - opts["label:"+label.Label()] = s - } - } - - if platforms := op.Lookup("platforms"); platforms.Exists() { - p := []string{} - list, err := platforms.List() - if err != nil { - return nil, err - } - - for _, platform := range list { - s, err := platform.String() - if err != nil { - return nil, err - } - p = append(p, s) - } - - if len(p) > 0 { - opts["platform"] = strings.Join(p, ",") - } - if len(p) > 1 { - opts["multi-platform"] = "true" - } - } - - return opts, nil -} - -func (p 
*Pipeline) WriteFile(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) {
-	var content []byte
-	var err error
-
-	switch kind := op.Lookup("content").Kind(); kind {
-	case cue.BytesKind:
-		content, err = op.Lookup("content").Bytes()
-	case cue.StringKind:
-		var str string
-		str, err = op.Lookup("content").String()
-		if err == nil {
-			content = []byte(str)
-		}
-	case cue.BottomKind:
-		err = fmt.Errorf("%s: WriteFile content is not set", p.canonicalPath(op))
-	default:
-		err = fmt.Errorf("%s: unhandled data type in WriteFile: %s", p.canonicalPath(op), kind)
-	}
-	if err != nil {
-		return st, err
-	}
-
-	dest, err := op.Lookup("dest").String()
-	if err != nil {
-		return st, err
-	}
-
-	mode, err := op.Lookup("mode").Int64()
-	if err != nil {
-		return st, err
-	}
-
-	return st.File(
-		llb.Mkfile(dest, fs.FileMode(mode), content),
-		llb.WithCustomName(p.vertexNamef("WriteFile %s", dest)),
-	), nil
-}
-
-func (p *Pipeline) Mkdir(ctx context.Context, op *compiler.Value, st llb.State) (llb.State, error) {
-	pathString, err := op.Lookup("path").String()
-	if err != nil {
-		return st, err
-	}
-
-	dir, err := op.Lookup("dir").String()
-	if err != nil {
-		return st, err
-	}
-
-	mode, err := op.Lookup("mode").Int64()
-	if err != nil {
-		return st, err
-	}
-
-	return st.Dir(dir).File(
-		llb.Mkdir(pathString, fs.FileMode(mode)),
-		llb.WithCustomName(p.vertexNamef("Mkdir %s", pathString)),
-	), nil
-}
diff --git a/plan/plan.go b/plan/plan.go
index 361fb3cf..0354c0f0 100644
--- a/plan/plan.go
+++ b/plan/plan.go
@@ -10,7 +10,6 @@ import (
 	cueflow "cuelang.org/go/tools/flow"
 	"github.com/rs/zerolog/log"
 	"go.dagger.io/dagger/compiler"
-	"go.dagger.io/dagger/environment"
 	"go.dagger.io/dagger/pkg"
 	"go.dagger.io/dagger/plan/task"
 	"go.dagger.io/dagger/plancontext"
@@ -207,7 +206,7 @@ func newRunner(pctx *plancontext.Context, s solver.Solver, computed *compiler.Va
 		ctx, span := otel.Tracer("dagger").Start(ctx, fmt.Sprintf("up: %s", t.Path().String()))
 		defer span.End()
 
-		lg.Info().Str("state", string(environment.StateComputing)).Msg(string(environment.StateComputing))
+		lg.Info().Str("state", string(task.StateComputing)).Msg(string(task.StateComputing))
 
 		// Debug: dump dependencies
 		for _, dep := range t.Dependencies() {
@@ -219,14 +218,14 @@ func newRunner(pctx *plancontext.Context, s solver.Solver, computed *compiler.Va
 		if err != nil {
 			// FIXME: this should use errdefs.IsCanceled(err)
 			if strings.Contains(err.Error(), "context canceled") {
-				lg.Error().Dur("duration", time.Since(start)).Str("state", string(environment.StateCanceled)).Msg(string(environment.StateCanceled))
+				lg.Error().Dur("duration", time.Since(start)).Str("state", string(task.StateCanceled)).Msg(string(task.StateCanceled))
 			} else {
-				lg.Error().Dur("duration", time.Since(start)).Err(err).Str("state", string(environment.StateFailed)).Msg(string(environment.StateFailed))
+				lg.Error().Dur("duration", time.Since(start)).Err(err).Str("state", string(task.StateFailed)).Msg(string(task.StateFailed))
 			}
 			return fmt.Errorf("%s: %w", t.Path().String(), err)
 		}
 
-		lg.Info().Dur("duration", time.Since(start)).Str("state", string(environment.StateCompleted)).Msg(string(environment.StateCompleted))
+		lg.Info().Dur("duration", time.Since(start)).Str("state", string(task.StateCompleted)).Msg(string(task.StateCompleted))
 
 		// If the result is not concrete (e.g. empty value), there's nothing to merge.
 		if !result.IsConcrete() {
diff --git a/plan/task/pipeline.go b/plan/task/pipeline.go
deleted file mode 100644
index 980990d4..00000000
--- a/plan/task/pipeline.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package task
-
-import (
-	"context"
-
-	"go.dagger.io/dagger/compiler"
-	"go.dagger.io/dagger/environment"
-	"go.dagger.io/dagger/plancontext"
-	"go.dagger.io/dagger/solver"
-)
-
-func init() {
-	Register("#up", func() Task { return &pipelineTask{} })
-}
-
-// pipelineTask is an adapter for legacy pipelines (`#up`).
-// FIXME: remove once fully migrated to Europa.
-type pipelineTask struct {
-}
-
-func (c *pipelineTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error) {
-	p := environment.NewPipeline(v, s, pctx)
-	if err := p.Run(ctx); err != nil {
-		return nil, err
-	}
-	return p.Computed(), nil
-}
diff --git a/plan/task/task.go b/plan/task/task.go
index dfccd762..2379f597 100644
--- a/plan/task/task.go
+++ b/plan/task/task.go
@@ -8,7 +8,6 @@ import (
 	"cuelang.org/go/cue"
 
 	"go.dagger.io/dagger/compiler"
-	"go.dagger.io/dagger/environment"
 	"go.dagger.io/dagger/pkg"
 	"go.dagger.io/dagger/plancontext"
 	"go.dagger.io/dagger/solver"
@@ -23,6 +22,16 @@ var (
 		cue.Hid("_name", pkg.DaggerPackage))
 )
 
+// State is the state of the task.
+type State string
+
+const (
+	StateComputing = State("computing")
+	StateCanceled  = State("canceled")
+	StateFailed    = State("failed")
+	StateCompleted = State("completed")
+)
+
 type NewFunc func() Task
 
 type Task interface {
@@ -52,9 +61,9 @@ func New(typ string) Task {
 
 func Lookup(v *compiler.Value) (Task, error) {
 	// FIXME: legacy pipelines
-	if environment.IsComponent(v) {
-		return New("#up"), nil
-	}
+	// if environment.IsComponent(v) {
+	// 	return New("#up"), nil
+	// }
 
 	if v.Kind() != cue.StructKind {
 		return nil, ErrNotTask
diff --git a/tests/tasks/pull/cue.mod/pkg/.gitignore b/tests/tasks/pull/cue.mod/pkg/.gitignore
index d587a7ab..331f139d 100644
--- a/tests/tasks/pull/cue.mod/pkg/.gitignore
+++ b/tests/tasks/pull/cue.mod/pkg/.gitignore
@@ -1,5 +1,5 @@
 # generated by dagger
+dagger.lock
 alpha.dagger.io
 dagger.io
-universe.dagger.io
-dagger.lock
+universe.dagger.io
\ No newline at end of file