add an internal-only timing flag
Makefile (6 changed lines)
@@ -586,7 +586,7 @@ bench-three: bench-three-esbuild bench-three-rollup bench-three-webpack bench-th

 bench-three-esbuild: esbuild | bench/three
 	rm -fr bench/three/esbuild
-	time -p ./esbuild --bundle --global-name=THREE --sourcemap --minify bench/three/src/entry.js --outfile=bench/three/esbuild/entry.esbuild.js
+	time -p ./esbuild --bundle --global-name=THREE --sourcemap --minify bench/three/src/entry.js --outfile=bench/three/esbuild/entry.esbuild.js --timing
 	du -h bench/three/esbuild/entry.esbuild.js*
 	shasum bench/three/esbuild/entry.esbuild.js*
@@ -695,7 +695,7 @@ bench-rome: bench-rome-esbuild bench-rome-webpack bench-rome-webpack5 bench-rome

 bench-rome-esbuild: esbuild | bench/rome bench/rome-verify
 	rm -fr bench/rome/esbuild
-	time -p ./esbuild --bundle --sourcemap --minify bench/rome/src/entry.ts --outfile=bench/rome/esbuild/rome.esbuild.js --platform=node
+	time -p ./esbuild --bundle --sourcemap --minify bench/rome/src/entry.ts --outfile=bench/rome/esbuild/rome.esbuild.js --platform=node --timing
 	du -h bench/rome/esbuild/rome.esbuild.js*
 	shasum bench/rome/esbuild/rome.esbuild.js*
 	cd bench/rome-verify && rm -fr esbuild && ROME_CACHE=0 node ../rome/esbuild/rome.esbuild.js bundle packages/rome esbuild
@@ -835,7 +835,7 @@ READMIN_ESBUILD_FLAGS += --define:global=window
 READMIN_ESBUILD_FLAGS += --loader:.js=jsx
 READMIN_ESBUILD_FLAGS += --minify
 READMIN_ESBUILD_FLAGS += --sourcemap
-READMIN_ESBUILD_FLAGS += --log-level=debug
+READMIN_ESBUILD_FLAGS += --timing

 bench-readmin-esbuild: esbuild | bench/readmin
 	rm -fr bench/readmin/esbuild
cmd/esbuild/main.go

@@ -7,6 +7,7 @@ import (
 	"strings"
 	"time"

+	"github.com/evanw/esbuild/internal/api_helpers"
 	"github.com/evanw/esbuild/internal/logger"
 	"github.com/evanw/esbuild/pkg/cli"
 )
@@ -142,6 +143,11 @@ func main() {
 		case strings.HasPrefix(arg, "--trace="):
 			traceFile = arg[len("--trace="):]

+		case strings.HasPrefix(arg, "--timing"):
+			// This is a hidden flag because it's only intended for debugging esbuild
+			// itself. The output is not documented and not stable.
+			api_helpers.UseTimer = true
+
 		case strings.HasPrefix(arg, "--cpuprofile="):
 			cpuprofileFile = arg[len("--cpuprofile="):]
internal/api_helpers/use_timer.go (new file, 7 lines)
@@ -0,0 +1,7 @@
+package api_helpers
+
+// This flag is set by the CLI to activate the timer. It's put here instead of
+// by the timer to discourage code from checking this flag. Only the code that
+// creates the root timer should check this flag. Other code should check that
+// the timer is not null to detect if the timer is being used or not.
+var UseTimer bool
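The comment above relies on a Go idiom worth spelling out: a method with a pointer receiver may be called on a nil receiver, as long as the method never dereferences it unguarded. That is what lets the rest of the code call timer methods unconditionally while only the root-timer creation site consults UseTimer. A minimal standalone sketch of the idiom (the Tracer type here is hypothetical, not esbuild code):

package main

import "fmt"

// Tracer stands in for a timer whose methods are nil-safe: a disabled
// tracer is just a nil pointer, and every call on it is a cheap no-op.
type Tracer struct {
	events []string
}

func (t *Tracer) Begin(name string) {
	if t != nil { // calling a pointer-receiver method on nil is legal in Go
		t.events = append(t.events, "begin "+name)
	}
}

func (t *Tracer) End(name string) {
	if t != nil {
		t.events = append(t.events, "end "+name)
	}
}

func main() {
	var disabled *Tracer // the "flag not set" case: nil, records nothing
	disabled.Begin("Scan phase")
	disabled.End("Scan phase")

	enabled := &Tracer{} // the "flag set" case: a real root timer
	enabled.Begin("Scan phase")
	enabled.End("Scan phase")
	fmt.Println(disabled == nil, enabled.events)
}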
internal/bundler/bundler.go

@@ -10,7 +10,6 @@ import (
 	"strings"
 	"sync"
 	"syscall"
-	"time"
 	"unicode"
 	"unicode/utf8"
@@ -914,6 +913,7 @@ type scanner struct {
 	res     resolver.Resolver
 	caches  *cache.CacheSet
 	options config.Options
+	timer   *helpers.Timer

 	// This is not guarded by a mutex because it's only ever modified by a single
 	// thread. Note that not all results in the "results" array are necessarily
@@ -937,11 +937,10 @@ func ScanBundle(
 	caches *cache.CacheSet,
 	entryPoints []EntryPoint,
 	options config.Options,
+	timer *helpers.Timer,
 ) Bundle {
-	start := time.Now()
-	if log.Level <= logger.LevelVerbose {
-		log.AddVerbose(nil, logger.Loc{}, "Started the scan phase")
-	}
+	timer.Begin("Scan phase")
+	defer timer.End("Scan phase")

 	applyOptionDefaults(&options)
@@ -951,6 +950,7 @@ func ScanBundle(
 		res:           res,
 		caches:        caches,
 		options:       options,
+		timer:         timer,
 		results:       make([]parseResult, 0, caches.SourceIndexCache.LenHint()),
 		visited:       make(map[logger.Path]uint32),
 		resultChannel: make(chan parseResult),
@@ -977,10 +977,6 @@ func ScanBundle(
 	s.scanAllDependencies()
 	files := s.processScannedFiles()

-	if log.Level <= logger.LevelVerbose {
-		log.AddVerbose(nil, logger.Loc{}, fmt.Sprintf("Ended the scan phase (%dms)", time.Since(start).Milliseconds()))
-	}
-
 	return Bundle{
 		fs:  fs,
 		res: res,
@@ -1125,6 +1121,9 @@ func (s *scanner) allocateSourceIndex(path logger.Path, kind cache.SourceIndexKi
 }

 func (s *scanner) preprocessInjectedFiles() {
+	s.timer.Begin("Preprocess injected files")
+	defer s.timer.End("Preprocess injected files")
+
 	injectedFiles := make([]config.InjectedFile, 0, len(s.options.InjectedDefines)+len(s.options.InjectAbsPaths))
 	duplicateInjectedFiles := make(map[string]bool)
 	injectWaitGroup := sync.WaitGroup{}
@@ -1213,6 +1212,9 @@ func (s *scanner) preprocessInjectedFiles() {
 }

 func (s *scanner) addEntryPoints(entryPoints []EntryPoint) []graph.EntryPoint {
+	s.timer.Begin("Add entry points")
+	defer s.timer.End("Add entry points")
+
 	// Reserve a slot for each entry point
 	entryMetas := make([]graph.EntryPoint, 0, len(entryPoints)+1)
@@ -1463,6 +1465,9 @@ func lowestCommonAncestorDirectory(fs fs.FS, entryPoints []graph.EntryPoint) str
 }

 func (s *scanner) scanAllDependencies() {
+	s.timer.Begin("Scan all dependencies")
+	defer s.timer.End("Scan all dependencies")
+
 	// Continue scanning until all dependencies have been discovered
 	for s.remaining > 0 {
 		result := <-s.resultChannel
@@ -1515,6 +1520,9 @@ func (s *scanner) scanAllDependencies() {
 }

 func (s *scanner) processScannedFiles() []scannerFile {
+	s.timer.Begin("Process scanned files")
+	defer s.timer.End("Process scanned files")
+
 	// Now that all files have been scanned, process the final file import records
 	for i, result := range s.results {
 		if !result.ok {
@@ -1830,11 +1838,9 @@ func applyOptionDefaults(options *config.Options) {
 	}
 }

-func (b *Bundle) Compile(log logger.Log, options config.Options) ([]graph.OutputFile, string) {
-	start := time.Now()
-	if log.Level <= logger.LevelVerbose {
-		log.AddVerbose(nil, logger.Loc{}, "Started the compile phase")
-	}
+func (b *Bundle) Compile(log logger.Log, options config.Options, timer *helpers.Timer) ([]graph.OutputFile, string) {
+	timer.Begin("Compile phase")
+	defer timer.End("Compile phase")

 	applyOptionDefaults(&options)
@@ -1852,13 +1858,14 @@ func (b *Bundle) Compile(log logger.Log, options config.Options) ([]graph.Output
 	allReachableFiles := findReachableFiles(files, b.entryPoints)

 	// Compute source map data in parallel with linking
+	timer.Begin("Spawn source map tasks")
 	dataForSourceMaps := b.computeDataForSourceMapsInParallel(&options, allReachableFiles)
+	timer.End("Spawn source map tasks")

 	var resultGroups [][]graph.OutputFile
-	if options.CodeSplitting {
-		// If code splitting is enabled, link all entry points together
-		c := newLinkerContext(&options, log, b.fs, b.res, files, b.entryPoints, allReachableFiles, dataForSourceMaps)
-		resultGroups = [][]graph.OutputFile{c.link()}
+	if options.CodeSplitting || len(b.entryPoints) == 1 {
+		// If code splitting is enabled or if there's only one entry point, link all entry points together
+		resultGroups = [][]graph.OutputFile{link(&options, timer, log, b.fs, b.res, files, b.entryPoints, allReachableFiles, dataForSourceMaps)}
 	} else {
 		// Otherwise, link each entry point with the runtime file separately
 		waitGroup := sync.WaitGroup{}
@@ -1867,9 +1874,10 @@ func (b *Bundle) Compile(log logger.Log, options config.Options) ([]graph.Output
 			waitGroup.Add(1)
 			go func(i int, entryPoint graph.EntryPoint) {
 				entryPoints := []graph.EntryPoint{entryPoint}
+				forked := timer.Fork()
 				reachableFiles := findReachableFiles(files, entryPoints)
-				c := newLinkerContext(&options, log, b.fs, b.res, files, entryPoints, reachableFiles, dataForSourceMaps)
-				resultGroups[i] = c.link()
+				resultGroups[i] = link(&options, forked, log, b.fs, b.res, files, entryPoints, reachableFiles, dataForSourceMaps)
+				timer.Join(forked)
 				waitGroup.Done()
 			}(i, entryPoint)
 		}
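Note the shape of the instrumentation in that parallel branch: each goroutine forks a private timer, records into it with no locking at all, and the parent absorbs the whole fork with a single mutex-guarded Join once the goroutine finishes. A rough standalone sketch of that fork/join pattern (the names here are illustrative, not esbuild's):

package main

import (
	"fmt"
	"sync"
)

// recorder mimics the fork/join usage above: each fork is private to
// one goroutine, so only join needs the parent's mutex.
type recorder struct {
	mutex sync.Mutex
	data  []string
}

func (r *recorder) fork() *recorder { return &recorder{} }

func (r *recorder) join(other *recorder) {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	r.data = append(r.data, other.data...)
}

func main() {
	parent := &recorder{}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			forked := parent.fork() // no locking needed while recording
			forked.data = append(forked.data, fmt.Sprintf("Link entry point %d", i))
			parent.join(forked) // one lock acquisition per goroutine, at the end
		}(i)
	}
	wg.Wait()
	fmt.Println(len(parent.data), "events collected")
}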
@@ -1885,7 +1893,9 @@ func (b *Bundle) Compile(log logger.Log, options config.Options) ([]graph.Output
 	// Also generate the metadata file if necessary
 	var metafileJSON string
 	if options.NeedsMetafile {
+		timer.Begin("Generate metadata JSON")
 		metafileJSON = b.generateMetadataJSON(outputFiles, allReachableFiles, options.ASCIIOnly)
+		timer.End("Generate metadata JSON")
 	}

 	if !options.WriteToStdout {
@@ -1940,10 +1950,6 @@ func (b *Bundle) Compile(log logger.Log, options config.Options) ([]graph.Output
 		outputFiles = outputFiles[:end]
 	}

-	if log.Level <= logger.LevelVerbose {
-		log.AddVerbose(nil, logger.Loc{}, fmt.Sprintf("Ended the compile phase (%dms)", time.Since(start).Milliseconds()))
-	}
-
 	return outputFiles, metafileJSON
 }
internal/bundler/bundler_test.go

@@ -98,7 +98,7 @@ func (s *suite) expectBundled(t *testing.T, args bundled) {
 	for _, path := range args.entryPaths {
 		entryPoints = append(entryPoints, EntryPoint{InputPath: path})
 	}
-	bundle := ScanBundle(log, fs, resolver, caches, entryPoints, args.options)
+	bundle := ScanBundle(log, fs, resolver, caches, entryPoints, args.options, nil)
 	msgs := log.Done()
 	assertLog(t, msgs, args.expectedScanLog)
@@ -109,7 +109,7 @@ func (s *suite) expectBundled(t *testing.T, args bundled) {
 	log = logger.NewDeferLog()
 	args.options.OmitRuntimeForTests = true
-	results, _ := bundle.Compile(log, args.options)
+	results, _ := bundle.Compile(log, args.options, nil)
 	msgs = log.Done()
 	assertLog(t, msgs, args.expectedCompileLog)
internal/linker/linker.go

@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"hash"
 	"math/rand"
+	"path"
 	"sort"
 	"strings"
 	"sync"
@@ -33,6 +34,7 @@ import (

 type linkerContext struct {
 	options *config.Options
+	timer   *helpers.Timer
 	log     logger.Log
 	fs      fs.FS
 	res     resolver.Resolver
@@ -170,8 +172,9 @@ func wrappedLog(log logger.Log) logger.Log {
 	return log
 }

-func newLinkerContext(
+func link(
 	options *config.Options,
+	timer *helpers.Timer,
 	log logger.Log,
 	fs fs.FS,
 	res resolver.Resolver,
@@ -179,22 +182,28 @@ func newLinkerContext(
 	entryPoints []graph.EntryPoint,
 	reachableFiles []uint32,
 	dataForSourceMaps func() []dataForSourceMap,
-) linkerContext {
+) []graph.OutputFile {
+	timer.Begin("Link")
+	defer timer.End("Link")

 	log = wrappedLog(log)

+	timer.Begin("Clone linker graph")
 	c := linkerContext{
 		options:           options,
+		timer:             timer,
 		log:               log,
 		fs:                fs,
 		res:               res,
 		dataForSourceMaps: dataForSourceMaps,
-		graph: graph.MakeLinkerGraph(
+		graph: graph.CloneLinkerGraph(
 			inputFiles,
 			reachableFiles,
 			entryPoints,
 			options.CodeSplitting,
 		),
 	}
+	timer.End("Clone linker graph")

 	for _, entryPoint := range entryPoints {
 		if repr, ok := c.graph.Files[entryPoint.SourceIndex].InputFile.Repr.(*graph.JSRepr); ok {
@@ -225,24 +234,6 @@ func newLinkerContext(
 		c.unboundModuleRef = js_ast.InvalidRef
 	}

-	return c
-}
-
-func (c *linkerContext) generateUniqueKeyPrefix() bool {
-	var data [12]byte
-	rand.Seed(time.Now().UnixNano())
-	if _, err := rand.Read(data[:]); err != nil {
-		c.log.AddError(nil, logger.Loc{}, fmt.Sprintf("Failed to read from randomness source: %s", err.Error()))
-		return false
-	}
-
-	// This is 16 bytes and shouldn't generate escape characters when put into strings
-	c.uniqueKeyPrefix = base64.URLEncoding.EncodeToString(data[:])
-	c.uniqueKeyPrefixBytes = []byte(c.uniqueKeyPrefix)
-	return true
-}
-
-func (c *linkerContext) link() []graph.OutputFile {
 	if !c.generateUniqueKeyPrefix() {
 		return nil
 	}
@@ -271,6 +262,20 @@ func (c *linkerContext) link() []graph.OutputFile {
 	return c.generateChunksInParallel(chunks)
 }

+func (c *linkerContext) generateUniqueKeyPrefix() bool {
+	var data [12]byte
+	rand.Seed(time.Now().UnixNano())
+	if _, err := rand.Read(data[:]); err != nil {
+		c.log.AddError(nil, logger.Loc{}, fmt.Sprintf("Failed to read from randomness source: %s", err.Error()))
+		return false
+	}
+
+	// This is 16 bytes and shouldn't generate escape characters when put into strings
+	c.uniqueKeyPrefix = base64.URLEncoding.EncodeToString(data[:])
+	c.uniqueKeyPrefixBytes = []byte(c.uniqueKeyPrefix)
+	return true
+}
+
 // Currently the automatic chunk generation algorithm should by construction
 // never generate chunks that import each other since files are allocated to
 // chunks based on which entry points they are reachable from.
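generateUniqueKeyPrefix is only moved here, not modified. Its comment is easy to verify: 12 random bytes are a multiple of 3, so base64 yields exactly 16 characters with no '=' padding, and base64.URLEncoding's alphabet contains nothing that needs escaping inside a string literal. A quick standalone check:

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// 12 bytes = 96 bits = 16 base64 digits; since 12 is divisible
	// by 3 there is no '=' padding to worry about either.
	data := make([]byte, 12)
	s := base64.URLEncoding.EncodeToString(data)
	fmt.Println(len(s), s) // prints: 16 AAAAAAAAAAAAAAAA
}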
@@ -309,6 +314,9 @@ func (c *linkerContext) enforceNoCyclicChunkImports(chunks []chunkInfo) {
 }

 func (c *linkerContext) generateChunksInParallel(chunks []chunkInfo) []graph.OutputFile {
+	c.timer.Begin("Generate chunks")
+	defer c.timer.End("Generate chunks")
+
 	// Generate each chunk on a separate goroutine
 	generateWaitGroup := sync.WaitGroup{}
 	generateWaitGroup.Add(len(chunks))
@@ -349,6 +357,7 @@ func (c *linkerContext) generateChunksInParallel(chunks []chunkInfo) []graph.Out
 	}

 	// Generate the final output files by joining file pieces together
+	c.timer.Begin("Generate final output files")
 	var resultsWaitGroup sync.WaitGroup
 	results := make([][]graph.OutputFile, len(chunks))
 	resultsWaitGroup.Add(len(chunks))
@@ -428,6 +437,7 @@ func (c *linkerContext) generateChunksInParallel(chunks []chunkInfo) []graph.Out
 		}(chunkIndex, chunk)
 	}
 	resultsWaitGroup.Wait()
+	c.timer.End("Generate final output files")

 	// Merge the output files from the different goroutines together in order
 	outputFilesLen := 0
@@ -612,6 +622,9 @@ func (c *linkerContext) pathRelativeToOutbase(
 }

 func (c *linkerContext) computeCrossChunkDependencies(chunks []chunkInfo) {
+	c.timer.Begin("Compute cross-chunk dependencies")
+	defer c.timer.End("Compute cross-chunk dependencies")
+
 	jsChunks := 0
 	for _, chunk := range chunks {
 		if _, ok := chunk.chunkRepr.(*chunkReprJS); ok {
@@ -964,6 +977,9 @@ func (c *linkerContext) sortedCrossChunkExportItems(exportRefs map[js_ast.Ref]bo
 }

 func (c *linkerContext) scanImportsAndExports() {
+	c.timer.Begin("Scan imports and exports")
+	defer c.timer.End("Scan imports and exports")
+
 	// Step 1: Figure out what modules must be CommonJS
 	for _, sourceIndex := range c.graph.ReachableFiles {
 		file := &c.graph.Files[sourceIndex]
@@ -2331,17 +2347,21 @@ func (c *linkerContext) advanceImportTracker(tracker importTracker) (importTrack
 func (c *linkerContext) treeShakingAndCodeSplitting() {
 	// Tree shaking: Each entry point marks all files reachable from itself
+	c.timer.Begin("Tree shaking")
 	for _, entryPoint := range c.graph.EntryPoints() {
 		c.markFileLiveForTreeShaking(entryPoint.SourceIndex)
 	}
+	c.timer.End("Tree shaking")

 	// Code splitting: Determine which entry points can reach which files. This
 	// has to happen after tree shaking because there is an implicit dependency
 	// between live parts within the same file. All liveness has to be computed
 	// first before determining which entry points can reach which files.
+	c.timer.Begin("Code splitting")
 	for i, entryPoint := range c.graph.EntryPoints() {
 		c.markFileReachableForCodeSplitting(entryPoint.SourceIndex, uint(i), 0)
 	}
+	c.timer.End("Code splitting")
 }

 func (c *linkerContext) markFileReachableForCodeSplitting(sourceIndex uint32, entryPointBit uint, distanceFromEntryPoint uint32) {
@@ -2529,6 +2549,9 @@ func sanitizeFilePathForVirtualModulePath(path string) string {
 }

 func (c *linkerContext) computeChunks() []chunkInfo {
+	c.timer.Begin("Compute chunks")
+	defer c.timer.End("Compute chunks")
+
 	jsChunks := make(map[string]chunkInfo)
 	cssChunks := make(map[string]chunkInfo)
@@ -3753,8 +3776,17 @@ func (c *linkerContext) generateEntryPointTailJS(
 	return
 }

-func (c *linkerContext) renameSymbolsInChunk(chunk *chunkInfo, filesInOrder []uint32) renamer.Renamer {
+func (c *linkerContext) renameSymbolsInChunk(chunk *chunkInfo, filesInOrder []uint32, timer *helpers.Timer) renamer.Renamer {
+	if c.options.MinifyIdentifiers {
+		timer.Begin("Minify symbols")
+		defer timer.End("Minify symbols")
+	} else {
+		timer.Begin("Rename symbols")
+		defer timer.End("Rename symbols")
+	}
+
 	// Determine the reserved names (e.g. can't generate the name "if")
+	timer.Begin("Compute reserved names")
 	moduleScopes := make([]*js_ast.Scope, len(filesInOrder))
 	for i, sourceIndex := range filesInOrder {
 		moduleScopes[i] = c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr).AST.ModuleScope
@@ -3766,6 +3798,7 @@ func (c *linkerContext) renameSymbolsInChunk(chunk *chunkInfo, filesInOrder []ui
 		reservedNames["require"] = 1
 		reservedNames["Promise"] = 1
 	}
+	timer.End("Compute reserved names")

 	// Minification uses frequency analysis to give shorter names to more frequent symbols
 	if c.options.MinifyIdentifiers {
@@ -3777,6 +3810,7 @@ func (c *linkerContext) renameSymbolsInChunk(chunk *chunkInfo, filesInOrder []ui
 		r := renamer.NewMinifyRenamer(c.graph.Symbols, firstTopLevelSlots, reservedNames)

 		// Accumulate symbol usage counts into their slots
+		timer.Begin("Accumulate symbol counts")
 		freq := js_ast.CharFreq{}
 		for _, sourceIndex := range filesInOrder {
 			repr := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr)
@@ -3805,6 +3839,7 @@ func (c *linkerContext) renameSymbolsInChunk(chunk *chunkInfo, filesInOrder []ui
 				}
 			}
 		}
+		timer.End("Accumulate symbol counts")

 		// Add all of the character frequency histograms for all files in this
 		// chunk together, then use it to compute the character sequence used to
@@ -3813,7 +3848,9 @@ func (c *linkerContext) renameSymbolsInChunk(chunk *chunkInfo, filesInOrder []ui
 		// it's a very small win, we still do it because it's simple to do and very
 		// cheap to compute.
 		minifier := freq.Compile()
+		timer.Begin("Assign names by frequency")
 		r.AssignNamesByFrequency(&minifier)
+		timer.End("Assign names by frequency")
 		return r
 	}
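The frequency analysis being timed here gives the characters that appear most often in the chunk the shortest minified names, so the most frequently used symbols cost the fewest output bytes. A toy standalone sketch of that idea (not esbuild's actual renamer):

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Count character frequencies in a sample of output code, then
	// reorder the name alphabet so the hottest characters come first.
	// Single-letter names are handed out from the front of this alphabet.
	sample := []byte("foo.bar();foo.bar();foo.baz();")
	counts := map[byte]int{}
	for _, c := range sample {
		counts[c]++
	}
	alphabet := []byte("abcdefghijklmnopqrstuvwxyz")
	sort.SliceStable(alphabet, func(i, j int) bool {
		return counts[alphabet[i]] > counts[alphabet[j]]
	})
	fmt.Printf("most frequent name characters: %q\n", alphabet[:5])
}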
@@ -3822,6 +3859,7 @@ func (c *linkerContext) renameSymbolsInChunk(chunk *chunkInfo, filesInOrder []ui
 	nestedScopes := make(map[uint32][]*js_ast.Scope)

 	// Make sure imports get a chance to be renamed
+	timer.Begin("Add top-level symbols")
 	var sorted renamer.StableRefArray
 	for _, imports := range chunk.chunkRepr.(*chunkReprJS).importsFromOtherChunks {
 		for _, item := range imports {
@@ -3931,23 +3969,35 @@ func (c *linkerContext) renameSymbolsInChunk(chunk *chunkInfo, filesInOrder []ui
 		nestedScopes[sourceIndex] = scopes
 	}
+	timer.End("Add top-level symbols")

 	// Recursively rename symbols in child scopes now that all top-level
 	// symbols have been renamed. This is done in parallel because the symbols
 	// inside nested scopes are independent and can't conflict.
+	timer.Begin("Assign names by scope")
 	r.AssignNamesByScope(nestedScopes)
+	timer.End("Assign names by scope")
 	return r
 }

 func (c *linkerContext) generateChunkJS(chunks []chunkInfo, chunkIndex int, chunkWaitGroup *sync.WaitGroup) {
 	chunk := &chunks[chunkIndex]

+	timer := c.timer.Fork()
+	if timer != nil {
+		timeName := fmt.Sprintf("Generate chunk %q", path.Clean(config.TemplateToString(chunk.finalTemplate)))
+		timer.Begin(timeName)
+		defer c.timer.Join(timer)
+		defer timer.End(timeName)
+	}
+
 	chunkRepr := chunk.chunkRepr.(*chunkReprJS)
 	compileResults := make([]compileResultJS, 0, len(chunk.partsInChunkInOrder))
 	runtimeMembers := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr).AST.ModuleScope.Members
 	commonJSRef := js_ast.FollowSymbols(c.graph.Symbols, runtimeMembers["__commonJS"].Ref)
 	esmRef := js_ast.FollowSymbols(c.graph.Symbols, runtimeMembers["__esm"].Ref)
 	toModuleRef := js_ast.FollowSymbols(c.graph.Symbols, runtimeMembers["__toModule"].Ref)
-	r := c.renameSymbolsInChunk(chunk, chunk.filesInChunkInOrder)
+	r := c.renameSymbolsInChunk(chunk, chunk.filesInChunkInOrder, timer)
 	dataForSourceMaps := c.dataForSourceMaps()

 	// Note: This contains placeholders instead of what the placeholders are
@@ -3960,6 +4010,7 @@ func (c *linkerContext) generateChunkJS(chunks []chunkInfo, chunkIndex int, chun
 	chunkAbsDir := c.fs.Dir(c.fs.Join(c.options.AbsOutputDir, config.TemplateToString(chunk.finalTemplate)))

 	// Generate JavaScript for each file in parallel
+	timer.Begin("Print JavaScript files")
 	waitGroup := sync.WaitGroup{}
 	for _, partRange := range chunk.partsInChunkInOrder {
 		// Skip the runtime in test output
@@ -4027,6 +4078,8 @@ func (c *linkerContext) generateChunkJS(chunks []chunkInfo, chunkIndex int, chun
 	}

 	waitGroup.Wait()
+	timer.End("Print JavaScript files")
+	timer.Begin("Join JavaScript files")

 	j := helpers.Joiner{}
 	prevOffset := sourcemap.LineColumnOffset{}
@@ -4276,12 +4329,15 @@ func (c *linkerContext) generateChunkJS(chunks []chunkInfo, chunkIndex int, chun
 		j.AddString("\n")
 	}

-	if c.options.SourceMap != config.SourceMapNone {
-		chunk.outputSourceMap = c.generateSourceMapForChunk(compileResultsForSourceMap, chunkAbsDir, dataForSourceMaps)
-	}
-
 	// The JavaScript contents are done now that the source map comment is in
 	jsContents := j.Done()
+	timer.End("Join JavaScript files")
+
+	if c.options.SourceMap != config.SourceMapNone {
+		timer.Begin("Generate source map")
+		chunk.outputSourceMap = c.generateSourceMapForChunk(compileResultsForSourceMap, chunkAbsDir, dataForSourceMaps)
+		timer.End("Generate source map")
+	}

 	// End the metadata lazily. The final output size is not known until the
 	// final import paths are substituted into the output pieces generated below.
@@ -4364,9 +4420,19 @@ type externalImportCSS struct {

 func (c *linkerContext) generateChunkCSS(chunks []chunkInfo, chunkIndex int, chunkWaitGroup *sync.WaitGroup) {
 	chunk := &chunks[chunkIndex]

+	timer := c.timer.Fork()
+	if timer != nil {
+		timeName := fmt.Sprintf("Generate chunk %q", path.Clean(config.TemplateToString(chunk.finalTemplate)))
+		timer.Begin(timeName)
+		defer c.timer.Join(timer)
+		defer timer.End(timeName)
+	}
+
 	compileResults := make([]compileResultCSS, 0, len(chunk.filesInChunkInOrder))

 	// Generate CSS for each file in parallel
+	timer.Begin("Print CSS files")
 	waitGroup := sync.WaitGroup{}
 	for _, sourceIndex := range chunk.filesInChunkInOrder {
 		// Create a goroutine for this file
@@ -4407,6 +4473,8 @@ func (c *linkerContext) generateChunkCSS(chunks []chunkInfo, chunkIndex int, chu
 	}

 	waitGroup.Wait()
+	timer.End("Print CSS files")
+	timer.Begin("Join CSS files")
 	j := helpers.Joiner{}
 	newlineBeforeComment := false
@@ -4522,6 +4590,7 @@ func (c *linkerContext) generateChunkCSS(chunks []chunkInfo, chunkIndex int, chu
 	// The CSS contents are done now that the source map comment is in
 	cssContents := j.Done()
+	timer.End("Join CSS files")

 	// End the metadata lazily. The final output size is not known until the
 	// final import paths are substituted into the output pieces generated below.
internal/graph/graph.go

@@ -101,7 +101,7 @@ type LinkerGraph struct {
 	StableSourceIndices []uint32
 }

-func MakeLinkerGraph(
+func CloneLinkerGraph(
 	inputFiles []InputFile,
 	reachableFiles []uint32,
 	originalEntryPoints []EntryPoint,
internal/helpers/timer.go (new file, 97 lines)
@@ -0,0 +1,97 @@
+package helpers
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+type Timer struct {
+	mutex sync.Mutex
+	data  []timerData
+}
+
+type timerData struct {
+	name  string
+	time  time.Time
+	isEnd bool
+}
+
+func (t *Timer) Begin(name string) {
+	if t != nil {
+		t.data = append(t.data, timerData{
+			name: name,
+			time: time.Now(),
+		})
+	}
+}
+
+func (t *Timer) End(name string) {
+	if t != nil {
+		t.data = append(t.data, timerData{
+			name:  name,
+			time:  time.Now(),
+			isEnd: true,
+		})
+	}
+}
+
+func (t *Timer) Fork() *Timer {
+	if t != nil {
+		return &Timer{}
+	}
+	return nil
+}
+
+func (t *Timer) Join(other *Timer) {
+	if t != nil && other != nil {
+		t.mutex.Lock()
+		defer t.mutex.Unlock()
+		t.data = append(t.data, other.data...)
+	}
+}
+
+func (t *Timer) Log(log logger.Log) {
+	if t == nil {
+		return
+	}
+
+	type pair struct {
+		timerData
+		index uint32
+	}
+
+	var notes []logger.MsgData
+	var stack []pair
+	indent := 0
+
+	for _, item := range t.data {
+		if !item.isEnd {
+			top := pair{timerData: item, index: uint32(len(notes))}
+			notes = append(notes, logger.MsgData{})
+			stack = append(stack, top)
+			indent++
+		} else {
+			indent--
+			last := len(stack) - 1
+			top := stack[last]
+			stack = stack[:last]
+			if item.name != top.name {
+				panic("Internal error")
+			}
+			notes[top.index].Text = fmt.Sprintf("%s%s: %dms",
+				strings.Repeat("  ", indent),
+				top.name,
+				item.time.Sub(top.time).Milliseconds())
+		}
+	}
+
+	log.AddMsg(logger.Msg{
+		Kind:  logger.Info,
+		Data:  logger.MsgData{Text: "Timing information (times may not nest hierarchically due to parallelism)"},
+		Notes: notes,
+	})
+}
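Timer.Log deserves a note: Begin and End just append flat events, and Log reconstructs the hierarchy afterwards by pairing each end event with the top of a stack of open begin events. Because a begin reserves its output slot immediately, spans print in start order while durations are filled in when the matching end arrives. A standalone sketch of just that pairing step (simplified to strings; the real code builds logger.MsgData notes):

package main

import (
	"fmt"
	"strings"
)

type event struct {
	name  string
	ms    int64 // timestamp in milliseconds, standing in for time.Time
	isEnd bool
}

// render pairs each end event with the most recent unmatched begin,
// like Timer.Log above: begins reserve an output slot and push onto a
// stack, ends pop and fill in the indented duration.
func render(events []event) []string {
	type open struct {
		event
		index int
	}
	var out []string
	var stack []open
	indent := 0
	for _, e := range events {
		if !e.isEnd {
			stack = append(stack, open{event: e, index: len(out)})
			out = append(out, "")
			indent++
		} else {
			indent--
			top := stack[len(stack)-1]
			stack = stack[:len(stack)-1]
			out[top.index] = fmt.Sprintf("%s%s: %dms",
				strings.Repeat("  ", indent), top.name, e.ms-top.ms)
		}
	}
	return out
}

func main() {
	lines := render([]event{
		{name: "Scan phase", ms: 0},
		{name: "Add entry points", ms: 1},
		{name: "Add entry points", ms: 3, isEnd: true},
		{name: "Scan phase", ms: 12, isEnd: true},
	})
	for _, line := range lines {
		fmt.Println(line) // "Scan phase: 12ms" then "  Add entry points: 2ms"
	}
}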
internal/logger/logger.go

@@ -49,6 +49,7 @@ type MsgKind uint8
 const (
 	Error MsgKind = iota
 	Warning
+	Info
 	Note
 	Debug
 	Verbose
@@ -60,6 +61,8 @@ func (kind MsgKind) String() string {
 		return "error"
 	case Warning:
 		return "warning"
+	case Info:
+		return "info"
 	case Note:
 		return "note"
 	case Debug:
@@ -419,6 +422,11 @@ func NewStderrLog(options OutputOptions) Log {
 				writeStringWithColor(os.Stderr, msg.String(options, terminalInfo))
 			}

+		case Info:
+			if options.LogLevel <= LevelInfo {
+				writeStringWithColor(os.Stderr, msg.String(options, terminalInfo))
+			}
+
 		case Error:
 			hasErrors = true
 			if options.LogLevel <= LevelError {
@@ -913,11 +921,14 @@ func msgString(includeSource bool, terminalInfo TerminalInfo, kind MsgKind, data

 	switch kind {
 	case Verbose:
-		kindColor = colors.Green
+		kindColor = colors.Cyan

 	case Debug:
 		kindColor = colors.Blue

+	case Info:
+		kindColor = colors.Green
+
 	case Error:
 		kindColor = colors.Red
pkg/api/api_impl.go

@@ -13,6 +13,7 @@ import (
 	"sync/atomic"
 	"time"

+	"github.com/evanw/esbuild/internal/api_helpers"
 	"github.com/evanw/esbuild/internal/ast"
 	"github.com/evanw/esbuild/internal/bundler"
 	"github.com/evanw/esbuild/internal/cache"
@@ -20,6 +21,7 @@ import (
 	"github.com/evanw/esbuild/internal/config"
 	"github.com/evanw/esbuild/internal/fs"
 	"github.com/evanw/esbuild/internal/graph"
+	"github.com/evanw/esbuild/internal/helpers"
 	"github.com/evanw/esbuild/internal/js_ast"
 	"github.com/evanw/esbuild/internal/js_lexer"
 	"github.com/evanw/esbuild/internal/js_parser"
@@ -910,14 +912,19 @@ func rebuildImpl(
 	// Stop now if there were errors
 	resolver := resolver.NewResolver(realFS, log, caches, options)
 	if !log.HasErrors() {
+		var timer *helpers.Timer
+		if api_helpers.UseTimer {
+			timer = &helpers.Timer{}
+		}
+
 		// Scan over the bundle
-		bundle := bundler.ScanBundle(log, realFS, resolver, caches, entryPoints, options)
+		bundle := bundler.ScanBundle(log, realFS, resolver, caches, entryPoints, options, timer)
 		watchData = realFS.WatchData()

 		// Stop now if there were errors
 		if !log.HasErrors() {
 			// Compile the bundle
-			results, metafile := bundle.Compile(log, options)
+			results, metafile := bundle.Compile(log, options, timer)
 			metafileJSON = metafile

 			// Stop now if there were errors
@@ -926,8 +933,9 @@ func rebuildImpl(
 			log.AlmostDone()

 			if buildOpts.Write {
-				// Special-case writing to stdout
+				timer.Begin("Write output files")
 				if options.WriteToStdout {
+					// Special-case writing to stdout
 					if len(results) != 1 {
 						log.AddError(nil, logger.Loc{}, fmt.Sprintf(
 							"Internal error: did not expect to generate %d files when writing to stdout", len(results)))
@@ -961,6 +969,7 @@ func rebuildImpl(
 					}
 					waitGroup.Wait()
 				}
+				timer.End("Write output files")
 			}

 			// Return the results
@@ -976,6 +985,8 @@ func rebuildImpl(
 			}
 		}
 	}
+
+	timer.Log(log)
 }

 // End the log now, which may print a message
@@ -1281,16 +1292,23 @@ func transformImpl(input string, transformOpts TransformOptions) TransformResult
 	// Stop now if there were errors
 	if !log.HasErrors() {
+		var timer *helpers.Timer
+		if api_helpers.UseTimer {
+			timer = &helpers.Timer{}
+		}
+
 		// Scan over the bundle
 		mockFS := fs.MockFS(make(map[string]string))
 		resolver := resolver.NewResolver(mockFS, log, caches, options)
-		bundle := bundler.ScanBundle(log, mockFS, resolver, caches, nil, options)
+		bundle := bundler.ScanBundle(log, mockFS, resolver, caches, nil, options, timer)

 		// Stop now if there were errors
 		if !log.HasErrors() {
 			// Compile the bundle
-			results, _ = bundle.Compile(log, options)
+			results, _ = bundle.Compile(log, options, timer)
 		}

+		timer.Log(log)
 	}