  1. // Copyright 2017 Google Inc. All rights reserved.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package finder
  15. import (
  16. "bufio"
  17. "bytes"
  18. "encoding/json"
  19. "errors"
  20. "fmt"
  21. "io"
  22. "os"
  23. "path/filepath"
  24. "runtime"
  25. "sort"
  26. "strings"
  27. "sync"
  28. "sync/atomic"
  29. "time"
  30. "android/soong/finder/fs"
  31. )
  32. // This file provides a Finder struct that can quickly search for files satisfying
  33. // certain criteria.
  34. // This Finder gets its speed partially from parallelism and partially from caching.
  35. // If a Stat call returns the same result as last time, then it means Finder
  36. // can skip the ReadDir call for that dir.
  37. // The primary data structure used by the finder is the field Finder.nodes ,
  38. // which is a tree of nodes of type *pathMap .
  39. // Each node represents a directory on disk, along with its stats, subdirectories,
  40. // and contained files.
  41. // The common use case for the Finder is that the caller creates a Finder and gives
  42. // it the same query that was given to it in the previous execution.
  43. // In this situation, the major events that take place are:
  44. // 1. The Finder begins to load its db
  45. // 2. The Finder begins to stat the directories mentioned in its db (using multiple threads)
  46. // Calling Stat on each of these directories is generally a large fraction of the total time
  47. // 3. The Finder begins to construct a separate tree of nodes in each of its threads
  48. // 4. The Finder merges the individual node trees into the main node tree
  49. // 5. The Finder may call ReadDir a few times if there are a few directories that are out-of-date
  50. // These ReadDir calls might prompt additional Stat calls, etc
  51. // 6. The Finder waits for all loading to complete
  52. // 7. The Finder searches the cache for files matching the user's query (using multiple threads)
  53. // These are the invariants regarding concurrency:
  54. // 1. The public methods of Finder are threadsafe.
  55. // The public methods are only performance-optimized for one caller at a time, however.
  56. // For the moment, multiple concurrent callers shouldn't expect any better performance than
  57. // multiple serial callers.
  58. // 2. While building the node tree, only one thread may ever access the <children> collection of a
  59. // *pathMap at once.
  60. // a) The thread that accesses the <children> collection is the thread that discovers the
  61. // children (by reading them from the cache or by having received a response to ReadDir).
  62. // 1) Consequently, the thread that discovers the children also spawns requests to stat
  63. // subdirs.
  64. // b) Consequently, while building the node tree, no thread may do a lookup of its
  65. // *pathMap via filepath because another thread may be adding children to the
  66. // <children> collection of an ancestor node. Additionally, in rare cases, another thread
  67. // may be removing children from an ancestor node if the children were only discovered to
  68. // be irrelevant after calling ReadDir (which happens if a prune-file was just added).
  69. // 3. No query will begin to be serviced until all loading (both reading the db
  70. // and scanning the filesystem) is complete.
  71. // Tests indicate that it only takes about 10% as long to search the in-memory cache as to
  72. // generate it, making this not a huge loss in performance.
  73. // 4. The parsing of the db and the initial setup of the pathMap tree must complete before
  74. // beginning to call listDirSync (because listDirSync can create new entries in the pathMap)
  75. // see cmd/finder.go or finder_test.go for usage examples
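//
// As a rough usage sketch (a hedged illustration: the paths and CacheParams values below are
// made up, and fs.OsFs is assumed to be the real-filesystem implementation that callers such
// as cmd/finder.go supply; *log.Logger satisfies the Logger interface defined below):
//
//	logger := log.New(os.Stderr, "", log.LstdFlags)
//	finder, err := New(
//		CacheParams{
//			WorkingDirectory: "/path/to/workspace", // illustrative path
//			RootDirs:         []string{"."},
//			ExcludeDirs:      []string{".git"},
//			PruneFiles:       []string{".find-ignore"}, // illustrative prune-file name
//			IncludeFiles:     []string{"Android.bp"},
//		},
//		fs.OsFs,
//		logger,
//		"/path/to/workspace/.finder-db") // illustrative db path
//	if err != nil {
//		// handle the error (for example, a RootDir that doesn't exist)
//	} else {
//		matches := finder.FindNamed("Android.bp") // every cached file named Android.bp
//		finder.Shutdown()                         // wait for the db dump to complete
//	}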
  76. // Update versionString whenever making a backwards-incompatible change to the cache file format
  77. const versionString = "Android finder version 1"
  78. // a CacheParams specifies which files and directories the user wants scanned and
  79. // potentially added to the cache
  80. type CacheParams struct {
  81. // WorkingDirectory is used as a base for any relative file paths given to the Finder
  82. WorkingDirectory string
  83. // RootDirs are the root directories used to initiate the search
  84. RootDirs []string
  85. // Whether symlinks are followed. If set, symlinks back to their own parent
  86. // directory don't work.
  87. FollowSymlinks bool
  88. // ExcludeDirs are directory names that if encountered are removed from the search
  89. ExcludeDirs []string
  90. // PruneFiles are file names that if encountered prune their entire directory
  91. // (including siblings)
  92. PruneFiles []string
  93. // IncludeFiles are file names to include as matches
  94. IncludeFiles []string
  95. // IncludeSuffixes are filename suffixes to include as matches.
  96. IncludeSuffixes []string
  97. }
  98. // a cacheConfig stores the inputs that determine what should be included in the cache
  99. type cacheConfig struct {
  100. CacheParams
  101. // FilesystemView is a unique identifier telling which parts of which file systems
  102. // are readable by the Finder. In practice its value is essentially username@hostname.
  103. // FilesystemView is set to ensure that a cache file copied to another host or
  104. // found by another user doesn't inadvertently get reused.
  105. FilesystemView string
  106. }
  107. func (p *cacheConfig) Dump() ([]byte, error) {
  108. bytes, err := json.Marshal(p)
  109. return bytes, err
  110. }
  111. // a cacheMetadata stores version information about the cache
  112. type cacheMetadata struct {
  113. // The Version enables the Finder to determine whether it can even parse the file
  114. // If the version changes, the entire cache file must be regenerated
  115. Version string
  116. // The CacheParams enables the Finder to determine whether the parameters match
  117. // If the CacheParams change, the Finder can choose how much of the cache file to reuse
  118. // (although in practice, the Finder will probably choose to ignore the entire file anyway)
  119. Config cacheConfig
  120. }
  121. type Logger interface {
  122. Output(calldepth int, s string) error
  123. }
  124. // the Finder is the main struct that callers will want to use
  125. type Finder struct {
  126. // configuration
  127. DbPath string
  128. numDbLoadingThreads int
  129. numSearchingThreads int
  130. cacheMetadata cacheMetadata
  131. logger Logger
  132. filesystem fs.FileSystem
  133. // temporary state
  134. threadPool *threadPool
  135. mutex sync.Mutex
  136. fsErrs []fsErr
  137. errlock sync.Mutex
  138. shutdownWaitgroup sync.WaitGroup
  139. // non-temporary state
  140. modifiedFlag int32
  141. nodes pathMap
  142. }
  143. var defaultNumThreads = runtime.NumCPU() * 2
  144. // New creates a new Finder for use
  145. func New(cacheParams CacheParams, filesystem fs.FileSystem,
  146. logger Logger, dbPath string) (f *Finder, err error) {
  147. return newImpl(cacheParams, filesystem, logger, dbPath, defaultNumThreads)
  148. }
  149. // newImpl is like New but accepts more params
  150. func newImpl(cacheParams CacheParams, filesystem fs.FileSystem,
  151. logger Logger, dbPath string, numThreads int) (f *Finder, err error) {
  152. numDbLoadingThreads := numThreads
  153. numSearchingThreads := numThreads
  154. metadata := cacheMetadata{
  155. Version: versionString,
  156. Config: cacheConfig{
  157. CacheParams: cacheParams,
  158. FilesystemView: filesystem.ViewId(),
  159. },
  160. }
  161. f = &Finder{
  162. numDbLoadingThreads: numDbLoadingThreads,
  163. numSearchingThreads: numSearchingThreads,
  164. cacheMetadata: metadata,
  165. logger: logger,
  166. filesystem: filesystem,
  167. nodes: *newPathMap("/"),
  168. DbPath: dbPath,
  169. shutdownWaitgroup: sync.WaitGroup{},
  170. }
  171. f.loadFromFilesystem()
  172. // check for any filesystem errors
  173. err = f.getErr()
  174. if err != nil {
  175. return nil, err
  176. }
  177. // confirm that every path mentioned in the CacheConfig exists
  178. for _, path := range cacheParams.RootDirs {
  179. if !filepath.IsAbs(path) {
  180. path = filepath.Join(f.cacheMetadata.Config.WorkingDirectory, path)
  181. }
  182. node := f.nodes.GetNode(filepath.Clean(path), false)
  183. if node == nil || node.ModTime == 0 {
  184. return nil, fmt.Errorf("path %v was specified to be included in the cache but does not exist\n", path)
  185. }
  186. }
  187. return f, nil
  188. }
  189. // FindAll searches for every cached file
  190. func (f *Finder) FindAll() []string {
  191. return f.FindAt("/")
  192. }
  193. // FindAt searches for every cached file under <rootDir>
  194. func (f *Finder) FindAt(rootDir string) []string {
  195. filter := func(entries DirEntries) (dirNames []string, fileNames []string) {
  196. return entries.DirNames, entries.FileNames
  197. }
  198. return f.FindMatching(rootDir, filter)
  199. }
  200. // FindNamed searches for every cached file named <fileName>
  201. func (f *Finder) FindNamed(fileName string) []string {
  202. return f.FindNamedAt("/", fileName)
  203. }
  204. // FindNamedAt searches under <rootPath> for every file named <fileName>
  205. // The reason a caller might use FindNamedAt instead of FindNamed is if they want
  206. // to limit their search to a subset of the cache
  207. func (f *Finder) FindNamedAt(rootPath string, fileName string) []string {
  208. filter := func(entries DirEntries) (dirNames []string, fileNames []string) {
  209. matches := []string{}
  210. for _, foundName := range entries.FileNames {
  211. if foundName == fileName {
  212. matches = append(matches, foundName)
  213. }
  214. }
  215. return entries.DirNames, matches
  216. }
  217. return f.FindMatching(rootPath, filter)
  218. }
  219. // FindFirstNamed searches for every file named <fileName>
  220. // Whenever it finds a match, it stops searching subdirectories
  221. func (f *Finder) FindFirstNamed(fileName string) []string {
  222. return f.FindFirstNamedAt("/", fileName)
  223. }
  224. // FindFirstNamedAt searches under <rootPath> for every file named <fileName>
  225. // Whenever it finds a match, it stops searching subdirectories
  226. func (f *Finder) FindFirstNamedAt(rootPath string, fileName string) []string {
  227. filter := func(entries DirEntries) (dirNames []string, fileNames []string) {
  228. matches := []string{}
  229. for _, foundName := range entries.FileNames {
  230. if foundName == fileName {
  231. matches = append(matches, foundName)
  232. }
  233. }
  234. if len(matches) > 0 {
  235. return []string{}, matches
  236. }
  237. return entries.DirNames, matches
  238. }
  239. return f.FindMatching(rootPath, filter)
  240. }
  241. // FindMatching is the most general exported function for searching for files in the cache
  242. // The WalkFunc will be invoked repeatedly and is expected to return the subset of
  243. // directory names to descend into and the subset of file names to treat as matches.
  244. // WalkFunc will be invoked potentially many times in parallel, and must be threadsafe.
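//
// As an illustrative sketch (the names "out" and "TEST_MAPPING" are example values, not taken
// from this file), a filter that collects every file named TEST_MAPPING while skipping
// directories named out could look like:
//
//	filter := func(entries DirEntries) (dirNames []string, fileNames []string) {
//		dirs := make([]string, 0, len(entries.DirNames))
//		for _, dir := range entries.DirNames {
//			if dir != "out" {
//				dirs = append(dirs, dir) // keep walking this subdirectory
//			}
//		}
//		files := []string{}
//		for _, file := range entries.FileNames {
//			if file == "TEST_MAPPING" {
//				files = append(files, file) // report this file as a match
//			}
//		}
//		return dirs, files
//	}
//	paths := finder.FindMatching("/", filter)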
  245. func (f *Finder) FindMatching(rootPath string, filter WalkFunc) []string {
  246. // set up some parameters
  247. scanStart := time.Now()
  248. var isRel bool
  249. workingDir := f.cacheMetadata.Config.WorkingDirectory
  250. isRel = !filepath.IsAbs(rootPath)
  251. if isRel {
  252. rootPath = filepath.Join(workingDir, rootPath)
  253. }
  254. rootPath = filepath.Clean(rootPath)
  255. // ensure nothing else is using the Finder
  256. f.verbosef("FindMatching waiting for finder to be idle\n")
  257. f.lock()
  258. defer f.unlock()
  259. node := f.nodes.GetNode(rootPath, false)
  260. if node == nil {
  261. f.verbosef("No data for path %v ; apparently not included in cache params: %v\n",
  262. rootPath, f.cacheMetadata.Config.CacheParams)
  263. // path is not found; don't do a search
  264. return []string{}
  265. }
  266. // search for matching files
  267. f.verbosef("Finder finding %v using cache\n", rootPath)
  268. results := f.findInCacheMultithreaded(node, filter, f.numSearchingThreads)
  269. // format and return results
  270. if isRel {
  271. for i := 0; i < len(results); i++ {
  272. results[i] = strings.Replace(results[i], workingDir+"/", "", 1)
  273. }
  274. }
  275. sort.Strings(results)
  276. f.verbosef("Found %v files under %v in %v using cache\n",
  277. len(results), rootPath, time.Since(scanStart))
  278. return results
  279. }
  280. // Shutdown declares that the finder is no longer needed and waits for its cleanup to complete
  281. // Currently, that only entails waiting for the database dump to complete.
  282. func (f *Finder) Shutdown() {
  283. f.WaitForDbDump()
  284. }
  285. // WaitForDbDump returns once the database has been written to f.DbPath.
  286. func (f *Finder) WaitForDbDump() {
  287. f.shutdownWaitgroup.Wait()
  288. }
  289. // End of public api
  290. func (f *Finder) goDumpDb() {
  291. if f.wasModified() {
  292. f.shutdownWaitgroup.Add(1)
  293. go func() {
  294. err := f.dumpDb()
  295. if err != nil {
  296. f.verbosef("%v\n", err)
  297. }
  298. f.shutdownWaitgroup.Done()
  299. }()
  300. } else {
  301. f.verbosef("Skipping dumping unmodified db\n")
  302. }
  303. }
  304. // joinCleanPaths is like filepath.Join but is faster because
  305. // joinCleanPaths doesn't have to support paths ending in "/" or containing ".."
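// For example, joinCleanPaths("/a/b", "c") returns "/a/b/c", joinCleanPaths("/", "a")
// returns "/a", and joinCleanPaths("", "a") returns "a".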
  306. func joinCleanPaths(base string, leaf string) string {
  307. if base == "" {
  308. return leaf
  309. }
  310. if base == "/" {
  311. return base + leaf
  312. }
  313. if leaf == "" {
  314. return base
  315. }
  316. return base + "/" + leaf
  317. }
  318. func (f *Finder) verbosef(format string, args ...interface{}) {
  319. f.logger.Output(2, fmt.Sprintf(format, args...))
  320. }
  321. // loadFromFilesystem populates the in-memory cache based on the contents of the filesystem
  322. func (f *Finder) loadFromFilesystem() {
  323. f.threadPool = newThreadPool(f.numDbLoadingThreads)
  324. err := f.startFromExternalCache()
  325. if err != nil {
  326. f.startWithoutExternalCache()
  327. }
  328. f.goDumpDb()
  329. f.threadPool = nil
  330. }
  331. func (f *Finder) startFind(path string) {
  332. if !filepath.IsAbs(path) {
  333. path = filepath.Join(f.cacheMetadata.Config.WorkingDirectory, path)
  334. }
  335. node := f.nodes.GetNode(path, true)
  336. f.statDirAsync(node)
  337. }
  338. func (f *Finder) lock() {
  339. f.mutex.Lock()
  340. }
  341. func (f *Finder) unlock() {
  342. f.mutex.Unlock()
  343. }
  344. // a statResponse is the relevant portion of the response from the filesystem to a Stat call
  345. type statResponse struct {
  346. ModTime int64
  347. Inode uint64
  348. Device uint64
  349. }
  350. // a pathAndStats stores a path and its stats
  351. type pathAndStats struct {
  352. statResponse
  353. Path string
  354. }
  355. // a dirFullInfo stores all of the relevant information we know about a directory
  356. type dirFullInfo struct {
  357. pathAndStats
  358. FileNames []string
  359. }
  360. // a PersistedDirInfo is the information about a dir that we save to our cache on disk
  361. type PersistedDirInfo struct {
  362. // These field names are short because they are repeated many times in the output json file
  363. P string // path
  364. T int64 // modification time
  365. I uint64 // inode number
  366. F []string // relevant filenames contained
  367. }
  368. // a PersistedDirs is the information that we persist for a group of dirs
  369. type PersistedDirs struct {
  370. // the device on which each directory is stored
  371. Device uint64
  372. // the common root path to which all contained dirs are relative
  373. Root string
  374. // the directories themselves
  375. Dirs []PersistedDirInfo
  376. }
  377. // a CacheEntry is the smallest unit that can be read and parsed from the cache (on disk) at a time
  378. type CacheEntry []PersistedDirs
  379. // a DirEntries lists the files and directories contained directly within a specific directory
  380. type DirEntries struct {
  381. Path string
  382. // elements of DirNames are just the dir names; they don't include any '/' character
  383. DirNames []string
  384. // elements of FileNames are just the file names; they don't include any '/' character
  385. FileNames []string
  386. }
  387. // a WalkFunc is the type that is passed into various Find functions for determining which
  388. // directories the caller wants walked. The WalkFunc is expected to decide which
  389. // directories to walk and which files to consider as matches to the original query.
  390. type WalkFunc func(DirEntries) (dirs []string, files []string)
  391. // a mapNode stores the relevant stats about a directory to be stored in a pathMap
  392. type mapNode struct {
  393. statResponse
  394. FileNames []string
  395. }
  396. // a pathMap implements the directory tree structure of nodes
  397. type pathMap struct {
  398. mapNode
  399. path string
  400. children map[string]*pathMap
  401. // number of descendent nodes, including self
  402. approximateNumDescendents int
  403. }
  404. func newPathMap(path string) *pathMap {
  405. result := &pathMap{path: path, children: make(map[string]*pathMap, 4),
  406. approximateNumDescendents: 1}
  407. return result
  408. }
  409. // GetNode returns the node at <path>
  410. func (m *pathMap) GetNode(path string, createIfNotFound bool) *pathMap {
  411. if len(path) > 0 && path[0] == '/' {
  412. path = path[1:]
  413. }
  414. node := m
  415. for {
  416. if path == "" {
  417. return node
  418. }
  419. index := strings.Index(path, "/")
  420. var firstComponent string
  421. if index >= 0 {
  422. firstComponent = path[:index]
  423. path = path[index+1:]
  424. } else {
  425. firstComponent = path
  426. path = ""
  427. }
  428. child, found := node.children[firstComponent]
  429. if !found {
  430. if createIfNotFound {
  431. child = node.newChild(firstComponent)
  432. } else {
  433. return nil
  434. }
  435. }
  436. node = child
  437. }
  438. }
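// For example, on a pathMap rooted at "/", GetNode("/a/b", true) creates any missing
// intermediate nodes and returns the node for "/a/b", while GetNode("/missing", false)
// returns nil if no such node exists.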
  439. func (m *pathMap) newChild(name string) (child *pathMap) {
  440. path := joinCleanPaths(m.path, name)
  441. newChild := newPathMap(path)
  442. m.children[name] = newChild
  443. return m.children[name]
  444. }
  445. func (m *pathMap) UpdateNumDescendents() int {
  446. count := 1
  447. for _, child := range m.children {
  448. count += child.approximateNumDescendents
  449. }
  450. m.approximateNumDescendents = count
  451. return count
  452. }
  453. func (m *pathMap) UpdateNumDescendentsRecursive() {
  454. for _, child := range m.children {
  455. child.UpdateNumDescendentsRecursive()
  456. }
  457. m.UpdateNumDescendents()
  458. }
  459. func (m *pathMap) MergeIn(other *pathMap) {
  460. for key, theirs := range other.children {
  461. ours, found := m.children[key]
  462. if found {
  463. ours.MergeIn(theirs)
  464. } else {
  465. m.children[key] = theirs
  466. }
  467. }
  468. if other.ModTime != 0 {
  469. m.mapNode = other.mapNode
  470. }
  471. m.UpdateNumDescendents()
  472. }
  473. func (m *pathMap) DumpAll() []dirFullInfo {
  474. results := []dirFullInfo{}
  475. m.dumpInto("", &results)
  476. return results
  477. }
  478. func (m *pathMap) dumpInto(path string, results *[]dirFullInfo) {
  479. *results = append(*results,
  480. dirFullInfo{
  481. pathAndStats{statResponse: m.statResponse, Path: path},
  482. m.FileNames},
  483. )
  484. for key, child := range m.children {
  485. childPath := joinCleanPaths(path, key)
  486. if len(childPath) == 0 || childPath[0] != '/' {
  487. childPath = "/" + childPath
  488. }
  489. child.dumpInto(childPath, results)
  490. }
  491. }
  492. // a semaphore can be locked by up to <capacity> callers at once
  493. type semaphore struct {
  494. pool chan bool
  495. }
  496. func newSemaphore(capacity int) *semaphore {
  497. return &semaphore{pool: make(chan bool, capacity)}
  498. }
  499. func (l *semaphore) Lock() {
  500. l.pool <- true
  501. }
  502. func (l *semaphore) Unlock() {
  503. <-l.pool
  504. }
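// Illustrative usage sketch (doThrottledWork is a hypothetical function, not part of this file):
//
//	sem := newSemaphore(4) // at most 4 concurrent holders
//	sem.Lock()             // blocks while 4 other callers already hold the semaphore
//	doThrottledWork()
//	sem.Unlock()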
  505. // A threadPool runs goroutines and supports throttling and waiting.
  506. // Without throttling, Go may exhaust the maximum number of various resources, such as
  507. // threads or file descriptors, and crash the program.
  508. type threadPool struct {
  509. receivedRequests sync.WaitGroup
  510. activeRequests semaphore
  511. }
  512. func newThreadPool(maxNumConcurrentThreads int) *threadPool {
  513. return &threadPool{
  514. receivedRequests: sync.WaitGroup{},
  515. activeRequests: *newSemaphore(maxNumConcurrentThreads),
  516. }
  517. }
  518. // Run requests to run the given function in its own goroutine
  519. func (p *threadPool) Run(function func()) {
  520. p.receivedRequests.Add(1)
  521. // If Run() was called from within a goroutine spawned by this threadPool,
  522. // then we may need to return from Run() before having capacity to actually
  523. // run <function>.
  524. //
  525. // It's possible that the body of <function> contains a statement (such as a syscall)
  526. // that will cause Go to pin it to a thread, or will contain a statement that uses
  527. // another resource that is in short supply (such as a file descriptor), so we can't
  528. // actually run <function> until we have capacity.
  529. //
  530. // However, the semaphore used for synchronization is implemented via a channel and
  531. // shouldn't require a new thread for each access.
  532. go func() {
  533. p.activeRequests.Lock()
  534. function()
  535. p.activeRequests.Unlock()
  536. p.receivedRequests.Done()
  537. }()
  538. }
  539. // Wait waits until all goroutines are done, just like sync.WaitGroup's Wait
  540. func (p *threadPool) Wait() {
  541. p.receivedRequests.Wait()
  542. }
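// Illustrative usage sketch (processDir and dirs are hypothetical, not part of this file):
//
//	pool := newThreadPool(runtime.NumCPU())
//	for _, dir := range dirs {
//		dir := dir // capture the loop variable for the goroutine
//		pool.Run(func() { processDir(dir) })
//	}
//	pool.Wait() // block until every requested function has finished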
  543. type fsErr struct {
  544. path string
  545. err error
  546. }
  547. func (e fsErr) String() string {
  548. return e.path + ": " + e.err.Error()
  549. }
  550. func (f *Finder) serializeCacheEntry(dirInfos []dirFullInfo) ([]byte, error) {
  551. // group each dirFullInfo by its Device, to avoid having to repeat it in the output
  552. dirsByDevice := map[uint64][]PersistedDirInfo{}
  553. for _, entry := range dirInfos {
  554. _, found := dirsByDevice[entry.Device]
  555. if !found {
  556. dirsByDevice[entry.Device] = []PersistedDirInfo{}
  557. }
  558. dirsByDevice[entry.Device] = append(dirsByDevice[entry.Device],
  559. PersistedDirInfo{P: entry.Path, T: entry.ModTime, I: entry.Inode, F: entry.FileNames})
  560. }
  561. cacheEntry := CacheEntry{}
  562. for device, infos := range dirsByDevice {
  563. // find common prefix
  564. prefix := ""
  565. if len(infos) > 0 {
  566. prefix = infos[0].P
  567. }
  568. for _, info := range infos {
  569. for !strings.HasPrefix(info.P+"/", prefix+"/") {
  570. prefix = filepath.Dir(prefix)
  571. if prefix == "/" {
  572. break
  573. }
  574. }
  575. }
  576. // remove common prefix
  577. for i := range infos {
  578. suffix := strings.Replace(infos[i].P, prefix, "", 1)
  579. if len(suffix) > 0 && suffix[0] == '/' {
  580. suffix = suffix[1:]
  581. }
  582. infos[i].P = suffix
  583. }
  584. // turn the map (keyed by device) into a list of structs with labeled fields
  585. // this is to improve readability of the output
  586. cacheEntry = append(cacheEntry, PersistedDirs{Device: device, Root: prefix, Dirs: infos})
  587. }
  588. // convert to json.
  589. // it would save some space to use a different format than json for the db file,
  590. // but the space and time savings are small, and json is easy for humans to read
  591. bytes, err := json.Marshal(cacheEntry)
  592. return bytes, err
  593. }
  594. func (f *Finder) parseCacheEntry(bytes []byte) ([]dirFullInfo, error) {
  595. var cacheEntry CacheEntry
  596. err := json.Unmarshal(bytes, &cacheEntry)
  597. if err != nil {
  598. return nil, err
  599. }
  600. // convert from a CacheEntry to a []dirFullInfo (by copying a few fields)
  601. capacity := 0
  602. for _, element := range cacheEntry {
  603. capacity += len(element.Dirs)
  604. }
  605. nodes := make([]dirFullInfo, capacity)
  606. count := 0
  607. for _, element := range cacheEntry {
  608. for _, dir := range element.Dirs {
  609. path := joinCleanPaths(element.Root, dir.P)
  610. nodes[count] = dirFullInfo{
  611. pathAndStats: pathAndStats{
  612. statResponse: statResponse{
  613. ModTime: dir.T, Inode: dir.I, Device: element.Device,
  614. },
  615. Path: path},
  616. FileNames: dir.F}
  617. count++
  618. }
  619. }
  620. return nodes, nil
  621. }
  622. // We use the following separator byte to distinguish individually parseable blocks of json
  623. // because we know this separator won't appear in the json that we're parsing.
  624. //
  625. // The newline byte can only appear in a UTF-8 stream if the newline character appears, because:
  626. // - The newline character is encoded as "0000 1010" in binary ("0a" in hex)
  627. // - UTF-8 dictates that bytes beginning with a "0" bit are never emitted as part of a multibyte
  628. // character.
  629. //
  630. // We know that the newline character will never appear in our json string, because:
  631. // - If a newline character appears as part of a data string, then json encoding will
  632. // emit two characters instead: '\' and 'n'.
  633. // - The json encoder that we use doesn't emit the optional newlines between any of its
  634. // other outputs.
  635. const lineSeparator = byte('\n')
  636. func (f *Finder) readLine(reader *bufio.Reader) ([]byte, error) {
  637. return reader.ReadBytes(lineSeparator)
  638. }
  639. // validateCacheHeader reads the cache header from cacheReader and tells whether the cache is compatible with this Finder
  640. func (f *Finder) validateCacheHeader(cacheReader *bufio.Reader) bool {
  641. cacheVersionBytes, err := f.readLine(cacheReader)
  642. if err != nil {
  643. f.verbosef("Failed to read database header; database is invalid\n")
  644. return false
  645. }
  646. if len(cacheVersionBytes) > 0 && cacheVersionBytes[len(cacheVersionBytes)-1] == lineSeparator {
  647. cacheVersionBytes = cacheVersionBytes[:len(cacheVersionBytes)-1]
  648. }
  649. cacheVersionString := string(cacheVersionBytes)
  650. currentVersion := f.cacheMetadata.Version
  651. if cacheVersionString != currentVersion {
  652. f.verbosef("Version changed from %q to %q, database is not applicable\n", cacheVersionString, currentVersion)
  653. return false
  654. }
  655. cacheParamBytes, err := f.readLine(cacheReader)
  656. if err != nil {
  657. f.verbosef("Failed to read database search params; database is invalid\n")
  658. return false
  659. }
  660. if len(cacheParamBytes) > 0 && cacheParamBytes[len(cacheParamBytes)-1] == lineSeparator {
  661. cacheParamBytes = cacheParamBytes[:len(cacheParamBytes)-1]
  662. }
  663. currentParamBytes, err := f.cacheMetadata.Config.Dump()
  664. if err != nil {
  665. panic("Finder failed to serialize its parameters")
  666. }
  667. cacheParamString := string(cacheParamBytes)
  668. currentParamString := string(currentParamBytes)
  669. if cacheParamString != currentParamString {
  670. f.verbosef("Params changed from %q to %q, database is not applicable\n", cacheParamString, currentParamString)
  671. return false
  672. }
  673. return true
  674. }
  675. // loadBytes compares the cache info in <data> to the state of the filesystem
  676. // loadBytes returns a map representing <data> and also a slice of dirs that need to be re-walked
  677. func (f *Finder) loadBytes(id int, data []byte) (m *pathMap, dirsToWalk []string, err error) {
  678. helperStartTime := time.Now()
  679. cachedNodes, err := f.parseCacheEntry(data)
  680. if err != nil {
  681. return nil, nil, fmt.Errorf("Failed to parse block %v: %v\n", id, err.Error())
  682. }
  683. unmarshalDate := time.Now()
  684. f.verbosef("Unmarshaled %v objects for %v in %v\n",
  685. len(cachedNodes), id, unmarshalDate.Sub(helperStartTime))
  686. tempMap := newPathMap("/")
  687. stats := make([]statResponse, len(cachedNodes))
  688. for i, node := range cachedNodes {
  689. // check the file system for an updated timestamp
  690. stats[i] = f.statDirSync(node.Path)
  691. }
  692. dirsToWalk = []string{}
  693. for i, cachedNode := range cachedNodes {
  694. updated := stats[i]
  695. // save the cached value
  696. container := tempMap.GetNode(cachedNode.Path, true)
  697. container.mapNode = mapNode{statResponse: updated}
  698. // if the metadata changed and the directory still exists, then
  699. // make a note to walk it later
  700. if !f.isInfoUpToDate(cachedNode.statResponse, updated) && updated.ModTime != 0 {
  701. f.setModified()
  702. // make a note that the directory needs to be walked
  703. dirsToWalk = append(dirsToWalk, cachedNode.Path)
  704. } else {
  705. container.mapNode.FileNames = cachedNode.FileNames
  706. }
  707. }
  708. // count the number of nodes to improve our understanding of the shape of the tree,
  709. // thereby improving parallelism of subsequent searches
  710. tempMap.UpdateNumDescendentsRecursive()
  711. f.verbosef("Statted inodes of block %v in %v\n", id, time.Now().Sub(unmarshalDate))
  712. return tempMap, dirsToWalk, nil
  713. }
  714. // startFromExternalCache loads the cache database from disk
  715. // startFromExternalCache waits to return until the load of the cache db is complete, but
  716. // startFromExternalCache does not wait for every listDir() or statDir() request to complete
  717. func (f *Finder) startFromExternalCache() (err error) {
  718. startTime := time.Now()
  719. dbPath := f.DbPath
  720. // open cache file and validate its header
  721. reader, err := f.filesystem.Open(dbPath)
  722. if err != nil {
  723. return errors.New("No data to load from database\n")
  724. }
  725. defer reader.Close()
  726. bufferedReader := bufio.NewReader(reader)
  727. if !f.validateCacheHeader(bufferedReader) {
  728. return errors.New("Cache header does not match")
  729. }
  730. f.verbosef("Database header matches, will attempt to use database %v\n", f.DbPath)
  731. // read the file and spawn threads to process it
  732. nodesToWalk := [][]*pathMap{}
  733. mainTree := newPathMap("/")
  734. // read the blocks and stream them into <blockChannel>
  735. type dataBlock struct {
  736. id int
  737. err error
  738. data []byte
  739. }
  740. blockChannel := make(chan dataBlock, f.numDbLoadingThreads)
  741. readBlocks := func() {
  742. index := 0
  743. for {
  744. // It takes some time to unmarshal the input from json, so we want
  745. // to unmarshal it in parallel. In order to find valid places to
  746. // break the input, we scan for the line separators that we inserted
  747. // (for this purpose) when we dumped the database.
  748. data, err := f.readLine(bufferedReader)
  749. var response dataBlock
  750. done := false
  751. if err != nil && err != io.EOF {
  752. response = dataBlock{id: index, err: err, data: nil}
  753. done = true
  754. } else {
  755. done = (err == io.EOF)
  756. response = dataBlock{id: index, err: nil, data: data}
  757. }
  758. blockChannel <- response
  759. index++
  760. duration := time.Since(startTime)
  761. f.verbosef("Read block %v after %v\n", index, duration)
  762. if done {
  763. f.verbosef("Read %v blocks in %v\n", index, duration)
  764. close(blockChannel)
  765. return
  766. }
  767. }
  768. }
  769. go readBlocks()
  770. // Read from <blockChannel> and stream the responses into <resultChannel>.
  771. type workResponse struct {
  772. id int
  773. err error
  774. tree *pathMap
  775. updatedDirs []string
  776. }
  777. resultChannel := make(chan workResponse)
  778. processBlocks := func() {
  779. numProcessed := 0
  780. threadPool := newThreadPool(f.numDbLoadingThreads)
  781. for {
  782. // get a block to process
  783. block, received := <-blockChannel
  784. if !received {
  785. break
  786. }
  787. if block.err != nil {
  788. resultChannel <- workResponse{err: block.err}
  789. break
  790. }
  791. numProcessed++
  792. // wait until there is CPU available to process it
  793. threadPool.Run(
  794. func() {
  795. processStartTime := time.Now()
  796. f.verbosef("Starting to process block %v after %v\n",
  797. block.id, processStartTime.Sub(startTime))
  798. tempMap, updatedDirs, err := f.loadBytes(block.id, block.data)
  799. var response workResponse
  800. if err != nil {
  801. f.verbosef(
  802. "Block %v failed to parse with error %v\n",
  803. block.id, err)
  804. response = workResponse{err: err}
  805. } else {
  806. response = workResponse{
  807. id: block.id,
  808. err: nil,
  809. tree: tempMap,
  810. updatedDirs: updatedDirs,
  811. }
  812. }
  813. f.verbosef("Processed block %v in %v\n",
  814. block.id, time.Since(processStartTime),
  815. )
  816. resultChannel <- response
  817. },
  818. )
  819. }
  820. threadPool.Wait()
  821. f.verbosef("Finished processing %v blocks in %v\n",
  822. numProcessed, time.Since(startTime))
  823. close(resultChannel)
  824. }
  825. go processBlocks()
  826. // Read from <resultChannel> and use the results
  827. combineResults := func() (err error) {
  828. for {
  829. result, received := <-resultChannel
  830. if !received {
  831. break
  832. }
  833. if err != nil {
  834. // In case of an error, wait for work to complete before
  835. // returning the error. This ensures that any subsequent
  836. // work doesn't need to compete for resources (and possibly
  837. // fail due to, for example, a filesystem limit on the number of
  838. // concurrently open files) with past work.
  839. continue
  840. }
  841. if result.err != nil {
  842. err = result.err
  843. continue
  844. }
  845. // update main tree
  846. mainTree.MergeIn(result.tree)
  847. // record any new directories that we will need to Stat()
  848. updatedNodes := make([]*pathMap, len(result.updatedDirs))
  849. for j, dir := range result.updatedDirs {
  850. node := mainTree.GetNode(dir, false)
  851. updatedNodes[j] = node
  852. }
  853. nodesToWalk = append(nodesToWalk, updatedNodes)
  854. }
  855. return err
  856. }
  857. err = combineResults()
  858. if err != nil {
  859. return err
  860. }
  861. f.nodes = *mainTree
  862. // after having loaded the entire db and therefore created entries for
  863. // the directories we know of, now it's safe to start calling ReadDir on
  864. // any updated directories
  865. for i := range nodesToWalk {
  866. f.listDirsAsync(nodesToWalk[i])
  867. }
  868. f.verbosef("Loaded db and statted known dirs in %v\n", time.Since(startTime))
  869. f.threadPool.Wait()
  870. f.verbosef("Loaded db and statted all dirs in %v\n", time.Now().Sub(startTime))
  871. return err
  872. }
  873. // startWithoutExternalCache starts scanning the filesystem according to the cache config
  874. // startWithoutExternalCache should be called if startFromExternalCache is not applicable
  875. func (f *Finder) startWithoutExternalCache() {
  876. startTime := time.Now()
  877. configDirs := f.cacheMetadata.Config.RootDirs
  878. // clean paths
  879. candidates := make([]string, len(configDirs))
  880. for i, dir := range configDirs {
  881. candidates[i] = filepath.Clean(dir)
  882. }
  883. // remove duplicates
  884. dirsToScan := make([]string, 0, len(configDirs))
  885. for _, candidate := range candidates {
  886. include := true
  887. for _, included := range dirsToScan {
  888. if included == "/" || strings.HasPrefix(candidate+"/", included+"/") {
  889. include = false
  890. break
  891. }
  892. }
  893. if include {
  894. dirsToScan = append(dirsToScan, candidate)
  895. }
  896. }
  897. // finally, start the search
  898. for _, path := range dirsToScan {
  899. f.verbosef("Starting find of %v\n", path)
  900. f.startFind(path)
  901. }
  902. f.threadPool.Wait()
  903. f.verbosef("Scanned filesystem (not using cache) in %v\n", time.Now().Sub(startTime))
  904. }
  905. // isInfoUpToDate tells whether <new> can confirm that results computed at <old> are still valid
  906. func (f *Finder) isInfoUpToDate(old statResponse, new statResponse) (equal bool) {
  907. if old.Inode != new.Inode {
  908. return false
  909. }
  910. if old.ModTime != new.ModTime {
  911. return false
  912. }
  913. if old.Device != new.Device {
  914. return false
  915. }
  916. return true
  917. }
  918. func (f *Finder) wasModified() bool {
  919. return atomic.LoadInt32(&f.modifiedFlag) > 0
  920. }
  921. func (f *Finder) setModified() {
  922. var newVal int32
  923. newVal = 1
  924. atomic.StoreInt32(&f.modifiedFlag, newVal)
  925. }
  926. // sortedDirEntries exports directory entries to facilitate dumping them to the external cache
  927. func (f *Finder) sortedDirEntries() []dirFullInfo {
  928. startTime := time.Now()
  929. nodes := make([]dirFullInfo, 0)
  930. for _, node := range f.nodes.DumpAll() {
  931. if node.ModTime != 0 {
  932. nodes = append(nodes, node)
  933. }
  934. }
  935. discoveryDate := time.Now()
  936. f.verbosef("Generated %v cache entries in %v\n", len(nodes), discoveryDate.Sub(startTime))
  937. less := func(i int, j int) bool {
  938. return nodes[i].Path < nodes[j].Path
  939. }
  940. sort.Slice(nodes, less)
  941. sortDate := time.Now()
  942. f.verbosef("Sorted %v cache entries in %v\n", len(nodes), sortDate.Sub(discoveryDate))
  943. return nodes
  944. }
  945. // serializeDb converts the cache database into a form to save to disk
  946. func (f *Finder) serializeDb() ([]byte, error) {
  947. // sort dir entries
  948. var entryList = f.sortedDirEntries()
  949. // Generate an output file that can be conveniently loaded using the same number of threads
  950. // as were used in this execution (because presumably that will be the number of threads
  951. // used in the next execution too)
  952. // generate header
  953. header := []byte{}
  954. header = append(header, []byte(f.cacheMetadata.Version)...)
  955. header = append(header, lineSeparator)
  956. configDump, err := f.cacheMetadata.Config.Dump()
  957. if err != nil {
  958. return nil, err
  959. }
  960. header = append(header, configDump...)
  961. // serialize individual blocks in parallel
  962. numBlocks := f.numDbLoadingThreads
  963. if numBlocks > len(entryList) {
  964. numBlocks = len(entryList)
  965. }
  966. blocks := make([][]byte, 1+numBlocks)
  967. blocks[0] = header
  968. blockMin := 0
  969. wg := sync.WaitGroup{}
  970. var errLock sync.Mutex
  971. for i := 1; i <= numBlocks; i++ {
  972. // identify next block
  973. blockMax := len(entryList) * i / numBlocks
  974. block := entryList[blockMin:blockMax]
  975. // process block
  976. wg.Add(1)
  977. go func(index int, block []dirFullInfo) {
  978. byteBlock, subErr := f.serializeCacheEntry(block)
  979. f.verbosef("Serialized block %v into %v bytes\n", index, len(byteBlock))
  980. if subErr != nil {
  981. f.verbosef("%v\n", subErr.Error())
  982. errLock.Lock()
  983. err = subErr
  984. errLock.Unlock()
  985. } else {
  986. blocks[index] = byteBlock
  987. }
  988. wg.Done()
  989. }(i, block)
  990. blockMin = blockMax
  991. }
  992. wg.Wait()
  993. if err != nil {
  994. return nil, err
  995. }
  996. content := bytes.Join(blocks, []byte{lineSeparator})
  997. return content, nil
  998. }
  999. // dumpDb saves the cache database to disk
  1000. func (f *Finder) dumpDb() error {
  1001. startTime := time.Now()
  1002. f.verbosef("Dumping db\n")
  1003. tempPath := f.DbPath + ".tmp"
  1004. bytes, err := f.serializeDb()
  1005. if err != nil {
  1006. return err
  1007. }
  1008. serializeDate := time.Now()
  1009. f.verbosef("Serialized db in %v\n", serializeDate.Sub(startTime))
  1010. // dump file and atomically move
  1011. err = f.filesystem.WriteFile(tempPath, bytes, 0777)
  1012. if err != nil {
  1013. return err
  1014. }
  1015. err = f.filesystem.Rename(tempPath, f.DbPath)
  1016. if err != nil {
  1017. return err
  1018. }
  1019. f.verbosef("Wrote db in %v\n", time.Now().Sub(serializeDate))
  1020. return nil
  1021. }
  1022. // canIgnoreFsErr checks for certain classes of filesystem errors that are safe to ignore
  1023. func (f *Finder) canIgnoreFsErr(err error) bool {
  1024. pathErr, isPathErr := err.(*os.PathError)
  1025. if !isPathErr {
  1026. // Don't recognize this error
  1027. return false
  1028. }
  1029. if os.IsPermission(pathErr) {
  1030. // Permission errors are ignored:
  1031. // https://issuetracker.google.com/37553659
  1032. // https://github.com/google/kati/pull/116
  1033. return true
  1034. }
  1035. if pathErr.Err == os.ErrNotExist {
  1036. // If a directory doesn't exist, that generally means the cache is out-of-date
  1037. return true
  1038. }
  1039. // Don't recognize this error
  1040. return false
  1041. }
  1042. // onFsError should be called whenever a potentially fatal error is returned from a filesystem call
  1043. func (f *Finder) onFsError(path string, err error) {
  1044. if !f.canIgnoreFsErr(err) {
  1045. // We could send the errors through a channel instead, although that would cause this call
  1046. // to block unless we preallocated a sufficient buffer or spawned a reader thread.
  1047. // Although it wouldn't be too complicated to spawn a reader thread, it's still slightly
  1048. // more convenient to use a lock. Only in an unusual situation should this code be
  1049. // invoked anyway.
  1050. f.errlock.Lock()
  1051. f.fsErrs = append(f.fsErrs, fsErr{path: path, err: err})
  1052. f.errlock.Unlock()
  1053. }
  1054. }
  1055. // discardErrsForPrunedPaths removes any errors for paths that are no longer included in the cache
  1056. func (f *Finder) discardErrsForPrunedPaths() {
  1057. // This function could be somewhat inefficient due to being single-threaded,
  1058. // but the length of f.fsErrs should be approximately 0, so it shouldn't take long anyway.
  1059. relevantErrs := make([]fsErr, 0, len(f.fsErrs))
  1060. for _, fsErr := range f.fsErrs {
  1061. path := fsErr.path
  1062. node := f.nodes.GetNode(path, false)
  1063. if node != nil {
  1064. // The path in question wasn't pruned due to a failure to process a parent directory.
  1065. // So, the failure to process this path is important
  1066. relevantErrs = append(relevantErrs, fsErr)
  1067. }
  1068. }
  1069. f.fsErrs = relevantErrs
  1070. }
  1071. // getErr returns an error based on previous calls to onFsError, if any
  1072. func (f *Finder) getErr() error {
  1073. f.discardErrsForPrunedPaths()
  1074. numErrs := len(f.fsErrs)
  1075. if numErrs < 1 {
  1076. return nil
  1077. }
  1078. maxNumErrsToInclude := 10
  1079. message := ""
  1080. if numErrs > maxNumErrsToInclude {
  1081. message = fmt.Sprintf("finder encountered %v errors: %v...", numErrs, f.fsErrs[:maxNumErrsToInclude])
  1082. } else {
  1083. message = fmt.Sprintf("finder encountered %v errors: %v", numErrs, f.fsErrs)
  1084. }
  1085. return errors.New(message)
  1086. }
  1087. func (f *Finder) statDirAsync(dir *pathMap) {
  1088. node := dir
  1089. path := dir.path
  1090. f.threadPool.Run(
  1091. func() {
  1092. updatedStats := f.statDirSync(path)
  1093. if !f.isInfoUpToDate(node.statResponse, updatedStats) {
  1094. node.mapNode = mapNode{
  1095. statResponse: updatedStats,
  1096. FileNames: []string{},
  1097. }
  1098. f.setModified()
  1099. if node.statResponse.ModTime != 0 {
  1100. // modification time was updated, so re-scan for
  1101. // child directories
  1102. f.listDirAsync(dir)
  1103. }
  1104. }
  1105. },
  1106. )
  1107. }
  1108. func (f *Finder) statDirSync(path string) statResponse {
  1109. fileInfo, err := f.filesystem.Lstat(path)
  1110. var stats statResponse
  1111. if err != nil {
  1112. // possibly record this error
  1113. f.onFsError(path, err)
  1114. // in case of a failure to stat the directory, treat the directory as missing (modTime = 0)
  1115. return stats
  1116. }
  1117. modTime := fileInfo.ModTime()
  1118. stats = statResponse{}
  1119. inode, err := f.filesystem.InodeNumber(fileInfo)
  1120. if err != nil {
  1121. panic(fmt.Sprintf("Could not get inode number of %v: %v\n", path, err.Error()))
  1122. }
  1123. stats.Inode = inode
  1124. device, err := f.filesystem.DeviceNumber(fileInfo)
  1125. if err != nil {
  1126. panic(fmt.Sprintf("Could not get device number of %v: %v\n", path, err.Error()))
  1127. }
  1128. stats.Device = device
  1129. permissionsChangeTime, err := f.filesystem.PermTime(fileInfo)
  1130. if err != nil {
  1131. panic(fmt.Sprintf("Could not get permissions modification time (CTime) of %v: %v\n", path, err.Error()))
  1132. }
  1133. // We're only interested in knowing whether anything about the directory
  1134. // has changed since last check, so we use the latest of the two
  1135. // modification times (content modification (mtime) and
  1136. // permission modification (ctime))
  1137. if permissionsChangeTime.After(modTime) {
  1138. modTime = permissionsChangeTime
  1139. }
  1140. stats.ModTime = modTime.UnixNano()
  1141. return stats
  1142. }
  1143. func (f *Finder) shouldIncludeFile(fileName string) bool {
  1144. for _, includedName := range f.cacheMetadata.Config.IncludeFiles {
  1145. if fileName == includedName {
  1146. return true
  1147. }
  1148. }
  1149. for _, includeSuffix := range f.cacheMetadata.Config.IncludeSuffixes {
  1150. if strings.HasSuffix(fileName, includeSuffix) {
  1151. return true
  1152. }
  1153. }
  1154. return false
  1155. }
  1156. // pruneCacheCandidates removes the items that we don't want to include in our persistent cache
  1157. func (f *Finder) pruneCacheCandidates(items *DirEntries) {
  1158. for _, fileName := range items.FileNames {
  1159. for _, abortedName := range f.cacheMetadata.Config.PruneFiles {
  1160. if fileName == abortedName {
  1161. items.FileNames = []string{}
  1162. items.DirNames = []string{}
  1163. return
  1164. }
  1165. }
  1166. }
  1167. // remove any files that aren't the ones we want to include
  1168. writeIndex := 0
  1169. for _, fileName := range items.FileNames {
  1170. if f.shouldIncludeFile(fileName) {
  1171. items.FileNames[writeIndex] = fileName
  1172. writeIndex++
  1173. }
  1174. }
  1175. // resize
  1176. items.FileNames = items.FileNames[:writeIndex]
  1177. writeIndex = 0
  1178. for _, dirName := range items.DirNames {
  1179. items.DirNames[writeIndex] = dirName
  1180. // ignore other dirs that are known to not be inputs to the build process
  1181. include := true
  1182. for _, excludedName := range f.cacheMetadata.Config.ExcludeDirs {
  1183. if dirName == excludedName {
  1184. // don't include
  1185. include = false
  1186. break
  1187. }
  1188. }
  1189. if include {
  1190. writeIndex++
  1191. }
  1192. }
  1193. // resize
  1194. items.DirNames = items.DirNames[:writeIndex]
  1195. }
  1196. func (f *Finder) listDirsAsync(nodes []*pathMap) {
  1197. f.threadPool.Run(
  1198. func() {
  1199. for i := range nodes {
  1200. f.listDirSync(nodes[i])
  1201. }
  1202. },
  1203. )
  1204. }
  1205. func (f *Finder) listDirAsync(node *pathMap) {
  1206. f.threadPool.Run(
  1207. func() {
  1208. f.listDirSync(node)
  1209. },
  1210. )
  1211. }
  1212. func (f *Finder) listDirSync(dir *pathMap) {
  1213. path := dir.path
  1214. children, err := f.filesystem.ReadDir(path)
  1215. if err != nil {
  1216. // possibly record this error
  1217. f.onFsError(path, err)
  1218. // if listing the contents of the directory fails (presumably due to
  1219. // permission denied), then treat the directory as empty
  1220. children = nil
  1221. }
  1222. var subdirs []string
  1223. var subfiles []string
  1224. for _, child := range children {
  1225. linkBits := child.Mode() & os.ModeSymlink
  1226. isLink := linkBits != 0
  1227. if isLink {
  1228. childPath := filepath.Join(path, child.Name())
  1229. childStat, err := f.filesystem.Stat(childPath)
  1230. if err != nil {
  1231. // If stat fails this is probably a broken or dangling symlink, treat it as a file.
  1232. subfiles = append(subfiles, child.Name())
  1233. } else if childStat.IsDir() {
  1234. // Skip symlink dirs if not requested otherwise. Android has a number
  1235. // of symlinks creating infinite source trees which would otherwise get
  1236. // us in an infinite loop.
  1237. // TODO(b/197349722): Revisit this once symlink loops are banned in the
  1238. // source tree.
  1239. if f.cacheMetadata.Config.FollowSymlinks {
  1240. subdirs = append(subdirs, child.Name())
  1241. }
  1242. } else {
  1243. // We do have to support symlink files because the link name might be
  1244. // different than the target name
  1245. // (for example, Android.bp -> build/soong/root.bp)
  1246. subfiles = append(subfiles, child.Name())
  1247. }
  1248. } else if child.IsDir() {
  1249. subdirs = append(subdirs, child.Name())
  1250. } else {
  1251. subfiles = append(subfiles, child.Name())
  1252. }
  1253. }
  1254. parentNode := dir
  1255. entry := &DirEntries{Path: path, DirNames: subdirs, FileNames: subfiles}
  1256. f.pruneCacheCandidates(entry)
  1257. // create a pathMap node for each relevant subdirectory
  1258. relevantChildren := map[string]*pathMap{}
  1259. for _, subdirName := range entry.DirNames {
  1260. childNode, found := parentNode.children[subdirName]
  1261. // if we already knew of this directory, then we already have a request pending to Stat it
  1262. // if we didn't already know of this directory, then we must Stat it now
  1263. if !found {
  1264. childNode = parentNode.newChild(subdirName)
  1265. f.statDirAsync(childNode)
  1266. }
  1267. relevantChildren[subdirName] = childNode
  1268. }
  1269. // Note that in rare cases, it's possible that we're reducing the set of
  1270. // children via this statement, if these are all true:
  1271. // 1. we previously had a cache that knew about subdirectories of parentNode
  1272. // 2. the user created a prune-file (described in pruneCacheCandidates)
  1273. // inside <parentNode>, which specifies that the contents of parentNode
  1274. // are to be ignored.
  1275. // The fact that it's possible to remove children here means that *pathMap structs
  1276. // must not be looked up from f.nodes by filepath (and instead must be accessed by
  1277. // direct pointer) until after every listDirSync completes
  1278. parentNode.FileNames = entry.FileNames
  1279. parentNode.children = relevantChildren
  1280. }
  1281. // listMatches takes a node and a function that specifies which subdirectories and
  1282. // files to include, and listMatches returns the matches
  1283. func (f *Finder) listMatches(node *pathMap,
  1284. filter WalkFunc) (subDirs []*pathMap, filePaths []string) {
  1285. entries := DirEntries{
  1286. FileNames: node.FileNames,
  1287. }
  1288. entries.DirNames = make([]string, 0, len(node.children))
  1289. for childName := range node.children {
  1290. entries.DirNames = append(entries.DirNames, childName)
  1291. }
  1292. dirNames, fileNames := filter(entries)
  1293. subDirs = []*pathMap{}
  1294. filePaths = make([]string, 0, len(fileNames))
  1295. for _, fileName := range fileNames {
  1296. filePaths = append(filePaths, joinCleanPaths(node.path, fileName))
  1297. }
  1298. subDirs = make([]*pathMap, 0, len(dirNames))
  1299. for _, childName := range dirNames {
  1300. child, ok := node.children[childName]
  1301. if ok {
  1302. subDirs = append(subDirs, child)
  1303. }
  1304. }
  1305. return subDirs, filePaths
  1306. }
  1307. // findInCacheMultithreaded spawns potentially multiple goroutines with which to search the cache.
  1308. func (f *Finder) findInCacheMultithreaded(node *pathMap, filter WalkFunc,
  1309. approxNumThreads int) []string {
  1310. if approxNumThreads < 2 {
  1311. // Done spawning threads; process remaining directories
  1312. return f.findInCacheSinglethreaded(node, filter)
  1313. }
  1314. totalWork := 0
  1315. for _, child := range node.children {
  1316. totalWork += child.approximateNumDescendents
  1317. }
  1318. childrenResults := make(chan []string, len(node.children))
  1319. subDirs, filePaths := f.listMatches(node, filter)
  1320. // process child directories
  1321. for _, child := range subDirs {
  1322. numChildThreads := approxNumThreads * child.approximateNumDescendents / totalWork
  1323. childProcessor := func(child *pathMap) {
  1324. childResults := f.findInCacheMultithreaded(child, filter, numChildThreads)
  1325. childrenResults <- childResults
  1326. }
  1327. // If we're allowed to use more than 1 thread to process this directory,
  1328. // then instead we use 1 thread for each subdirectory.
  1329. // It would be strange to spawn threads for only some subdirectories.
  1330. go childProcessor(child)
  1331. }
  1332. // collect results
  1333. for i := 0; i < len(subDirs); i++ {
  1334. childResults := <-childrenResults
  1335. filePaths = append(filePaths, childResults...)
  1336. }
  1337. close(childrenResults)
  1338. return filePaths
  1339. }
  1340. // findInCacheSinglethreaded synchronously searches the cache for all matching file paths
  1341. // note findInCacheSinglethreaded runs 2X to 4X as fast by being iterative rather than recursive
  1342. func (f *Finder) findInCacheSinglethreaded(node *pathMap, filter WalkFunc) []string {
  1343. if node == nil {
  1344. return []string{}
  1345. }
  1346. nodes := []*pathMap{node}
  1347. matches := []string{}
  1348. for len(nodes) > 0 {
  1349. currentNode := nodes[0]
  1350. nodes = nodes[1:]
  1351. subDirs, filePaths := f.listMatches(currentNode, filter)
  1352. nodes = append(nodes, subDirs...)
  1353. matches = append(matches, filePaths...)
  1354. }
  1355. return matches
  1356. }