Day before first install
16 node_modules/cacache/LICENSE.md (generated, vendored, new file)
@@ -0,0 +1,16 @@
ISC License

Copyright (c) npm, Inc.

Permission to use, copy, modify, and/or distribute this software for
any purpose with or without fee is hereby granted, provided that the
above copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE COPYRIGHT HOLDER DISCLAIMS
ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
COPYRIGHT HOLDER BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE
USE OR PERFORMANCE OF THIS SOFTWARE.
703 node_modules/cacache/README.md (generated, vendored, new file)
@@ -0,0 +1,703 @@
# cacache [npm](https://npm.im/cacache) [Travis CI](https://travis-ci.org/npm/cacache) [AppVeyor](https://ci.appveyor.com/project/npm/cacache) [Coveralls](https://coveralls.io/github/npm/cacache?branch=latest)

[`cacache`](https://github.com/npm/cacache) is a Node.js library for managing
local key and content address caches. It's really fast, really good at
concurrency, and it will never give you corrupted data, even if cache files
get corrupted or manipulated.

On systems that support user and group settings on files, cacache will
match the `uid` and `gid` values to the folder where the cache lives, even
when running as `root`.

It was written to be used as [npm](https://npm.im)'s local cache, but can
just as easily be used on its own.

## Install

`$ npm install --save cacache`

## Table of Contents

* [Example](#example)
* [Features](#features)
* [Contributing](#contributing)
* [API](#api)
  * [Using localized APIs](#localized-api)
  * Reading
    * [`ls`](#ls)
    * [`ls.stream`](#ls-stream)
    * [`get`](#get-data)
    * [`get.stream`](#get-stream)
    * [`get.info`](#get-info)
    * [`get.hasContent`](#get-hasContent)
  * Writing
    * [`put`](#put-data)
    * [`put.stream`](#put-stream)
    * [`rm.all`](#rm-all)
    * [`rm.entry`](#rm-entry)
    * [`rm.content`](#rm-content)
    * [`index.compact`](#index-compact)
    * [`index.insert`](#index-insert)
  * Utilities
    * [`clearMemoized`](#clear-memoized)
    * [`tmp.mkdir`](#tmp-mkdir)
    * [`tmp.withTmp`](#with-tmp)
  * Integrity
    * [Subresource Integrity](#integrity)
    * [`verify`](#verify)
    * [`verify.lastRun`](#verify-last-run)

### Example

```javascript
const cacache = require('cacache')
const fs = require('fs')

const tarball = '/path/to/mytar.tgz'
const cachePath = '/tmp/my-toy-cache'
const key = 'my-unique-key-1234'

// Cache it! Use `cachePath` as the root of the content cache
cacache.put(cachePath, key, '10293801983029384').then(integrity => {
  console.log(`Saved content to ${cachePath}.`)
})

const destination = '/tmp/mytar.tgz'

// Copy the contents out of the cache and into their destination!
// But this time, use stream instead!
cacache.get.stream(
  cachePath, key
).pipe(
  fs.createWriteStream(destination)
).on('finish', () => {
  console.log('done extracting!')
})

// The same thing, but skip the key index.
cacache.get.byDigest(cachePath, integrityHash).then(data => {
  fs.writeFile(destination, data, err => {
    console.log('tarball data fetched based on its sha512sum and written out!')
  })
})
```

### Features

* Extraction by key or by content address (shasum, etc)
* [Subresource Integrity](#integrity) web standard support
* Multi-hash support - safely host sha1, sha512, etc, in a single cache
* Automatic content deduplication
* Fault tolerance (immune to corruption, partial writes, process races, etc)
* Consistency guarantees on read and write (full data verification)
* Lockless, high-concurrency cache access
* Streaming support
* Promise support
* Fast -- sub-millisecond reads and writes including verification
* Arbitrary metadata storage
* Garbage collection and additional offline verification
* Thorough test coverage
* There's probably a bloom filter in there somewhere. Those are cool, right? 🤔

### Contributing

The cacache team enthusiastically welcomes contributions and project participation! There's a bunch of things you can do if you want to contribute! The [Contributor Guide](CONTRIBUTING.md) has all the information you need for everything from reporting bugs to contributing entire new features. Please don't hesitate to jump in if you'd like to, or even ask us questions if something isn't clear.

All participants and maintainers in this project are expected to follow the [Code of Conduct](CODE_OF_CONDUCT.md), and just generally be excellent to each other.

Please refer to the [Changelog](CHANGELOG.md) for project history details, too.

Happy hacking!

### API

#### <a name="ls"></a> `> cacache.ls(cache) -> Promise<Object>`

Lists info for all entries currently in the cache as a single large object. Each
entry in the object will be keyed by the unique index key, with corresponding
[`get.info`](#get-info) objects as the values.

##### Example

```javascript
cacache.ls(cachePath).then(console.log)
// Output
{
  'my-thing': {
    key: 'my-thing',
    integrity: 'sha512-BaSe64/EnCoDED+HAsh==',
    path: '.testcache/content/deadbeef', // joined with `cachePath`
    time: 12345698490,
    size: 4023948,
    metadata: {
      name: 'blah',
      version: '1.2.3',
      description: 'this was once a package but now it is my-thing'
    }
  },
  'other-thing': {
    key: 'other-thing',
    integrity: 'sha1-ANothER+hasH=',
    path: '.testcache/content/bada55',
    time: 11992309289,
    size: 111112
  }
}
```

#### <a name="ls-stream"></a> `> cacache.ls.stream(cache) -> Readable`

Lists info for all entries currently in the cache as a single large object.

This works just like [`ls`](#ls), except [`get.info`](#get-info) entries are
returned as `'data'` events on the returned stream.

##### Example

```javascript
cacache.ls.stream(cachePath).on('data', console.log)
// Output
{
  key: 'my-thing',
  integrity: 'sha512-BaSe64HaSh',
  path: '.testcache/content/deadbeef', // joined with `cachePath`
  time: 12345698490,
  size: 13423,
  metadata: {
    name: 'blah',
    version: '1.2.3',
    description: 'this was once a package but now it is my-thing'
  }
}

{
  key: 'other-thing',
  integrity: 'whirlpool-WoWSoMuchSupport',
  path: '.testcache/content/bada55',
  time: 11992309289,
  size: 498023984029
}

{
  ...
}
```

#### <a name="get-data"></a> `> cacache.get(cache, key, [opts]) -> Promise({data, metadata, integrity})`

Returns an object with the cached data, digest, and metadata identified by
`key`. The `data` property of this object will be a `Buffer` instance that
presumably holds some data that means something to you. I'm sure you know what
to do with it! cacache just won't care.

`integrity` is a [Subresource Integrity](#integrity) string. That is, a string
that can be used to verify `data`, which looks like
`<hash-algorithm>-<base64-integrity-hash>`.

If there is no content identified by `key`, or if the locally-stored data does
not pass the validity checksum, the promise will be rejected.

A sub-function, `get.byDigest`, may be used for identical behavior, except lookup
will happen by integrity hash, bypassing the index entirely. This version of the
function *only* returns `data` itself, without any wrapper.

See: [options](#get-options)

##### Note

This function loads the entire cache entry into memory before returning it. If
you're dealing with Very Large data, consider using [`get.stream`](#get-stream)
instead.

##### Example

```javascript
// Look up by key
cacache.get(cachePath, 'my-thing').then(console.log)
// Output:
{
  metadata: {
    thingName: 'my'
  },
  integrity: 'sha512-BaSe64HaSh',
  data: Buffer#<deadbeef>,
  size: 9320
}

// Look up by digest
cacache.get.byDigest(cachePath, 'sha512-BaSe64HaSh').then(console.log)
// Output:
Buffer#<deadbeef>
```

#### <a name="get-stream"></a> `> cacache.get.stream(cache, key, [opts]) -> Readable`

Returns a [Readable Stream](https://nodejs.org/api/stream.html#stream_readable_streams) of the cached data identified by `key`.

If there is no content identified by `key`, or if the locally-stored data does
not pass the validity checksum, an error will be emitted.

`metadata` and `integrity` events will be emitted before the stream closes, if
you need to collect that extra data about the cached entry.

A sub-function, `get.stream.byDigest`, may be used for identical behavior,
except lookup will happen by integrity hash, bypassing the index entirely. This
version does not emit the `metadata` and `integrity` events at all.

See: [options](#get-options)

##### Example

```javascript
// Look up by key
cacache.get.stream(
  cachePath, 'my-thing'
).on('metadata', metadata => {
  console.log('metadata:', metadata)
}).on('integrity', integrity => {
  console.log('integrity:', integrity)
}).pipe(
  fs.createWriteStream('./x.tgz')
)
// Outputs:
metadata: { ... }
integrity: 'sha512-SoMeDIGest+64=='

// Look up by digest
cacache.get.stream.byDigest(
  cachePath, 'sha512-SoMeDIGest+64=='
).pipe(
  fs.createWriteStream('./x.tgz')
)
```

#### <a name="get-info"></a> `> cacache.get.info(cache, key) -> Promise`

Looks up `key` in the cache index, returning information about the entry if
one exists.

##### Fields

* `key` - Key the entry was looked up under. Matches the `key` argument.
* `integrity` - [Subresource Integrity hash](#integrity) for the content this entry refers to.
* `path` - Filesystem path where content is stored, joined with `cache` argument.
* `time` - Timestamp the entry was first added on.
* `metadata` - User-assigned metadata associated with the entry/content.

##### Example

```javascript
cacache.get.info(cachePath, 'my-thing').then(console.log)

// Output
{
  key: 'my-thing',
  integrity: 'sha256-MUSTVERIFY+ALL/THINGS==',
  path: '.testcache/content/deadbeef',
  time: 12345698490,
  size: 849234,
  metadata: {
    name: 'blah',
    version: '1.2.3',
    description: 'this was once a package but now it is my-thing'
  }
}
```

#### <a name="get-hasContent"></a> `> cacache.get.hasContent(cache, integrity) -> Promise`

Looks up a [Subresource Integrity hash](#integrity) in the cache. If content
exists for this `integrity`, it will return an object containing the specific
single integrity hash that was found (as `sri`) and the size of the found
content (as `size`). If no content exists for this integrity, it will return
`false`.

##### Example

```javascript
cacache.get.hasContent(cachePath, 'sha256-MUSTVERIFY+ALL/THINGS==').then(console.log)

// Output
{
  sri: {
    source: 'sha256-MUSTVERIFY+ALL/THINGS==',
    algorithm: 'sha256',
    digest: 'MUSTVERIFY+ALL/THINGS==',
    options: []
  },
  size: 9001
}

cacache.get.hasContent(cachePath, 'sha512-NOT+IN/CACHE==').then(console.log)

// Output
false
```

##### <a name="get-options"></a> Options

##### `opts.integrity`

If present, the pre-calculated digest for the fetched content. If this option
is provided and does not match the data that is read, the operation will fail
with an `EINTEGRITY` error.

##### `opts.memoize`

Default: null

If explicitly truthy, cacache will memoize the data from this read, so later
reads can be served from memory. If explicitly `false`, cacache will bypass the
in-memory cache and read from disk. By default, reader functions will use
memoized data when it is available.

##### `opts.size`

If provided, the length of the cached data will be verified against this
value. If there's more or less data than expected, the read will fail with an
`EBADSIZE` error.

#### <a name="put-data"></a> `> cacache.put(cache, key, data, [opts]) -> Promise`

Inserts data passed to it into the cache. The returned Promise resolves with a
digest (generated according to [`opts.algorithms`](#optsalgorithms)) after the
cache entry has been successfully written.

See: [options](#put-options)

##### Example

```javascript
fetch(
  'https://registry.npmjs.org/cacache/-/cacache-1.0.0.tgz'
).then(data => {
  return cacache.put(cachePath, 'registry.npmjs.org|cacache@1.0.0', data)
}).then(integrity => {
  console.log('integrity hash is', integrity)
})
```

#### <a name="put-stream"></a> `> cacache.put.stream(cache, key, [opts]) -> Writable`

Returns a [Writable
Stream](https://nodejs.org/api/stream.html#stream_writable_streams) that inserts
data written to it into the cache. Emits an `integrity` event with the digest of
written contents when it succeeds.

See: [options](#put-options)

##### Example

```javascript
request.get(
  'https://registry.npmjs.org/cacache/-/cacache-1.0.0.tgz'
).pipe(
  cacache.put.stream(
    cachePath, 'registry.npmjs.org|cacache@1.0.0'
  ).on('integrity', d => console.log(`integrity digest is ${d}`))
)
```

##### <a name="put-options"></a> Options

##### `opts.metadata`

Arbitrary metadata to be attached to the inserted key.

##### `opts.size`

If provided, the data stream will be verified to check that enough data was
passed through. If there's more or less data than expected, insertion will fail
with an `EBADSIZE` error.

##### `opts.integrity`

If present, the pre-calculated digest for the inserted content. If this option
is provided and does not match the post-insertion digest, insertion will fail
with an `EINTEGRITY` error.

`algorithms` has no effect if this option is present.

##### `opts.algorithms`

Default: ['sha512']

Hashing algorithms to use when calculating the [subresource integrity
digest](#integrity) for inserted data. Can use any algorithm listed in
`crypto.getHashes()` or `'omakase'`/`'お任せします'` to pick a random hash
algorithm on each insertion. You may also use any anagram of `'modnar'` to use
this feature.

Currently only supports one algorithm at a time (i.e., an array length of
exactly `1`). Has no effect if `opts.integrity` is present.
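
For example, a minimal sketch (not from the original README) of selecting a
different algorithm for a single insertion, assuming `cachePath` and `someData`
are defined as in the earlier examples:

```javascript
// Hash this insertion with sha256 instead of the default sha512.
cacache.put(cachePath, 'my-key', someData, { algorithms: ['sha256'] }).then(integrity => {
  console.log(integrity) // an SRI string starting with 'sha256-'
})
```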

##### `opts.memoize`

Default: null

If provided, cacache will memoize the given cache insertion in memory, bypassing
any filesystem checks for that key or digest in future cache fetches. Nothing
will be written to the in-memory cache unless this option is explicitly truthy.

If `opts.memoize` is an object or a `Map`-like (that is, an object with `get`
and `set` methods), it will be written to instead of the global memoization
cache.

Reading from disk can be forced by explicitly passing `memoize: false` to
the reader functions, but by default they will read from memory.
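
As a rough illustration of the `Map`-like form described above (not from the
original README), assuming `cachePath` and `someData` are defined as in the
earlier examples:

```javascript
// Use a private Map as the memoization target instead of the global cache.
const privateMemo = new Map() // anything exposing get()/set() works

cacache.put(cachePath, 'my-key', someData, { memoize: privateMemo }).then(() => {
  // Passing the same object to a reader lets it consult that private cache
  // instead of the global one, per the behavior described in this section.
  return cacache.get(cachePath, 'my-key', { memoize: privateMemo })
}).then(({ data }) => {
  console.log('read back', data.length, 'bytes')
})
```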

##### `opts.tmpPrefix`

Default: null

Prefix to use for the temporary directory name inside the cache's tmp dir.

#### <a name="rm-all"></a> `> cacache.rm.all(cache) -> Promise`

Clears the entire cache. Mainly by blowing away the cache directory itself.

##### Example

```javascript
cacache.rm.all(cachePath).then(() => {
  console.log('THE APOCALYPSE IS UPON US 😱')
})
```

#### <a name="rm-entry"></a> `> cacache.rm.entry(cache, key, [opts]) -> Promise`

Alias: `cacache.rm`

Removes the index entry for `key`. Content will still be accessible if
requested directly by content address ([`get.stream.byDigest`](#get-stream)).

By default, this appends a new entry to the index with an integrity of `null`.
If `opts.removeFully` is set to `true` then the index file itself will be
physically deleted rather than appending a `null`.

To remove the content itself (which might still be used by other entries), use
[`rm.content`](#rm-content). Or, to safely vacuum any unused content, use
[`verify`](#verify).

##### Example

```javascript
cacache.rm.entry(cachePath, 'my-thing').then(() => {
  console.log('I did not like it anyway')
})
```

#### <a name="rm-content"></a> `> cacache.rm.content(cache, integrity) -> Promise`

Removes the content identified by `integrity`. Any index entries referring to it
will not be usable again until the content is re-added to the cache with an
identical digest.

##### Example

```javascript
cacache.rm.content(cachePath, 'sha512-SoMeDIGest/IN+BaSE64==').then(() => {
  console.log('data for my-thing is gone!')
})
```

#### <a name="index-compact"></a> `> cacache.index.compact(cache, key, matchFn, [opts]) -> Promise`

Uses `matchFn`, which must be a synchronous function that accepts two entries
and returns a boolean indicating whether or not the two entries match, to
deduplicate all entries in the cache for the given `key`.

If `opts.validateEntry` is provided, it will be called as a function whose only
parameter is a single index entry. The function must return a Boolean: if it
returns `true`, the entry is considered valid and will be kept in the index;
if it returns `false`, the entry will be removed from the index.

If `opts.validateEntry` is not provided, however, every entry in the index will
be deduplicated and kept until the first `null` integrity is reached, removing
all entries that were written before the `null`.

The deduplicated list of entries is both written to the index, replacing the
existing content, and returned in the Promise.
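
As a rough illustration (not from the original README), assuming `cachePath` is
defined as in the earlier examples and that entries for this key carry an
illustrative `metadata.url` field (not something cacache itself requires):

```javascript
// Keep only the newest entry per distinct metadata.url for this key.
const matchFn = (a, b) =>
  a.metadata && b.metadata && a.metadata.url === b.metadata.url

cacache.index.compact(cachePath, 'my-key', matchFn).then(entries => {
  console.log(`'my-key' now has ${entries.length} index entries`)
})
```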

#### <a name="index-insert"></a> `> cacache.index.insert(cache, key, integrity, opts) -> Promise`

Writes an index entry to the cache for the given `key` without writing content.

It is assumed that if you are using this method, you have already stored the
content some other way and you only wish to add a new index entry for that
content. The `metadata` and `size` properties are read from `opts` and used as
part of the index entry.

Returns a Promise resolving to the newly added entry.
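
A minimal sketch (not from the original README), assuming `cachePath` is defined
as in the earlier examples and `integrity` is an SRI string for content that
already exists in this cache:

```javascript
cacache.index.insert(cachePath, 'alias-key', integrity, {
  metadata: { alias: true }, // illustrative metadata, not required by cacache
  size: 4023948,
}).then(entry => {
  console.log('new index entry for', entry.key)
})
```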

#### <a name="clear-memoized"></a> `> cacache.clearMemoized()`

Completely resets the in-memory entry cache.

#### <a name="tmp-mkdir"></a> `> tmp.mkdir(cache, opts) -> Promise<Path>`

Returns a unique temporary directory inside the cache's `tmp` dir. This
directory will use the same safe user assignment that the rest of the cache
uses.

Once the directory is made, it's the user's responsibility that all files
within are given the appropriate `gid`/`uid` ownership settings to match
the rest of the cache. If not, you can ask cacache to do it for you by
calling [`tmp.fix()`](#tmp-fix), which will fix all tmp directory
permissions.

If you want automatic cleanup of this directory, use
[`tmp.withTmp()`](#with-tmp).

See: [options](#tmp-options)

##### Example

```javascript
cacache.tmp.mkdir(cache).then(dir => {
  fs.writeFile(path.join(dir, 'blablabla'), Buffer#<1234>, ...)
})
```

#### <a name="tmp-fix"></a> `> tmp.fix(cache) -> Promise`

Sets the `uid` and `gid` properties on all files and folders within the tmp
folder to match the rest of the cache.

Use this after manually writing files into [`tmp.mkdir`](#tmp-mkdir) or
[`tmp.withTmp`](#with-tmp).

##### Example

```javascript
cacache.tmp.mkdir(cache).then(dir => {
  writeFile(path.join(dir, 'file'), someData).then(() => {
    // make sure we didn't just put a root-owned file in the cache
    cacache.tmp.fix().then(() => {
      // all uids and gids match now
    })
  })
})
```

#### <a name="with-tmp"></a> `> tmp.withTmp(cache, opts, cb) -> Promise`

Creates a temporary directory with [`tmp.mkdir()`](#tmp-mkdir) and calls `cb`
with it. The created temporary directory will be automatically deleted once
the promise returned by `cb()` resolves.

The same caveats apply when it comes to managing permissions for the tmp dir's
contents.

See: [options](#tmp-options)

##### Example

```javascript
cacache.tmp.withTmp(cache, dir => {
  return fs.writeFileAsync(path.join(dir, 'blablabla'), Buffer#<1234>, ...)
}).then(() => {
  // `dir` no longer exists
})
```

##### <a name="tmp-options"></a> Options

##### `opts.tmpPrefix`

Default: null

Prefix to use for the temporary directory name inside the cache's tmp dir.

#### <a name="integrity"></a> Subresource Integrity Digests

For content verification and addressing, cacache uses strings following the
[Subresource
Integrity spec](https://developer.mozilla.org/en-US/docs/Web/Security/Subresource_Integrity).
That is, any time cacache expects an `integrity` argument or option, it
should be in the format `<hashAlgorithm>-<base64-hash>`.

One deviation from the current spec is that cacache will support any hash
algorithms supported by the underlying Node.js process. You can use
`crypto.getHashes()` to see which ones you can use.

##### Generating Digests Yourself

If you have an existing content shasum, it is generally formatted as a
hexadecimal string (that is, a sha1 would look like:
`5f5513f8822fdbe5145af33b64d8d970dcf95c6e`). In order to be compatible with
cacache, you'll need to convert this to an equivalent subresource integrity
string. For this example, the corresponding hash would be:
`sha1-X1UT+IIv2+UUWvM7ZNjZcNz5XG4=`.

If you want to generate an integrity string yourself for existing data, you can
use something like this:

```javascript
const crypto = require('crypto')
const hashAlgorithm = 'sha512'
const data = 'foobarbaz'

const integrity = (
  hashAlgorithm +
  '-' +
  crypto.createHash(hashAlgorithm).update(data).digest('base64')
)
```

You can also use [`ssri`](https://npm.im/ssri) to have a richer set of functionality
around SRI strings, including generation, parsing, and translating from existing
hex-formatted strings.
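
As a rough sketch of the hex-to-SRI conversion mentioned above (not taken from
the original README; `ssri` provides richer helpers for the same thing), using
only Node's `Buffer`:

```javascript
// Convert an existing hex-formatted sha1 digest into an SRI string.
const hexDigest = '5f5513f8822fdbe5145af33b64d8d970dcf95c6e'
const sri = 'sha1-' + Buffer.from(hexDigest, 'hex').toString('base64')
// sri === 'sha1-X1UT+IIv2+UUWvM7ZNjZcNz5XG4='
```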

#### <a name="verify"></a> `> cacache.verify(cache, opts) -> Promise`

Checks out and fixes up your cache:

* Cleans up corrupted or invalid index entries.
* Custom entry filtering options.
* Garbage collects any content entries not referenced by the index.
* Checks integrity for all content entries and removes invalid content.
* Fixes cache ownership.
* Removes the `tmp` directory in the cache and all its contents.

When it's done, it'll return an object with various stats about the verification
process, including amount of storage reclaimed, number of valid entries, number
of entries removed, etc.

##### <a name="verify-options"></a> Options

##### `opts.concurrency`

Default: 20

Number of files to read concurrently from the filesystem while doing cleanup.

##### `opts.filter`

Receives a formatted entry. Return `false` to remove it.
Note: might be called more than once on the same entry.

##### `opts.log`

Custom logger function:

```
log: { silly () {} }
log.silly('verify', 'verifying cache at', cache)
```

##### Example

```sh
echo somegarbage >> $CACHEPATH/content/deadbeef
```

```javascript
cacache.verify(cachePath).then(stats => {
  // deadbeef collected, because of invalid checksum.
  console.log('cache is much nicer now! stats:', stats)
})
```

#### <a name="verify-last-run"></a> `> cacache.verify.lastRun(cache) -> Promise`

Returns a `Date` representing the last time `cacache.verify` was run on `cache`.

##### Example

```javascript
cacache.verify(cachePath).then(() => {
  cacache.verify.lastRun(cachePath).then(lastTime => {
    console.log('cacache.verify was last called on ' + lastTime)
  })
})
```
237 node_modules/cacache/get.js (generated, vendored, new file)
@@ -0,0 +1,237 @@
|
||||
'use strict'
|
||||
|
||||
const Collect = require('minipass-collect')
|
||||
const Minipass = require('minipass')
|
||||
const Pipeline = require('minipass-pipeline')
|
||||
const fs = require('fs')
|
||||
const util = require('util')
|
||||
|
||||
const index = require('./lib/entry-index')
|
||||
const memo = require('./lib/memoization')
|
||||
const read = require('./lib/content/read')
|
||||
|
||||
const writeFile = util.promisify(fs.writeFile)
|
||||
|
||||
function getData (cache, key, opts = {}) {
|
||||
const { integrity, memoize, size } = opts
|
||||
const memoized = memo.get(cache, key, opts)
|
||||
if (memoized && memoize !== false) {
|
||||
return Promise.resolve({
|
||||
metadata: memoized.entry.metadata,
|
||||
data: memoized.data,
|
||||
integrity: memoized.entry.integrity,
|
||||
size: memoized.entry.size,
|
||||
})
|
||||
}
|
||||
|
||||
return index.find(cache, key, opts).then((entry) => {
|
||||
if (!entry)
|
||||
throw new index.NotFoundError(cache, key)
|
||||
|
||||
return read(cache, entry.integrity, { integrity, size }).then((data) => {
|
||||
if (memoize)
|
||||
memo.put(cache, entry, data, opts)
|
||||
|
||||
return {
|
||||
data,
|
||||
metadata: entry.metadata,
|
||||
size: entry.size,
|
||||
integrity: entry.integrity,
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
module.exports = getData
|
||||
|
||||
function getDataByDigest (cache, key, opts = {}) {
|
||||
const { integrity, memoize, size } = opts
|
||||
const memoized = memo.get.byDigest(cache, key, opts)
|
||||
if (memoized && memoize !== false)
|
||||
return Promise.resolve(memoized)
|
||||
|
||||
return read(cache, key, { integrity, size }).then((res) => {
|
||||
if (memoize)
|
||||
memo.put.byDigest(cache, key, res, opts)
|
||||
return res
|
||||
})
|
||||
}
|
||||
module.exports.byDigest = getDataByDigest
|
||||
|
||||
function getDataSync (cache, key, opts = {}) {
|
||||
const { integrity, memoize, size } = opts
|
||||
const memoized = memo.get(cache, key, opts)
|
||||
|
||||
if (memoized && memoize !== false) {
|
||||
return {
|
||||
metadata: memoized.entry.metadata,
|
||||
data: memoized.data,
|
||||
integrity: memoized.entry.integrity,
|
||||
size: memoized.entry.size,
|
||||
}
|
||||
}
|
||||
const entry = index.find.sync(cache, key, opts)
|
||||
if (!entry)
|
||||
throw new index.NotFoundError(cache, key)
|
||||
const data = read.sync(cache, entry.integrity, {
|
||||
integrity: integrity,
|
||||
size: size,
|
||||
})
|
||||
const res = {
|
||||
metadata: entry.metadata,
|
||||
data: data,
|
||||
size: entry.size,
|
||||
integrity: entry.integrity,
|
||||
}
|
||||
if (memoize)
|
||||
memo.put(cache, entry, res.data, opts)
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
module.exports.sync = getDataSync
|
||||
|
||||
function getDataByDigestSync (cache, digest, opts = {}) {
|
||||
const { integrity, memoize, size } = opts
|
||||
const memoized = memo.get.byDigest(cache, digest, opts)
|
||||
|
||||
if (memoized && memoize !== false)
|
||||
return memoized
|
||||
|
||||
const res = read.sync(cache, digest, {
|
||||
integrity: integrity,
|
||||
size: size,
|
||||
})
|
||||
if (memoize)
|
||||
memo.put.byDigest(cache, digest, res, opts)
|
||||
|
||||
return res
|
||||
}
|
||||
module.exports.sync.byDigest = getDataByDigestSync
|
||||
|
||||
const getMemoizedStream = (memoized) => {
|
||||
const stream = new Minipass()
|
||||
stream.on('newListener', function (ev, cb) {
|
||||
ev === 'metadata' && cb(memoized.entry.metadata)
|
||||
ev === 'integrity' && cb(memoized.entry.integrity)
|
||||
ev === 'size' && cb(memoized.entry.size)
|
||||
})
|
||||
stream.end(memoized.data)
|
||||
return stream
|
||||
}
|
||||
|
||||
function getStream (cache, key, opts = {}) {
|
||||
const { memoize, size } = opts
|
||||
const memoized = memo.get(cache, key, opts)
|
||||
if (memoized && memoize !== false)
|
||||
return getMemoizedStream(memoized)
|
||||
|
||||
const stream = new Pipeline()
|
||||
index
|
||||
.find(cache, key)
|
||||
.then((entry) => {
|
||||
if (!entry)
|
||||
throw new index.NotFoundError(cache, key)
|
||||
|
||||
stream.emit('metadata', entry.metadata)
|
||||
stream.emit('integrity', entry.integrity)
|
||||
stream.emit('size', entry.size)
|
||||
stream.on('newListener', function (ev, cb) {
|
||||
ev === 'metadata' && cb(entry.metadata)
|
||||
ev === 'integrity' && cb(entry.integrity)
|
||||
ev === 'size' && cb(entry.size)
|
||||
})
|
||||
|
||||
const src = read.readStream(
|
||||
cache,
|
||||
entry.integrity,
|
||||
{ ...opts, size: typeof size !== 'number' ? entry.size : size }
|
||||
)
|
||||
|
||||
if (memoize) {
|
||||
const memoStream = new Collect.PassThrough()
|
||||
memoStream.on('collect', data => memo.put(cache, entry, data, opts))
|
||||
stream.unshift(memoStream)
|
||||
}
|
||||
stream.unshift(src)
|
||||
})
|
||||
.catch((err) => stream.emit('error', err))
|
||||
|
||||
return stream
|
||||
}
|
||||
|
||||
module.exports.stream = getStream
|
||||
|
||||
function getStreamDigest (cache, integrity, opts = {}) {
|
||||
const { memoize } = opts
|
||||
const memoized = memo.get.byDigest(cache, integrity, opts)
|
||||
if (memoized && memoize !== false) {
|
||||
const stream = new Minipass()
|
||||
stream.end(memoized)
|
||||
return stream
|
||||
} else {
|
||||
const stream = read.readStream(cache, integrity, opts)
|
||||
if (!memoize)
|
||||
return stream
|
||||
|
||||
const memoStream = new Collect.PassThrough()
|
||||
memoStream.on('collect', data => memo.put.byDigest(
|
||||
cache,
|
||||
integrity,
|
||||
data,
|
||||
opts
|
||||
))
|
||||
return new Pipeline(stream, memoStream)
|
||||
}
|
||||
}
|
||||
|
||||
module.exports.stream.byDigest = getStreamDigest
|
||||
|
||||
function info (cache, key, opts = {}) {
|
||||
const { memoize } = opts
|
||||
const memoized = memo.get(cache, key, opts)
|
||||
if (memoized && memoize !== false)
|
||||
return Promise.resolve(memoized.entry)
|
||||
else
|
||||
return index.find(cache, key)
|
||||
}
|
||||
module.exports.info = info
|
||||
|
||||
function copy (cache, key, dest, opts = {}) {
|
||||
if (read.copy) {
|
||||
return index.find(cache, key, opts).then((entry) => {
|
||||
if (!entry)
|
||||
throw new index.NotFoundError(cache, key)
|
||||
return read.copy(cache, entry.integrity, dest, opts)
|
||||
.then(() => {
|
||||
return {
|
||||
metadata: entry.metadata,
|
||||
size: entry.size,
|
||||
integrity: entry.integrity,
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
return getData(cache, key, opts).then((res) => {
|
||||
return writeFile(dest, res.data).then(() => {
|
||||
return {
|
||||
metadata: res.metadata,
|
||||
size: res.size,
|
||||
integrity: res.integrity,
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
module.exports.copy = copy
|
||||
|
||||
function copyByDigest (cache, key, dest, opts = {}) {
|
||||
if (read.copy)
|
||||
return read.copy(cache, key, dest, opts).then(() => key)
|
||||
|
||||
return getDataByDigest(cache, key, opts).then((res) => {
|
||||
return writeFile(dest, res).then(() => key)
|
||||
})
|
||||
}
|
||||
module.exports.copy.byDigest = copyByDigest
|
||||
|
||||
module.exports.hasContent = read.hasContent
|
||||
46 node_modules/cacache/index.js (generated, vendored, new file)
@@ -0,0 +1,46 @@
'use strict'

const ls = require('./ls.js')
const get = require('./get.js')
const put = require('./put.js')
const rm = require('./rm.js')
const verify = require('./verify.js')
const { clearMemoized } = require('./lib/memoization.js')
const tmp = require('./lib/util/tmp.js')
const index = require('./lib/entry-index.js')

module.exports.index = {}
module.exports.index.compact = index.compact
module.exports.index.insert = index.insert

module.exports.ls = ls
module.exports.ls.stream = ls.stream

module.exports.get = get
module.exports.get.byDigest = get.byDigest
module.exports.get.sync = get.sync
module.exports.get.sync.byDigest = get.sync.byDigest
module.exports.get.stream = get.stream
module.exports.get.stream.byDigest = get.stream.byDigest
module.exports.get.copy = get.copy
module.exports.get.copy.byDigest = get.copy.byDigest
module.exports.get.info = get.info
module.exports.get.hasContent = get.hasContent
module.exports.get.hasContent.sync = get.hasContent.sync

module.exports.put = put
module.exports.put.stream = put.stream

module.exports.rm = rm.entry
module.exports.rm.all = rm.all
module.exports.rm.entry = module.exports.rm
module.exports.rm.content = rm.content

module.exports.clearMemoized = clearMemoized

module.exports.tmp = {}
module.exports.tmp.mkdir = tmp.mkdir
module.exports.tmp.withTmp = tmp.withTmp

module.exports.verify = verify
module.exports.verify.lastRun = verify.lastRun
29 node_modules/cacache/lib/content/path.js (generated, vendored, new file)
@@ -0,0 +1,29 @@
'use strict'

const contentVer = require('../../package.json')['cache-version'].content
const hashToSegments = require('../util/hash-to-segments')
const path = require('path')
const ssri = require('ssri')

// Current format of content file path:
//
// sha512-BaSE64Hex= ->
// ~/.my-cache/content-v2/sha512/ba/da/55deadbeefc0ffee
//
module.exports = contentPath

function contentPath (cache, integrity) {
  const sri = ssri.parse(integrity, { single: true })
  // contentPath is the *strongest* algo given
  return path.join(
    contentDir(cache),
    sri.algorithm,
    ...hashToSegments(sri.hexDigest())
  )
}

module.exports.contentDir = contentDir

function contentDir (cache) {
  return path.join(cache, `content-v${contentVer}`)
}
244 node_modules/cacache/lib/content/read.js (generated, vendored, new file)
@@ -0,0 +1,244 @@
|
||||
'use strict'
|
||||
|
||||
const util = require('util')
|
||||
|
||||
const fs = require('fs')
|
||||
const fsm = require('fs-minipass')
|
||||
const ssri = require('ssri')
|
||||
const contentPath = require('./path')
|
||||
const Pipeline = require('minipass-pipeline')
|
||||
|
||||
const lstat = util.promisify(fs.lstat)
|
||||
const readFile = util.promisify(fs.readFile)
|
||||
|
||||
module.exports = read
|
||||
|
||||
const MAX_SINGLE_READ_SIZE = 64 * 1024 * 1024
|
||||
function read (cache, integrity, opts = {}) {
|
||||
const { size } = opts
|
||||
return withContentSri(cache, integrity, (cpath, sri) => {
|
||||
// get size
|
||||
return lstat(cpath).then(stat => ({ stat, cpath, sri }))
|
||||
}).then(({ stat, cpath, sri }) => {
|
||||
if (typeof size === 'number' && stat.size !== size)
|
||||
throw sizeError(size, stat.size)
|
||||
|
||||
if (stat.size > MAX_SINGLE_READ_SIZE)
|
||||
return readPipeline(cpath, stat.size, sri, new Pipeline()).concat()
|
||||
|
||||
return readFile(cpath, null).then((data) => {
|
||||
if (!ssri.checkData(data, sri))
|
||||
throw integrityError(sri, cpath)
|
||||
|
||||
return data
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
const readPipeline = (cpath, size, sri, stream) => {
|
||||
stream.push(
|
||||
new fsm.ReadStream(cpath, {
|
||||
size,
|
||||
readSize: MAX_SINGLE_READ_SIZE,
|
||||
}),
|
||||
ssri.integrityStream({
|
||||
integrity: sri,
|
||||
size,
|
||||
})
|
||||
)
|
||||
return stream
|
||||
}
|
||||
|
||||
module.exports.sync = readSync
|
||||
|
||||
function readSync (cache, integrity, opts = {}) {
|
||||
const { size } = opts
|
||||
return withContentSriSync(cache, integrity, (cpath, sri) => {
|
||||
const data = fs.readFileSync(cpath)
|
||||
if (typeof size === 'number' && size !== data.length)
|
||||
throw sizeError(size, data.length)
|
||||
|
||||
if (ssri.checkData(data, sri))
|
||||
return data
|
||||
|
||||
throw integrityError(sri, cpath)
|
||||
})
|
||||
}
|
||||
|
||||
module.exports.stream = readStream
|
||||
module.exports.readStream = readStream
|
||||
|
||||
function readStream (cache, integrity, opts = {}) {
|
||||
const { size } = opts
|
||||
const stream = new Pipeline()
|
||||
withContentSri(cache, integrity, (cpath, sri) => {
|
||||
// just lstat to ensure it exists
|
||||
return lstat(cpath).then((stat) => ({ stat, cpath, sri }))
|
||||
}).then(({ stat, cpath, sri }) => {
|
||||
if (typeof size === 'number' && size !== stat.size)
|
||||
return stream.emit('error', sizeError(size, stat.size))
|
||||
|
||||
readPipeline(cpath, stat.size, sri, stream)
|
||||
}, er => stream.emit('error', er))
|
||||
|
||||
return stream
|
||||
}
|
||||
|
||||
let copyFile
|
||||
if (fs.copyFile) {
|
||||
module.exports.copy = copy
|
||||
module.exports.copy.sync = copySync
|
||||
copyFile = util.promisify(fs.copyFile)
|
||||
}
|
||||
|
||||
function copy (cache, integrity, dest) {
|
||||
return withContentSri(cache, integrity, (cpath, sri) => {
|
||||
return copyFile(cpath, dest)
|
||||
})
|
||||
}
|
||||
|
||||
function copySync (cache, integrity, dest) {
|
||||
return withContentSriSync(cache, integrity, (cpath, sri) => {
|
||||
return fs.copyFileSync(cpath, dest)
|
||||
})
|
||||
}
|
||||
|
||||
module.exports.hasContent = hasContent
|
||||
|
||||
function hasContent (cache, integrity) {
|
||||
if (!integrity)
|
||||
return Promise.resolve(false)
|
||||
|
||||
return withContentSri(cache, integrity, (cpath, sri) => {
|
||||
return lstat(cpath).then((stat) => ({ size: stat.size, sri, stat }))
|
||||
}).catch((err) => {
|
||||
if (err.code === 'ENOENT')
|
||||
return false
|
||||
|
||||
if (err.code === 'EPERM') {
|
||||
/* istanbul ignore else */
|
||||
if (process.platform !== 'win32')
|
||||
throw err
|
||||
else
|
||||
return false
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
module.exports.hasContent.sync = hasContentSync
|
||||
|
||||
function hasContentSync (cache, integrity) {
|
||||
if (!integrity)
|
||||
return false
|
||||
|
||||
return withContentSriSync(cache, integrity, (cpath, sri) => {
|
||||
try {
|
||||
const stat = fs.lstatSync(cpath)
|
||||
return { size: stat.size, sri, stat }
|
||||
} catch (err) {
|
||||
if (err.code === 'ENOENT')
|
||||
return false
|
||||
|
||||
if (err.code === 'EPERM') {
|
||||
/* istanbul ignore else */
|
||||
if (process.platform !== 'win32')
|
||||
throw err
|
||||
else
|
||||
return false
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
function withContentSri (cache, integrity, fn) {
|
||||
const tryFn = () => {
|
||||
const sri = ssri.parse(integrity)
|
||||
// If `integrity` has multiple entries, pick the first digest
|
||||
// with available local data.
|
||||
const algo = sri.pickAlgorithm()
|
||||
const digests = sri[algo]
|
||||
|
||||
if (digests.length <= 1) {
|
||||
const cpath = contentPath(cache, digests[0])
|
||||
return fn(cpath, digests[0])
|
||||
} else {
|
||||
// Can't use race here because a generic error can happen before
|
||||
// a ENOENT error, and can happen before a valid result
|
||||
return Promise
|
||||
.all(digests.map((meta) => {
|
||||
return withContentSri(cache, meta, fn)
|
||||
.catch((err) => {
|
||||
if (err.code === 'ENOENT') {
|
||||
return Object.assign(
|
||||
new Error('No matching content found for ' + sri.toString()),
|
||||
{ code: 'ENOENT' }
|
||||
)
|
||||
}
|
||||
return err
|
||||
})
|
||||
}))
|
||||
.then((results) => {
|
||||
// Return the first non error if it is found
|
||||
const result = results.find((r) => !(r instanceof Error))
|
||||
if (result)
|
||||
return result
|
||||
|
||||
// Throw the No matching content found error
|
||||
const enoentError = results.find((r) => r.code === 'ENOENT')
|
||||
if (enoentError)
|
||||
throw enoentError
|
||||
|
||||
// Throw generic error
|
||||
throw results.find((r) => r instanceof Error)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
try {
|
||||
tryFn()
|
||||
.then(resolve)
|
||||
.catch(reject)
|
||||
} catch (err) {
|
||||
reject(err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
function withContentSriSync (cache, integrity, fn) {
|
||||
const sri = ssri.parse(integrity)
|
||||
// If `integrity` has multiple entries, pick the first digest
|
||||
// with available local data.
|
||||
const algo = sri.pickAlgorithm()
|
||||
const digests = sri[algo]
|
||||
if (digests.length <= 1) {
|
||||
const cpath = contentPath(cache, digests[0])
|
||||
return fn(cpath, digests[0])
|
||||
} else {
|
||||
let lastErr = null
|
||||
for (const meta of digests) {
|
||||
try {
|
||||
return withContentSriSync(cache, meta, fn)
|
||||
} catch (err) {
|
||||
lastErr = err
|
||||
}
|
||||
}
|
||||
throw lastErr
|
||||
}
|
||||
}
|
||||
|
||||
function sizeError (expected, found) {
|
||||
const err = new Error(`Bad data size: expected inserted data to be ${expected} bytes, but got ${found} instead`)
|
||||
err.expected = expected
|
||||
err.found = found
|
||||
err.code = 'EBADSIZE'
|
||||
return err
|
||||
}
|
||||
|
||||
function integrityError (sri, path) {
|
||||
const err = new Error(`Integrity verification failed for ${sri} (${path})`)
|
||||
err.code = 'EINTEGRITY'
|
||||
err.sri = sri
|
||||
err.path = path
|
||||
return err
|
||||
}
|
||||
19 node_modules/cacache/lib/content/rm.js (generated, vendored, new file)
@@ -0,0 +1,19 @@
'use strict'

const util = require('util')

const contentPath = require('./path')
const { hasContent } = require('./read')
const rimraf = util.promisify(require('rimraf'))

module.exports = rm

function rm (cache, integrity) {
  return hasContent(cache, integrity).then((content) => {
    // ~pretty~ sure we can't end up with a content lacking sri, but be safe
    if (content && content.sri)
      return rimraf(contentPath(cache, content.sri)).then(() => true)
    else
      return false
  })
}
189 node_modules/cacache/lib/content/write.js (generated, vendored, new file)
@@ -0,0 +1,189 @@
|
||||
'use strict'
|
||||
|
||||
const util = require('util')
|
||||
|
||||
const contentPath = require('./path')
|
||||
const fixOwner = require('../util/fix-owner')
|
||||
const fs = require('fs')
|
||||
const moveFile = require('../util/move-file')
|
||||
const Minipass = require('minipass')
|
||||
const Pipeline = require('minipass-pipeline')
|
||||
const Flush = require('minipass-flush')
|
||||
const path = require('path')
|
||||
const rimraf = util.promisify(require('rimraf'))
|
||||
const ssri = require('ssri')
|
||||
const uniqueFilename = require('unique-filename')
|
||||
const { disposer } = require('./../util/disposer')
|
||||
const fsm = require('fs-minipass')
|
||||
|
||||
const writeFile = util.promisify(fs.writeFile)
|
||||
|
||||
module.exports = write
|
||||
|
||||
function write (cache, data, opts = {}) {
|
||||
const { algorithms, size, integrity } = opts
|
||||
if (algorithms && algorithms.length > 1)
|
||||
throw new Error('opts.algorithms only supports a single algorithm for now')
|
||||
|
||||
if (typeof size === 'number' && data.length !== size)
|
||||
return Promise.reject(sizeError(size, data.length))
|
||||
|
||||
const sri = ssri.fromData(data, algorithms ? { algorithms } : {})
|
||||
if (integrity && !ssri.checkData(data, integrity, opts))
|
||||
return Promise.reject(checksumError(integrity, sri))
|
||||
|
||||
return disposer(makeTmp(cache, opts), makeTmpDisposer,
|
||||
(tmp) => {
|
||||
return writeFile(tmp.target, data, { flag: 'wx' })
|
||||
.then(() => moveToDestination(tmp, cache, sri, opts))
|
||||
})
|
||||
.then(() => ({ integrity: sri, size: data.length }))
|
||||
}
|
||||
|
||||
module.exports.stream = writeStream
|
||||
|
||||
// writes proxied to the 'inputStream' that is passed to the Promise
|
||||
// 'end' is deferred until content is handled.
|
||||
class CacacheWriteStream extends Flush {
|
||||
constructor (cache, opts) {
|
||||
super()
|
||||
this.opts = opts
|
||||
this.cache = cache
|
||||
this.inputStream = new Minipass()
|
||||
this.inputStream.on('error', er => this.emit('error', er))
|
||||
this.inputStream.on('drain', () => this.emit('drain'))
|
||||
this.handleContentP = null
|
||||
}
|
||||
|
||||
write (chunk, encoding, cb) {
|
||||
if (!this.handleContentP) {
|
||||
this.handleContentP = handleContent(
|
||||
this.inputStream,
|
||||
this.cache,
|
||||
this.opts
|
||||
)
|
||||
}
|
||||
return this.inputStream.write(chunk, encoding, cb)
|
||||
}
|
||||
|
||||
flush (cb) {
|
||||
this.inputStream.end(() => {
|
||||
if (!this.handleContentP) {
|
||||
const e = new Error('Cache input stream was empty')
|
||||
e.code = 'ENODATA'
|
||||
// empty streams are probably emitting end right away.
|
||||
// defer this one tick by rejecting a promise on it.
|
||||
return Promise.reject(e).catch(cb)
|
||||
}
|
||||
this.handleContentP.then(
|
||||
(res) => {
|
||||
res.integrity && this.emit('integrity', res.integrity)
|
||||
res.size !== null && this.emit('size', res.size)
|
||||
cb()
|
||||
},
|
||||
(er) => cb(er)
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
function writeStream (cache, opts = {}) {
|
||||
return new CacacheWriteStream(cache, opts)
|
||||
}
|
||||
|
||||
function handleContent (inputStream, cache, opts) {
|
||||
return disposer(makeTmp(cache, opts), makeTmpDisposer, (tmp) => {
|
||||
return pipeToTmp(inputStream, cache, tmp.target, opts)
|
||||
.then((res) => {
|
||||
return moveToDestination(
|
||||
tmp,
|
||||
cache,
|
||||
res.integrity,
|
||||
opts
|
||||
).then(() => res)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
function pipeToTmp (inputStream, cache, tmpTarget, opts) {
|
||||
let integrity
|
||||
let size
|
||||
const hashStream = ssri.integrityStream({
|
||||
integrity: opts.integrity,
|
||||
algorithms: opts.algorithms,
|
||||
size: opts.size,
|
||||
})
|
||||
hashStream.on('integrity', i => {
|
||||
integrity = i
|
||||
})
|
||||
hashStream.on('size', s => {
|
||||
size = s
|
||||
})
|
||||
|
||||
const outStream = new fsm.WriteStream(tmpTarget, {
|
||||
flags: 'wx',
|
||||
})
|
||||
|
||||
// NB: this can throw if the hashStream has a problem with
|
||||
// it, and the data is fully written. but pipeToTmp is only
|
||||
// called in promisory contexts where that is handled.
|
||||
const pipeline = new Pipeline(
|
||||
inputStream,
|
||||
hashStream,
|
||||
outStream
|
||||
)
|
||||
|
||||
return pipeline.promise()
|
||||
.then(() => ({ integrity, size }))
|
||||
.catch(er => rimraf(tmpTarget).then(() => {
|
||||
throw er
|
||||
}))
|
||||
}
|
||||
|
||||
function makeTmp (cache, opts) {
|
||||
const tmpTarget = uniqueFilename(path.join(cache, 'tmp'), opts.tmpPrefix)
|
||||
return fixOwner.mkdirfix(cache, path.dirname(tmpTarget)).then(() => ({
|
||||
target: tmpTarget,
|
||||
moved: false,
|
||||
}))
|
||||
}
|
||||
|
||||
function makeTmpDisposer (tmp) {
|
||||
if (tmp.moved)
|
||||
return Promise.resolve()
|
||||
|
||||
return rimraf(tmp.target)
|
||||
}
|
||||
|
||||
function moveToDestination (tmp, cache, sri, opts) {
|
||||
const destination = contentPath(cache, sri)
|
||||
const destDir = path.dirname(destination)
|
||||
|
||||
return fixOwner
|
||||
.mkdirfix(cache, destDir)
|
||||
.then(() => {
|
||||
return moveFile(tmp.target, destination)
|
||||
})
|
||||
.then(() => {
|
||||
tmp.moved = true
|
||||
return fixOwner.chownr(cache, destination)
|
||||
})
|
||||
}
|
||||
|
||||
function sizeError (expected, found) {
|
||||
const err = new Error(`Bad data size: expected inserted data to be ${expected} bytes, but got ${found} instead`)
|
||||
err.expected = expected
|
||||
err.found = found
|
||||
err.code = 'EBADSIZE'
|
||||
return err
|
||||
}
|
||||
|
||||
function checksumError (expected, found) {
|
||||
const err = new Error(`Integrity check failed:
|
||||
Wanted: ${expected}
|
||||
Found: ${found}`)
|
||||
err.code = 'EINTEGRITY'
|
||||
err.expected = expected
|
||||
err.found = found
|
||||
return err
|
||||
}
|
||||
394 node_modules/cacache/lib/entry-index.js (generated, vendored, new file)
@@ -0,0 +1,394 @@
|
||||
'use strict'
|
||||
|
||||
const util = require('util')
|
||||
const crypto = require('crypto')
|
||||
const fs = require('fs')
|
||||
const Minipass = require('minipass')
|
||||
const path = require('path')
|
||||
const ssri = require('ssri')
|
||||
const uniqueFilename = require('unique-filename')
|
||||
|
||||
const { disposer } = require('./util/disposer')
|
||||
const contentPath = require('./content/path')
|
||||
const fixOwner = require('./util/fix-owner')
|
||||
const hashToSegments = require('./util/hash-to-segments')
|
||||
const indexV = require('../package.json')['cache-version'].index
|
||||
const moveFile = require('@npmcli/move-file')
|
||||
const _rimraf = require('rimraf')
|
||||
const rimraf = util.promisify(_rimraf)
|
||||
rimraf.sync = _rimraf.sync
|
||||
|
||||
const appendFile = util.promisify(fs.appendFile)
|
||||
const readFile = util.promisify(fs.readFile)
|
||||
const readdir = util.promisify(fs.readdir)
|
||||
const writeFile = util.promisify(fs.writeFile)
|
||||
|
||||
module.exports.NotFoundError = class NotFoundError extends Error {
|
||||
constructor (cache, key) {
|
||||
super(`No cache entry for ${key} found in ${cache}`)
|
||||
this.code = 'ENOENT'
|
||||
this.cache = cache
|
||||
this.key = key
|
||||
}
|
||||
}
|
||||
|
||||
module.exports.compact = compact
|
||||
|
||||
async function compact (cache, key, matchFn, opts = {}) {
|
||||
const bucket = bucketPath(cache, key)
|
||||
const entries = await bucketEntries(bucket)
|
||||
const newEntries = []
|
||||
// we loop backwards because the bottom-most result is the newest
|
||||
// since we add new entries with appendFile
|
||||
for (let i = entries.length - 1; i >= 0; --i) {
|
||||
const entry = entries[i]
|
||||
// a null integrity could mean either a delete was appended
|
||||
// or the user has simply stored an index that does not map
|
||||
// to any content. we determine if the user wants to keep the
|
||||
// null integrity based on the validateEntry function passed in options.
|
||||
// if the integrity is null and no validateEntry is provided, we break
|
||||
// as we consider the null integrity to be a deletion of everything
|
||||
// that came before it.
|
||||
if (entry.integrity === null && !opts.validateEntry)
|
||||
break
|
||||
|
||||
// if this entry is valid, and it is either the first entry or
|
||||
// the newEntries array doesn't already include an entry that
|
||||
// matches this one based on the provided matchFn, then we add
|
||||
// it to the beginning of our list
|
||||
if ((!opts.validateEntry || opts.validateEntry(entry) === true) &&
|
||||
(newEntries.length === 0 ||
|
||||
!newEntries.find((oldEntry) => matchFn(oldEntry, entry))))
|
||||
newEntries.unshift(entry)
|
||||
}
|
||||
|
||||
const newIndex = '\n' + newEntries.map((entry) => {
|
||||
const stringified = JSON.stringify(entry)
|
||||
const hash = hashEntry(stringified)
|
||||
return `${hash}\t${stringified}`
|
||||
}).join('\n')
|
||||
|
||||
const setup = async () => {
|
||||
const target = uniqueFilename(path.join(cache, 'tmp'), opts.tmpPrefix)
|
||||
await fixOwner.mkdirfix(cache, path.dirname(target))
|
||||
return {
|
||||
target,
|
||||
moved: false,
|
||||
}
|
||||
}
|
||||
|
||||
const teardown = async (tmp) => {
|
||||
if (!tmp.moved)
|
||||
return rimraf(tmp.target)
|
||||
}
|
||||
|
||||
const write = async (tmp) => {
|
||||
await writeFile(tmp.target, newIndex, { flag: 'wx' })
|
||||
await fixOwner.mkdirfix(cache, path.dirname(bucket))
|
||||
// we use @npmcli/move-file directly here because we
|
||||
// want to overwrite the existing file
|
||||
await moveFile(tmp.target, bucket)
|
||||
tmp.moved = true
|
||||
try {
|
||||
await fixOwner.chownr(cache, bucket)
|
||||
} catch (err) {
|
||||
if (err.code !== 'ENOENT')
|
||||
throw err
|
||||
}
|
||||
}
|
||||
|
||||
// write the file atomically
|
||||
await disposer(setup(), teardown, write)
|
||||
|
||||
// we reverse the list we generated such that the newest
|
||||
// entries come first in order to make looping through them easier
|
||||
// the true passed to formatEntry tells it to keep null
|
||||
// integrity values, if they made it this far it's because
|
||||
// validateEntry returned true, and as such we should return it
|
||||
return newEntries.reverse().map((entry) => formatEntry(cache, entry, true))
|
||||
}
|
||||
|
||||
module.exports.insert = insert
|
||||
|
||||
function insert (cache, key, integrity, opts = {}) {
|
||||
const { metadata, size } = opts
|
||||
const bucket = bucketPath(cache, key)
|
||||
const entry = {
|
||||
key,
|
||||
integrity: integrity && ssri.stringify(integrity),
|
||||
time: Date.now(),
|
||||
size,
|
||||
metadata,
|
||||
}
|
||||
return fixOwner
|
||||
.mkdirfix(cache, path.dirname(bucket))
|
||||
.then(() => {
|
||||
const stringified = JSON.stringify(entry)
|
||||
// NOTE - Cleverness ahoy!
|
||||
//
|
||||
// This works because it's tremendously unlikely for an entry to corrupt
|
||||
// another while still preserving the string length of the JSON in
|
||||
// question. So, we just slap the length in there and verify it on read.
|
||||
//
|
||||
// Thanks to @isaacs for the whiteboarding session that ended up with
|
||||
// this.
|
||||
return appendFile(bucket, `\n${hashEntry(stringified)}\t${stringified}`)
|
||||
})
|
||||
.then(() => fixOwner.chownr(cache, bucket))
|
||||
.catch((err) => {
|
||||
if (err.code === 'ENOENT')
|
||||
return undefined
|
||||
|
||||
throw err
|
||||
// There's a class of race conditions that happen when things get deleted
|
||||
// during fixOwner, or between the two mkdirfix/chownr calls.
|
||||
//
|
||||
// It's perfectly fine to just not bother in those cases and lie
|
||||
// that the index entry was written. Because it's a cache.
|
||||
})
|
||||
.then(() => {
|
||||
return formatEntry(cache, entry)
|
||||
})
|
||||
}
|
||||
|
||||
module.exports.insert.sync = insertSync
|
||||
|
||||
function insertSync (cache, key, integrity, opts = {}) {
|
||||
const { metadata, size } = opts
|
||||
const bucket = bucketPath(cache, key)
|
||||
const entry = {
|
||||
key,
|
||||
integrity: integrity && ssri.stringify(integrity),
|
||||
time: Date.now(),
|
||||
size,
|
||||
metadata,
|
||||
}
|
||||
fixOwner.mkdirfix.sync(cache, path.dirname(bucket))
|
||||
const stringified = JSON.stringify(entry)
|
||||
fs.appendFileSync(bucket, `\n${hashEntry(stringified)}\t${stringified}`)
|
||||
try {
|
||||
fixOwner.chownr.sync(cache, bucket)
|
||||
} catch (err) {
|
||||
if (err.code !== 'ENOENT')
|
||||
throw err
|
||||
}
|
||||
return formatEntry(cache, entry)
|
||||
}
|
||||
|
||||
module.exports.find = find
|
||||
|
||||
function find (cache, key) {
|
||||
const bucket = bucketPath(cache, key)
|
||||
return bucketEntries(bucket)
|
||||
.then((entries) => {
|
||||
return entries.reduce((latest, next) => {
|
||||
if (next && next.key === key)
|
||||
return formatEntry(cache, next)
|
||||
else
|
||||
return latest
|
||||
}, null)
|
||||
})
|
||||
.catch((err) => {
|
||||
if (err.code === 'ENOENT')
|
||||
return null
|
||||
else
|
||||
throw err
|
||||
})
|
||||
}
|
||||
|
||||
module.exports.find.sync = findSync
|
||||
|
||||
function findSync (cache, key) {
|
||||
const bucket = bucketPath(cache, key)
|
||||
try {
|
||||
return bucketEntriesSync(bucket).reduce((latest, next) => {
|
||||
if (next && next.key === key)
|
||||
return formatEntry(cache, next)
|
||||
else
|
||||
return latest
|
||||
}, null)
|
||||
} catch (err) {
|
||||
if (err.code === 'ENOENT')
|
||||
return null
|
||||
else
|
||||
throw err
|
||||
}
|
||||
}
|
||||
|
||||
module.exports.delete = del
|
||||
|
||||
function del (cache, key, opts = {}) {
|
||||
if (!opts.removeFully)
|
||||
return insert(cache, key, null, opts)
|
||||
|
||||
const bucket = bucketPath(cache, key)
|
||||
return rimraf(bucket)
|
||||
}
|
||||
|
||||
module.exports.delete.sync = delSync
|
||||
|
||||
function delSync (cache, key, opts = {}) {
|
||||
if (!opts.removeFully)
|
||||
return insertSync(cache, key, null, opts)
|
||||
|
||||
const bucket = bucketPath(cache, key)
|
||||
return rimraf.sync(bucket)
|
||||
}
|
||||
|
||||
module.exports.lsStream = lsStream
|
||||
|
||||
function lsStream (cache) {
|
||||
const indexDir = bucketDir(cache)
|
||||
const stream = new Minipass({ objectMode: true })
|
||||
|
||||
readdirOrEmpty(indexDir).then(buckets => Promise.all(
|
||||
buckets.map(bucket => {
|
||||
const bucketPath = path.join(indexDir, bucket)
|
||||
return readdirOrEmpty(bucketPath).then(subbuckets => Promise.all(
|
||||
subbuckets.map(subbucket => {
|
||||
const subbucketPath = path.join(bucketPath, subbucket)
|
||||
|
||||
// "/cachename/<bucket 0xFF>/<bucket 0xFF>./*"
|
||||
return readdirOrEmpty(subbucketPath).then(entries => Promise.all(
|
||||
entries.map(entry => {
|
||||
const entryPath = path.join(subbucketPath, entry)
|
||||
return bucketEntries(entryPath).then(entries =>
|
||||
// using a Map here prevents duplicate keys from
|
||||
// showing up twice, I guess?
|
||||
entries.reduce((acc, entry) => {
|
||||
acc.set(entry.key, entry)
|
||||
return acc
|
||||
}, new Map())
|
||||
).then(reduced => {
|
||||
// reduced is a map of key => entry
|
||||
for (const entry of reduced.values()) {
|
||||
const formatted = formatEntry(cache, entry)
|
||||
if (formatted)
|
||||
stream.write(formatted)
|
||||
}
|
||||
}).catch(err => {
|
||||
if (err.code === 'ENOENT')
|
||||
return undefined
|
||||
throw err
|
||||
})
|
||||
})
|
||||
))
|
||||
})
|
||||
))
|
||||
})
|
||||
))
|
||||
.then(
|
||||
() => stream.end(),
|
||||
err => stream.emit('error', err)
|
||||
)
|
||||
|
||||
return stream
|
||||
}
|
||||
|
||||
module.exports.ls = ls
|
||||
|
||||
function ls (cache) {
|
||||
return lsStream(cache).collect().then(entries =>
|
||||
entries.reduce((acc, xs) => {
|
||||
acc[xs.key] = xs
|
||||
return acc
|
||||
}, {})
|
||||
)
|
||||
}
|
||||
|
||||
module.exports.bucketEntries = bucketEntries
|
||||
|
||||
function bucketEntries (bucket, filter) {
|
||||
return readFile(bucket, 'utf8').then((data) => _bucketEntries(data, filter))
|
||||
}
|
||||
|
||||
module.exports.bucketEntries.sync = bucketEntriesSync
|
||||
|
||||
function bucketEntriesSync (bucket, filter) {
|
||||
const data = fs.readFileSync(bucket, 'utf8')
|
||||
return _bucketEntries(data, filter)
|
||||
}
|
||||
|
||||
function _bucketEntries (data, filter) {
|
||||
const entries = []
|
||||
data.split('\n').forEach((entry) => {
|
||||
if (!entry)
|
||||
return
|
||||
|
||||
const pieces = entry.split('\t')
|
||||
if (!pieces[1] || hashEntry(pieces[1]) !== pieces[0]) {
|
||||
// Hash is no good! Corruption or malice? Doesn't matter!
|
||||
// EJECT EJECT
|
||||
return
|
||||
}
|
||||
let obj
|
||||
try {
|
||||
obj = JSON.parse(pieces[1])
|
||||
} catch (e) {
|
||||
// Entry is corrupted!
|
||||
return
|
||||
}
|
||||
if (obj)
|
||||
entries.push(obj)
|
||||
})
|
||||
return entries
|
||||
}
|
||||
|
||||
module.exports.bucketDir = bucketDir
|
||||
|
||||
function bucketDir (cache) {
|
||||
return path.join(cache, `index-v${indexV}`)
|
||||
}
|
||||
|
||||
module.exports.bucketPath = bucketPath
|
||||
|
||||
function bucketPath (cache, key) {
|
||||
const hashed = hashKey(key)
|
||||
return path.join.apply(
|
||||
path,
|
||||
[bucketDir(cache)].concat(hashToSegments(hashed))
|
||||
)
|
||||
}
|
||||
|
||||
module.exports.hashKey = hashKey
|
||||
|
||||
function hashKey (key) {
|
||||
return hash(key, 'sha256')
|
||||
}
|
||||
|
||||
module.exports.hashEntry = hashEntry
|
||||
|
||||
function hashEntry (str) {
|
||||
return hash(str, 'sha1')
|
||||
}
|
||||
|
||||
function hash (str, digest) {
|
||||
return crypto
|
||||
.createHash(digest)
|
||||
.update(str)
|
||||
.digest('hex')
|
||||
}
|
||||
|
||||
function formatEntry (cache, entry, keepAll) {
|
||||
// Treat null digests as deletions. They'll shadow any previous entries.
|
||||
if (!entry.integrity && !keepAll)
|
||||
return null
|
||||
|
||||
return {
|
||||
key: entry.key,
|
||||
integrity: entry.integrity,
|
||||
path: entry.integrity ? contentPath(cache, entry.integrity) : undefined,
|
||||
size: entry.size,
|
||||
time: entry.time,
|
||||
metadata: entry.metadata,
|
||||
}
|
||||
}
|
||||
|
||||
function readdirOrEmpty (dir) {
|
||||
return readdir(dir).catch((err) => {
|
||||
if (err.code === 'ENOENT' || err.code === 'ENOTDIR')
|
||||
return []
|
||||
|
||||
throw err
|
||||
})
|
||||
}
|
||||
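The bucket format used by `insert()` and `_bucketEntries()` above is easy to reason about in isolation: each appended line is the SHA-1 of the stringified entry, a tab, then the JSON itself, and any line whose hash does not verify is silently dropped on read. Below is a minimal standalone sketch of that round trip using only Node's `crypto`; the key and integrity values are invented for illustration.

```javascript
'use strict'

const crypto = require('crypto')

const hashEntry = (str) => crypto.createHash('sha1').update(str).digest('hex')

// Append-style serialization, mirroring insert() above.
const entry = { key: 'my-key', integrity: 'sha512-deadbeef', time: Date.now() }
const stringified = JSON.stringify(entry)
const line = `\n${hashEntry(stringified)}\t${stringified}`

// Read-style validation, mirroring _bucketEntries() above.
const [hash, json] = line.trim().split('\t')
if (hash === hashEntry(json))
  console.log('entry survives round trip:', JSON.parse(json).key)
else
  console.log('corrupted line, skipping')
```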
73
node_modules/cacache/lib/memoization.js
generated
vendored
Normal file
@@ -0,0 +1,73 @@
'use strict'

const LRU = require('lru-cache')

const MAX_SIZE = 50 * 1024 * 1024 // 50MB
const MAX_AGE = 3 * 60 * 1000

const MEMOIZED = new LRU({
  max: MAX_SIZE,
  maxAge: MAX_AGE,
  length: (entry, key) => key.startsWith('key:') ? entry.data.length : entry.length,
})

module.exports.clearMemoized = clearMemoized

function clearMemoized () {
  const old = {}
  MEMOIZED.forEach((v, k) => {
    old[k] = v
  })
  MEMOIZED.reset()
  return old
}

module.exports.put = put

function put (cache, entry, data, opts) {
  pickMem(opts).set(`key:${cache}:${entry.key}`, { entry, data })
  putDigest(cache, entry.integrity, data, opts)
}

module.exports.put.byDigest = putDigest

function putDigest (cache, integrity, data, opts) {
  pickMem(opts).set(`digest:${cache}:${integrity}`, data)
}

module.exports.get = get

function get (cache, key, opts) {
  return pickMem(opts).get(`key:${cache}:${key}`)
}

module.exports.get.byDigest = getDigest

function getDigest (cache, integrity, opts) {
  return pickMem(opts).get(`digest:${cache}:${integrity}`)
}

class ObjProxy {
  constructor (obj) {
    this.obj = obj
  }

  get (key) {
    return this.obj[key]
  }

  set (key, val) {
    this.obj[key] = val
  }
}

function pickMem (opts) {
  if (!opts || !opts.memoize)
    return MEMOIZED
  else if (opts.memoize.get && opts.memoize.set)
    return opts.memoize
  else if (typeof opts.memoize === 'object')
    return new ObjProxy(opts.memoize)
  else
    return MEMOIZED
}
|
||||
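The memoization layer above keeps two namespaces in a single LRU: full index entries under `key:<cache>:<key>` and raw content under `digest:<cache>:<integrity>`, with the `length` function charging both against the same 50MB byte budget. A small sketch of how the two keys are derived; the cache path, key, and integrity string are invented for illustration.

```javascript
'use strict'

// Invented example values; only the key shapes matter here.
const cache = '/tmp/cache-demo'
const entry = { key: 'registry:lodash', integrity: 'sha512-abc123' }
const data = Buffer.from('tarball bytes')

// put() above stores the same data under both namespaces:
const byKey = `key:${cache}:${entry.key}`             // value: { entry, data }
const byDigest = `digest:${cache}:${entry.integrity}` // value: data

// and the LRU length function charges each entry by its byte size:
const lengthOf = (value, key) =>
  key.startsWith('key:') ? value.data.length : value.length

console.log(byKey, lengthOf({ entry, data }, byKey))   // ... 13
console.log(byDigest, lengthOf(data, byDigest))        // ... 13
```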
30
node_modules/cacache/lib/util/disposer.js
generated
vendored
Normal file
@@ -0,0 +1,30 @@
'use strict'

module.exports.disposer = disposer

function disposer (creatorFn, disposerFn, fn) {
  const runDisposer = (resource, result, shouldThrow = false) => {
    return disposerFn(resource)
      .then(
        // disposer resolved, do something with original fn's promise
        () => {
          if (shouldThrow)
            throw result

          return result
        },
        // Disposer fn failed, crash process
        (err) => {
          throw err
          // Or process.exit?
        })
  }

  return creatorFn
    .then((resource) => {
      // fn(resource) can throw, so wrap in a promise here
      return Promise.resolve().then(() => fn(resource))
        .then((result) => runDisposer(resource, result))
        .catch((err) => runDisposer(resource, err, true))
    })
}
|
||||
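`disposer()` is a small promise-based resource bracket; `compact()` above uses it to guarantee its temp file gets cleaned up whether the write succeeds or fails. A hypothetical usage sketch follows: the setup/teardown/work functions are invented, and requiring the internal module path directly is not a documented cacache entry point.

```javascript
'use strict'

const { disposer } = require('cacache/lib/util/disposer') // internal path, not public API

const setup = async () => ({ name: 'resource', open: true })
const teardown = async (res) => { res.open = false }      // always runs, success or failure
const work = async (res) => `did work with ${res.name}`

disposer(setup(), teardown, work)
  .then(console.log)    // "did work with resource"
  .catch(console.error) // the original error is re-thrown after teardown
```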
142
node_modules/cacache/lib/util/fix-owner.js
generated
vendored
Normal file
@@ -0,0 +1,142 @@
|
||||
'use strict'
|
||||
|
||||
const util = require('util')
|
||||
|
||||
const chownr = util.promisify(require('chownr'))
|
||||
const mkdirp = require('mkdirp')
|
||||
const inflight = require('promise-inflight')
|
||||
const inferOwner = require('infer-owner')
|
||||
|
||||
// Memoize getuid()/getgid() calls.
|
||||
// patch process.setuid/setgid to invalidate cached value on change
|
||||
const self = { uid: null, gid: null }
|
||||
const getSelf = () => {
|
||||
if (typeof self.uid !== 'number') {
|
||||
self.uid = process.getuid()
|
||||
const setuid = process.setuid
|
||||
process.setuid = (uid) => {
|
||||
self.uid = null
|
||||
process.setuid = setuid
|
||||
return process.setuid(uid)
|
||||
}
|
||||
}
|
||||
if (typeof self.gid !== 'number') {
|
||||
self.gid = process.getgid()
|
||||
const setgid = process.setgid
|
||||
process.setgid = (gid) => {
|
||||
self.gid = null
|
||||
process.setgid = setgid
|
||||
return process.setgid(gid)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports.chownr = fixOwner
|
||||
|
||||
function fixOwner (cache, filepath) {
|
||||
if (!process.getuid) {
|
||||
// This platform doesn't need ownership fixing
|
||||
return Promise.resolve()
|
||||
}
|
||||
|
||||
getSelf()
|
||||
if (self.uid !== 0) {
|
||||
// almost certainly can't chown anyway
|
||||
return Promise.resolve()
|
||||
}
|
||||
|
||||
return Promise.resolve(inferOwner(cache)).then((owner) => {
|
||||
const { uid, gid } = owner
|
||||
|
||||
// No need to override if it's already what we used.
|
||||
if (self.uid === uid && self.gid === gid)
|
||||
return
|
||||
|
||||
return inflight('fixOwner: fixing ownership on ' + filepath, () =>
|
||||
chownr(
|
||||
filepath,
|
||||
typeof uid === 'number' ? uid : self.uid,
|
||||
typeof gid === 'number' ? gid : self.gid
|
||||
).catch((err) => {
|
||||
if (err.code === 'ENOENT')
|
||||
return null
|
||||
|
||||
throw err
|
||||
})
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
module.exports.chownr.sync = fixOwnerSync
|
||||
|
||||
function fixOwnerSync (cache, filepath) {
|
||||
if (!process.getuid) {
|
||||
// This platform doesn't need ownership fixing
|
||||
return
|
||||
}
|
||||
const { uid, gid } = inferOwner.sync(cache)
|
||||
getSelf()
|
||||
if (self.uid !== 0) {
|
||||
// almost certainly can't chown anyway
|
||||
return
|
||||
}
|
||||
|
||||
if (self.uid === uid && self.gid === gid) {
|
||||
// No need to override if it's already what we used.
|
||||
return
|
||||
}
|
||||
try {
|
||||
chownr.sync(
|
||||
filepath,
|
||||
typeof uid === 'number' ? uid : self.uid,
|
||||
typeof gid === 'number' ? gid : self.gid
|
||||
)
|
||||
} catch (err) {
|
||||
// only catch ENOENT, any other error is a problem.
|
||||
if (err.code === 'ENOENT')
|
||||
return null
|
||||
|
||||
throw err
|
||||
}
|
||||
}
|
||||
|
||||
module.exports.mkdirfix = mkdirfix
|
||||
|
||||
function mkdirfix (cache, p, cb) {
|
||||
// we have to infer the owner _before_ making the directory, even though
|
||||
// we aren't going to use the results, since the cache itself might not
|
||||
// exist yet. If we mkdirp it, then our current uid/gid will be assumed
|
||||
// to be correct if it creates the cache folder in the process.
|
||||
return Promise.resolve(inferOwner(cache)).then(() => {
|
||||
return mkdirp(p)
|
||||
.then((made) => {
|
||||
if (made)
|
||||
return fixOwner(cache, made).then(() => made)
|
||||
})
|
||||
.catch((err) => {
|
||||
if (err.code === 'EEXIST')
|
||||
return fixOwner(cache, p).then(() => null)
|
||||
|
||||
throw err
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
module.exports.mkdirfix.sync = mkdirfixSync
|
||||
|
||||
function mkdirfixSync (cache, p) {
|
||||
try {
|
||||
inferOwner.sync(cache)
|
||||
const made = mkdirp.sync(p)
|
||||
if (made) {
|
||||
fixOwnerSync(cache, made)
|
||||
return made
|
||||
}
|
||||
} catch (err) {
|
||||
if (err.code === 'EEXIST') {
|
||||
fixOwnerSync(cache, p)
|
||||
return null
|
||||
} else
|
||||
throw err
|
||||
}
|
||||
}
|
||||
7
node_modules/cacache/lib/util/hash-to-segments.js
generated
vendored
Normal file
@@ -0,0 +1,7 @@
'use strict'

module.exports = hashToSegments

function hashToSegments (hash) {
  return [hash.slice(0, 2), hash.slice(2, 4), hash.slice(4)]
}
|
||||
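`hashToSegments()` is what fans the index and content trees out into small directories: the first two hex characters, the next two, and the remainder each become a path segment. A self-contained sketch of where a key's bucket would land, mirroring `hashKey()` and `bucketPath()` from entry-index.js above (the cache path is an example; `index-v5` follows the `cache-version.index` value in this package's package.json).

```javascript
'use strict'

const crypto = require('crypto')
const path = require('path')

const hashToSegments = (hash) =>
  [hash.slice(0, 2), hash.slice(2, 4), hash.slice(4)]

// Mirrors hashKey() + bucketPath() from entry-index.js above.
const key = 'my-unique-key-1234'
const hashed = crypto.createHash('sha256').update(key).digest('hex')
console.log(path.join('/tmp/cache-demo', 'index-v5', ...hashToSegments(hashed)))
// => /tmp/cache-demo/index-v5/<2 chars>/<2 chars>/<rest of the sha256>
```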
67
node_modules/cacache/lib/util/move-file.js
generated
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
'use strict'
|
||||
|
||||
const fs = require('fs')
|
||||
const util = require('util')
|
||||
const chmod = util.promisify(fs.chmod)
|
||||
const unlink = util.promisify(fs.unlink)
|
||||
const stat = util.promisify(fs.stat)
|
||||
const move = require('@npmcli/move-file')
|
||||
const pinflight = require('promise-inflight')
|
||||
|
||||
module.exports = moveFile
|
||||
|
||||
function moveFile (src, dest) {
|
||||
const isWindows = global.__CACACHE_TEST_FAKE_WINDOWS__ ||
|
||||
process.platform === 'win32'
|
||||
|
||||
// This isn't quite an fs.rename -- the assumption is that
|
||||
// if `dest` already exists, and we get certain errors while
|
||||
// trying to move it, we should just not bother.
|
||||
//
|
||||
// In the case of cache corruption, users will receive an
|
||||
// EINTEGRITY error elsewhere, and can remove the offending
|
||||
// content their own way.
|
||||
//
|
||||
// Note that, as the name suggests, this strictly only supports file moves.
|
||||
return new Promise((resolve, reject) => {
|
||||
fs.link(src, dest, (err) => {
|
||||
if (err) {
|
||||
if (isWindows && err.code === 'EPERM') {
|
||||
// XXX This is a really weird way to handle this situation, as it
|
||||
// results in the src file being deleted even though the dest
|
||||
// might not exist. Since we pretty much always write files to
|
||||
// deterministic locations based on content hash, this is likely
|
||||
// ok (or at worst, just ends in a future cache miss). But it would
|
||||
// be worth investigating at some time in the future if this is
|
||||
// really what we want to do here.
|
||||
return resolve()
|
||||
} else if (err.code === 'EEXIST' || err.code === 'EBUSY') {
|
||||
// file already exists, so whatever
|
||||
return resolve()
|
||||
} else
|
||||
return reject(err)
|
||||
} else
|
||||
return resolve()
|
||||
})
|
||||
})
|
||||
.then(() => {
|
||||
// content should never change for any reason, so make it read-only
|
||||
return Promise.all([
|
||||
unlink(src),
|
||||
!isWindows && chmod(dest, '0444'),
|
||||
])
|
||||
})
|
||||
.catch(() => {
|
||||
return pinflight('cacache-move-file:' + dest, () => {
|
||||
return stat(dest).catch((err) => {
|
||||
if (err.code !== 'ENOENT') {
|
||||
// Something else is wrong here. Bail bail bail
|
||||
throw err
|
||||
}
|
||||
// file doesn't already exist! let's try a rename -> copy fallback
|
||||
// only delete if it successfully copies
|
||||
return move(src, dest)
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
||||
35
node_modules/cacache/lib/util/tmp.js
generated
vendored
Normal file
@@ -0,0 +1,35 @@
'use strict'

const fs = require('@npmcli/fs')

const fixOwner = require('./fix-owner')
const path = require('path')

module.exports.mkdir = mktmpdir

function mktmpdir (cache, opts = {}) {
  const { tmpPrefix } = opts
  const tmpDir = path.join(cache, 'tmp')
  return fs.mkdir(tmpDir, { recursive: true, owner: 'inherit' })
    .then(() => {
      // do not use path.join(), it drops the trailing / if tmpPrefix is unset
      const target = `${tmpDir}${path.sep}${tmpPrefix || ''}`
      return fs.mkdtemp(target, { owner: 'inherit' })
    })
}

module.exports.withTmp = withTmp

function withTmp (cache, opts, cb) {
  if (!cb) {
    cb = opts
    opts = {}
  }
  return fs.withTempDir(path.join(cache, 'tmp'), cb, opts)
}

module.exports.fix = fixtmpdir

function fixtmpdir (cache) {
  return fixOwner(cache, path.join(cache, 'tmp'))
}
|
||||
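`tmp.withTmp` is part of cacache's public surface (see the Table of Contents): it hands your callback a fresh directory under `<cache>/tmp` and removes it once the callback's promise settles. A short usage sketch; the cache path and file name are illustrative.

```javascript
'use strict'

const path = require('path')
const fs = require('fs').promises
const cacache = require('cacache')

cacache.tmp.withTmp('/tmp/cache-demo', {}, async (dir) => {
  // `dir` lives under /tmp/cache-demo/tmp/ and is cleaned up automatically
  await fs.writeFile(path.join(dir, 'scratch.bin'), Buffer.from('temporary bytes'))
  // ... do work with the scratch file ...
}).then(() => console.log('scratch dir already removed'))
```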
287
node_modules/cacache/lib/verify.js
generated
vendored
Normal file
@@ -0,0 +1,287 @@
|
||||
'use strict'
|
||||
|
||||
const util = require('util')
|
||||
|
||||
const pMap = require('p-map')
|
||||
const contentPath = require('./content/path')
|
||||
const fixOwner = require('./util/fix-owner')
|
||||
const fs = require('fs')
|
||||
const fsm = require('fs-minipass')
|
||||
const glob = util.promisify(require('glob'))
|
||||
const index = require('./entry-index')
|
||||
const path = require('path')
|
||||
const rimraf = util.promisify(require('rimraf'))
|
||||
const ssri = require('ssri')
|
||||
|
||||
const hasOwnProperty = (obj, key) =>
|
||||
Object.prototype.hasOwnProperty.call(obj, key)
|
||||
|
||||
const stat = util.promisify(fs.stat)
|
||||
const truncate = util.promisify(fs.truncate)
|
||||
const writeFile = util.promisify(fs.writeFile)
|
||||
const readFile = util.promisify(fs.readFile)
|
||||
|
||||
const verifyOpts = (opts) => ({
|
||||
concurrency: 20,
|
||||
log: { silly () {} },
|
||||
...opts,
|
||||
})
|
||||
|
||||
module.exports = verify
|
||||
|
||||
function verify (cache, opts) {
|
||||
opts = verifyOpts(opts)
|
||||
opts.log.silly('verify', 'verifying cache at', cache)
|
||||
|
||||
const steps = [
|
||||
markStartTime,
|
||||
fixPerms,
|
||||
garbageCollect,
|
||||
rebuildIndex,
|
||||
cleanTmp,
|
||||
writeVerifile,
|
||||
markEndTime,
|
||||
]
|
||||
|
||||
return steps
|
||||
.reduce((promise, step, i) => {
|
||||
const label = step.name
|
||||
const start = new Date()
|
||||
return promise.then((stats) => {
|
||||
return step(cache, opts).then((s) => {
|
||||
s &&
|
||||
Object.keys(s).forEach((k) => {
|
||||
stats[k] = s[k]
|
||||
})
|
||||
const end = new Date()
|
||||
if (!stats.runTime)
|
||||
stats.runTime = {}
|
||||
|
||||
stats.runTime[label] = end - start
|
||||
return Promise.resolve(stats)
|
||||
})
|
||||
})
|
||||
}, Promise.resolve({}))
|
||||
.then((stats) => {
|
||||
stats.runTime.total = stats.endTime - stats.startTime
|
||||
opts.log.silly(
|
||||
'verify',
|
||||
'verification finished for',
|
||||
cache,
|
||||
'in',
|
||||
`${stats.runTime.total}ms`
|
||||
)
|
||||
return stats
|
||||
})
|
||||
}
|
||||
|
||||
function markStartTime (cache, opts) {
|
||||
return Promise.resolve({ startTime: new Date() })
|
||||
}
|
||||
|
||||
function markEndTime (cache, opts) {
|
||||
return Promise.resolve({ endTime: new Date() })
|
||||
}
|
||||
|
||||
function fixPerms (cache, opts) {
|
||||
opts.log.silly('verify', 'fixing cache permissions')
|
||||
return fixOwner
|
||||
.mkdirfix(cache, cache)
|
||||
.then(() => {
|
||||
// TODO - fix file permissions too
|
||||
return fixOwner.chownr(cache, cache)
|
||||
})
|
||||
.then(() => null)
|
||||
}
|
||||
|
||||
// Implements a naive mark-and-sweep tracing garbage collector.
|
||||
//
|
||||
// The algorithm is basically as follows:
|
||||
// 1. Read (and filter) all index entries ("pointers")
|
||||
// 2. Mark each integrity value as "live"
|
||||
// 3. Read entire filesystem tree in `content-vX/` dir
|
||||
// 4. If content is live, verify its checksum and delete it if it fails
|
||||
// 5. If content is not marked as live, rimraf it.
|
||||
//
|
||||
function garbageCollect (cache, opts) {
|
||||
opts.log.silly('verify', 'garbage collecting content')
|
||||
const indexStream = index.lsStream(cache)
|
||||
const liveContent = new Set()
|
||||
indexStream.on('data', (entry) => {
|
||||
if (opts.filter && !opts.filter(entry))
|
||||
return
|
||||
|
||||
liveContent.add(entry.integrity.toString())
|
||||
})
|
||||
return new Promise((resolve, reject) => {
|
||||
indexStream.on('end', resolve).on('error', reject)
|
||||
}).then(() => {
|
||||
const contentDir = contentPath.contentDir(cache)
|
||||
return glob(path.join(contentDir, '**'), {
|
||||
follow: false,
|
||||
nodir: true,
|
||||
nosort: true,
|
||||
}).then((files) => {
|
||||
return Promise.resolve({
|
||||
verifiedContent: 0,
|
||||
reclaimedCount: 0,
|
||||
reclaimedSize: 0,
|
||||
badContentCount: 0,
|
||||
keptSize: 0,
|
||||
}).then((stats) =>
|
||||
pMap(
|
||||
files,
|
||||
(f) => {
|
||||
const split = f.split(/[/\\]/)
|
||||
const digest = split.slice(split.length - 3).join('')
|
||||
const algo = split[split.length - 4]
|
||||
const integrity = ssri.fromHex(digest, algo)
|
||||
if (liveContent.has(integrity.toString())) {
|
||||
return verifyContent(f, integrity).then((info) => {
|
||||
if (!info.valid) {
|
||||
stats.reclaimedCount++
|
||||
stats.badContentCount++
|
||||
stats.reclaimedSize += info.size
|
||||
} else {
|
||||
stats.verifiedContent++
|
||||
stats.keptSize += info.size
|
||||
}
|
||||
return stats
|
||||
})
|
||||
} else {
|
||||
// No entries refer to this content. We can delete.
|
||||
stats.reclaimedCount++
|
||||
return stat(f).then((s) => {
|
||||
return rimraf(f).then(() => {
|
||||
stats.reclaimedSize += s.size
|
||||
return stats
|
||||
})
|
||||
})
|
||||
}
|
||||
},
|
||||
{ concurrency: opts.concurrency }
|
||||
).then(() => stats)
|
||||
)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
function verifyContent (filepath, sri) {
|
||||
return stat(filepath)
|
||||
.then((s) => {
|
||||
const contentInfo = {
|
||||
size: s.size,
|
||||
valid: true,
|
||||
}
|
||||
return ssri
|
||||
.checkStream(new fsm.ReadStream(filepath), sri)
|
||||
.catch((err) => {
|
||||
if (err.code !== 'EINTEGRITY')
|
||||
throw err
|
||||
|
||||
return rimraf(filepath).then(() => {
|
||||
contentInfo.valid = false
|
||||
})
|
||||
})
|
||||
.then(() => contentInfo)
|
||||
})
|
||||
.catch((err) => {
|
||||
if (err.code === 'ENOENT')
|
||||
return { size: 0, valid: false }
|
||||
|
||||
throw err
|
||||
})
|
||||
}
|
||||
|
||||
function rebuildIndex (cache, opts) {
|
||||
opts.log.silly('verify', 'rebuilding index')
|
||||
return index.ls(cache).then((entries) => {
|
||||
const stats = {
|
||||
missingContent: 0,
|
||||
rejectedEntries: 0,
|
||||
totalEntries: 0,
|
||||
}
|
||||
const buckets = {}
|
||||
for (const k in entries) {
|
||||
/* istanbul ignore else */
|
||||
if (hasOwnProperty(entries, k)) {
|
||||
const hashed = index.hashKey(k)
|
||||
const entry = entries[k]
|
||||
const excluded = opts.filter && !opts.filter(entry)
|
||||
excluded && stats.rejectedEntries++
|
||||
if (buckets[hashed] && !excluded)
|
||||
buckets[hashed].push(entry)
|
||||
else if (buckets[hashed] && excluded) {
|
||||
// skip
|
||||
} else if (excluded) {
|
||||
buckets[hashed] = []
|
||||
buckets[hashed]._path = index.bucketPath(cache, k)
|
||||
} else {
|
||||
buckets[hashed] = [entry]
|
||||
buckets[hashed]._path = index.bucketPath(cache, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
return pMap(
|
||||
Object.keys(buckets),
|
||||
(key) => {
|
||||
return rebuildBucket(cache, buckets[key], stats, opts)
|
||||
},
|
||||
{ concurrency: opts.concurrency }
|
||||
).then(() => stats)
|
||||
})
|
||||
}
|
||||
|
||||
function rebuildBucket (cache, bucket, stats, opts) {
|
||||
return truncate(bucket._path).then(() => {
|
||||
// This needs to be serialized because cacache explicitly
|
||||
// lets very racy bucket conflicts clobber each other.
|
||||
return bucket.reduce((promise, entry) => {
|
||||
return promise.then(() => {
|
||||
const content = contentPath(cache, entry.integrity)
|
||||
return stat(content)
|
||||
.then(() => {
|
||||
return index
|
||||
.insert(cache, entry.key, entry.integrity, {
|
||||
metadata: entry.metadata,
|
||||
size: entry.size,
|
||||
})
|
||||
.then(() => {
|
||||
stats.totalEntries++
|
||||
})
|
||||
})
|
||||
.catch((err) => {
|
||||
if (err.code === 'ENOENT') {
|
||||
stats.rejectedEntries++
|
||||
stats.missingContent++
|
||||
return
|
||||
}
|
||||
throw err
|
||||
})
|
||||
})
|
||||
}, Promise.resolve())
|
||||
})
|
||||
}
|
||||
|
||||
function cleanTmp (cache, opts) {
|
||||
opts.log.silly('verify', 'cleaning tmp directory')
|
||||
return rimraf(path.join(cache, 'tmp'))
|
||||
}
|
||||
|
||||
function writeVerifile (cache, opts) {
|
||||
const verifile = path.join(cache, '_lastverified')
|
||||
opts.log.silly('verify', 'writing verifile to ' + verifile)
|
||||
try {
|
||||
return writeFile(verifile, '' + +new Date())
|
||||
} finally {
|
||||
fixOwner.chownr.sync(cache, verifile)
|
||||
}
|
||||
}
|
||||
|
||||
module.exports.lastRun = lastRun
|
||||
|
||||
function lastRun (cache) {
|
||||
return readFile(path.join(cache, '_lastverified'), 'utf8').then(
|
||||
(data) => new Date(+data)
|
||||
)
|
||||
}
|
||||
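`verify()` above chains its steps (fixPerms, garbageCollect, rebuildIndex, cleanTmp, writeVerifile) and merges each step's stats into a single result object. A usage sketch showing the fields those steps populate; the cache path is an example and the numbers are whatever your cache contains.

```javascript
'use strict'

const cacache = require('cacache')

cacache.verify('/tmp/cache-demo').then((stats) => {
  console.log(stats.verifiedContent, 'content items verified')
  console.log(stats.reclaimedCount, 'items reclaimed,', stats.reclaimedSize, 'bytes freed')
  console.log(stats.totalEntries, 'index entries kept')
  console.log('took', stats.runTime.total, 'ms')
})

// lastRun() reads the _lastverified timestamp written by writeVerifile above.
cacache.verify.lastRun('/tmp/cache-demo').then((when) => {
  console.log('last verified at', when.toISOString())
})
```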
6
node_modules/cacache/ls.js
generated
vendored
Normal file
@@ -0,0 +1,6 @@
'use strict'

const index = require('./lib/entry-index')

module.exports = index.ls
module.exports.stream = index.lsStream
|
||||
15
node_modules/cacache/node_modules/lru-cache/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,15 @@
The ISC License

Copyright (c) Isaac Z. Schlueter and Contributors

Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
166
node_modules/cacache/node_modules/lru-cache/README.md
generated
vendored
Normal file
@@ -0,0 +1,166 @@
|
||||
# lru cache
|
||||
|
||||
A cache object that deletes the least-recently-used items.
|
||||
|
||||
[](https://travis-ci.org/isaacs/node-lru-cache) [](https://coveralls.io/github/isaacs/node-lru-cache)
|
||||
|
||||
## Installation:
|
||||
|
||||
```javascript
|
||||
npm install lru-cache --save
|
||||
```
|
||||
|
||||
## Usage:
|
||||
|
||||
```javascript
|
||||
var LRU = require("lru-cache")
|
||||
, options = { max: 500
|
||||
, length: function (n, key) { return n * 2 + key.length }
|
||||
, dispose: function (key, n) { n.close() }
|
||||
, maxAge: 1000 * 60 * 60 }
|
||||
, cache = new LRU(options)
|
||||
, otherCache = new LRU(50) // sets just the max size
|
||||
|
||||
cache.set("key", "value")
|
||||
cache.get("key") // "value"
|
||||
|
||||
// non-string keys ARE fully supported
|
||||
// but note that it must be THE SAME object, not
|
||||
// just a JSON-equivalent object.
|
||||
var someObject = { a: 1 }
|
||||
cache.set(someObject, 'a value')
|
||||
// Object keys are not toString()-ed
|
||||
cache.set('[object Object]', 'a different value')
|
||||
assert.equal(cache.get(someObject), 'a value')
|
||||
// A similar object with same keys/values won't work,
|
||||
// because it's a different object identity
|
||||
assert.equal(cache.get({ a: 1 }), undefined)
|
||||
|
||||
cache.reset() // empty the cache
|
||||
```
|
||||
|
||||
If you put more stuff in it, then items will fall out.
|
||||
|
||||
If you try to put an oversized thing in it, then it'll fall out right
|
||||
away.
|
||||
|
||||
## Options
|
||||
|
||||
* `max` The maximum size of the cache, checked by applying the length
|
||||
function to all values in the cache. Not setting this is kind of
|
||||
silly, since that's the whole purpose of this lib, but it defaults
|
||||
to `Infinity`. Setting it to a non-number or negative number will
|
||||
throw a `TypeError`. Setting it to 0 makes it be `Infinity`.
|
||||
* `maxAge` Maximum age in ms. Items are not pro-actively pruned out
|
||||
as they age, but if you try to get an item that is too old, it'll
|
||||
drop it and return undefined instead of giving it to you.
|
||||
Setting this to a negative value will make everything seem old!
|
||||
Setting it to a non-number will throw a `TypeError`.
|
||||
* `length` Function that is used to calculate the length of stored
|
||||
items. If you're storing strings or buffers, then you probably want
|
||||
to do something like `function(n, key){return n.length}`. The default is
|
||||
`function(){return 1}`, which is fine if you want to store `max`
|
||||
like-sized things. The item is passed as the first argument, and
|
||||
the key is passed as the second argument.
|
||||
* `dispose` Function that is called on items when they are dropped
|
||||
from the cache. This can be handy if you want to close file
|
||||
descriptors or do other cleanup tasks when items are no longer
|
||||
accessible. Called with `key, value`. It's called *before*
|
||||
actually removing the item from the internal cache, so if you want
|
||||
to immediately put it back in, you'll have to do that in a
|
||||
`nextTick` or `setTimeout` callback or it won't do anything.
|
||||
* `stale` By default, if you set a `maxAge`, it'll only actually pull
|
||||
stale items out of the cache when you `get(key)`. (That is, it's
|
||||
not pre-emptively doing a `setTimeout` or anything.) If you set
|
||||
`stale:true`, it'll return the stale value before deleting it. If
|
||||
you don't set this, then it'll return `undefined` when you try to
|
||||
get a stale entry, as if it had already been deleted.
|
||||
* `noDisposeOnSet` By default, if you set a `dispose()` method, then
|
||||
it'll be called whenever a `set()` operation overwrites an existing
|
||||
key. If you set this option, `dispose()` will only be called when a
|
||||
key falls out of the cache, not when it is overwritten.
|
||||
* `updateAgeOnGet` When using time-expiring entries with `maxAge`,
|
||||
setting this to `true` will make each item's effective time update
|
||||
to the current time whenever it is retrieved from cache, causing it
|
||||
to not expire. (It can still fall out of cache based on recency of
|
||||
use, of course.)
|
||||
|
||||
## API
|
||||
|
||||
* `set(key, value, maxAge)`
|
||||
* `get(key) => value`
|
||||
|
||||
Both of these will update the "recently used"-ness of the key.
|
||||
They do what you think. `maxAge` is optional and overrides the
|
||||
cache `maxAge` option if provided.
|
||||
|
||||
If the key is not found, `get()` will return `undefined`.
|
||||
|
||||
The key and val can be any value.
|
||||
|
||||
* `peek(key)`
|
||||
|
||||
Returns the key value (or `undefined` if not found) without
|
||||
updating the "recently used"-ness of the key.
|
||||
|
||||
(If you find yourself using this a lot, you *might* be using the
|
||||
wrong sort of data structure, but there are some use cases where
|
||||
it's handy.)
|
||||
|
||||
* `del(key)`
|
||||
|
||||
Deletes a key out of the cache.
|
||||
|
||||
* `reset()`
|
||||
|
||||
Clear the cache entirely, throwing away all values.
|
||||
|
||||
* `has(key)`
|
||||
|
||||
Check if a key is in the cache, without updating the recent-ness
|
||||
or deleting it for being stale.
|
||||
|
||||
* `forEach(function(value,key,cache), [thisp])`
|
||||
|
||||
Just like `Array.prototype.forEach`. Iterates over all the keys
|
||||
in the cache, in order of recent-ness. (Ie, more recently used
|
||||
items are iterated over first.)
|
||||
|
||||
* `rforEach(function(value,key,cache), [thisp])`
|
||||
|
||||
The same as `cache.forEach(...)` but items are iterated over in
|
||||
reverse order. (ie, less recently used items are iterated over
|
||||
first.)
|
||||
|
||||
* `keys()`
|
||||
|
||||
Return an array of the keys in the cache.
|
||||
|
||||
* `values()`
|
||||
|
||||
Return an array of the values in the cache.
|
||||
|
||||
* `length`
|
||||
|
||||
Return total length of objects in cache taking into account
|
||||
`length` options function.
|
||||
|
||||
* `itemCount`
|
||||
|
||||
Return total quantity of objects currently in cache. Note, that
|
||||
`stale` (see options) items are returned as part of this item
|
||||
count.
|
||||
|
||||
* `dump()`
|
||||
|
||||
Return an array of the cache entries ready for serialization and usage
|
||||
with `destinationCache.load(arr)`.
|
||||
|
||||
* `load(cacheEntriesArray)`
|
||||
|
||||
Loads another cache entries array, obtained with `sourceCache.dump()`,
|
||||
into the cache. The destination cache is reset before loading new entries.
|
||||
|
||||
* `prune()`
|
||||
|
||||
Manually iterates over the entire cache proactively pruning old entries
|
||||
334
node_modules/cacache/node_modules/lru-cache/index.js
generated
vendored
Normal file
@@ -0,0 +1,334 @@
|
||||
'use strict'
|
||||
|
||||
// A linked list to keep track of recently-used-ness
|
||||
const Yallist = require('yallist')
|
||||
|
||||
const MAX = Symbol('max')
|
||||
const LENGTH = Symbol('length')
|
||||
const LENGTH_CALCULATOR = Symbol('lengthCalculator')
|
||||
const ALLOW_STALE = Symbol('allowStale')
|
||||
const MAX_AGE = Symbol('maxAge')
|
||||
const DISPOSE = Symbol('dispose')
|
||||
const NO_DISPOSE_ON_SET = Symbol('noDisposeOnSet')
|
||||
const LRU_LIST = Symbol('lruList')
|
||||
const CACHE = Symbol('cache')
|
||||
const UPDATE_AGE_ON_GET = Symbol('updateAgeOnGet')
|
||||
|
||||
const naiveLength = () => 1
|
||||
|
||||
// lruList is a yallist where the head is the youngest
|
||||
// item, and the tail is the oldest. the list contains the Hit
|
||||
// objects as the entries.
|
||||
// Each Hit object has a reference to its Yallist.Node. This
|
||||
// never changes.
|
||||
//
|
||||
// cache is a Map (or PseudoMap) that matches the keys to
|
||||
// the Yallist.Node object.
|
||||
class LRUCache {
|
||||
constructor (options) {
|
||||
if (typeof options === 'number')
|
||||
options = { max: options }
|
||||
|
||||
if (!options)
|
||||
options = {}
|
||||
|
||||
if (options.max && (typeof options.max !== 'number' || options.max < 0))
|
||||
throw new TypeError('max must be a non-negative number')
|
||||
// Kind of weird to have a default max of Infinity, but oh well.
|
||||
const max = this[MAX] = options.max || Infinity
|
||||
|
||||
const lc = options.length || naiveLength
|
||||
this[LENGTH_CALCULATOR] = (typeof lc !== 'function') ? naiveLength : lc
|
||||
this[ALLOW_STALE] = options.stale || false
|
||||
if (options.maxAge && typeof options.maxAge !== 'number')
|
||||
throw new TypeError('maxAge must be a number')
|
||||
this[MAX_AGE] = options.maxAge || 0
|
||||
this[DISPOSE] = options.dispose
|
||||
this[NO_DISPOSE_ON_SET] = options.noDisposeOnSet || false
|
||||
this[UPDATE_AGE_ON_GET] = options.updateAgeOnGet || false
|
||||
this.reset()
|
||||
}
|
||||
|
||||
// resize the cache when the max changes.
|
||||
set max (mL) {
|
||||
if (typeof mL !== 'number' || mL < 0)
|
||||
throw new TypeError('max must be a non-negative number')
|
||||
|
||||
this[MAX] = mL || Infinity
|
||||
trim(this)
|
||||
}
|
||||
get max () {
|
||||
return this[MAX]
|
||||
}
|
||||
|
||||
set allowStale (allowStale) {
|
||||
this[ALLOW_STALE] = !!allowStale
|
||||
}
|
||||
get allowStale () {
|
||||
return this[ALLOW_STALE]
|
||||
}
|
||||
|
||||
set maxAge (mA) {
|
||||
if (typeof mA !== 'number')
|
||||
throw new TypeError('maxAge must be a non-negative number')
|
||||
|
||||
this[MAX_AGE] = mA
|
||||
trim(this)
|
||||
}
|
||||
get maxAge () {
|
||||
return this[MAX_AGE]
|
||||
}
|
||||
|
||||
// resize the cache when the lengthCalculator changes.
|
||||
set lengthCalculator (lC) {
|
||||
if (typeof lC !== 'function')
|
||||
lC = naiveLength
|
||||
|
||||
if (lC !== this[LENGTH_CALCULATOR]) {
|
||||
this[LENGTH_CALCULATOR] = lC
|
||||
this[LENGTH] = 0
|
||||
this[LRU_LIST].forEach(hit => {
|
||||
hit.length = this[LENGTH_CALCULATOR](hit.value, hit.key)
|
||||
this[LENGTH] += hit.length
|
||||
})
|
||||
}
|
||||
trim(this)
|
||||
}
|
||||
get lengthCalculator () { return this[LENGTH_CALCULATOR] }
|
||||
|
||||
get length () { return this[LENGTH] }
|
||||
get itemCount () { return this[LRU_LIST].length }
|
||||
|
||||
rforEach (fn, thisp) {
|
||||
thisp = thisp || this
|
||||
for (let walker = this[LRU_LIST].tail; walker !== null;) {
|
||||
const prev = walker.prev
|
||||
forEachStep(this, fn, walker, thisp)
|
||||
walker = prev
|
||||
}
|
||||
}
|
||||
|
||||
forEach (fn, thisp) {
|
||||
thisp = thisp || this
|
||||
for (let walker = this[LRU_LIST].head; walker !== null;) {
|
||||
const next = walker.next
|
||||
forEachStep(this, fn, walker, thisp)
|
||||
walker = next
|
||||
}
|
||||
}
|
||||
|
||||
keys () {
|
||||
return this[LRU_LIST].toArray().map(k => k.key)
|
||||
}
|
||||
|
||||
values () {
|
||||
return this[LRU_LIST].toArray().map(k => k.value)
|
||||
}
|
||||
|
||||
reset () {
|
||||
if (this[DISPOSE] &&
|
||||
this[LRU_LIST] &&
|
||||
this[LRU_LIST].length) {
|
||||
this[LRU_LIST].forEach(hit => this[DISPOSE](hit.key, hit.value))
|
||||
}
|
||||
|
||||
this[CACHE] = new Map() // hash of items by key
|
||||
this[LRU_LIST] = new Yallist() // list of items in order of use recency
|
||||
this[LENGTH] = 0 // length of items in the list
|
||||
}
|
||||
|
||||
dump () {
|
||||
return this[LRU_LIST].map(hit =>
|
||||
isStale(this, hit) ? false : {
|
||||
k: hit.key,
|
||||
v: hit.value,
|
||||
e: hit.now + (hit.maxAge || 0)
|
||||
}).toArray().filter(h => h)
|
||||
}
|
||||
|
||||
dumpLru () {
|
||||
return this[LRU_LIST]
|
||||
}
|
||||
|
||||
set (key, value, maxAge) {
|
||||
maxAge = maxAge || this[MAX_AGE]
|
||||
|
||||
if (maxAge && typeof maxAge !== 'number')
|
||||
throw new TypeError('maxAge must be a number')
|
||||
|
||||
const now = maxAge ? Date.now() : 0
|
||||
const len = this[LENGTH_CALCULATOR](value, key)
|
||||
|
||||
if (this[CACHE].has(key)) {
|
||||
if (len > this[MAX]) {
|
||||
del(this, this[CACHE].get(key))
|
||||
return false
|
||||
}
|
||||
|
||||
const node = this[CACHE].get(key)
|
||||
const item = node.value
|
||||
|
||||
// dispose of the old one before overwriting
|
||||
// split out into 2 ifs for better coverage tracking
|
||||
if (this[DISPOSE]) {
|
||||
if (!this[NO_DISPOSE_ON_SET])
|
||||
this[DISPOSE](key, item.value)
|
||||
}
|
||||
|
||||
item.now = now
|
||||
item.maxAge = maxAge
|
||||
item.value = value
|
||||
this[LENGTH] += len - item.length
|
||||
item.length = len
|
||||
this.get(key)
|
||||
trim(this)
|
||||
return true
|
||||
}
|
||||
|
||||
const hit = new Entry(key, value, len, now, maxAge)
|
||||
|
||||
// oversized objects fall out of cache automatically.
|
||||
if (hit.length > this[MAX]) {
|
||||
if (this[DISPOSE])
|
||||
this[DISPOSE](key, value)
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
this[LENGTH] += hit.length
|
||||
this[LRU_LIST].unshift(hit)
|
||||
this[CACHE].set(key, this[LRU_LIST].head)
|
||||
trim(this)
|
||||
return true
|
||||
}
|
||||
|
||||
has (key) {
|
||||
if (!this[CACHE].has(key)) return false
|
||||
const hit = this[CACHE].get(key).value
|
||||
return !isStale(this, hit)
|
||||
}
|
||||
|
||||
get (key) {
|
||||
return get(this, key, true)
|
||||
}
|
||||
|
||||
peek (key) {
|
||||
return get(this, key, false)
|
||||
}
|
||||
|
||||
pop () {
|
||||
const node = this[LRU_LIST].tail
|
||||
if (!node)
|
||||
return null
|
||||
|
||||
del(this, node)
|
||||
return node.value
|
||||
}
|
||||
|
||||
del (key) {
|
||||
del(this, this[CACHE].get(key))
|
||||
}
|
||||
|
||||
load (arr) {
|
||||
// reset the cache
|
||||
this.reset()
|
||||
|
||||
const now = Date.now()
|
||||
// A previous serialized cache has the most recent items first
|
||||
for (let l = arr.length - 1; l >= 0; l--) {
|
||||
const hit = arr[l]
|
||||
const expiresAt = hit.e || 0
|
||||
if (expiresAt === 0)
|
||||
// the item was created without expiration in a non aged cache
|
||||
this.set(hit.k, hit.v)
|
||||
else {
|
||||
const maxAge = expiresAt - now
|
||||
// dont add already expired items
|
||||
if (maxAge > 0) {
|
||||
this.set(hit.k, hit.v, maxAge)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
prune () {
|
||||
this[CACHE].forEach((value, key) => get(this, key, false))
|
||||
}
|
||||
}
|
||||
|
||||
const get = (self, key, doUse) => {
|
||||
const node = self[CACHE].get(key)
|
||||
if (node) {
|
||||
const hit = node.value
|
||||
if (isStale(self, hit)) {
|
||||
del(self, node)
|
||||
if (!self[ALLOW_STALE])
|
||||
return undefined
|
||||
} else {
|
||||
if (doUse) {
|
||||
if (self[UPDATE_AGE_ON_GET])
|
||||
node.value.now = Date.now()
|
||||
self[LRU_LIST].unshiftNode(node)
|
||||
}
|
||||
}
|
||||
return hit.value
|
||||
}
|
||||
}
|
||||
|
||||
const isStale = (self, hit) => {
|
||||
if (!hit || (!hit.maxAge && !self[MAX_AGE]))
|
||||
return false
|
||||
|
||||
const diff = Date.now() - hit.now
|
||||
return hit.maxAge ? diff > hit.maxAge
|
||||
: self[MAX_AGE] && (diff > self[MAX_AGE])
|
||||
}
|
||||
|
||||
const trim = self => {
|
||||
if (self[LENGTH] > self[MAX]) {
|
||||
for (let walker = self[LRU_LIST].tail;
|
||||
self[LENGTH] > self[MAX] && walker !== null;) {
|
||||
// We know that we're about to delete this one, and also
|
||||
// what the next least recently used key will be, so just
|
||||
// go ahead and set it now.
|
||||
const prev = walker.prev
|
||||
del(self, walker)
|
||||
walker = prev
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const del = (self, node) => {
|
||||
if (node) {
|
||||
const hit = node.value
|
||||
if (self[DISPOSE])
|
||||
self[DISPOSE](hit.key, hit.value)
|
||||
|
||||
self[LENGTH] -= hit.length
|
||||
self[CACHE].delete(hit.key)
|
||||
self[LRU_LIST].removeNode(node)
|
||||
}
|
||||
}
|
||||
|
||||
class Entry {
|
||||
constructor (key, value, length, now, maxAge) {
|
||||
this.key = key
|
||||
this.value = value
|
||||
this.length = length
|
||||
this.now = now
|
||||
this.maxAge = maxAge || 0
|
||||
}
|
||||
}
|
||||
|
||||
const forEachStep = (self, fn, node, thisp) => {
|
||||
let hit = node.value
|
||||
if (isStale(self, hit)) {
|
||||
del(self, node)
|
||||
if (!self[ALLOW_STALE])
|
||||
hit = undefined
|
||||
}
|
||||
if (hit)
|
||||
fn.call(thisp, hit.value, hit.key, self)
|
||||
}
|
||||
|
||||
module.exports = LRUCache
|
||||
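Because `max` is compared against the sum of `length(value, key)` over all entries, it can act as a byte budget rather than an item count, which is exactly how cacache's memoization layer above configures this vendored lru-cache. A small self-contained sketch of that behaviour:

```javascript
'use strict'

const LRU = require('lru-cache') // v6, as vendored here

const cache = new LRU({
  max: 10,                         // total budget of 10 "units"
  length: (value) => value.length, // measure each entry by string length
})

cache.set('a', 'xxxx') // 4 units
cache.set('b', 'xxxx') // 4 units
cache.set('c', 'xxxx') // 4 units -> total would be 12, so 'a' is evicted

console.log(cache.has('a'), cache.has('b'), cache.has('c')) // false true true
console.log(cache.length, cache.itemCount)                  // 8 2
```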
34
node_modules/cacache/node_modules/lru-cache/package.json
generated
vendored
Normal file
@@ -0,0 +1,34 @@
{
  "name": "lru-cache",
  "description": "A cache object that deletes the least-recently-used items.",
  "version": "6.0.0",
  "author": "Isaac Z. Schlueter <i@izs.me>",
  "keywords": [
    "mru",
    "lru",
    "cache"
  ],
  "scripts": {
    "test": "tap",
    "snap": "tap",
    "preversion": "npm test",
    "postversion": "npm publish",
    "prepublishOnly": "git push origin --follow-tags"
  },
  "main": "index.js",
  "repository": "git://github.com/isaacs/node-lru-cache.git",
  "devDependencies": {
    "benchmark": "^2.1.4",
    "tap": "^14.10.7"
  },
  "license": "ISC",
  "dependencies": {
    "yallist": "^4.0.0"
  },
  "files": [
    "index.js"
  ],
  "engines": {
    "node": ">=10"
  }
}
|
||||
80
node_modules/cacache/package.json
generated
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
{
|
||||
"name": "cacache",
|
||||
"version": "15.3.0",
|
||||
"cache-version": {
|
||||
"content": "2",
|
||||
"index": "5"
|
||||
},
|
||||
"description": "Fast, fault-tolerant, cross-platform, disk-based, data-agnostic, content-addressable cache.",
|
||||
"main": "index.js",
|
||||
"files": [
|
||||
"*.js",
|
||||
"lib"
|
||||
],
|
||||
"scripts": {
|
||||
"benchmarks": "node test/benchmarks",
|
||||
"preversion": "npm test",
|
||||
"postversion": "npm publish",
|
||||
"prepublishOnly": "git push origin --follow-tags",
|
||||
"test": "tap",
|
||||
"snap": "tap",
|
||||
"coverage": "tap",
|
||||
"test-docker": "docker run -it --rm --name pacotest -v \"$PWD\":/tmp -w /tmp node:latest npm test",
|
||||
"lint": "npm run npmclilint -- \"*.*js\" \"lib/**/*.*js\" \"test/**/*.*js\"",
|
||||
"npmclilint": "npmcli-lint",
|
||||
"lintfix": "npm run lint -- --fix",
|
||||
"postsnap": "npm run lintfix --"
|
||||
},
|
||||
"repository": "https://github.com/npm/cacache",
|
||||
"keywords": [
|
||||
"cache",
|
||||
"caching",
|
||||
"content-addressable",
|
||||
"sri",
|
||||
"sri hash",
|
||||
"subresource integrity",
|
||||
"cache",
|
||||
"storage",
|
||||
"store",
|
||||
"file store",
|
||||
"filesystem",
|
||||
"disk cache",
|
||||
"disk storage"
|
||||
],
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"@npmcli/fs": "^1.0.0",
|
||||
"@npmcli/move-file": "^1.0.1",
|
||||
"chownr": "^2.0.0",
|
||||
"fs-minipass": "^2.0.0",
|
||||
"glob": "^7.1.4",
|
||||
"infer-owner": "^1.0.4",
|
||||
"lru-cache": "^6.0.0",
|
||||
"minipass": "^3.1.1",
|
||||
"minipass-collect": "^1.0.2",
|
||||
"minipass-flush": "^1.0.5",
|
||||
"minipass-pipeline": "^1.2.2",
|
||||
"mkdirp": "^1.0.3",
|
||||
"p-map": "^4.0.0",
|
||||
"promise-inflight": "^1.0.1",
|
||||
"rimraf": "^3.0.2",
|
||||
"ssri": "^8.0.1",
|
||||
"tar": "^6.0.2",
|
||||
"unique-filename": "^1.1.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@npmcli/lint": "^1.0.1",
|
||||
"benchmark": "^2.1.4",
|
||||
"chalk": "^4.0.0",
|
||||
"require-inject": "^1.4.4",
|
||||
"tacks": "^1.3.0",
|
||||
"tap": "^15.0.9"
|
||||
},
|
||||
"tap": {
|
||||
"100": true,
|
||||
"test-regex": "test/[^/]*.js"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 10"
|
||||
}
|
||||
}
|
||||
83
node_modules/cacache/put.js
generated
vendored
Normal file
@@ -0,0 +1,83 @@
|
||||
'use strict'
|
||||
|
||||
const index = require('./lib/entry-index')
|
||||
const memo = require('./lib/memoization')
|
||||
const write = require('./lib/content/write')
|
||||
const Flush = require('minipass-flush')
|
||||
const { PassThrough } = require('minipass-collect')
|
||||
const Pipeline = require('minipass-pipeline')
|
||||
|
||||
const putOpts = (opts) => ({
|
||||
algorithms: ['sha512'],
|
||||
...opts,
|
||||
})
|
||||
|
||||
module.exports = putData
|
||||
|
||||
function putData (cache, key, data, opts = {}) {
|
||||
const { memoize } = opts
|
||||
opts = putOpts(opts)
|
||||
return write(cache, data, opts).then((res) => {
|
||||
return index
|
||||
.insert(cache, key, res.integrity, { ...opts, size: res.size })
|
||||
.then((entry) => {
|
||||
if (memoize)
|
||||
memo.put(cache, entry, data, opts)
|
||||
|
||||
return res.integrity
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
module.exports.stream = putStream
|
||||
|
||||
function putStream (cache, key, opts = {}) {
|
||||
const { memoize } = opts
|
||||
opts = putOpts(opts)
|
||||
let integrity
|
||||
let size
|
||||
|
||||
let memoData
|
||||
const pipeline = new Pipeline()
|
||||
// first item in the pipeline is the memoizer, because we need
|
||||
// that to end first and get the collected data.
|
||||
if (memoize) {
|
||||
const memoizer = new PassThrough().on('collect', data => {
|
||||
memoData = data
|
||||
})
|
||||
pipeline.push(memoizer)
|
||||
}
|
||||
|
||||
// contentStream is a write-only, not a passthrough
|
||||
// no data comes out of it.
|
||||
const contentStream = write.stream(cache, opts)
|
||||
.on('integrity', (int) => {
|
||||
integrity = int
|
||||
})
|
||||
.on('size', (s) => {
|
||||
size = s
|
||||
})
|
||||
|
||||
pipeline.push(contentStream)
|
||||
|
||||
// last but not least, we write the index and emit hash and size,
|
||||
// and memoize if we're doing that
|
||||
pipeline.push(new Flush({
|
||||
flush () {
|
||||
return index
|
||||
.insert(cache, key, integrity, { ...opts, size })
|
||||
.then((entry) => {
|
||||
if (memoize && memoData)
|
||||
memo.put(cache, entry, memoData, opts)
|
||||
|
||||
if (integrity)
|
||||
pipeline.emit('integrity', integrity)
|
||||
|
||||
if (size)
|
||||
pipeline.emit('size', size)
|
||||
})
|
||||
},
|
||||
}))
|
||||
|
||||
return pipeline
|
||||
}
|
||||
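`putStream()` above assembles a pipeline of an optional memoizer, the content write stream, and a final flush stage that writes the index entry and then emits `integrity` and `size` on the pipeline. A usage sketch in the style of the package's own examples; the cache path, key, and tarball path are invented.

```javascript
'use strict'

const cacache = require('cacache')
const fs = require('fs')

// Stream a file into the cache. 'integrity' and 'size' fire once the final
// Flush stage has written the index entry.
fs.createReadStream('/path/to/some-tarball.tgz')
  .pipe(cacache.put.stream('/tmp/cache-demo', 'demo-key'))
  .on('integrity', (sri) => console.log('stored as', sri))
  .on('size', (bytes) => console.log(bytes, 'bytes written'))
  .on('error', console.error)
```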
31
node_modules/cacache/rm.js
generated
vendored
Normal file
@@ -0,0 +1,31 @@
'use strict'

const util = require('util')

const index = require('./lib/entry-index')
const memo = require('./lib/memoization')
const path = require('path')
const rimraf = util.promisify(require('rimraf'))
const rmContent = require('./lib/content/rm')

module.exports = entry
module.exports.entry = entry

function entry (cache, key, opts) {
  memo.clearMemoized()
  return index.delete(cache, key, opts)
}

module.exports.content = content

function content (cache, integrity) {
  memo.clearMemoized()
  return rmContent(cache, integrity)
}

module.exports.all = all

function all (cache) {
  memo.clearMemoized()
  return rimraf(path.join(cache, '*(content-*|index-*)'))
}
|
||||
3
node_modules/cacache/verify.js
generated
vendored
Normal file
@@ -0,0 +1,3 @@
'use strict'

module.exports = require('./lib/verify')
|
||||