entry-index.js
'use strict'

const asyncMap = require('slide/lib/async-map')
const contentPath = require('./content/path')
const crypto = require('crypto')
const fixOwner = require('./util/fix-owner')
const fs = require('graceful-fs')
const lockfile = require('lockfile')
const path = require('path')
const pipe = require('mississippi').pipe
const Promise = require('bluebird')
const split = require('split')
const through = require('mississippi').through

module.exports.insert = insert
function insert (cache, key, digest, opts) {
  opts = opts || {}
  const bucket = indexPath(cache, key)
  const lock = bucket + '.lock'
  return fixOwner.mkdirfix(
    path.dirname(bucket), opts.uid, opts.gid
  ).then(() => (
    Promise.fromNode(_cb => {
      const cb = (err, entry) => {
        lockfile.unlock(lock, er => {
          _cb(err || er, entry)
        })
      }
      lockfile.lock(lock, {
        stale: 60000,
        retries: 10,
        wait: 10000
      }, function (err) {
        if (err) { return _cb(err) }
        fs.stat(bucket, function (err, existing) {
          if (err && err.code !== 'ENOENT' && err.code !== 'EPERM') {
            return cb(err)
          }
          const entry = {
            key: key,
            digest: digest,
            hashAlgorithm: opts.hashAlgorithm,
            time: +(new Date()),
            metadata: opts.metadata
          }
          // Because of the way these entries work,
          // the index is safe from fs.appendFile stopping
          // mid-write so long as newlines are *prepended*
          //
          // That is, if a write fails, it will be ignored
          // by `find`, and the next successful one will be
          // used.
          //
          // This should be -very rare-, since `fs.appendFile`
          // will often be atomic on most platforms unless
          // very large metadata has been included, but caches
          // like this one tend to last a long time. :)
          // Most corrupted reads are likely to be from attempting
          // to read the index while it's being written to --
          // which is safe, but not guaranteed to be atomic.
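          //
          // Concretely, a bucket file is just newline-delimited JSON. An
          // illustrative (not real) bucket might contain:
          //
          //   {"key":"my-key","digest":"deadbeef","time":1470000000000}
          //   {"key":"my-key","digest":"cafebabe","time":1470000001000}
          //
          // A torn trailing line simply fails JSON.parse and is skipped;
          // `find` keeps the last parseable entry for a key, so the newest
          // complete write wins.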
          const e = (existing ? '\n' : '') + JSON.stringify(entry)
          fs.appendFile(bucket, e, function (err) {
            cb(err, entry)
          })
        })
      })
    })
  )).then(entry => {
    return fixOwner.chownr(bucket, opts.uid, opts.gid).then(() => {
      return formatEntry(cache, entry)
    })
  })
}
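// Example usage (a minimal sketch; the cache path, key, digest, and metadata
// below are placeholders, not values defined by this module):
//
//   insert('/path/to/cache', 'my-key', 'deadbeefdeadbeef', {
//     hashAlgorithm: 'sha1',
//     metadata: { url: 'https://example.com/pkg.tgz' }
//   }).then(entry => {
//     // entry is the formatted index record; entry.path is where content
//     // for that digest would live under the cache.
//   })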
module.exports.find = find
function find (cache, key) {
  const bucket = indexPath(cache, key)
  const stream = fs.createReadStream(bucket)
  let ret
  return Promise.fromNode(cb => {
    pipe(stream, split('\n', null, {trailing: true}).on('data', function (l) {
      let obj
      try {
        obj = JSON.parse(l)
      } catch (e) {
        return
      }
      if (obj && (obj.key === key)) {
        ret = formatEntry(cache, obj)
      }
    }), function (err) {
      if (err && err.code === 'ENOENT') {
        cb(null, null)
      } else {
        cb(err, ret)
      }
    })
  })
}
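// Example usage (illustrative; path and key are placeholders). One way a
// caller might turn a miss into this module's ENOENT error:
//
//   find('/path/to/cache', 'my-key').then(entry => {
//     if (!entry) { throw notFoundError('/path/to/cache', 'my-key') }
//     return entry // newest matching record, with .digest and .path
//   })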
module.exports.delete = del
function del (cache, key) {
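  // Deletion is just an insert with a null digest. The tombstone becomes the
  // newest line in the bucket, so `find` (which keeps the last matching
  // entry) returns it instead of any earlier, live entry.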
  return insert(cache, key, null)
}
module.exports.lsStream = lsStream
function lsStream (cache) {
  const indexPath = path.join(cache, 'index')
  const stream = through.obj()
  fs.readdir(indexPath, function (err, files) {
    if (err && err.code === 'ENOENT') {
      return stream.end()
    } else if (err) {
      return stream.emit('error', err)
    } else {
      asyncMap(files, function (f, cb) {
        fs.readFile(path.join(indexPath, f), 'utf8', function (err, data) {
          if (err) { return cb(err) }
          const entries = {}
          data.split('\n').forEach(function (entry) {
            let parsed
            try {
              parsed = JSON.parse(entry)
            } catch (e) {
            }
            // NOTE - it's possible for an entry to be
            // incomplete/corrupt. So we just skip it.
            // See comment on `insert()` for deets.
            if (parsed) {
              entries[parsed.key] = formatEntry(cache, parsed)
            }
          })
          Object.keys(entries).forEach(function (k) {
            stream.write(entries[k])
          })
          cb()
        })
      }, function (err) {
        if (err) { stream.emit('error', err) }
        stream.end()
      })
    }
  })
  return stream
}
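// Example usage (illustrative): consume the object stream directly.
//
//   lsStream('/path/to/cache')
//     .on('data', entry => console.log(entry.key, entry.digest))
//     .on('error', err => console.error(err))
//     .on('finish', () => console.log('done'))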
module.exports.ls = ls
function ls (cache) {
  const entries = {}
  return Promise.fromNode(cb => {
    lsStream(cache).on('finish', function () {
      cb(null, entries)
    }).on('data', function (d) {
      entries[d.key] = d
    }).on('error', cb)
  })
}
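// Example usage (illustrative):
//
//   ls('/path/to/cache').then(entries => {
//     // entries is an object keyed by cache key, e.g. entries['my-key']
//   })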
module.exports.notFoundError = notFoundError
function notFoundError (cache, key) {
  const err = new Error('content not found')
  err.code = 'ENOENT'
  err.cache = cache
  err.key = key
  return err
}
function indexPath (cache, key) {
  return path.join(cache, 'index', hashKey(key))
}
module.exports._hashKey = hashKey
function hashKey (key) {
  // sha1 conflicts can be generated, but it doesn't matter in this case,
  // since we intend for there to be regular conflicts anyway. You can have
  // the entire cache in a single bucket and all that'll do is just make a big
  // file with a lot of contention, if you can even pull it off in the `key`
  // string. So whatever. `sha1` is faster and it doesn't trigger the warnings
  // `md5` tends to (yet?...).
  return crypto
    .createHash('sha1')
    .update(key.toLowerCase()) // lump case-variant keys into same bucket.
    .digest('hex')
    .slice(0, 7)
}
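// For example, hashKey('My-Key') and hashKey('my-key') land in the same
// bucket: keys are lowercased before hashing, and only the first 7 hex
// characters of the sha1 digest are used as the bucket filename.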
function formatEntry (cache, entry) {
  return {
    key: entry.key,
    digest: entry.digest,
    hashAlgorithm: entry.hashAlgorithm,
    path: contentPath(cache, entry.digest),
    time: entry.time,
    metadata: entry.metadata
  }
}