remotefilelog: replace repack lock to solve race condition...
Boris Feld -
r43213:5fadf610 default
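
This changeset drops the flock-based repack lock (extutil.flock) in favor of Mercurial's standard lock machinery (lock as lockmod), which avoids the race between checking for and creating the lock file and cleans the lock up on release. A minimal sketch of the new pattern, assuming a vfs rooted at the pack directory (trylockedrepack, packdir and dorepack are hypothetical illustration names):

    from mercurial import error, lock as lockmod, vfs as vfsmod

    def trylockedrepack(packdir, dorepack):
        # Hypothetical helper; not part of this changeset.
        opener = vfsmod.vfs(packdir)
        try:
            # timeout=0 fails immediately with LockHeld instead of racing
            # a concurrent repack on a raw flock'd file.
            with lockmod.lock(opener, 'repacklock', timeout=0):
                dorepack()
        except error.LockHeld:
            raise  # another repack already holds the lock
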
@@ -1,378 +1,378 b''
1 1 # debugcommands.py - debug logic for remotefilelog
2 2 #
3 3 # Copyright 2013 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from __future__ import absolute_import
8 8
9 9 import hashlib
10 10 import os
11 11 import zlib
12 12
13 13 from mercurial.node import bin, hex, nullid, short
14 14 from mercurial.i18n import _
15 15 from mercurial import (
16 16 error,
17 17 filelog,
18 lock as lockmod,
18 19 node as nodemod,
19 20 pycompat,
20 21 revlog,
21 22 )
22 23 from . import (
23 24 constants,
24 25 datapack,
25 extutil,
26 26 fileserverclient,
27 27 historypack,
28 28 repack,
29 29 shallowutil,
30 30 )
31 31
32 32 def debugremotefilelog(ui, path, **opts):
33 33 decompress = opts.get(r'decompress')
34 34
35 35 size, firstnode, mapping = parsefileblob(path, decompress)
36 36
37 37 ui.status(_("size: %d bytes\n") % (size))
38 38 ui.status(_("path: %s \n") % (path))
39 39 ui.status(_("key: %s \n") % (short(firstnode)))
40 40 ui.status(_("\n"))
41 41 ui.status(_("%12s => %12s %13s %13s %12s\n") %
42 42 ("node", "p1", "p2", "linknode", "copyfrom"))
43 43
44 44 queue = [firstnode]
45 45 while queue:
46 46 node = queue.pop(0)
47 47 p1, p2, linknode, copyfrom = mapping[node]
48 48 ui.status(_("%s => %s %s %s %s\n") %
49 49 (short(node), short(p1), short(p2), short(linknode), copyfrom))
50 50 if p1 != nullid:
51 51 queue.append(p1)
52 52 if p2 != nullid:
53 53 queue.append(p2)
54 54
55 55 def buildtemprevlog(repo, file):
56 56 # get filename key
57 57 filekey = nodemod.hex(hashlib.sha1(file).digest())
58 58 filedir = os.path.join(repo.path, 'store/data', filekey)
59 59
60 60 # sort all entries based on linkrev
61 61 fctxs = []
62 62 for filenode in os.listdir(filedir):
63 63 if '_old' not in filenode:
64 64 fctxs.append(repo.filectx(file, fileid=bin(filenode)))
65 65
66 66 fctxs = sorted(fctxs, key=lambda x: x.linkrev())
67 67
68 68 # add to revlog
69 69 temppath = repo.sjoin('data/temprevlog.i')
70 70 if os.path.exists(temppath):
71 71 os.remove(temppath)
72 72 r = filelog.filelog(repo.svfs, 'temprevlog')
73 73
74 74 class faket(object):
75 75 def add(self, a, b, c):
76 76 pass
77 77 t = faket()
78 78 for fctx in fctxs:
79 79 if fctx.node() not in repo:
80 80 continue
81 81
82 82 p = fctx.filelog().parents(fctx.filenode())
83 83 meta = {}
84 84 if fctx.renamed():
85 85 meta['copy'] = fctx.renamed()[0]
86 86 meta['copyrev'] = hex(fctx.renamed()[1])
87 87
88 88 r.add(fctx.data(), meta, t, fctx.linkrev(), p[0], p[1])
89 89
90 90 return r
91 91
92 92 def debugindex(orig, ui, repo, file_=None, **opts):
93 93 """dump the contents of an index file"""
94 94 if (opts.get(r'changelog') or
95 95 opts.get(r'manifest') or
96 96 opts.get(r'dir') or
97 97 not shallowutil.isenabled(repo) or
98 98 not repo.shallowmatch(file_)):
99 99 return orig(ui, repo, file_, **opts)
100 100
101 101 r = buildtemprevlog(repo, file_)
102 102
103 103 # debugindex like normal
104 104 format = opts.get('format', 0)
105 105 if format not in (0, 1):
106 106 raise error.Abort(_("unknown format %d") % format)
107 107
108 108 generaldelta = r.version & revlog.FLAG_GENERALDELTA
109 109 if generaldelta:
110 110 basehdr = ' delta'
111 111 else:
112 112 basehdr = ' base'
113 113
114 114 if format == 0:
115 115 ui.write((" rev offset length " + basehdr + " linkrev"
116 116 " nodeid p1 p2\n"))
117 117 elif format == 1:
118 118 ui.write((" rev flag offset length"
119 119 " size " + basehdr + " link p1 p2"
120 120 " nodeid\n"))
121 121
122 122 for i in r:
123 123 node = r.node(i)
124 124 if generaldelta:
125 125 base = r.deltaparent(i)
126 126 else:
127 127 base = r.chainbase(i)
128 128 if format == 0:
129 129 try:
130 130 pp = r.parents(node)
131 131 except Exception:
132 132 pp = [nullid, nullid]
133 133 ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
134 134 i, r.start(i), r.length(i), base, r.linkrev(i),
135 135 short(node), short(pp[0]), short(pp[1])))
136 136 elif format == 1:
137 137 pr = r.parentrevs(i)
138 138 ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
139 139 i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
140 140 base, r.linkrev(i), pr[0], pr[1], short(node)))
141 141
142 142 def debugindexdot(orig, ui, repo, file_):
143 143 """dump an index DAG as a graphviz dot file"""
144 144 if not shallowutil.isenabled(repo):
145 145 return orig(ui, repo, file_)
146 146
147 147 r = buildtemprevlog(repo, os.path.basename(file_)[:-2])
148 148
149 149 ui.write(("digraph G {\n"))
150 150 for i in r:
151 151 node = r.node(i)
152 152 pp = r.parents(node)
153 153 ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
154 154 if pp[1] != nullid:
155 155 ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
156 156 ui.write("}\n")
157 157
158 158 def verifyremotefilelog(ui, path, **opts):
159 159 decompress = opts.get(r'decompress')
160 160
161 161 for root, dirs, files in os.walk(path):
162 162 for file in files:
163 163 if file == "repos":
164 164 continue
165 165 filepath = os.path.join(root, file)
166 166 size, firstnode, mapping = parsefileblob(filepath, decompress)
167 167 for p1, p2, linknode, copyfrom in mapping.itervalues():
168 168 if linknode == nullid:
169 169 actualpath = os.path.relpath(root, path)
170 170 key = fileserverclient.getcachekey("reponame", actualpath,
171 171 file)
172 172 ui.status("%s %s\n" % (key, os.path.relpath(filepath,
173 173 path)))
174 174
175 175 def _decompressblob(raw):
176 176 return zlib.decompress(raw)
177 177
178 178 def parsefileblob(path, decompress):
179 179 f = open(path, "rb")
180 180 try:
181 181 raw = f.read()
182 182 finally:
183 183 f.close()
184 184
185 185 if decompress:
186 186 raw = _decompressblob(raw)
187 187
188 188 offset, size, flags = shallowutil.parsesizeflags(raw)
189 189 start = offset + size
190 190
191 191 firstnode = None
192 192
193 193 mapping = {}
194 194 while start < len(raw):
195 195 divider = raw.index('\0', start + 80)
196 196
197 197 currentnode = raw[start:(start + 20)]
198 198 if not firstnode:
199 199 firstnode = currentnode
200 200
201 201 p1 = raw[(start + 20):(start + 40)]
202 202 p2 = raw[(start + 40):(start + 60)]
203 203 linknode = raw[(start + 60):(start + 80)]
204 204 copyfrom = raw[(start + 80):divider]
205 205
206 206 mapping[currentnode] = (p1, p2, linknode, copyfrom)
207 207 start = divider + 1
208 208
209 209 return size, firstnode, mapping
210 210
211 211 def debugdatapack(ui, *paths, **opts):
212 212 for path in paths:
213 213 if '.data' in path:
214 214 path = path[:path.index('.data')]
215 215 ui.write("%s:\n" % path)
216 216 dpack = datapack.datapack(path)
217 217 node = opts.get(r'node')
218 218 if node:
219 219 deltachain = dpack.getdeltachain('', bin(node))
220 220 dumpdeltachain(ui, deltachain, **opts)
221 221 return
222 222
223 223 if opts.get(r'long'):
224 224 hashformatter = hex
225 225 hashlen = 42
226 226 else:
227 227 hashformatter = short
228 228 hashlen = 14
229 229
230 230 lastfilename = None
231 231 totaldeltasize = 0
232 232 totalblobsize = 0
233 233 def printtotals():
234 234 if lastfilename is not None:
235 235 ui.write("\n")
236 236 if not totaldeltasize or not totalblobsize:
237 237 return
238 238 difference = totalblobsize - totaldeltasize
239 239 deltastr = "%0.1f%% %s" % (
240 240 (100.0 * abs(difference) / totalblobsize),
241 241 ("smaller" if difference > 0 else "bigger"))
242 242
243 243 ui.write(("Total:%s%s %s (%s)\n") % (
244 244 "".ljust(2 * hashlen - len("Total:")),
245 245 ('%d' % totaldeltasize).ljust(12),
246 246 ('%d' % totalblobsize).ljust(9),
247 247 deltastr
248 248 ))
249 249
250 250 bases = {}
251 251 nodes = set()
252 252 failures = 0
253 253 for filename, node, deltabase, deltalen in dpack.iterentries():
254 254 bases[node] = deltabase
255 255 if node in nodes:
256 256 ui.write(("Bad entry: %s appears twice\n" % short(node)))
257 257 failures += 1
258 258 nodes.add(node)
259 259 if filename != lastfilename:
260 260 printtotals()
261 261 name = '(empty name)' if filename == '' else filename
262 262 ui.write("%s:\n" % name)
263 263 ui.write("%s%s%s%s\n" % (
264 264 "Node".ljust(hashlen),
265 265 "Delta Base".ljust(hashlen),
266 266 "Delta Length".ljust(14),
267 267 "Blob Size".ljust(9)))
268 268 lastfilename = filename
269 269 totalblobsize = 0
270 270 totaldeltasize = 0
271 271
272 272 # Metadata could be missing, in which case it will be an empty dict.
273 273 meta = dpack.getmeta(filename, node)
274 274 if constants.METAKEYSIZE in meta:
275 275 blobsize = meta[constants.METAKEYSIZE]
276 276 totaldeltasize += deltalen
277 277 totalblobsize += blobsize
278 278 else:
279 279 blobsize = "(missing)"
280 280 ui.write("%s %s %s%s\n" % (
281 281 hashformatter(node),
282 282 hashformatter(deltabase),
283 283 ('%d' % deltalen).ljust(14),
284 284 pycompat.bytestr(blobsize)))
285 285
286 286 if filename is not None:
287 287 printtotals()
288 288
289 289 failures += _sanitycheck(ui, set(nodes), bases)
290 290 if failures:
291 291 ui.warn(("%d failures\n" % failures))
292 292 return 1
293 293
294 294 def _sanitycheck(ui, nodes, bases):
295 295 """
296 296 Does some basic sanity checking on a packfile with ``nodes`` and ``bases`` (a
297 297 mapping of node->base):
298 298
299 299 - Each deltabase must itself be a node elsewhere in the pack
300 300 - There must be no cycles
301 301 """
302 302 failures = 0
303 303 for node in nodes:
304 304 seen = set()
305 305 current = node
306 306 deltabase = bases[current]
307 307
308 308 while deltabase != nullid:
309 309 if deltabase not in nodes:
310 310 ui.warn(("Bad entry: %s has an unknown deltabase (%s)\n" %
311 311 (short(node), short(deltabase))))
312 312 failures += 1
313 313 break
314 314
315 315 if deltabase in seen:
316 316 ui.warn(("Bad entry: %s has a cycle (at %s)\n" %
317 317 (short(node), short(deltabase))))
318 318 failures += 1
319 319 break
320 320
321 321 current = deltabase
322 322 seen.add(current)
323 323 deltabase = bases[current]
324 324 # Since ``node`` begins a valid chain, reset/memoize its base to nullid
325 325 # so we don't traverse it again.
326 326 bases[node] = nullid
327 327 return failures
328 328
329 329 def dumpdeltachain(ui, deltachain, **opts):
330 330 hashformatter = hex
331 331 hashlen = 40
332 332
333 333 lastfilename = None
334 334 for filename, node, deltabasename, deltabasenode, delta in deltachain:
335 335 if filename != lastfilename:
336 336 ui.write("\n%s\n" % filename)
337 337 lastfilename = filename
338 338 ui.write("%s %s %s %s\n" % (
339 339 "Node".ljust(hashlen),
340 340 "Delta Base".ljust(hashlen),
341 341 "Delta SHA1".ljust(hashlen),
342 342 "Delta Length".ljust(6),
343 343 ))
344 344
345 345 ui.write("%s %s %s %d\n" % (
346 346 hashformatter(node),
347 347 hashformatter(deltabasenode),
348 348 nodemod.hex(hashlib.sha1(delta).digest()),
349 349 len(delta)))
350 350
351 351 def debughistorypack(ui, path):
352 352 if '.hist' in path:
353 353 path = path[:path.index('.hist')]
354 354 hpack = historypack.historypack(path)
355 355
356 356 lastfilename = None
357 357 for entry in hpack.iterentries():
358 358 filename, node, p1node, p2node, linknode, copyfrom = entry
359 359 if filename != lastfilename:
360 360 ui.write("\n%s\n" % filename)
361 361 ui.write("%s%s%s%s%s\n" % (
362 362 "Node".ljust(14),
363 363 "P1 Node".ljust(14),
364 364 "P2 Node".ljust(14),
365 365 "Link Node".ljust(14),
366 366 "Copy From"))
367 367 lastfilename = filename
368 368 ui.write("%s %s %s %s %s\n" % (short(node), short(p1node),
369 369 short(p2node), short(linknode), copyfrom))
370 370
371 371 def debugwaitonrepack(repo):
372 with extutil.flock(repack.repacklockvfs(repo).join('repacklock'), ''):
372 with lockmod.lock(repack.repacklockvfs(repo), "repacklock", timeout=-1):
373 373 return
374 374
375 375 def debugwaitonprefetch(repo):
376 376 with repo._lock(repo.svfs, "prefetchlock", True, None,
377 377 None, _('prefetching in %s') % repo.origroot):
378 378 pass
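
Note on the debugwaitonrepack change above: with lockmod, timeout=-1 means "wait forever", so acquiring and immediately releasing the lock becomes a cheap "wait for any in-flight repack" primitive, replacing the old blocking flock. A sketch restating that idiom, using repacklockvfs as introduced in this changeset:

    from mercurial import lock as lockmod
    from . import repack

    def waitforrepack(repo):
        # Blocks until a running repack releases 'repacklock', then
        # releases it right away; nothing is repacked here.
        with lockmod.lock(repack.repacklockvfs(repo), 'repacklock',
                          timeout=-1):
            pass
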
@@ -1,779 +1,779 b''
1 1 from __future__ import absolute_import
2 2
3 3 import os
4 4 import time
5 5
6 6 from mercurial.i18n import _
7 7 from mercurial.node import (
8 8 nullid,
9 9 short,
10 10 )
11 11 from mercurial import (
12 12 encoding,
13 13 error,
14 lock as lockmod,
14 15 mdiff,
15 16 policy,
16 17 pycompat,
17 18 scmutil,
18 19 util,
19 20 vfs,
20 21 )
21 22 from mercurial.utils import procutil
22 23 from . import (
23 24 constants,
24 25 contentstore,
25 26 datapack,
26 extutil,
27 27 historypack,
28 28 metadatastore,
29 29 shallowutil,
30 30 )
31 31
32 32 osutil = policy.importmod(r'osutil')
33 33
34 34 class RepackAlreadyRunning(error.Abort):
35 35 pass
36 36
37 37 def backgroundrepack(repo, incremental=True, packsonly=False,
38 38 ensurestart=False):
39 39 cmd = [procutil.hgexecutable(), '-R', repo.origroot, 'repack']
40 40 msg = _("(running background repack)\n")
41 41 if incremental:
42 42 cmd.append('--incremental')
43 43 msg = _("(running background incremental repack)\n")
44 44 if packsonly:
45 45 cmd.append('--packsonly')
46 46 repo.ui.warn(msg)
47 47 # We know this command will find a binary, so don't block on it starting.
48 48 procutil.runbgcommand(cmd, encoding.environ, ensurestart=ensurestart)
49 49
50 50 def fullrepack(repo, options=None):
51 51 """If ``packsonly`` is True, stores creating only loose objects are skipped.
52 52 """
53 53 if util.safehasattr(repo, 'shareddatastores'):
54 54 datasource = contentstore.unioncontentstore(
55 55 *repo.shareddatastores)
56 56 historysource = metadatastore.unionmetadatastore(
57 57 *repo.sharedhistorystores,
58 58 allowincomplete=True)
59 59
60 60 packpath = shallowutil.getcachepackpath(
61 61 repo,
62 62 constants.FILEPACK_CATEGORY)
63 63 _runrepack(repo, datasource, historysource, packpath,
64 64 constants.FILEPACK_CATEGORY, options=options)
65 65
66 66 if util.safehasattr(repo.manifestlog, 'datastore'):
67 67 localdata, shareddata = _getmanifeststores(repo)
68 68 lpackpath, ldstores, lhstores = localdata
69 69 spackpath, sdstores, shstores = shareddata
70 70
71 71 # Repack the shared manifest store
72 72 datasource = contentstore.unioncontentstore(*sdstores)
73 73 historysource = metadatastore.unionmetadatastore(
74 74 *shstores,
75 75 allowincomplete=True)
76 76 _runrepack(repo, datasource, historysource, spackpath,
77 77 constants.TREEPACK_CATEGORY, options=options)
78 78
79 79 # Repack the local manifest store
80 80 datasource = contentstore.unioncontentstore(
81 81 *ldstores,
82 82 allowincomplete=True)
83 83 historysource = metadatastore.unionmetadatastore(
84 84 *lhstores,
85 85 allowincomplete=True)
86 86 _runrepack(repo, datasource, historysource, lpackpath,
87 87 constants.TREEPACK_CATEGORY, options=options)
88 88
89 89 def incrementalrepack(repo, options=None):
90 90 """This repacks the repo by looking at the distribution of pack files in the
91 91 repo and performing the most minimal repack to keep the repo in good shape.
92 92 """
93 93 if util.safehasattr(repo, 'shareddatastores'):
94 94 packpath = shallowutil.getcachepackpath(
95 95 repo,
96 96 constants.FILEPACK_CATEGORY)
97 97 _incrementalrepack(repo,
98 98 repo.shareddatastores,
99 99 repo.sharedhistorystores,
100 100 packpath,
101 101 constants.FILEPACK_CATEGORY,
102 102 options=options)
103 103
104 104 if util.safehasattr(repo.manifestlog, 'datastore'):
105 105 localdata, shareddata = _getmanifeststores(repo)
106 106 lpackpath, ldstores, lhstores = localdata
107 107 spackpath, sdstores, shstores = shareddata
108 108
109 109 # Repack the shared manifest store
110 110 _incrementalrepack(repo,
111 111 sdstores,
112 112 shstores,
113 113 spackpath,
114 114 constants.TREEPACK_CATEGORY,
115 115 options=options)
116 116
117 117 # Repack the local manifest store
118 118 _incrementalrepack(repo,
119 119 ldstores,
120 120 lhstores,
121 121 lpackpath,
122 122 constants.TREEPACK_CATEGORY,
123 123 allowincompletedata=True,
124 124 options=options)
125 125
126 126 def _getmanifeststores(repo):
127 127 shareddatastores = repo.manifestlog.shareddatastores
128 128 localdatastores = repo.manifestlog.localdatastores
129 129 sharedhistorystores = repo.manifestlog.sharedhistorystores
130 130 localhistorystores = repo.manifestlog.localhistorystores
131 131
132 132 sharedpackpath = shallowutil.getcachepackpath(repo,
133 133 constants.TREEPACK_CATEGORY)
134 134 localpackpath = shallowutil.getlocalpackpath(repo.svfs.vfs.base,
135 135 constants.TREEPACK_CATEGORY)
136 136
137 137 return ((localpackpath, localdatastores, localhistorystores),
138 138 (sharedpackpath, shareddatastores, sharedhistorystores))
139 139
140 140 def _topacks(packpath, files, constructor):
141 141 paths = list(os.path.join(packpath, p) for p in files)
142 142 packs = list(constructor(p) for p in paths)
143 143 return packs
144 144
145 145 def _deletebigpacks(repo, folder, files):
146 146 """Deletes packfiles that are bigger than ``packs.maxpacksize``.
147 147
148 148 Returns ``files`` with the removed files omitted."""
149 149 maxsize = repo.ui.configbytes("packs", "maxpacksize")
150 150 if maxsize <= 0:
151 151 return files
152 152
153 153 # This only considers datapacks today, but we could broaden it to include
154 154 # historypacks.
155 155 VALIDEXTS = [".datapack", ".dataidx"]
156 156
157 157 # Either an oversize index or datapack will trigger cleanup of the whole
158 158 # pack:
159 159 oversized = {os.path.splitext(path)[0] for path, ftype, stat in files
160 160 if (stat.st_size > maxsize and (os.path.splitext(path)[1]
161 161 in VALIDEXTS))}
162 162
163 163 for rootfname in oversized:
164 164 rootpath = os.path.join(folder, rootfname)
165 165 for ext in VALIDEXTS:
166 166 path = rootpath + ext
167 167 repo.ui.debug('removing oversize packfile %s (%s)\n' %
168 168 (path, util.bytecount(os.stat(path).st_size)))
169 169 os.unlink(path)
170 170 return [row for row in files if os.path.basename(row[0]) not in oversized]
171 171
172 172 def _incrementalrepack(repo, datastore, historystore, packpath, category,
173 173 allowincompletedata=False, options=None):
174 174 shallowutil.mkstickygroupdir(repo.ui, packpath)
175 175
176 176 files = osutil.listdir(packpath, stat=True)
177 177 files = _deletebigpacks(repo, packpath, files)
178 178 datapacks = _topacks(packpath,
179 179 _computeincrementaldatapack(repo.ui, files),
180 180 datapack.datapack)
181 181 datapacks.extend(s for s in datastore
182 182 if not isinstance(s, datapack.datapackstore))
183 183
184 184 historypacks = _topacks(packpath,
185 185 _computeincrementalhistorypack(repo.ui, files),
186 186 historypack.historypack)
187 187 historypacks.extend(s for s in historystore
188 188 if not isinstance(s, historypack.historypackstore))
189 189
190 190 # ``allhistory{files,packs}`` contains all known history packs, even ones we
191 191 # don't plan to repack. They are used during the datapack repack to ensure
192 192 # good ordering of nodes.
193 193 allhistoryfiles = _allpackfileswithsuffix(files, historypack.PACKSUFFIX,
194 194 historypack.INDEXSUFFIX)
195 195 allhistorypacks = _topacks(packpath,
196 196 (f for f, mode, stat in allhistoryfiles),
197 197 historypack.historypack)
198 198 allhistorypacks.extend(s for s in historystore
199 199 if not isinstance(s, historypack.historypackstore))
200 200 _runrepack(repo,
201 201 contentstore.unioncontentstore(
202 202 *datapacks,
203 203 allowincomplete=allowincompletedata),
204 204 metadatastore.unionmetadatastore(
205 205 *historypacks,
206 206 allowincomplete=True),
207 207 packpath, category,
208 208 fullhistory=metadatastore.unionmetadatastore(
209 209 *allhistorypacks,
210 210 allowincomplete=True),
211 211 options=options)
212 212
213 213 def _computeincrementaldatapack(ui, files):
214 214 opts = {
215 215 'gencountlimit' : ui.configint(
216 216 'remotefilelog', 'data.gencountlimit'),
217 217 'generations' : ui.configlist(
218 218 'remotefilelog', 'data.generations'),
219 219 'maxrepackpacks' : ui.configint(
220 220 'remotefilelog', 'data.maxrepackpacks'),
221 221 'repackmaxpacksize' : ui.configbytes(
222 222 'remotefilelog', 'data.repackmaxpacksize'),
223 223 'repacksizelimit' : ui.configbytes(
224 224 'remotefilelog', 'data.repacksizelimit'),
225 225 }
226 226
227 227 packfiles = _allpackfileswithsuffix(
228 228 files, datapack.PACKSUFFIX, datapack.INDEXSUFFIX)
229 229 return _computeincrementalpack(packfiles, opts)
230 230
231 231 def _computeincrementalhistorypack(ui, files):
232 232 opts = {
233 233 'gencountlimit' : ui.configint(
234 234 'remotefilelog', 'history.gencountlimit'),
235 235 'generations' : ui.configlist(
236 236 'remotefilelog', 'history.generations', ['100MB']),
237 237 'maxrepackpacks' : ui.configint(
238 238 'remotefilelog', 'history.maxrepackpacks'),
239 239 'repackmaxpacksize' : ui.configbytes(
240 240 'remotefilelog', 'history.repackmaxpacksize', '400MB'),
241 241 'repacksizelimit' : ui.configbytes(
242 242 'remotefilelog', 'history.repacksizelimit'),
243 243 }
244 244
245 245 packfiles = _allpackfileswithsuffix(
246 246 files, historypack.PACKSUFFIX, historypack.INDEXSUFFIX)
247 247 return _computeincrementalpack(packfiles, opts)
248 248
249 249 def _allpackfileswithsuffix(files, packsuffix, indexsuffix):
250 250 result = []
251 251 fileset = set(fn for fn, mode, stat in files)
252 252 for filename, mode, stat in files:
253 253 if not filename.endswith(packsuffix):
254 254 continue
255 255
256 256 prefix = filename[:-len(packsuffix)]
257 257
258 258 # Don't process a pack if it doesn't have an index.
259 259 if (prefix + indexsuffix) not in fileset:
260 260 continue
261 261 result.append((prefix, mode, stat))
262 262
263 263 return result
264 264
265 265 def _computeincrementalpack(files, opts):
266 266 """Given a set of pack files along with the configuration options, this
267 267 function computes the list of files that should be packed as part of an
268 268 incremental repack.
269 269
270 270 It tries to strike a balance between keeping incremental repacks cheap (i.e.
271 271 packing small things when possible) and rolling the packs up to the big ones
272 272 over time.
273 273 """
274 274
275 275 limits = list(sorted((util.sizetoint(s) for s in opts['generations']),
276 276 reverse=True))
277 277 limits.append(0)
278 278
279 279 # Group the packs by generation (i.e. by size)
280 280 generations = []
281 281 for i in pycompat.xrange(len(limits)):
282 282 generations.append([])
283 283
284 284 sizes = {}
285 285 for prefix, mode, stat in files:
286 286 size = stat.st_size
287 287 if size > opts['repackmaxpacksize']:
288 288 continue
289 289
290 290 sizes[prefix] = size
291 291 for i, limit in enumerate(limits):
292 292 if size > limit:
293 293 generations[i].append(prefix)
294 294 break
295 295
296 296 # Steps for picking what packs to repack:
297 297 # 1. Pick the largest generation with > gencountlimit pack files.
298 298 # 2. Take the smallest three packs.
299 299 # 3. While total-size-of-packs < repacksizelimit: add another pack
300 300
301 301 # Find the largest generation with more than gencountlimit packs
302 302 genpacks = []
303 303 for i, limit in enumerate(limits):
304 304 if len(generations[i]) > opts['gencountlimit']:
305 305 # Sort to be smallest last, for easy popping later
306 306 genpacks.extend(sorted(generations[i], reverse=True,
307 307 key=lambda x: sizes[x]))
308 308 break
309 309
310 310 # Take as many packs from the generation as we can
311 311 chosenpacks = genpacks[-3:]
312 312 genpacks = genpacks[:-3]
313 313 repacksize = sum(sizes[n] for n in chosenpacks)
314 314 while (repacksize < opts['repacksizelimit'] and genpacks and
315 315 len(chosenpacks) < opts['maxrepackpacks']):
316 316 chosenpacks.append(genpacks.pop())
317 317 repacksize += sizes[chosenpacks[-1]]
318 318
319 319 return chosenpacks
320 320
321 321 def _runrepack(repo, data, history, packpath, category, fullhistory=None,
322 322 options=None):
323 323 shallowutil.mkstickygroupdir(repo.ui, packpath)
324 324
325 325 def isold(repo, filename, node):
326 326 """Check if the file node is older than a limit.
327 327 Unless a limit is specified in the config the default limit is taken.
328 328 """
329 329 filectx = repo.filectx(filename, fileid=node)
330 330 filetime = repo[filectx.linkrev()].date()
331 331
332 332 ttl = repo.ui.configint('remotefilelog', 'nodettl')
333 333
334 334 limit = time.time() - ttl
335 335 return filetime[0] < limit
336 336
337 337 garbagecollect = repo.ui.configbool('remotefilelog', 'gcrepack')
338 338 if not fullhistory:
339 339 fullhistory = history
340 340 packer = repacker(repo, data, history, fullhistory, category,
341 341 gc=garbagecollect, isold=isold, options=options)
342 342
343 343 with datapack.mutabledatapack(repo.ui, packpath) as dpack:
344 344 with historypack.mutablehistorypack(repo.ui, packpath) as hpack:
345 345 try:
346 346 packer.run(dpack, hpack)
347 347 except error.LockHeld:
348 348 raise RepackAlreadyRunning(_("skipping repack - another repack "
349 349 "is already running"))
350 350
351 351 def keepset(repo, keyfn, lastkeepkeys=None):
352 352 """Computes a keepset which is not garbage collected.
353 353 'keyfn' is a function that maps filename, node to a unique key.
354 354 'lastkeepkeys' is an optional argument and if provided the keepset
355 355 function updates lastkeepkeys with more keys and returns the result.
356 356 """
357 357 if not lastkeepkeys:
358 358 keepkeys = set()
359 359 else:
360 360 keepkeys = lastkeepkeys
361 361
362 362 # We want to keep:
363 363 # 1. Working copy parent
364 364 # 2. Draft commits
365 365 # 3. Parents of draft commits
366 366 # 4. Pullprefetch and bgprefetchrevs revsets if specified
367 367 revs = ['.', 'draft()', 'parents(draft())']
368 368 prefetchrevs = repo.ui.config('remotefilelog', 'pullprefetch', None)
369 369 if prefetchrevs:
370 370 revs.append('(%s)' % prefetchrevs)
371 371 prefetchrevs = repo.ui.config('remotefilelog', 'bgprefetchrevs', None)
372 372 if prefetchrevs:
373 373 revs.append('(%s)' % prefetchrevs)
374 374 revs = '+'.join(revs)
375 375
376 376 revs = ['sort((%s), "topo")' % revs]
377 377 keep = scmutil.revrange(repo, revs)
378 378
379 379 processed = set()
380 380 lastmanifest = None
381 381
382 382 # process the commits in toposorted order starting from the oldest
383 383 for r in reversed(keep._list):
384 384 if repo[r].p1().rev() in processed:
385 385 # if the direct parent has already been processed
386 386 # then we only need to process the delta
387 387 m = repo[r].manifestctx().readdelta()
388 388 else:
389 389 # otherwise take the manifest and diff it
390 390 # with the previous manifest if one exists
391 391 if lastmanifest:
392 392 m = repo[r].manifest().diff(lastmanifest)
393 393 else:
394 394 m = repo[r].manifest()
395 395 lastmanifest = repo[r].manifest()
396 396 processed.add(r)
397 397
398 398 # populate keepkeys with keys from the current manifest
399 399 if type(m) is dict:
400 400 # m is a result of diff of two manifests and is a dictionary that
401 401 # maps filename to ((newnode, newflag), (oldnode, oldflag)) tuple
402 402 for filename, diff in m.iteritems():
403 403 if diff[0][0] is not None:
404 404 keepkeys.add(keyfn(filename, diff[0][0]))
405 405 else:
406 406 # m is a manifest object
407 407 for filename, filenode in m.iteritems():
408 408 keepkeys.add(keyfn(filename, filenode))
409 409
410 410 return keepkeys
411 411
412 412 class repacker(object):
413 413 """Class for orchestrating the repack of data and history information into a
414 414 new format.
415 415 """
416 416 def __init__(self, repo, data, history, fullhistory, category, gc=False,
417 417 isold=None, options=None):
418 418 self.repo = repo
419 419 self.data = data
420 420 self.history = history
421 421 self.fullhistory = fullhistory
422 422 self.unit = constants.getunits(category)
423 423 self.garbagecollect = gc
424 424 self.options = options
425 425 if self.garbagecollect:
426 426 if not isold:
427 427 raise ValueError("Function 'isold' is not properly specified")
428 428 # use (filename, node) tuple as a keepset key
429 429 self.keepkeys = keepset(repo, lambda f, n : (f, n))
430 430 self.isold = isold
431 431
432 432 def run(self, targetdata, targethistory):
433 433 ledger = repackledger()
434 434
435 with extutil.flock(repacklockvfs(self.repo).join("repacklock"),
436 _('repacking %s') % self.repo.origroot, timeout=0):
435 with lockmod.lock(repacklockvfs(self.repo), "repacklock", desc=None,
436 timeout=0):
437 437 self.repo.hook('prerepack')
438 438
439 439 # Populate ledger from source
440 440 self.data.markledger(ledger, options=self.options)
441 441 self.history.markledger(ledger, options=self.options)
442 442
443 443 # Run repack
444 444 self.repackdata(ledger, targetdata)
445 445 self.repackhistory(ledger, targethistory)
446 446
447 447 # Call cleanup on each source
448 448 for source in ledger.sources:
449 449 source.cleanup(ledger)
450 450
451 451 def _chainorphans(self, ui, filename, nodes, orphans, deltabases):
452 452 """Reorderes ``orphans`` into a single chain inside ``nodes`` and
453 453 ``deltabases``.
454 454
455 455 We often have orphan entries (nodes without a base that aren't
456 456 referenced by other nodes -- i.e., part of a chain) due to gaps in
457 457 history. Rather than store them as individual fulltexts, we prefer to
458 458 insert them as one chain sorted by size.
459 459 """
460 460 if not orphans:
461 461 return nodes
462 462
463 463 def getsize(node, default=0):
464 464 meta = self.data.getmeta(filename, node)
465 465 if constants.METAKEYSIZE in meta:
466 466 return meta[constants.METAKEYSIZE]
467 467 else:
468 468 return default
469 469
470 470 # Sort orphans by size; biggest first is preferred, since it's more
471 471 # likely to be the newest version assuming files grow over time.
472 472 # (Sort by node first to ensure the sort is stable.)
473 473 orphans = sorted(orphans)
474 474 orphans = list(sorted(orphans, key=getsize, reverse=True))
475 475 if ui.debugflag:
476 476 ui.debug("%s: orphan chain: %s\n" % (filename,
477 477 ", ".join([short(s) for s in orphans])))
478 478
479 479 # Create one contiguous chain and reassign deltabases.
480 480 for i, node in enumerate(orphans):
481 481 if i == 0:
482 482 deltabases[node] = (nullid, 0)
483 483 else:
484 484 parent = orphans[i - 1]
485 485 deltabases[node] = (parent, deltabases[parent][1] + 1)
486 486 nodes = [n for n in nodes if n not in orphans]
487 487 nodes += orphans
488 488 return nodes
489 489
490 490 def repackdata(self, ledger, target):
491 491 ui = self.repo.ui
492 492 maxchainlen = ui.configint('packs', 'maxchainlen', 1000)
493 493
494 494 byfile = {}
495 495 for entry in ledger.entries.itervalues():
496 496 if entry.datasource:
497 497 byfile.setdefault(entry.filename, {})[entry.node] = entry
498 498
499 499 count = 0
500 500 repackprogress = ui.makeprogress(_("repacking data"), unit=self.unit,
501 501 total=len(byfile))
502 502 for filename, entries in sorted(byfile.iteritems()):
503 503 repackprogress.update(count)
504 504
505 505 ancestors = {}
506 506 nodes = list(node for node in entries)
507 507 nohistory = []
508 508 buildprogress = ui.makeprogress(_("building history"), unit='nodes',
509 509 total=len(nodes))
510 510 for i, node in enumerate(nodes):
511 511 if node in ancestors:
512 512 continue
513 513 buildprogress.update(i)
514 514 try:
515 515 ancestors.update(self.fullhistory.getancestors(filename,
516 516 node, known=ancestors))
517 517 except KeyError:
518 518 # Since we're packing data entries, we may not have the
519 519 # corresponding history entries for them. It's not a big
520 520 # deal, but the entries won't be delta'd perfectly.
521 521 nohistory.append(node)
522 522 buildprogress.complete()
523 523
524 524 # Order the nodes children first, so we can produce reverse deltas
525 525 orderednodes = list(reversed(self._toposort(ancestors)))
526 526 if len(nohistory) > 0:
527 527 ui.debug('repackdata: %d nodes without history\n' %
528 528 len(nohistory))
529 529 orderednodes.extend(sorted(nohistory))
530 530
531 531 # Filter orderednodes to just the nodes we want to serialize (it
532 532 # currently also has the edge nodes' ancestors).
533 533 orderednodes = list(filter(lambda node: node in nodes,
534 534 orderednodes))
535 535
536 536 # Garbage collect old nodes:
537 537 if self.garbagecollect:
538 538 neworderednodes = []
539 539 for node in orderednodes:
540 540 # If the node is old and is not in the keepset, we skip it,
541 541 # and mark as garbage collected
542 542 if ((filename, node) not in self.keepkeys and
543 543 self.isold(self.repo, filename, node)):
544 544 entries[node].gced = True
545 545 continue
546 546 neworderednodes.append(node)
547 547 orderednodes = neworderednodes
548 548
549 549 # Compute delta bases for nodes:
550 550 deltabases = {}
551 551 nobase = set()
552 552 referenced = set()
553 553 nodes = set(nodes)
554 554 processprogress = ui.makeprogress(_("processing nodes"),
555 555 unit='nodes',
556 556 total=len(orderednodes))
557 557 for i, node in enumerate(orderednodes):
558 558 processprogress.update(i)
559 559 # Find delta base
560 560 # TODO: allow delta'ing against most recent descendant instead
561 561 # of immediate child
562 562 deltatuple = deltabases.get(node, None)
563 563 if deltatuple is None:
564 564 deltabase, chainlen = nullid, 0
565 565 deltabases[node] = (nullid, 0)
566 566 nobase.add(node)
567 567 else:
568 568 deltabase, chainlen = deltatuple
569 569 referenced.add(deltabase)
570 570
571 571 # Use available ancestor information to inform our delta choices
572 572 ancestorinfo = ancestors.get(node)
573 573 if ancestorinfo:
574 574 p1, p2, linknode, copyfrom = ancestorinfo
575 575
576 576 # The presence of copyfrom means we're at a point where the
577 577 # file was copied from elsewhere. So don't attempt to do any
578 578 # deltas with the other file.
579 579 if copyfrom:
580 580 p1 = nullid
581 581
582 582 if chainlen < maxchainlen:
583 583 # Record this child as the delta base for its parents.
584 584 # This may be non-optimal, since the parents may have
585 585 # many children, and this will only choose the last one.
586 586 # TODO: record all children and try all deltas to find
587 587 # best
588 588 if p1 != nullid:
589 589 deltabases[p1] = (node, chainlen + 1)
590 590 if p2 != nullid:
591 591 deltabases[p2] = (node, chainlen + 1)
592 592
593 593 # experimental config: repack.chainorphansbysize
594 594 if ui.configbool('repack', 'chainorphansbysize'):
595 595 orphans = nobase - referenced
596 596 orderednodes = self._chainorphans(ui, filename, orderednodes,
597 597 orphans, deltabases)
598 598
599 599 # Compute deltas and write to the pack
600 600 for i, node in enumerate(orderednodes):
601 601 deltabase, chainlen = deltabases[node]
602 602 # Compute delta
603 603 # TODO: Optimize the deltachain fetching. Since we're
604 604 # iterating over the different version of the file, we may
605 605 # be fetching the same deltachain over and over again.
606 606 if deltabase != nullid:
607 607 deltaentry = self.data.getdelta(filename, node)
608 608 delta, deltabasename, origdeltabase, meta = deltaentry
609 609 size = meta.get(constants.METAKEYSIZE)
610 610 if (deltabasename != filename or origdeltabase != deltabase
611 611 or size is None):
612 612 deltabasetext = self.data.get(filename, deltabase)
613 613 original = self.data.get(filename, node)
614 614 size = len(original)
615 615 delta = mdiff.textdiff(deltabasetext, original)
616 616 else:
617 617 delta = self.data.get(filename, node)
618 618 size = len(delta)
619 619 meta = self.data.getmeta(filename, node)
620 620
621 621 # TODO: don't use the delta if it's larger than the fulltext
622 622 if constants.METAKEYSIZE not in meta:
623 623 meta[constants.METAKEYSIZE] = size
624 624 target.add(filename, node, deltabase, delta, meta)
625 625
626 626 entries[node].datarepacked = True
627 627
628 628 processprogress.complete()
629 629 count += 1
630 630
631 631 repackprogress.complete()
632 632 target.close(ledger=ledger)
633 633
634 634 def repackhistory(self, ledger, target):
635 635 ui = self.repo.ui
636 636
637 637 byfile = {}
638 638 for entry in ledger.entries.itervalues():
639 639 if entry.historysource:
640 640 byfile.setdefault(entry.filename, {})[entry.node] = entry
641 641
642 642 progress = ui.makeprogress(_("repacking history"), unit=self.unit,
643 643 total=len(byfile))
644 644 for filename, entries in sorted(byfile.iteritems()):
645 645 ancestors = {}
646 646 nodes = list(node for node in entries)
647 647
648 648 for node in nodes:
649 649 if node in ancestors:
650 650 continue
651 651 ancestors.update(self.history.getancestors(filename, node,
652 652 known=ancestors))
653 653
654 654 # Order the nodes children first
655 655 orderednodes = reversed(self._toposort(ancestors))
656 656
657 657 # Write to the pack
658 658 dontprocess = set()
659 659 for node in orderednodes:
660 660 p1, p2, linknode, copyfrom = ancestors[node]
661 661
662 662 # If the node is marked dontprocess, but it's also in the
663 663 # explicit entries set, that means the node exists both in this
664 664 # file and in another file that was copied to this file.
665 665 # Usually this happens if the file was copied to another file,
666 666 # then the copy was deleted, then reintroduced without copy
667 667 # metadata. The original add and the new add have the same hash
668 668 # since the content is identical and the parents are null.
669 669 if node in dontprocess and node not in entries:
670 670 # If copyfrom == filename, it means the copy history
671 671 # went to some other file, then came back to this one, so we
672 672 # should continue processing it.
673 673 if p1 != nullid and copyfrom != filename:
674 674 dontprocess.add(p1)
675 675 if p2 != nullid:
676 676 dontprocess.add(p2)
677 677 continue
678 678
679 679 if copyfrom:
680 680 dontprocess.add(p1)
681 681
682 682 target.add(filename, node, p1, p2, linknode, copyfrom)
683 683
684 684 if node in entries:
685 685 entries[node].historyrepacked = True
686 686
687 687 progress.increment()
688 688
689 689 progress.complete()
690 690 target.close(ledger=ledger)
691 691
692 692 def _toposort(self, ancestors):
693 693 def parentfunc(node):
694 694 p1, p2, linknode, copyfrom = ancestors[node]
695 695 parents = []
696 696 if p1 != nullid:
697 697 parents.append(p1)
698 698 if p2 != nullid:
699 699 parents.append(p2)
700 700 return parents
701 701
702 702 sortednodes = shallowutil.sortnodes(ancestors.keys(), parentfunc)
703 703 return sortednodes
704 704
705 705 class repackledger(object):
706 706 """Storage for all the bookkeeping that happens during a repack. It contains
707 707 the list of revisions being repacked, what happened to each revision, and
708 708 which source store contained which revision originally (for later cleanup).
709 709 """
710 710 def __init__(self):
711 711 self.entries = {}
712 712 self.sources = {}
713 713 self.created = set()
714 714
715 715 def markdataentry(self, source, filename, node):
716 716 """Mark the given filename+node revision as having a data rev in the
717 717 given source.
718 718 """
719 719 entry = self._getorcreateentry(filename, node)
720 720 entry.datasource = True
721 721 entries = self.sources.get(source)
722 722 if not entries:
723 723 entries = set()
724 724 self.sources[source] = entries
725 725 entries.add(entry)
726 726
727 727 def markhistoryentry(self, source, filename, node):
728 728 """Mark the given filename+node revision as having a history rev in the
729 729 given source.
730 730 """
731 731 entry = self._getorcreateentry(filename, node)
732 732 entry.historysource = True
733 733 entries = self.sources.get(source)
734 734 if not entries:
735 735 entries = set()
736 736 self.sources[source] = entries
737 737 entries.add(entry)
738 738
739 739 def _getorcreateentry(self, filename, node):
740 740 key = (filename, node)
741 741 value = self.entries.get(key)
742 742 if not value:
743 743 value = repackentry(filename, node)
744 744 self.entries[key] = value
745 745
746 746 return value
747 747
748 748 def addcreated(self, value):
749 749 self.created.add(value)
750 750
751 751 class repackentry(object):
752 752 """Simple class representing a single revision entry in the repackledger.
753 753 """
754 754 __slots__ = (r'filename', r'node', r'datasource', r'historysource',
755 755 r'datarepacked', r'historyrepacked', r'gced')
756 756 def __init__(self, filename, node):
757 757 self.filename = filename
758 758 self.node = node
759 759 # If the revision has a data entry in the source
760 760 self.datasource = False
761 761 # If the revision has a history entry in the source
762 762 self.historysource = False
763 763 # If the revision's data entry was repacked into the repack target
764 764 self.datarepacked = False
765 765 # If the revision's history entry was repacked into the repack target
766 766 self.historyrepacked = False
767 767 # If garbage collected
768 768 self.gced = False
769 769
770 770 def repacklockvfs(repo):
771 771 if util.safehasattr(repo, 'name'):
772 772 # Lock in the shared cache so repacks across multiple copies of the same
773 773 # repo are coordinated.
774 774 sharedcachepath = shallowutil.getcachepackpath(
775 775 repo,
776 776 constants.FILEPACK_CATEGORY)
777 777 return vfs.vfs(sharedcachepath)
778 778 else:
779 779 return repo.svfs
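
Two details of the repack.py change above are worth calling out: the repack entry point now converts lock contention into RepackAlreadyRunning (an error.Abort subclass), and repacklockvfs picks the shared cache vfs for named shared repos, so repacks across copies of the same repo coordinate on one lock. A condensed, hedged sketch of the control flow that is spread across _runrepack and repacker.run above (guardedrepack is a hypothetical wrapper; repacklockvfs and RepackAlreadyRunning are the module-level names defined in this file):

    from mercurial import error, lock as lockmod
    from mercurial.i18n import _

    def guardedrepack(repo, packer, dpack, hpack):
        try:
            # timeout=0 turns a concurrent repack into an immediate
            # LockHeld instead of silently queueing behind it.
            with lockmod.lock(repacklockvfs(repo), 'repacklock',
                              desc=None, timeout=0):
                packer.run(dpack, hpack)
        except error.LockHeld:
            raise RepackAlreadyRunning(_("skipping repack - another repack "
                                         "is already running"))
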
@@ -1,381 +1,376 b''
1 1 #require no-windows
2 2
3 3 $ . "$TESTDIR/remotefilelog-library.sh"
4 4 # devel.remotefilelog.ensurestart: reduce race condition with
5 5 # waiton{repack/prefetch}
6 6 $ cat >> $HGRCPATH <<EOF
7 7 > [devel]
8 8 > remotefilelog.ensurestart=True
9 9 > EOF
10 10
11 11 $ hg init master
12 12 $ cd master
13 13 $ cat >> .hg/hgrc <<EOF
14 14 > [remotefilelog]
15 15 > server=True
16 16 > EOF
17 17 $ echo x > x
18 18 $ echo z > z
19 19 $ hg commit -qAm x
20 20 $ echo x2 > x
21 21 $ echo y > y
22 22 $ hg commit -qAm y
23 23 $ echo w > w
24 24 $ rm z
25 25 $ hg commit -qAm w
26 26 $ hg bookmark foo
27 27
28 28 $ cd ..
29 29
30 30 # clone the repo
31 31
32 32 $ hgcloneshallow ssh://user@dummy/master shallow --noupdate
33 33 streaming all changes
34 34 2 files to transfer, 776 bytes of data
35 35 transferred 776 bytes in * seconds (*/sec) (glob)
36 36 searching for changes
37 37 no changes found
38 38
39 39 # Set the prefetchdays config to zero so that all commits are prefetched
40 40 # no matter what their creation date is. Also set prefetchdelay config
41 41 # to zero so that there is no delay between prefetches.
42 42 $ cd shallow
43 43 $ cat >> .hg/hgrc <<EOF
44 44 > [remotefilelog]
45 45 > prefetchdays=0
46 46 > prefetchdelay=0
47 47 > EOF
48 48 $ cd ..
49 49
50 50 # prefetch a revision
51 51 $ cd shallow
52 52
53 53 $ hg prefetch -r 0
54 54 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
55 55
56 56 $ hg cat -r 0 x
57 57 x
58 58
59 59 # background prefetch on pull when configured
60 60
61 61 $ cat >> .hg/hgrc <<EOF
62 62 > [remotefilelog]
63 63 > pullprefetch=bookmark()
64 64 > backgroundprefetch=True
65 65 > EOF
66 66 $ hg strip tip
67 67 saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/6b4b6f66ef8c-b4b8bdaf-backup.hg (glob)
68 68
69 69 $ clearcache
70 70 $ hg pull
71 71 pulling from ssh://user@dummy/master
72 72 searching for changes
73 73 adding changesets
74 74 adding manifests
75 75 adding file changes
76 76 updating bookmark foo
77 77 added 1 changesets with 0 changes to 0 files
78 78 new changesets 6b4b6f66ef8c
79 79 (run 'hg update' to get a working copy)
80 80 prefetching file contents
81 81 $ sleep 0.5
82 82 $ hg debugwaitonprefetch >/dev/null 2>%1
83 83 $ find $CACHEDIR -type f | sort
84 84 $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/ef95c5376f34698742fe34f315fd82136f8f68c0
85 85 $TESTTMP/hgcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/076f5e2225b3ff0400b98c92aa6cdf403ee24cca
86 86 $TESTTMP/hgcache/master/af/f024fe4ab0fece4091de044c58c9ae4233383a/bb6ccd5dceaa5e9dc220e0dad65e051b94f69a2c
87 87 $TESTTMP/hgcache/repos
88 88
89 89 # background prefetch with repack on pull when configured
90 90
91 91 $ cat >> .hg/hgrc <<EOF
92 92 > [remotefilelog]
93 93 > backgroundrepack=True
94 94 > EOF
95 95 $ hg strip tip
96 96 saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/6b4b6f66ef8c-b4b8bdaf-backup.hg (glob)
97 97
98 98 $ clearcache
99 99 $ hg pull
100 100 pulling from ssh://user@dummy/master
101 101 searching for changes
102 102 adding changesets
103 103 adding manifests
104 104 adding file changes
105 105 updating bookmark foo
106 106 added 1 changesets with 0 changes to 0 files
107 107 new changesets 6b4b6f66ef8c
108 108 (run 'hg update' to get a working copy)
109 109 prefetching file contents
110 110 $ sleep 0.5
111 111 $ hg debugwaitonprefetch >/dev/null 2>%1
112 112 $ sleep 0.5
113 113 $ hg debugwaitonrepack >/dev/null 2>%1
114 114 $ sleep 0.5
115 115 $ find $CACHEDIR -type f | sort
116 116 $TESTTMP/hgcache/master/packs/6e8633deba6e544e5f8edbd7b996d6e31a2c42ae.histidx
117 117 $TESTTMP/hgcache/master/packs/6e8633deba6e544e5f8edbd7b996d6e31a2c42ae.histpack
118 118 $TESTTMP/hgcache/master/packs/8ce5ab3745465ab83bba30a7b9c295e0c8404652.dataidx
119 119 $TESTTMP/hgcache/master/packs/8ce5ab3745465ab83bba30a7b9c295e0c8404652.datapack
120 $TESTTMP/hgcache/master/packs/repacklock
121 120 $TESTTMP/hgcache/repos
122 121
123 122 # background prefetch with repack on update when wcprevset configured
124 123
125 124 $ clearcache
126 125 $ hg up -r 0
127 126 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
128 127 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
129 128 $ find $CACHEDIR -type f | sort
130 129 $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0
131 130 $TESTTMP/hgcache/master/39/5df8f7c51f007019cb30201c49e884b46b92fa/69a1b67522704ec122181c0890bd16e9d3e7516a
132 131 $TESTTMP/hgcache/repos
133 132
134 133 $ hg up -r 1
135 134 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
136 135 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
137 136
138 137 $ cat >> .hg/hgrc <<EOF
139 138 > [remotefilelog]
140 139 > bgprefetchrevs=.::
141 140 > EOF
142 141
143 142 $ clearcache
144 143 $ hg up -r 0
145 144 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
146 145 * files fetched over * fetches - (* misses, 0.00% hit ratio) over *s (glob)
147 146 $ sleep 1
148 147 $ hg debugwaitonprefetch >/dev/null 2>%1
149 148 $ sleep 1
150 149 $ hg debugwaitonrepack >/dev/null 2>%1
151 150 $ sleep 1
152 151 $ find $CACHEDIR -type f | sort
153 152 $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histidx
154 153 $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack
155 154 $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.dataidx
156 155 $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.datapack
157 $TESTTMP/hgcache/master/packs/repacklock
158 156 $TESTTMP/hgcache/repos
159 157
160 158 # Ensure that file 'w' was prefetched - it was not part of the update operation and therefore
161 159 # could only be downloaded by the background prefetch
162 160
163 161 $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1`
164 162 $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407:
165 163 w:
166 164 Node Delta Base Delta Length Blob Size
167 165 bb6ccd5dceaa 000000000000 2 2
168 166
169 167 Total: 2 2 (0.0% bigger)
170 168 x:
171 169 Node Delta Base Delta Length Blob Size
172 170 ef95c5376f34 000000000000 3 3
173 171 1406e7411862 ef95c5376f34 14 2
174 172
175 173 Total: 17 5 (240.0% bigger)
176 174 y:
177 175 Node Delta Base Delta Length Blob Size
178 176 076f5e2225b3 000000000000 2 2
179 177
180 178 Total: 2 2 (0.0% bigger)
181 179 z:
182 180 Node Delta Base Delta Length Blob Size
183 181 69a1b6752270 000000000000 2 2
184 182
185 183 Total: 2 2 (0.0% bigger)
186 184
187 185 # background prefetch with repack on commit when wcprevset configured
188 186
189 187 $ cat >> .hg/hgrc <<EOF
190 188 > [remotefilelog]
191 189 > bgprefetchrevs=0::
192 190 > EOF
193 191
194 192 $ clearcache
195 193 $ find $CACHEDIR -type f | sort
196 194 $ echo b > b
197 195 $ hg commit -qAm b
198 196 * files fetched over 1 fetches - (* misses, 0.00% hit ratio) over *s (glob)
199 197 $ hg bookmark temporary
200 198 $ sleep 1
201 199 $ hg debugwaitonprefetch >/dev/null 2>%1
202 200 $ sleep 1
203 201 $ hg debugwaitonrepack >/dev/null 2>%1
204 202 $ sleep 1
205 203 $ find $CACHEDIR -type f | sort
206 204 $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histidx
207 205 $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack
208 206 $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.dataidx
209 207 $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.datapack
210 $TESTTMP/hgcache/master/packs/repacklock
211 208 $TESTTMP/hgcache/repos
212 209
213 210 # Ensure that file 'w' was prefetched - it was not part of the commit operation and therefore
214 211 # could only be downloaded by the background prefetch
215 212
216 213 $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1`
217 214 $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407:
218 215 w:
219 216 Node Delta Base Delta Length Blob Size
220 217 bb6ccd5dceaa 000000000000 2 2
221 218
222 219 Total: 2 2 (0.0% bigger)
223 220 x:
224 221 Node Delta Base Delta Length Blob Size
225 222 ef95c5376f34 000000000000 3 3
226 223 1406e7411862 ef95c5376f34 14 2
227 224
228 225 Total: 17 5 (240.0% bigger)
229 226 y:
230 227 Node Delta Base Delta Length Blob Size
231 228 076f5e2225b3 000000000000 2 2
232 229
233 230 Total: 2 2 (0.0% bigger)
234 231 z:
235 232 Node Delta Base Delta Length Blob Size
236 233 69a1b6752270 000000000000 2 2
237 234
238 235 Total: 2 2 (0.0% bigger)
239 236
240 237 # background prefetch with repack on rebase when wcprevset configured
241 238
242 239 $ hg up -r 2
243 240 3 files updated, 0 files merged, 3 files removed, 0 files unresolved
244 241 (leaving bookmark temporary)
245 242 $ clearcache
246 243 $ find $CACHEDIR -type f | sort
247 244 $ hg rebase -s temporary -d foo
248 245 rebasing 3:58147a5b5242 "b" (temporary tip)
249 saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/58147a5b5242-c3678817-rebase.hg (glob)
246 saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/58147a5b5242-c3678817-rebase.hg
250 247 3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over *s (glob)
251 248 $ sleep 1
252 249 $ hg debugwaitonprefetch >/dev/null 2>%1
253 250 $ sleep 1
254 251 $ hg debugwaitonrepack >/dev/null 2>%1
255 252 $ sleep 1
256 253
257 254 # Ensure that file 'y' was prefetched - it was not part of the rebase operation and therefore
258 255 # could only be downloaded by the background prefetch
259 256
260 257 $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1`
261 258 $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407:
262 259 w:
263 260 Node Delta Base Delta Length Blob Size
264 261 bb6ccd5dceaa 000000000000 2 2
265 262
266 263 Total: 2 2 (0.0% bigger)
267 264 x:
268 265 Node Delta Base Delta Length Blob Size
269 266 ef95c5376f34 000000000000 3 3
270 267 1406e7411862 ef95c5376f34 14 2
271 268
272 269 Total: 17 5 (240.0% bigger)
273 270 y:
274 271 Node Delta Base Delta Length Blob Size
275 272 076f5e2225b3 000000000000 2 2
276 273
277 274 Total: 2 2 (0.0% bigger)
278 275 z:
279 276 Node Delta Base Delta Length Blob Size
280 277 69a1b6752270 000000000000 2 2
281 278
282 279 Total: 2 2 (0.0% bigger)
283 280
284 281 # Check that foreground prefetch with no arguments blocks until background prefetches finish
285 282
286 283 $ hg up -r 3
287 284 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
288 285 $ clearcache
289 286 $ hg prefetch --repack
290 287 waiting for lock on prefetching in $TESTTMP/shallow held by process * on host * (glob) (?)
291 288 got lock after * seconds (glob) (?)
292 289 (running background incremental repack)
293 290 * files fetched over 1 fetches - (* misses, 0.00% hit ratio) over *s (glob) (?)
294 291
295 292 $ sleep 0.5
296 293 $ hg debugwaitonrepack >/dev/null 2>%1
297 294 $ sleep 0.5
298 295
299 296 $ find $CACHEDIR -type f | sort
300 297 $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histidx
301 298 $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack
302 299 $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.dataidx
303 300 $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.datapack
304 $TESTTMP/hgcache/master/packs/repacklock
305 301 $TESTTMP/hgcache/repos
306 302
307 303 # Ensure that files were prefetched
308 304 $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1`
309 305 $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407:
310 306 w:
311 307 Node Delta Base Delta Length Blob Size
312 308 bb6ccd5dceaa 000000000000 2 2
313 309
314 310 Total: 2 2 (0.0% bigger)
315 311 x:
316 312 Node Delta Base Delta Length Blob Size
317 313 ef95c5376f34 000000000000 3 3
318 314 1406e7411862 ef95c5376f34 14 2
319 315
320 316 Total: 17 5 (240.0% bigger)
321 317 y:
322 318 Node Delta Base Delta Length Blob Size
323 319 076f5e2225b3 000000000000 2 2
324 320
325 321 Total: 2 2 (0.0% bigger)
326 322 z:
327 323 Node Delta Base Delta Length Blob Size
328 324 69a1b6752270 000000000000 2 2
329 325
330 326 Total: 2 2 (0.0% bigger)
331 327
332 328 # Check that foreground prefetch fetches revs specified by '. + draft() + bgprefetchrevs + pullprefetch'
333 329
334 330 $ clearcache
335 331 $ hg prefetch --repack
336 332 waiting for lock on prefetching in $TESTTMP/shallow held by process * on host * (glob) (?)
337 333 got lock after * seconds (glob) (?)
338 334 (running background incremental repack)
339 335 * files fetched over 1 fetches - (* misses, 0.00% hit ratio) over *s (glob) (?)
340 336 $ sleep 0.5
341 337 $ hg debugwaitonrepack >/dev/null 2>%1
342 338 $ sleep 0.5
343 339
344 340 $ find $CACHEDIR -type f | sort
345 341 $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histidx
346 342 $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack
347 343 $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.dataidx
348 344 $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.datapack
349 $TESTTMP/hgcache/master/packs/repacklock
350 345 $TESTTMP/hgcache/repos
351 346
352 347 # Ensure that files were prefetched
353 348 $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1`
354 349 $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407:
355 350 w:
356 351 Node Delta Base Delta Length Blob Size
357 352 bb6ccd5dceaa 000000000000 2 2
358 353
359 354 Total: 2 2 (0.0% bigger)
360 355 x:
361 356 Node Delta Base Delta Length Blob Size
362 357 ef95c5376f34 000000000000 3 3
363 358 1406e7411862 ef95c5376f34 14 2
364 359
365 360 Total: 17 5 (240.0% bigger)
366 361 y:
367 362 Node Delta Base Delta Length Blob Size
368 363 076f5e2225b3 000000000000 2 2
369 364
370 365 Total: 2 2 (0.0% bigger)
371 366 z:
372 367 Node Delta Base Delta Length Blob Size
373 368 69a1b6752270 000000000000 2 2
374 369
375 370 Total: 2 2 (0.0% bigger)
376 371
377 372 # Test that if data was prefetched and repacked we don't need to prefetch it again
378 373 # It ensures that Mercurial looks not only in loose files but in packs as well
379 374
380 375 $ hg prefetch --repack
381 376 (running background incremental repack)
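
The expected `find $CACHEDIR` listings in the test above shrink by one line because a lockmod lock is unlinked on release, whereas the old extutil.flock left a persistent repacklock file behind in the pack directory. A hedged sketch of that observable difference (leaveslockfile and path are illustration only; on POSIX the held lock is a dangling symlink holding "host:pid", hence lexists):

    import os
    from mercurial import lock as lockmod, vfs as vfsmod

    def leaveslockfile(path):
        # Hypothetical check; 'path' is any writable directory.
        opener = vfsmod.vfs(path)
        lockfile = os.path.join(path, 'repacklock')
        with lockmod.lock(opener, 'repacklock', timeout=0):
            heldvisible = os.path.lexists(lockfile)
        # After release, lockmod has unlinked the lock, so
        # `find $CACHEDIR -type f` no longer reports packs/repacklock.
        return heldvisible and not os.path.lexists(lockfile)
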
@@ -1,112 +1,111 b''
1 1 #require no-windows
2 2
3 3 $ . "$TESTDIR/remotefilelog-library.sh"
4 4
5 5 $ hg init master
6 6 $ cd master
7 7 $ cat >> .hg/hgrc <<EOF
8 8 > [remotefilelog]
9 9 > server=True
10 10 > serverexpiration=-1
11 11 > EOF
12 12 $ echo x > x
13 13 $ hg commit -qAm x
14 14 $ cd ..
15 15
16 16 $ hgcloneshallow ssh://user@dummy/master shallow -q
17 17 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
18 18
19 19 # Set the prefetchdays config to zero so that all commits are prefetched
20 20 # no matter what their creation date is.
21 21 $ cd shallow
22 22 $ cat >> .hg/hgrc <<EOF
23 23 > [remotefilelog]
24 24 > prefetchdays=0
25 25 > EOF
26 26 $ cd ..
27 27
28 28 # commit a new version of x so we can gc the old one
29 29
30 30 $ cd master
31 31 $ echo y > x
32 32 $ hg commit -qAm y
33 33 $ cd ..
34 34
35 35 $ cd shallow
36 36 $ hg pull -q
37 37 $ hg update -q
38 38 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
39 39 $ cd ..
40 40
41 41 # gc client cache
42 42
43 43 $ lastweek=`$PYTHON -c 'import datetime,time; print(datetime.datetime.fromtimestamp(time.time() - (86400 * 7)).strftime("%y%m%d%H%M"))'`
44 44 $ find $CACHEDIR -type f -exec touch -t $lastweek {} \;
45 45
46 46 $ find $CACHEDIR -type f | sort
47 47 $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0 (glob)
48 48 $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob)
49 49 $TESTTMP/hgcache/repos (glob)
50 50 $ hg gc
51 51 finished: removed 1 of 2 files (0.00 GB to 0.00 GB)
52 52 $ find $CACHEDIR -type f | sort
53 53 $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob)
54 54 $TESTTMP/hgcache/repos
55 55
56 56 # gc server cache
57 57
58 58 $ find master/.hg/remotefilelogcache -type f | sort
59 59 master/.hg/remotefilelogcache/x/1406e74118627694268417491f018a4a883152f0 (glob)
60 60 master/.hg/remotefilelogcache/x/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob)
61 61 $ hg gc master
62 62 finished: removed 0 of 1 files (0.00 GB to 0.00 GB)
63 63 $ find master/.hg/remotefilelogcache -type f | sort
64 64 master/.hg/remotefilelogcache/x/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob)
65 65
66 66 # Test that GC keepset includes pullprefetch revset if it is configured (keepset sketched after this file's diff)
67 67
68 68 $ cd shallow
69 69 $ cat >> .hg/hgrc <<EOF
70 70 > [remotefilelog]
71 71 > pullprefetch=all()
72 72 > EOF
73 73 $ hg prefetch
74 74 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
75 75
76 76 $ cd ..
77 77 $ hg gc
78 78 finished: removed 0 of 2 files (0.00 GB to 0.00 GB)
79 79
80 80 # Ensure that there are 2 versions of the file in cache
81 81 $ find $CACHEDIR -type f | sort
82 82 $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0 (glob)
83 83 $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob)
84 84 $TESTTMP/hgcache/repos (glob)
85 85
86 86 # Test that when both the remotefilelog.gcrepack and remotefilelog.repackonhggc flags are set, hg gc runs an incremental repack with the garbage collector
87 87
88 88 $ hg gc --config remotefilelog.gcrepack=True --config remotefilelog.repackonhggc=True
89 89
90 90 # Ensure that loose files are repacked
91 91 $ find $CACHEDIR -type f | sort
92 92 $TESTTMP/hgcache/master/packs/320dab99b7e3f60512b97f347689625263d22cf5.dataidx
93 93 $TESTTMP/hgcache/master/packs/320dab99b7e3f60512b97f347689625263d22cf5.datapack
94 94 $TESTTMP/hgcache/master/packs/837b83c1ef6485a336eb4421ac5973c0ec130fbb.histidx
95 95 $TESTTMP/hgcache/master/packs/837b83c1ef6485a336eb4421ac5973c0ec130fbb.histpack
96 $TESTTMP/hgcache/master/packs/repacklock
97 96 $TESTTMP/hgcache/repos
98 97
99 98 # Test that warning is displayed when there are no valid repos in repofile
100 99
101 100 $ cp $CACHEDIR/repos $CACHEDIR/repos.bak
102 101 $ echo " " > $CACHEDIR/repos
103 102 $ hg gc
104 103 warning: no valid repos in repofile
105 104 $ mv $CACHEDIR/repos.bak $CACHEDIR/repos
106 105
107 106 # Test that warning is displayed when the repo path is malformed
108 107
109 108 $ printf "asdas\0das" >> $CACHEDIR/repos
110 109 $ hg gc
111 110 abort: invalid path asdas\x00da: .*(null|NULL).* (re)
112 111 [255]
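With pullprefetch=all(), both versions of x land in the keepset, which is why hg gc above removes neither. A rough sketch of the keepset idea, with assumed helper names and an assumed (path, filenode) key shape:

    def computekeepkeys(repo):
        # Illustrative: collect file versions reachable from the keep revset;
        # gc would then spare cache entries whose key appears in this set.
        revset = '. + draft()'
        pullprefetch = repo.ui.config('remotefilelog', 'pullprefetch')
        if pullprefetch:
            revset += ' + (%s)' % pullprefetch
        keepkeys = set()
        for rev in repo.revs(revset):
            ctx = repo[rev]
            for path in ctx.files():
                if path in ctx:  # skip files removed by this commit
                    keepkeys.add((path, ctx[path].filenode()))
        return keepkeys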
@@ -1,387 +1,379 b''
1 1 #require no-windows
2 2
3 3 $ . "$TESTDIR/remotefilelog-library.sh"
4 4 # devel.remotefilelog.ensurestart: reduce race condition with
5 5 # waiton{repack/prefetch}
6 6 $ cat >> $HGRCPATH <<EOF
7 7 > [remotefilelog]
8 8 > fastdatapack=True
9 9 > [devel]
10 10 > remotefilelog.ensurestart=True
11 11 > EOF
12 12
13 13 $ hg init master
14 14 $ cd master
15 15 $ cat >> .hg/hgrc <<EOF
16 16 > [remotefilelog]
17 17 > server=True
18 18 > serverexpiration=-1
19 19 > EOF
20 20 $ echo x > x
21 21 $ hg commit -qAm x
22 22 $ echo x >> x
23 23 $ hg commit -qAm x2
24 24 $ cd ..
25 25
26 26 $ hgcloneshallow ssh://user@dummy/master shallow -q
27 27 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
28 28
29 29 # Set the prefetchdays config to zero so that all commits are prefetched
30 30 # no matter what their creation date is.
31 31 $ cd shallow
32 32 $ cat >> .hg/hgrc <<EOF
33 33 > [remotefilelog]
34 34 > prefetchdays=0
35 35 > EOF
36 36 $ cd ..
37 37
38 38 # Test that repack cleans up the old files and creates new packs
39 39
40 40 $ cd shallow
41 41 $ find $CACHEDIR | sort
42 42 $TESTTMP/hgcache
43 43 $TESTTMP/hgcache/master
44 44 $TESTTMP/hgcache/master/11
45 45 $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072
46 46 $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/aee31534993a501858fb6dd96a065671922e7d51
47 47 $TESTTMP/hgcache/repos
48 48
49 49 $ hg repack
50 50
51 51 $ find $CACHEDIR | sort
52 52 $TESTTMP/hgcache
53 53 $TESTTMP/hgcache/master
54 54 $TESTTMP/hgcache/master/packs
55 55 $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx
56 56 $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack
57 57 $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx
58 58 $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack
59 $TESTTMP/hgcache/master/packs/repacklock
60 59 $TESTTMP/hgcache/repos
61 60
62 61 # Test that the packs are readonly
63 62 $ ls_l $CACHEDIR/master/packs
64 63 -r--r--r-- 1145 1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx
65 64 -r--r--r-- 172 1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack
66 65 -r--r--r-- 1074 b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx
67 66 -r--r--r-- 72 b1e0cfc7f345e408a7825e3081501959488d59ce.datapack
68 -rw-r--r-- 0 repacklock
69 67
70 68 # Test that the data in the new packs is accessible
71 69 $ hg cat -r . x
72 70 x
73 71 x
74 72
75 73 # Test that adding new data and repacking it results in the loose data and the
76 74 # old packs being combined.
77 75
78 76 $ cd ../master
79 77 $ echo x >> x
80 78 $ hg commit -m x3
81 79 $ cd ../shallow
82 80 $ hg pull -q
83 81 $ hg up -q tip
84 82 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
85 83
86 84 $ find $CACHEDIR -type f | sort
87 85 $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/d4a3ed9310e5bd9887e3bf779da5077efab28216
88 86 $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx
89 87 $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack
90 88 $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx
91 89 $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack
92 $TESTTMP/hgcache/master/packs/repacklock
93 90 $TESTTMP/hgcache/repos
94 91
95 92 $ hg repack --traceback
96 93
97 94 $ find $CACHEDIR -type f | sort
98 95 $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx
99 96 $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack
100 97 $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx
101 98 $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
102 $TESTTMP/hgcache/master/packs/repacklock
103 99 $TESTTMP/hgcache/repos
104 100
105 101 # Verify all the file data is still available
106 102 $ hg cat -r . x
107 103 x
108 104 x
109 105 x
110 106 $ hg cat -r '.^' x
111 107 x
112 108 x
113 109
114 110 # Test that repacking again without new data does not delete the pack files
115 111 # and did not change the pack names
116 112 $ hg repack
117 113 $ find $CACHEDIR -type f | sort
118 114 $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx
119 115 $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack
120 116 $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx
121 117 $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
122 $TESTTMP/hgcache/master/packs/repacklock
123 118 $TESTTMP/hgcache/repos
124 119
125 120 # Run two repacks at once
126 121 $ hg repack --config "hooks.prerepack=sleep 3" &
127 122 $ sleep 1
128 123 $ hg repack
129 124 skipping repack - another repack is already running
130 125 $ hg debugwaitonrepack >/dev/null 2>&1
131 126
132 127 # Run repack in the background
133 128 $ cd ../master
134 129 $ echo x >> x
135 130 $ hg commit -m x4
136 131 $ cd ../shallow
137 132 $ hg pull -q
138 133 $ hg up -q tip
139 134 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
140 135 $ find $CACHEDIR -type f | sort
141 136 $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1bb2e6237e035c8f8ef508e281f1ce075bc6db72
142 137 $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx
143 138 $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack
144 139 $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx
145 140 $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
146 $TESTTMP/hgcache/master/packs/repacklock
147 141 $TESTTMP/hgcache/repos
148 142
149 143 $ hg repack --background
150 144 (running background repack)
151 145 $ sleep 0.5
152 146 $ hg debugwaitonrepack >/dev/null 2>&1
153 147 $ find $CACHEDIR -type f | sort
154 148 $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0.dataidx
155 149 $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0.datapack
156 150 $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histidx
157 151 $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histpack
158 $TESTTMP/hgcache/master/packs/repacklock
159 152 $TESTTMP/hgcache/repos
160 153
161 154 # Test debug commands
162 155
163 156 $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack
164 157 $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0:
165 158 x:
166 159 Node Delta Base Delta Length Blob Size
167 160 1bb2e6237e03 000000000000 8 8
168 161 d4a3ed9310e5 1bb2e6237e03 12 6
169 162 aee31534993a d4a3ed9310e5 12 4
170 163
171 164 Total: 32 18 (77.8% bigger)
172 165 $ hg debugdatapack --long $TESTTMP/hgcache/master/packs/*.datapack
173 166 $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0:
174 167 x:
175 168 Node Delta Base Delta Length Blob Size
176 169 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 8 8
177 170 d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 12 6
178 171 aee31534993a501858fb6dd96a065671922e7d51 d4a3ed9310e5bd9887e3bf779da5077efab28216 12 4
179 172
180 173 Total: 32 18 (77.8% bigger)
181 174 $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack --node d4a3ed9310e5bd9887e3bf779da5077efab28216
182 175 $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0:
183 176
184 177 x
185 178 Node Delta Base Delta SHA1 Delta Length
186 179 d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 77029ab56e83ea2115dd53ff87483682abe5d7ca 12
187 180 Node Delta Base Delta SHA1 Delta Length
188 181 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 7ca8c71a64f7b56380e77573da2f7a5fdd2ecdb5 8
189 182 $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx
190 183
191 184 x
192 185 Node P1 Node P2 Node Link Node Copy From
193 186 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7
194 187 d4a3ed9310e5 aee31534993a 000000000000 421535db10b6
195 188 aee31534993a 1406e7411862 000000000000 a89d614e2364
196 189 1406e7411862 000000000000 000000000000 b292c1e3311f
197 190
198 191 # Test copy tracing from a pack
199 192 $ cd ../master
200 193 $ hg mv x y
201 194 $ hg commit -m 'move x to y'
202 195 $ cd ../shallow
203 196 $ hg pull -q
204 197 $ hg up -q tip
205 198 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
206 199 $ hg repack
207 200 $ hg log -f y -T '{desc}\n'
208 201 move x to y
209 202 x4
210 203 x3
211 204 x2
212 205 x
213 206
214 207 # Test copy trace across rename and back
215 208 $ cp -R $TESTTMP/hgcache/master/packs $TESTTMP/backuppacks
216 209 $ cd ../master
217 210 $ hg mv y x
218 211 $ hg commit -m 'move y back to x'
219 212 $ hg revert -r 0 x
220 213 $ mv x y
221 214 $ hg add y
222 215 $ echo >> y
223 216 $ hg revert x
224 217 $ hg commit -m 'add y back without metadata'
225 218 $ cd ../shallow
226 219 $ hg pull -q
227 220 $ hg up -q tip
228 221 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over * (glob)
229 222 $ hg repack
230 223 $ ls $TESTTMP/hgcache/master/packs
231 224 bfd60adb76018bb952e27cd23fc151bf94865d7d.histidx
232 225 bfd60adb76018bb952e27cd23fc151bf94865d7d.histpack
233 226 fb3aa57b22789ebcc45706c352e2d6af099c5816.dataidx
234 227 fb3aa57b22789ebcc45706c352e2d6af099c5816.datapack
235 repacklock
236 228 $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx
237 229
238 230 x
239 231 Node P1 Node P2 Node Link Node Copy From
240 232 cd410a44d584 577959738234 000000000000 609547eda446 y
241 233 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7
242 234 d4a3ed9310e5 aee31534993a 000000000000 421535db10b6
243 235 aee31534993a 1406e7411862 000000000000 a89d614e2364
244 236 1406e7411862 000000000000 000000000000 b292c1e3311f
245 237
246 238 y
247 239 Node P1 Node P2 Node Link Node Copy From
248 240 577959738234 1bb2e6237e03 000000000000 c7faf2fc439a x
249 241 21f46f2721e7 000000000000 000000000000 d6868642b790
250 242 $ hg strip -r '.^'
251 243 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
252 244 saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob)
253 245 $ hg -R ../master strip -r '.^'
254 246 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
255 247 saved backup bundle to $TESTTMP/master/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob)
256 248
257 249 $ rm -rf $TESTTMP/hgcache/master/packs
258 250 $ cp -R $TESTTMP/backuppacks $TESTTMP/hgcache/master/packs
259 251
260 252 # Test repacking datapack without history
261 253 $ rm -rf $CACHEDIR/master/packs/*hist*
262 254 $ hg repack
263 255 $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack
264 256 $TESTTMP/hgcache/master/packs/922aca43dbbeda4d250565372e8892ec7b08da6a:
265 257 x:
266 258 Node Delta Base Delta Length Blob Size
267 259 1bb2e6237e03 000000000000 8 8
268 260 d4a3ed9310e5 1bb2e6237e03 12 6
269 261 aee31534993a d4a3ed9310e5 12 4
270 262
271 263 Total: 32 18 (77.8% bigger)
272 264 y:
273 265 Node Delta Base Delta Length Blob Size
274 266 577959738234 000000000000 70 8
275 267
276 268 Total: 70 8 (775.0% bigger)
277 269
278 270 $ hg cat -r ".^" x
279 271 x
280 272 x
281 273 x
282 274 x
283 275
284 276 Incremental repack (the generation bucketing is sketched after this file's diff)
285 277 $ rm -rf $CACHEDIR/master/packs/*
286 278 $ cat >> .hg/hgrc <<EOF
287 279 > [remotefilelog]
288 280 > data.generations=60
289 281 > 150
290 282 > EOF
291 283
292 284 Single pack - repack does nothing
293 285 $ hg prefetch -r 0
294 286 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
295 287 $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
296 288 [1]
297 289 $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
298 290 [1]
299 291 $ hg repack --incremental
300 292 $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
301 293 -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack
302 294 $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
303 295 -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack
304 296
305 297 3 gen1 packs, 1 gen0 pack - packs 3 gen1 into 1
306 298 $ hg prefetch -r 1
307 299 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
308 300 $ hg prefetch -r 2
309 301 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
310 302 $ hg prefetch -r 3
311 303 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
312 304 $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
313 305 -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack
314 306 $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
315 307 -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack
316 308 $ hg repack --incremental
317 309 $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
318 310 -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack
319 311 -r--r--r-- 226 39443fa1064182e93d968b5cba292eb5283260d0.datapack
320 312 $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
321 313 -r--r--r-- 336 604552d403a1381749faf656feca0ca265a6d52c.histpack
322 314 -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack
323 315
324 316 1 gen3 pack, 1 gen0 pack - does nothing
325 317 $ hg repack --incremental
326 318 $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
327 319 -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack
328 320 -r--r--r-- 226 39443fa1064182e93d968b5cba292eb5283260d0.datapack
329 321 $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
330 322 -r--r--r-- 336 604552d403a1381749faf656feca0ca265a6d52c.histpack
331 323 -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack
332 324
333 325 Pull should run background repack
334 326 $ cat >> .hg/hgrc <<EOF
335 327 > [remotefilelog]
336 328 > backgroundrepack=True
337 329 > EOF
338 330 $ clearcache
339 331 $ hg prefetch -r 0
340 332 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
341 333 $ hg prefetch -r 1
342 334 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
343 335 $ hg prefetch -r 2
344 336 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
345 337 $ hg prefetch -r 3
346 338 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
347 339
348 340 $ hg pull
349 341 pulling from ssh://user@dummy/master
350 342 searching for changes
351 343 no changes found
352 344 (running background incremental repack)
353 345 $ sleep 0.5
354 346 $ hg debugwaitonrepack >/dev/null 2>&1
355 347 $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
356 348 -r--r--r-- 303 156a6c1c83aeb69422d7936e0a46ba9bc06a71c0.datapack
357 349 $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
358 350 -r--r--r-- 336 604552d403a1381749faf656feca0ca265a6d52c.histpack
359 351
360 352 Test environment variable resolution
361 353 $ CACHEPATH=$TESTTMP/envcache hg prefetch --config 'remotefilelog.cachepath=$CACHEPATH'
362 354 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
363 355 $ find $TESTTMP/envcache | sort
364 356 $TESTTMP/envcache
365 357 $TESTTMP/envcache/master
366 358 $TESTTMP/envcache/master/95
367 359 $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a
368 360 $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/577959738234a1eb241ed3ed4b22a575833f56e0
369 361 $TESTTMP/envcache/repos
370 362
371 363 Test local remotefilelog blob is correct when based on a pack
372 364 $ hg prefetch -r .
373 365 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
374 366 $ echo >> y
375 367 $ hg commit -m y2
376 368 $ hg debugremotefilelog .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808
377 369 size: 9 bytes
378 370 path: .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808
379 371 key: b70860edba4f
380 372
381 373 node => p1 p2 linknode copyfrom
382 374 b70860edba4f => 577959738234 000000000000 08d3fbc98c48
383 375 577959738234 => 1bb2e6237e03 000000000000 c7faf2fc439a x
384 376 1bb2e6237e03 => d4a3ed9310e5 000000000000 0b03bbc9e1e7
385 377 d4a3ed9310e5 => aee31534993a 000000000000 421535db10b6
386 378 aee31534993a => 1406e7411862 000000000000 a89d614e2364
387 379 1406e7411862 => 000000000000 000000000000 b292c1e3311f
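The incremental-repack steps above hinge on remotefilelog.data.generations, which buckets packs by size. A toy sketch of that bucketing; the thresholds come from the test hgrc, but the pick rule (three packs per generation) is a simplifying assumption, not the extension's exact policy:

    GENERATIONS = (60, 150)  # mirrors data.generations in the test hgrc

    def generation(packsize, cutoffs=GENERATIONS):
        # Smaller packs land in younger generations; anything past the last
        # cutoff falls into the oldest one.
        for gen, cutoff in enumerate(sorted(cutoffs)):
            if packsize <= cutoff:
                return gen
        return len(cutoffs)

    def pickpacks(packsizes, cutoffs=GENERATIONS, minpacks=3):
        # Only a generation that has accumulated several small packs is
        # worth rewriting into one bigger pack.
        buckets = {}
        for name, size in packsizes.items():
            buckets.setdefault(generation(size, cutoffs), []).append(name)
        return [names for names in buckets.values() if len(names) >= minpacks]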
@@ -1,468 +1,459 b''
1 1 #require no-windows
2 2
3 3 $ . "$TESTDIR/remotefilelog-library.sh"
4 4 # devel.remotefilelog.ensurestart: reduce race condition with
5 5 # waiton{repack/prefetch}
6 6 $ cat >> $HGRCPATH <<EOF
7 7 > [devel]
8 8 > remotefilelog.ensurestart=True
9 9 > EOF
10 10
11 11 $ hg init master
12 12 $ cd master
13 13 $ cat >> .hg/hgrc <<EOF
14 14 > [remotefilelog]
15 15 > server=True
16 16 > serverexpiration=-1
17 17 > EOF
18 18 $ echo x > x
19 19 $ hg commit -qAm x
20 20 $ echo x >> x
21 21 $ hg commit -qAm x2
22 22 $ cd ..
23 23
24 24 $ hgcloneshallow ssh://user@dummy/master shallow -q
25 25 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
26 26
27 27 # Set the prefetchdays config to zero so that all commits are prefetched
28 28 # no matter what their creation date is.
29 29 $ cd shallow
30 30 $ cat >> .hg/hgrc <<EOF
31 31 > [remotefilelog]
32 32 > prefetchdays=0
33 33 > EOF
34 34 $ cd ..
35 35
36 36 # Test that repack cleans up the old files and creates new packs
37 37
38 38 $ cd shallow
39 39 $ find $CACHEDIR | sort
40 40 $TESTTMP/hgcache
41 41 $TESTTMP/hgcache/master
42 42 $TESTTMP/hgcache/master/11
43 43 $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072
44 44 $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/aee31534993a501858fb6dd96a065671922e7d51
45 45 $TESTTMP/hgcache/repos
46 46
47 47 $ hg repack
48 48
49 49 $ find $CACHEDIR | sort
50 50 $TESTTMP/hgcache
51 51 $TESTTMP/hgcache/master
52 52 $TESTTMP/hgcache/master/packs
53 53 $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx
54 54 $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack
55 55 $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx
56 56 $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack
57 $TESTTMP/hgcache/master/packs/repacklock
58 57 $TESTTMP/hgcache/repos
59 58
60 59 # Test that the packs are readonly
61 60 $ ls_l $CACHEDIR/master/packs
62 61 -r--r--r-- 1145 1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx
63 62 -r--r--r-- 172 1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack
64 63 -r--r--r-- 1074 b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx
65 64 -r--r--r-- 72 b1e0cfc7f345e408a7825e3081501959488d59ce.datapack
66 -rw-r--r-- 0 repacklock
67 65
68 66 # Test that the data in the new packs is accessible
69 67 $ hg cat -r . x
70 68 x
71 69 x
72 70
73 71 # Test that adding new data and repacking it results in the loose data and the
74 72 # old packs being combined.
75 73
76 74 $ cd ../master
77 75 $ echo x >> x
78 76 $ hg commit -m x3
79 77 $ cd ../shallow
80 78 $ hg pull -q
81 79 $ hg up -q tip
82 80 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
83 81
84 82 $ find $CACHEDIR -type f | sort
85 83 $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/d4a3ed9310e5bd9887e3bf779da5077efab28216
86 84 $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx
87 85 $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack
88 86 $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx
89 87 $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack
90 $TESTTMP/hgcache/master/packs/repacklock
91 88 $TESTTMP/hgcache/repos
92 89
93 90 # First assert that with --packsonly, the loose object will be ignored:
94 91
95 92 $ hg repack --packsonly
96 93
97 94 $ find $CACHEDIR -type f | sort
98 95 $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/d4a3ed9310e5bd9887e3bf779da5077efab28216
99 96 $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx
100 97 $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack
101 98 $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx
102 99 $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack
103 $TESTTMP/hgcache/master/packs/repacklock
104 100 $TESTTMP/hgcache/repos
105 101
106 102 $ hg repack --traceback
107 103
108 104 $ find $CACHEDIR -type f | sort
109 105 $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx
110 106 $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack
111 107 $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx
112 108 $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
113 $TESTTMP/hgcache/master/packs/repacklock
114 109 $TESTTMP/hgcache/repos
115 110
116 111 # Verify all the file data is still available
117 112 $ hg cat -r . x
118 113 x
119 114 x
120 115 x
121 116 $ hg cat -r '.^' x
122 117 x
123 118 x
124 119
125 120 # Test that repacking again without new data does not delete the pack files
126 121 # and did not change the pack names
127 122 $ hg repack
128 123 $ find $CACHEDIR -type f | sort
129 124 $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx
130 125 $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack
131 126 $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx
132 127 $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
133 $TESTTMP/hgcache/master/packs/repacklock
134 128 $TESTTMP/hgcache/repos
135 129
136 130 # Run two repacks at once
137 131 $ hg repack --config "hooks.prerepack=sleep 3" &
138 132 $ sleep 1
139 133 $ hg repack
140 134 skipping repack - another repack is already running
141 135 $ hg debugwaitonrepack >/dev/null 2>&1
142 136
143 137 # Run repack in the background
144 138 $ cd ../master
145 139 $ echo x >> x
146 140 $ hg commit -m x4
147 141 $ cd ../shallow
148 142 $ hg pull -q
149 143 $ hg up -q tip
150 144 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
151 145 $ find $CACHEDIR -type f | sort
152 146 $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1bb2e6237e035c8f8ef508e281f1ce075bc6db72
153 147 $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx
154 148 $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack
155 149 $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx
156 150 $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
157 $TESTTMP/hgcache/master/packs/repacklock
158 151 $TESTTMP/hgcache/repos
159 152
160 153 $ hg repack --background
161 154 (running background repack)
162 155 $ sleep 0.5
163 156 $ hg debugwaitonrepack >/dev/null 2>&1
164 157 $ find $CACHEDIR -type f | sort
165 158 $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0.dataidx
166 159 $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0.datapack
167 160 $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histidx
168 161 $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histpack
169 $TESTTMP/hgcache/master/packs/repacklock
170 162 $TESTTMP/hgcache/repos
171 163
172 164 # Test debug commands
173 165
174 166 $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack
175 167 $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0:
176 168 x:
177 169 Node Delta Base Delta Length Blob Size
178 170 1bb2e6237e03 000000000000 8 8
179 171 d4a3ed9310e5 1bb2e6237e03 12 6
180 172 aee31534993a d4a3ed9310e5 12 4
181 173
182 174 Total: 32 18 (77.8% bigger)
183 175 $ hg debugdatapack --long $TESTTMP/hgcache/master/packs/*.datapack
184 176 $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0:
185 177 x:
186 178 Node Delta Base Delta Length Blob Size
187 179 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 8 8
188 180 d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 12 6
189 181 aee31534993a501858fb6dd96a065671922e7d51 d4a3ed9310e5bd9887e3bf779da5077efab28216 12 4
190 182
191 183 Total: 32 18 (77.8% bigger)
192 184 $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack --node d4a3ed9310e5bd9887e3bf779da5077efab28216
193 185 $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0:
194 186
195 187 x
196 188 Node Delta Base Delta SHA1 Delta Length
197 189 d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 77029ab56e83ea2115dd53ff87483682abe5d7ca 12
198 190 Node Delta Base Delta SHA1 Delta Length
199 191 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 7ca8c71a64f7b56380e77573da2f7a5fdd2ecdb5 8
200 192 $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx
201 193
202 194 x
203 195 Node P1 Node P2 Node Link Node Copy From
204 196 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7
205 197 d4a3ed9310e5 aee31534993a 000000000000 421535db10b6
206 198 aee31534993a 1406e7411862 000000000000 a89d614e2364
207 199 1406e7411862 000000000000 000000000000 b292c1e3311f
208 200
209 201 # Test copy tracing from a pack
210 202 $ cd ../master
211 203 $ hg mv x y
212 204 $ hg commit -m 'move x to y'
213 205 $ cd ../shallow
214 206 $ hg pull -q
215 207 $ hg up -q tip
216 208 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
217 209 $ hg repack
218 210 $ hg log -f y -T '{desc}\n'
219 211 move x to y
220 212 x4
221 213 x3
222 214 x2
223 215 x
224 216
225 217 # Test copy trace across rename and back
226 218 $ cp -R $TESTTMP/hgcache/master/packs $TESTTMP/backuppacks
227 219 $ cd ../master
228 220 $ hg mv y x
229 221 $ hg commit -m 'move y back to x'
230 222 $ hg revert -r 0 x
231 223 $ mv x y
232 224 $ hg add y
233 225 $ echo >> y
234 226 $ hg revert x
235 227 $ hg commit -m 'add y back without metadata'
236 228 $ cd ../shallow
237 229 $ hg pull -q
238 230 $ hg up -q tip
239 231 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over * (glob)
240 232 $ hg repack
241 233 $ ls $TESTTMP/hgcache/master/packs
242 234 bfd60adb76018bb952e27cd23fc151bf94865d7d.histidx
243 235 bfd60adb76018bb952e27cd23fc151bf94865d7d.histpack
244 236 fb3aa57b22789ebcc45706c352e2d6af099c5816.dataidx
245 237 fb3aa57b22789ebcc45706c352e2d6af099c5816.datapack
246 repacklock
247 238 $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx
248 239
249 240 x
250 241 Node P1 Node P2 Node Link Node Copy From
251 242 cd410a44d584 577959738234 000000000000 609547eda446 y
252 243 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7
253 244 d4a3ed9310e5 aee31534993a 000000000000 421535db10b6
254 245 aee31534993a 1406e7411862 000000000000 a89d614e2364
255 246 1406e7411862 000000000000 000000000000 b292c1e3311f
256 247
257 248 y
258 249 Node P1 Node P2 Node Link Node Copy From
259 250 577959738234 1bb2e6237e03 000000000000 c7faf2fc439a x
260 251 21f46f2721e7 000000000000 000000000000 d6868642b790
261 252 $ hg strip -r '.^'
262 253 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
263 254 saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob)
264 255 $ hg -R ../master strip -r '.^'
265 256 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
266 257 saved backup bundle to $TESTTMP/master/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob)
267 258
268 259 $ rm -rf $TESTTMP/hgcache/master/packs
269 260 $ cp -R $TESTTMP/backuppacks $TESTTMP/hgcache/master/packs
270 261
271 262 # Test repacking datapack without history
272 263 $ rm -rf $CACHEDIR/master/packs/*hist*
273 264 $ hg repack
274 265 $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack
275 266 $TESTTMP/hgcache/master/packs/922aca43dbbeda4d250565372e8892ec7b08da6a:
276 267 x:
277 268 Node Delta Base Delta Length Blob Size
278 269 1bb2e6237e03 000000000000 8 8
279 270 d4a3ed9310e5 1bb2e6237e03 12 6
280 271 aee31534993a d4a3ed9310e5 12 4
281 272
282 273 Total: 32 18 (77.8% bigger)
283 274 y:
284 275 Node Delta Base Delta Length Blob Size
285 276 577959738234 000000000000 70 8
286 277
287 278 Total: 70 8 (775.0% bigger)
288 279
289 280 $ hg cat -r ".^" x
290 281 x
291 282 x
292 283 x
293 284 x
294 285
295 286 Incremental repack
296 287 $ rm -rf $CACHEDIR/master/packs/*
297 288 $ cat >> .hg/hgrc <<EOF
298 289 > [remotefilelog]
299 290 > data.generations=60
300 291 > 150
301 292 > EOF
302 293
303 294 Single pack - repack does nothing
304 295 $ hg prefetch -r 0
305 296 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
306 297 $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
307 298 [1]
308 299 $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
309 300 [1]
310 301 $ hg repack --incremental
311 302 $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
312 303 -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack
313 304 $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
314 305 -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack
315 306
316 307 3 gen1 packs, 1 gen0 pack - packs 3 gen1 into 1
317 308 $ hg prefetch -r 1
318 309 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
319 310 $ hg prefetch -r 2
320 311 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
321 312 $ hg prefetch -r 38
322 313 abort: unknown revision '38'!
323 314 [255]
324 315 $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
325 316 -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack
326 317 $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
327 318 -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack
328 319
329 320 For the data packs, the repackmaxpacksize limit is set to 64 so that the
330 321 data pack of size 65 exceeds it. This effectively ensures that no generation
331 322 has 3 packs and therefore no packs are chosen for the incremental repacking.
332 323 For the history packs, repackmaxpacksize is set to 0, which should always
333 324 result in no repacking. (A sketch of this size filter follows the second run below.)
334 325 $ hg repack --incremental --config remotefilelog.data.repackmaxpacksize=64 \
335 326 > --config remotefilelog.history.repackmaxpacksize=0
336 327 $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
337 328 -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack
338 329 -r--r--r-- 149 78840d69389c7404327f7477e3931c89945c37d1.datapack
339 330 $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
340 331 -r--r--r-- 254 8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
341 332 -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack
342 333
343 334 Setting the repackmaxpacksize limit to the size of the biggest pack file
344 335 ensures that the pack is effectively ignored in the incremental repacking.
345 336 $ hg repack --incremental --config remotefilelog.data.repackmaxpacksize=65 \
346 337 > --config remotefilelog.history.repackmaxpacksize=336
347 338 $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
348 339 -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack
349 340 -r--r--r-- 149 78840d69389c7404327f7477e3931c89945c37d1.datapack
350 341 $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
351 342 -r--r--r-- 254 8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
352 343 -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack
353 344
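Both runs above reduce to a size filter applied before any generation bucketing; a one-line sketch of that filter (function name assumed):

    def repackable(packsizes, maxpacksize):
        # Packs above the limit are left alone; with a limit of 0 nothing
        # qualifies, which is why the history packs never merge.
        return {name: size for name, size in packsizes.items()
                if size <= maxpacksize}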
354 345 1 gen3 pack, 1 gen0 pack - does nothing
355 346 $ hg repack --incremental
356 347 $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
357 348 -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack
358 349 -r--r--r-- 149 78840d69389c7404327f7477e3931c89945c37d1.datapack
359 350 $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
360 351 -r--r--r-- 254 8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
361 352 -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack
362 353
363 354 Pull should run background repack
364 355 $ cat >> .hg/hgrc <<EOF
365 356 > [remotefilelog]
366 357 > backgroundrepack=True
367 358 > EOF
368 359 $ clearcache
369 360 $ hg prefetch -r 0
370 361 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
371 362 $ hg prefetch -r 1
372 363 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
373 364 $ hg prefetch -r 2
374 365 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
375 366 $ hg prefetch -r 3
376 367 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
377 368
378 369 $ hg pull
379 370 pulling from ssh://user@dummy/master
380 371 searching for changes
381 372 no changes found
382 373 (running background incremental repack)
383 374 $ sleep 0.5
384 375 $ hg debugwaitonrepack >/dev/null 2>&1
385 376 $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
386 377 -r--r--r-- 303 156a6c1c83aeb69422d7936e0a46ba9bc06a71c0.datapack
387 378 $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
388 379 -r--r--r-- 336 604552d403a1381749faf656feca0ca265a6d52c.histpack
389 380
390 381 Test environment variable resolution
391 382 $ CACHEPATH=$TESTTMP/envcache hg prefetch --config 'remotefilelog.cachepath=$CACHEPATH'
392 383 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
393 384 $ find $TESTTMP/envcache | sort
394 385 $TESTTMP/envcache
395 386 $TESTTMP/envcache/master
396 387 $TESTTMP/envcache/master/95
397 388 $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a
398 389 $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/577959738234a1eb241ed3ed4b22a575833f56e0
399 390 $TESTTMP/envcache/repos
400 391
401 392 Test local remotefilelog blob is correct when based on a pack
402 393 $ hg prefetch -r .
403 394 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
404 395 $ echo >> y
405 396 $ hg commit -m y2
406 397 $ hg debugremotefilelog .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808
407 398 size: 9 bytes
408 399 path: .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808
409 400 key: b70860edba4f
410 401
411 402 node => p1 p2 linknode copyfrom
412 403 b70860edba4f => 577959738234 000000000000 08d3fbc98c48
413 404 577959738234 => 1bb2e6237e03 000000000000 c7faf2fc439a x
414 405 1bb2e6237e03 => d4a3ed9310e5 000000000000 0b03bbc9e1e7
415 406 d4a3ed9310e5 => aee31534993a 000000000000 421535db10b6
416 407 aee31534993a => 1406e7411862 000000000000 a89d614e2364
417 408 1406e7411862 => 000000000000 000000000000 b292c1e3311f
418 409
419 410 Test limiting the max delta chain length
420 411 $ hg repack --config packs.maxchainlen=1
421 412 $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.dataidx
422 413 $TESTTMP/hgcache/master/packs/f258af4c033dd5cd32b4dbc42a1efcd8e4c7d909:
423 414 x:
424 415 Node Delta Base Delta Length Blob Size
425 416 1bb2e6237e03 000000000000 8 8
426 417 d4a3ed9310e5 1bb2e6237e03 12 6
427 418 aee31534993a 000000000000 4 4
428 419 1406e7411862 aee31534993a 12 2
429 420
430 421 Total: 36 20 (80.0% bigger)
431 422 y:
432 423 Node Delta Base Delta Length Blob Size
433 424 577959738234 000000000000 70 8
434 425
435 426 Total: 70 8 (775.0% bigger)
436 427
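In the listing above, aee31534993a restarts against the null base because the chain ahead of it has already used its single allowed delta. A toy sketch of the rule under simplified bookkeeping (names assumed):

    NULLID = b'\0' * 20  # shown abbreviated as 000000000000 above

    def nextdeltabase(prevnode, chainlen, maxchainlen):
        # Once the chain hits the limit, store a full text against the null
        # base and start a new chain; otherwise keep deltaing.
        if maxchainlen is not None and chainlen >= maxchainlen:
            return NULLID, 0
        return prevnode, chainlen + 1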
437 428 Test huge pack cleanup using different values of packs.maxpacksize:
438 429 $ hg repack --incremental --debug
439 430 $ hg repack --incremental --debug --config packs.maxpacksize=512
440 431 removing oversize packfile $TESTTMP/hgcache/master/packs/f258af4c033dd5cd32b4dbc42a1efcd8e4c7d909.datapack (425 bytes)
441 432 removing oversize packfile $TESTTMP/hgcache/master/packs/f258af4c033dd5cd32b4dbc42a1efcd8e4c7d909.dataidx (1.21 KB)
442 433
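Note that the 425-byte datapack is removed even under a 512-byte limit, so the check evidently weighs a pack together with its index. A hedged sketch consistent with that output; the function and its pairing logic are assumptions:

    import os

    def removeoversize(packdir, maxpacksize, ui):
        for name in os.listdir(packdir):
            if not name.endswith('.datapack'):
                continue
            pack = os.path.join(packdir, name)
            idx = pack[:-len('.datapack')] + '.dataidx'
            # Measure the pack and its index together, as the output implies.
            if os.path.getsize(pack) + os.path.getsize(idx) > maxpacksize:
                for path in (pack, idx):
                    ui.debug('removing oversize packfile %s (%d bytes)\n'
                             % (path, os.path.getsize(path)))
                    os.unlink(path)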
443 434 Do a repack where the new pack reuses a delta from the old pack
444 435 $ clearcache
445 436 $ hg prefetch -r '2::3'
446 437 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over * (glob)
447 438 $ hg repack
448 439 $ hg debugdatapack $CACHEDIR/master/packs/*.datapack
449 440 $TESTTMP/hgcache/master/packs/9ec6b30891bd851320acb7c66b69a2bdf41c8df3:
450 441 x:
451 442 Node Delta Base Delta Length Blob Size
452 443 1bb2e6237e03 000000000000 8 8
453 444 d4a3ed9310e5 1bb2e6237e03 12 6
454 445
455 446 Total: 20 14 (42.9% bigger)
456 447 $ hg prefetch -r '0::1'
457 448 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over * (glob)
458 449 $ hg repack
459 450 $ hg debugdatapack $CACHEDIR/master/packs/*.datapack
460 451 $TESTTMP/hgcache/master/packs/156a6c1c83aeb69422d7936e0a46ba9bc06a71c0:
461 452 x:
462 453 Node Delta Base Delta Length Blob Size
463 454 1bb2e6237e03 000000000000 8 8
464 455 d4a3ed9310e5 1bb2e6237e03 12 6
465 456 aee31534993a d4a3ed9310e5 12 4
466 457 1406e7411862 aee31534993a 12 2
467 458
468 459 Total: 44 20 (120.0% bigger)
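The repacklock entries dropping out of the directory listings throughout this series reflect the change under test: repack now takes a Mercurial lock instead of leaving its own lock file in the pack directory, so a concurrent repack fails fast. A minimal sketch of that pattern; the function and vfs setup are illustrative, not the extension's exact code:

    from mercurial import error, lock as lockmod, vfs as vfsmod

    def trylockrepack(ui, packpath):
        # timeout=0 makes acquisition non-blocking, so a second repack is
        # detected immediately rather than racing on a hand-rolled lock file.
        try:
            return lockmod.lock(vfsmod.vfs(packpath), 'repacklock', timeout=0)
        except error.LockHeld:
            ui.warn('skipping repack - another repack is already running\n')
            return None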