remotefilelog: adapt the `debugindex` command to past API changes...
Matt Harbison
r52714:c371134f default
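
The change is mechanical but easy to get wrong: `buildtemprevlog()` returns a `filelog.filelog`, and the filelog class no longer proxies low-level revlog attributes such as the format flags, `deltaparent()`, `chainbase()`, `start()`, `length()`, `flags()`, and `rawsize()`. After the API change they must be reached through the underlying revlog, obtained with `get_revlog()`. A minimal sketch of the before/after access pattern, assuming `fl` is a filelog like the one built by `buildtemprevlog()` below (`describe_rev` is an illustrative helper, not part of the patch):

    from mercurial import revlog

    def describe_rev(fl, rev):
        # Post-change: low-level queries go through the wrapped revlog.
        rl = fl.get_revlog()
        # Pre-change spelling was: fl.version & revlog.FLAG_GENERALDELTA
        generaldelta = rl._format_flags & revlog.FLAG_GENERALDELTA
        base = rl.deltaparent(rev) if generaldelta else rl.chainbase(rev)
        # start()/length()/rawsize()/flags() moved behind get_revlog() too.
        return rl.start(rev), rl.length(rev), rl.rawsize(rev), base
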
@@ -1,480 +1,480 @@
 # debugcommands.py - debug logic for remotefilelog
 #
 # Copyright 2013 Facebook, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 import os
 import zlib
 
 from mercurial.node import (
     bin,
     hex,
     sha1nodeconstants,
     short,
 )
 from mercurial.i18n import _
 from mercurial.pycompat import open
 from mercurial import (
     error,
     filelog,
     lock as lockmod,
     pycompat,
     revlog,
 )
 from mercurial.utils import hashutil
 from . import (
     constants,
     datapack,
     fileserverclient,
     historypack,
     repack,
     shallowutil,
 )
 
 
 def debugremotefilelog(ui, path, **opts) -> None:
     decompress = opts.get('decompress')
 
     size, firstnode, mapping = parsefileblob(path, decompress)
 
     ui.status(_(b"size: %d bytes\n") % size)
     ui.status(_(b"path: %s \n") % path)
     ui.status(_(b"key: %s \n") % (short(firstnode)))
     ui.status(_(b"\n"))
     ui.status(
         _(b"%12s => %12s %13s %13s %12s\n")
         % (b"node", b"p1", b"p2", b"linknode", b"copyfrom")
     )
 
     queue = [firstnode]
     while queue:
         node = queue.pop(0)
         p1, p2, linknode, copyfrom = mapping[node]
         ui.status(
             _(b"%s => %s %s %s %s\n")
             % (short(node), short(p1), short(p2), short(linknode), copyfrom)
         )
         if p1 != sha1nodeconstants.nullid:
             queue.append(p1)
         if p2 != sha1nodeconstants.nullid:
             queue.append(p2)
 
 
 def buildtemprevlog(repo, file):
     # get filename key
     filekey = hex(hashutil.sha1(file).digest())
     filedir = os.path.join(repo.path, b'store/data', filekey)
 
     # sort all entries based on linkrev
     fctxs = []
     for filenode in os.listdir(filedir):
         if b'_old' not in filenode:
             fctxs.append(repo.filectx(file, fileid=bin(filenode)))
 
     fctxs = sorted(fctxs, key=lambda x: x.linkrev())
 
     # add to revlog
     temppath = repo.sjoin(b'data/temprevlog.i')
     if os.path.exists(temppath):
         os.remove(temppath)
     r = filelog.filelog(repo.svfs, b'temprevlog')
 
     class faket:
         def add(self, a, b, c):
             pass
 
     t = faket()
     for fctx in fctxs:
         if fctx.node() not in repo:
             continue
 
         p = fctx.filelog().parents(fctx.filenode())
         meta = {}
         if fctx.renamed():
             meta[b'copy'] = fctx.renamed()[0]
             meta[b'copyrev'] = hex(fctx.renamed()[1])
 
         r.add(fctx.data(), meta, t, fctx.linkrev(), p[0], p[1])
 
     return r
 
 
 def debugindex(orig, ui, repo, file_=None, **opts):
     """dump the contents of an index file"""
     if (
         opts.get('changelog')
         or opts.get('manifest')
         or opts.get('dir')
         or not shallowutil.isenabled(repo)
         or not repo.shallowmatch(file_)
     ):
         return orig(ui, repo, file_, **opts)
 
     r = buildtemprevlog(repo, file_)
 
     # debugindex like normal
     format = opts.get('format', 0)
     if format not in (0, 1):
         raise error.Abort(_(b"unknown format %d") % format)
 
-    generaldelta = r.version & revlog.FLAG_GENERALDELTA
+    generaldelta = r.get_revlog()._format_flags & revlog.FLAG_GENERALDELTA
     if generaldelta:
         basehdr = b' delta'
     else:
         basehdr = b' base'
 
     if format == 0:
         ui.write(
             (
                 b"   rev    offset  length " + basehdr + b" linkrev"
                 b" nodeid       p1           p2\n"
             )
         )
     elif format == 1:
         ui.write(
             (
                 b"   rev flag   offset   length"
                 b"     size " + basehdr + b"   link     p1     p2"
                 b"       nodeid\n"
             )
         )
 
     for i in r:
         node = r.node(i)
         if generaldelta:
-            base = r.deltaparent(i)
+            base = r.get_revlog().deltaparent(i)
         else:
-            base = r.chainbase(i)
+            base = r.get_revlog().chainbase(i)
         if format == 0:
             try:
                 pp = r.parents(node)
             except Exception:
                 pp = [repo.nullid, repo.nullid]
             ui.write(
                 b"% 6d % 9d % 7d % 6d % 7d %s %s %s\n"
                 % (
                     i,
-                    r.start(i),
-                    r.length(i),
+                    r.get_revlog().start(i),
+                    r.get_revlog().length(i),
                     base,
                     r.linkrev(i),
                     short(node),
                     short(pp[0]),
                     short(pp[1]),
                 )
             )
         elif format == 1:
             pr = r.parentrevs(i)
             ui.write(
                 b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n"
                 % (
                     i,
-                    r.flags(i),
-                    r.start(i),
-                    r.length(i),
-                    r.rawsize(i),
+                    r.get_revlog().flags(i),
+                    r.get_revlog().start(i),
+                    r.get_revlog().length(i),
+                    r.get_revlog().rawsize(i),
                     base,
                     r.linkrev(i),
                     pr[0],
                     pr[1],
                     short(node),
                 )
             )
 
 
 def debugindexdot(orig, ui, repo, file_):
     """dump an index DAG as a graphviz dot file"""
     if not shallowutil.isenabled(repo):
         return orig(ui, repo, file_)
 
     r = buildtemprevlog(repo, os.path.basename(file_)[:-2])
 
     ui.writenoi18n(b"digraph G {\n")
     for i in r:
         node = r.node(i)
         pp = r.parents(node)
         ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
         if pp[1] != repo.nullid:
             ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
     ui.write(b"}\n")
 
 
 def verifyremotefilelog(ui, path, **opts):
     decompress = opts.get('decompress')
 
     for root, dirs, files in os.walk(path):
         for file in files:
             if file == b"repos":
                 continue
             filepath = os.path.join(root, file)
             size, firstnode, mapping = parsefileblob(filepath, decompress)
             for p1, p2, linknode, copyfrom in mapping.values():
                 if linknode == sha1nodeconstants.nullid:
                     actualpath = os.path.relpath(root, path)
                     key = fileserverclient.getcachekey(
                         b"reponame", actualpath, file
                     )
                     ui.status(
                         b"%s %s\n" % (key, os.path.relpath(filepath, path))
                     )
 
 
 def _decompressblob(raw):
     return zlib.decompress(raw)
 
 
 def parsefileblob(path, decompress):
     f = open(path, b"rb")
     try:
         raw = f.read()
     finally:
         f.close()
 
     if decompress:
         raw = _decompressblob(raw)
 
     offset, size, flags = shallowutil.parsesizeflags(raw)
     start = offset + size
 
     firstnode = None
 
     mapping = {}
     while start < len(raw):
         divider = raw.index(b'\0', start + 80)
 
         currentnode = raw[start : (start + 20)]
         if not firstnode:
             firstnode = currentnode
 
         p1 = raw[(start + 20) : (start + 40)]
         p2 = raw[(start + 40) : (start + 60)]
         linknode = raw[(start + 60) : (start + 80)]
         copyfrom = raw[(start + 80) : divider]
 
         mapping[currentnode] = (p1, p2, linknode, copyfrom)
         start = divider + 1
 
     return size, firstnode, mapping
 
 
 def debugdatapack(ui, *paths, **opts):
     for path in paths:
         if b'.data' in path:
             path = path[: path.index(b'.data')]
         ui.write(b"%s:\n" % path)
         dpack = datapack.datapack(path)
         node = opts.get('node')
         if node:
             deltachain = dpack.getdeltachain(b'', bin(node))
             dumpdeltachain(ui, deltachain, **opts)
             return
 
         if opts.get('long'):
             hashformatter = hex
             hashlen = 42
         else:
             hashformatter = short
             hashlen = 14
 
         lastfilename = None
         totaldeltasize = 0
         totalblobsize = 0
 
         def printtotals():
             if lastfilename is not None:
                 ui.write(b"\n")
             if not totaldeltasize or not totalblobsize:
                 return
             difference = totalblobsize - totaldeltasize
             deltastr = b"%0.1f%% %s" % (
                 (100.0 * abs(difference) / totalblobsize),
                 (b"smaller" if difference > 0 else b"bigger"),
             )
 
             ui.writenoi18n(
                 b"Total:%s%s %s (%s)\n"
                 % (
                     b"".ljust(2 * hashlen - len(b"Total:")),
                     (b'%d' % totaldeltasize).ljust(12),
                     (b'%d' % totalblobsize).ljust(9),
                     deltastr,
                 )
             )
 
         bases = {}
         nodes = set()
         failures = 0
         for filename, node, deltabase, deltalen in dpack.iterentries():
             bases[node] = deltabase
             if node in nodes:
                 ui.write((b"Bad entry: %s appears twice\n" % short(node)))
                 failures += 1
             nodes.add(node)
             if filename != lastfilename:
                 printtotals()
                 name = b'(empty name)' if filename == b'' else filename
                 ui.write(b"%s:\n" % name)
                 ui.write(
                     b"%s%s%s%s\n"
                     % (
                         b"Node".ljust(hashlen),
                         b"Delta Base".ljust(hashlen),
                         b"Delta Length".ljust(14),
                         b"Blob Size".ljust(9),
                     )
                 )
                 lastfilename = filename
                 totalblobsize = 0
                 totaldeltasize = 0
 
             # Metadata could be missing, in which case it will be an empty dict.
             meta = dpack.getmeta(filename, node)
             if constants.METAKEYSIZE in meta:
                 blobsize = meta[constants.METAKEYSIZE]
                 totaldeltasize += deltalen
                 totalblobsize += blobsize
             else:
                 blobsize = b"(missing)"
             ui.write(
                 b"%s  %s  %s%s\n"
                 % (
                     hashformatter(node),
                     hashformatter(deltabase),
                     (b'%d' % deltalen).ljust(14),
                     pycompat.bytestr(blobsize),
                 )
             )
 
         if filename is not None:
             printtotals()
 
         failures += _sanitycheck(ui, set(nodes), bases)
         if failures > 1:
             ui.warn((b"%d failures\n" % failures))
             return 1
 
 
 def _sanitycheck(ui, nodes, bases):
360 """
360 """
361 Does some basic sanity checking on a packfiles with ``nodes`` ``bases`` (a
361 Does some basic sanity checking on a packfiles with ``nodes`` ``bases`` (a
362 mapping of node->base):
362 mapping of node->base):
363
363
364 - Each deltabase must itself be a node elsewhere in the pack
364 - Each deltabase must itself be a node elsewhere in the pack
365 - There must be no cycles
365 - There must be no cycles
366 """
366 """
     failures = 0
     for node in nodes:
         seen = set()
         current = node
         deltabase = bases[current]
 
         while deltabase != sha1nodeconstants.nullid:
             if deltabase not in nodes:
                 ui.warn(
                     (
                         b"Bad entry: %s has an unknown deltabase (%s)\n"
                         % (short(node), short(deltabase))
                     )
                 )
                 failures += 1
                 break
 
             if deltabase in seen:
                 ui.warn(
                     (
                         b"Bad entry: %s has a cycle (at %s)\n"
                         % (short(node), short(deltabase))
                     )
                 )
                 failures += 1
                 break
 
             current = deltabase
             seen.add(current)
             deltabase = bases[current]
         # Since ``node`` begins a valid chain, reset/memoize its base to nullid
         # so we don't traverse it again.
         bases[node] = sha1nodeconstants.nullid
     return failures
 
 
 def dumpdeltachain(ui, deltachain, **opts):
     hashformatter = hex
     hashlen = 40
 
     lastfilename = None
     for filename, node, filename, deltabasenode, delta in deltachain:
         if filename != lastfilename:
             ui.write(b"\n%s\n" % filename)
             lastfilename = filename
             ui.write(
                 b"%s  %s  %s  %s\n"
                 % (
                     b"Node".ljust(hashlen),
                     b"Delta Base".ljust(hashlen),
                     b"Delta SHA1".ljust(hashlen),
                     b"Delta Length".ljust(6),
                 )
             )
 
         ui.write(
             b"%s  %s  %s  %d\n"
             % (
                 hashformatter(node),
                 hashformatter(deltabasenode),
                 hex(hashutil.sha1(delta).digest()),
                 len(delta),
             )
         )
 
 
 def debughistorypack(ui, path):
     if b'.hist' in path:
         path = path[: path.index(b'.hist')]
     hpack = historypack.historypack(path)
 
     lastfilename = None
     for entry in hpack.iterentries():
         filename, node, p1node, p2node, linknode, copyfrom = entry
         if filename != lastfilename:
             ui.write(b"\n%s\n" % filename)
             ui.write(
                 b"%s%s%s%s%s\n"
                 % (
                     b"Node".ljust(14),
                     b"P1 Node".ljust(14),
                     b"P2 Node".ljust(14),
                     b"Link Node".ljust(14),
                     b"Copy From",
                 )
             )
             lastfilename = filename
         ui.write(
             b"%s  %s  %s  %s  %s\n"
             % (
                 short(node),
                 short(p1node),
                 short(p2node),
                 short(linknode),
                 copyfrom,
             )
         )
 
 
 def debugwaitonrepack(repo):
     with lockmod.lock(repack.repacklockvfs(repo), b"repacklock", timeout=-1):
         return
 
 
 def debugwaitonprefetch(repo):
     with repo._lock(
         repo.svfs,
         b"prefetchlock",
         True,
         None,
         None,
         _(b'prefetching in %s') % repo.origroot,
     ):
         pass
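
For reference when reading `parsefileblob()` above: after the size/flags header parsed by `shallowutil.parsesizeflags()` and the `size` bytes of file content, each ancestry entry is a fixed 80-byte block (20-byte node, p1, p2, and linknode) followed by a variable-length copyfrom path terminated by a NUL byte. A standalone sketch of one such entry, with made-up values for illustration:

    # Layout walked by parsefileblob: node | p1 | p2 | linknode | copyfrom | NUL
    node = b'\x11' * 20
    p1 = b'\x22' * 20
    p2 = b'\x00' * 20  # null parent
    linknode = b'\x33' * 20
    copyfrom = b'old/path'
    entry = node + p1 + p2 + linknode + copyfrom + b'\0'
    # The divider search starts 80 bytes in, past the four fixed-width hashes,
    # so NUL bytes inside p2 (the null parent) are never mistaken for it.
    assert entry[80 : entry.index(b'\0', 80)] == copyfrom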
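
A side note on the `orig` parameter of `debugindex` and `debugindexdot`: these functions are installed as wrappers around Mercurial's core debug commands and delegate to `orig` whenever remotefilelog is not in play. The registration lives in the extension's setup code rather than in this file; a rough sketch only, using `extensions.wrapcommand`:

    from mercurial import commands, extensions
    from . import debugcommands

    def extsetup(ui):
        # Sketch: wrap the core command so the shallow-aware debugindex runs
        # first and receives the original implementation as `orig`.
        extensions.wrapcommand(
            commands.table, b'debugindex', debugcommands.debugindex
        )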