@@ -1,378 +1,378 @@
 # debugcommands.py - debug logic for remotefilelog
 #
 # Copyright 2013 Facebook, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from __future__ import absolute_import

 import hashlib
 import os
 import zlib

 from mercurial.node import bin, hex, nullid, short
 from mercurial.i18n import _
 from mercurial import (
     error,
     filelog,
+    lock as lockmod,
     node as nodemod,
     pycompat,
     revlog,
 )
 from . import (
     constants,
     datapack,
-    extutil,
     fileserverclient,
     historypack,
     repack,
     shallowutil,
 )

 def debugremotefilelog(ui, path, **opts):
     decompress = opts.get(r'decompress')

     size, firstnode, mapping = parsefileblob(path, decompress)

     ui.status(_("size: %d bytes\n") % (size))
     ui.status(_("path: %s \n") % (path))
     ui.status(_("key: %s \n") % (short(firstnode)))
     ui.status(_("\n"))
     ui.status(_("%12s => %12s %13s %13s %12s\n") %
               ("node", "p1", "p2", "linknode", "copyfrom"))

     queue = [firstnode]
     while queue:
         node = queue.pop(0)
         p1, p2, linknode, copyfrom = mapping[node]
         ui.status(_("%s => %s %s %s %s\n") %
             (short(node), short(p1), short(p2), short(linknode), copyfrom))
         if p1 != nullid:
             queue.append(p1)
         if p2 != nullid:
             queue.append(p2)

 def buildtemprevlog(repo, file):
     # get filename key
     filekey = nodemod.hex(hashlib.sha1(file).digest())
     filedir = os.path.join(repo.path, 'store/data', filekey)

     # sort all entries based on linkrev
     fctxs = []
     for filenode in os.listdir(filedir):
         if '_old' not in filenode:
             fctxs.append(repo.filectx(file, fileid=bin(filenode)))

     fctxs = sorted(fctxs, key=lambda x: x.linkrev())

     # add to revlog
     temppath = repo.sjoin('data/temprevlog.i')
     if os.path.exists(temppath):
         os.remove(temppath)
     r = filelog.filelog(repo.svfs, 'temprevlog')

     class faket(object):
         def add(self, a, b, c):
             pass
     t = faket()
     for fctx in fctxs:
         if fctx.node() not in repo:
             continue

         p = fctx.filelog().parents(fctx.filenode())
         meta = {}
         if fctx.renamed():
             meta['copy'] = fctx.renamed()[0]
             meta['copyrev'] = hex(fctx.renamed()[1])

         r.add(fctx.data(), meta, t, fctx.linkrev(), p[0], p[1])

     return r

 def debugindex(orig, ui, repo, file_=None, **opts):
     """dump the contents of an index file"""
     if (opts.get(r'changelog') or
         opts.get(r'manifest') or
         opts.get(r'dir') or
         not shallowutil.isenabled(repo) or
         not repo.shallowmatch(file_)):
         return orig(ui, repo, file_, **opts)

     r = buildtemprevlog(repo, file_)

     # debugindex like normal
     format = opts.get('format', 0)
     if format not in (0, 1):
         raise error.Abort(_("unknown format %d") % format)

     generaldelta = r.version & revlog.FLAG_GENERALDELTA
     if generaldelta:
         basehdr = ' delta'
     else:
         basehdr = ' base'

     if format == 0:
         ui.write((" rev offset length " + basehdr + " linkrev"
                   " nodeid p1 p2\n"))
     elif format == 1:
         ui.write((" rev flag offset length"
                   " size " + basehdr + " link p1 p2"
                   " nodeid\n"))

     for i in r:
         node = r.node(i)
         if generaldelta:
             base = r.deltaparent(i)
         else:
             base = r.chainbase(i)
         if format == 0:
             try:
                 pp = r.parents(node)
             except Exception:
                 pp = [nullid, nullid]
             ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                 i, r.start(i), r.length(i), base, r.linkrev(i),
                 short(node), short(pp[0]), short(pp[1])))
         elif format == 1:
             pr = r.parentrevs(i)
             ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
                 i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                 base, r.linkrev(i), pr[0], pr[1], short(node)))

 def debugindexdot(orig, ui, repo, file_):
     """dump an index DAG as a graphviz dot file"""
     if not shallowutil.isenabled(repo):
         return orig(ui, repo, file_)

     r = buildtemprevlog(repo, os.path.basename(file_)[:-2])

     ui.write(("digraph G {\n"))
     for i in r:
         node = r.node(i)
         pp = r.parents(node)
         ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
         if pp[1] != nullid:
             ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
     ui.write("}\n")

 def verifyremotefilelog(ui, path, **opts):
     decompress = opts.get(r'decompress')

     for root, dirs, files in os.walk(path):
         for file in files:
             if file == "repos":
                 continue
             filepath = os.path.join(root, file)
             size, firstnode, mapping = parsefileblob(filepath, decompress)
             for p1, p2, linknode, copyfrom in mapping.itervalues():
                 if linknode == nullid:
                     actualpath = os.path.relpath(root, path)
                     key = fileserverclient.getcachekey("reponame", actualpath,
                                                        file)
                     ui.status("%s %s\n" % (key, os.path.relpath(filepath,
                                                                 path)))

 def _decompressblob(raw):
     return zlib.decompress(raw)

 def parsefileblob(path, decompress):
     f = open(path, "rb")
     try:
         raw = f.read()
     finally:
         f.close()

     if decompress:
         raw = _decompressblob(raw)

     offset, size, flags = shallowutil.parsesizeflags(raw)
     start = offset + size

     firstnode = None

     mapping = {}
     while start < len(raw):
         divider = raw.index('\0', start + 80)

         currentnode = raw[start:(start + 20)]
         if not firstnode:
             firstnode = currentnode

         p1 = raw[(start + 20):(start + 40)]
         p2 = raw[(start + 40):(start + 60)]
         linknode = raw[(start + 60):(start + 80)]
         copyfrom = raw[(start + 80):divider]

         mapping[currentnode] = (p1, p2, linknode, copyfrom)
         start = divider + 1

     return size, firstnode, mapping

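The loop in parsefileblob() walks the ancestor section of a remotefilelog blob: after the size header come four fixed-width 20-byte hashes (node, p1, p2, linknode), then an optional copy-from path terminated by a NUL byte. As a rough illustration only (the helper name below is invented, not part of this change), the matching writer side would look like:

# Illustration only: packing one ancestor entry in the layout that
# parsefileblob() slices apart at fixed 20-byte offsets.
def packancestorentry(node, p1, p2, linknode, copyfrom=b''):
    assert all(len(h) == 20 for h in (node, p1, p2, linknode))
    return node + p1 + p2 + linknode + copyfrom + b'\0'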
 def debugdatapack(ui, *paths, **opts):
     for path in paths:
         if '.data' in path:
             path = path[:path.index('.data')]
         ui.write("%s:\n" % path)
         dpack = datapack.datapack(path)
         node = opts.get(r'node')
         if node:
             deltachain = dpack.getdeltachain('', bin(node))
             dumpdeltachain(ui, deltachain, **opts)
             return

         if opts.get(r'long'):
             hashformatter = hex
             hashlen = 42
         else:
             hashformatter = short
             hashlen = 14

         lastfilename = None
         totaldeltasize = 0
         totalblobsize = 0
         def printtotals():
             if lastfilename is not None:
                 ui.write("\n")
             if not totaldeltasize or not totalblobsize:
                 return
             difference = totalblobsize - totaldeltasize
             deltastr = "%0.1f%% %s" % (
                 (100.0 * abs(difference) / totalblobsize),
                 ("smaller" if difference > 0 else "bigger"))

             ui.write(("Total:%s%s %s (%s)\n") % (
                 "".ljust(2 * hashlen - len("Total:")),
                 ('%d' % totaldeltasize).ljust(12),
                 ('%d' % totalblobsize).ljust(9),
                 deltastr
             ))

         bases = {}
         nodes = set()
         failures = 0
         for filename, node, deltabase, deltalen in dpack.iterentries():
             bases[node] = deltabase
             if node in nodes:
                 ui.write(("Bad entry: %s appears twice\n" % short(node)))
                 failures += 1
             nodes.add(node)
             if filename != lastfilename:
                 printtotals()
                 name = '(empty name)' if filename == '' else filename
                 ui.write("%s:\n" % name)
                 ui.write("%s%s%s%s\n" % (
                     "Node".ljust(hashlen),
                     "Delta Base".ljust(hashlen),
                     "Delta Length".ljust(14),
                     "Blob Size".ljust(9)))
                 lastfilename = filename
                 totalblobsize = 0
                 totaldeltasize = 0

             # Metadata could be missing, in which case it will be an empty dict.
             meta = dpack.getmeta(filename, node)
             if constants.METAKEYSIZE in meta:
                 blobsize = meta[constants.METAKEYSIZE]
                 totaldeltasize += deltalen
                 totalblobsize += blobsize
             else:
                 blobsize = "(missing)"
             ui.write("%s %s %s%s\n" % (
                 hashformatter(node),
                 hashformatter(deltabase),
                 ('%d' % deltalen).ljust(14),
                 pycompat.bytestr(blobsize)))

         if filename is not None:
             printtotals()

         failures += _sanitycheck(ui, set(nodes), bases)
         if failures > 1:
             ui.warn(("%d failures\n" % failures))
             return 1

 def _sanitycheck(ui, nodes, bases):
     """
     Does some basic sanity checking on a packfiles with ``nodes`` ``bases`` (a
     mapping of node->base):

     - Each deltabase must itself be a node elsewhere in the pack
     - There must be no cycles
     """
     failures = 0
     for node in nodes:
         seen = set()
         current = node
         deltabase = bases[current]

         while deltabase != nullid:
             if deltabase not in nodes:
                 ui.warn(("Bad entry: %s has an unknown deltabase (%s)\n" %
                          (short(node), short(deltabase))))
                 failures += 1
                 break

             if deltabase in seen:
                 ui.warn(("Bad entry: %s has a cycle (at %s)\n" %
                          (short(node), short(deltabase))))
                 failures += 1
                 break

             current = deltabase
             seen.add(current)
             deltabase = bases[current]
         # Since ``node`` begins a valid chain, reset/memoize its base to nullid
         # so we don't traverse it again.
         bases[node] = nullid
     return failures

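_sanitycheck() enforces two invariants on a pack: every delta base must be another node in the same pack, and following delta bases must never loop. A standalone toy version of the same walk, for illustration only (simplified, no memoization and no ui):

NULLID = b'\0' * 20

def checkchains(bases):
    # bases maps node -> deltabase, with NULLID ending a chain
    problems = []
    for node in list(bases):
        seen, current = set(), node
        while bases[current] != NULLID:
            base = bases[current]
            if base not in bases:
                problems.append((node, 'unknown deltabase'))
                break
            if base in seen:
                problems.append((node, 'cycle'))
                break
            seen.add(base)
            current = base
    return problems

# checkchains({b'a' * 20: NULLID, b'b' * 20: b'a' * 20}) -> []
# checkchains({b'c' * 20: b'd' * 20, b'd' * 20: b'c' * 20}) -> two cycle reports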
 def dumpdeltachain(ui, deltachain, **opts):
     hashformatter = hex
     hashlen = 40

     lastfilename = None
     for filename, node, filename, deltabasenode, delta in deltachain:
         if filename != lastfilename:
             ui.write("\n%s\n" % filename)
             lastfilename = filename
             ui.write("%s %s %s %s\n" % (
                 "Node".ljust(hashlen),
                 "Delta Base".ljust(hashlen),
                 "Delta SHA1".ljust(hashlen),
                 "Delta Length".ljust(6),
             ))

         ui.write("%s %s %s %d\n" % (
             hashformatter(node),
             hashformatter(deltabasenode),
             nodemod.hex(hashlib.sha1(delta).digest()),
             len(delta)))

 def debughistorypack(ui, path):
     if '.hist' in path:
         path = path[:path.index('.hist')]
     hpack = historypack.historypack(path)

     lastfilename = None
     for entry in hpack.iterentries():
         filename, node, p1node, p2node, linknode, copyfrom = entry
         if filename != lastfilename:
             ui.write("\n%s\n" % filename)
             ui.write("%s%s%s%s%s\n" % (
                 "Node".ljust(14),
                 "P1 Node".ljust(14),
                 "P2 Node".ljust(14),
                 "Link Node".ljust(14),
                 "Copy From"))
             lastfilename = filename
         ui.write("%s %s %s %s %s\n" % (short(node), short(p1node),
             short(p2node), short(linknode), copyfrom))

 def debugwaitonrepack(repo):
-    with
+    with lockmod.lock(repack.repacklockvfs(repo), "repacklock", timeout=-1):
         return

 def debugwaitonprefetch(repo):
     with repo._lock(repo.svfs, "prefetchlock", True, None,
                     None, _('prefetching in %s') % repo.origroot):
         pass
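Both hunks in this change drop remotefilelog's extutil file lock in favour of Mercurial's own lock module. debugwaitonrepack() now simply blocks on the shared "repacklock" name until any in-flight repack releases it; timeout=-1 asks the lock module to wait as long as necessary. A minimal standalone sketch of that pattern (the 'lockdirpath' argument is an invented stand-in for the vfs returned by repack.repacklockvfs()):

from mercurial import lock as lockmod, vfs as vfsmod

def waitforrepack(lockdirpath):
    # Blocks until whoever holds "repacklock" under lockdirpath releases it.
    lockvfs = vfsmod.vfs(lockdirpath)
    with lockmod.lock(lockvfs, "repacklock", timeout=-1):
        return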
@@ -1,779 +1,779 @@
 from __future__ import absolute_import

 import os
 import time

 from mercurial.i18n import _
 from mercurial.node import (
     nullid,
     short,
 )
 from mercurial import (
     encoding,
     error,
+    lock as lockmod,
     mdiff,
     policy,
     pycompat,
     scmutil,
     util,
     vfs,
 )
 from mercurial.utils import procutil
 from . import (
     constants,
     contentstore,
     datapack,
-    extutil,
     historypack,
     metadatastore,
     shallowutil,
 )

 osutil = policy.importmod(r'osutil')

 class RepackAlreadyRunning(error.Abort):
     pass

 def backgroundrepack(repo, incremental=True, packsonly=False,
                      ensurestart=False):
     cmd = [procutil.hgexecutable(), '-R', repo.origroot, 'repack']
     msg = _("(running background repack)\n")
     if incremental:
         cmd.append('--incremental')
         msg = _("(running background incremental repack)\n")
     if packsonly:
         cmd.append('--packsonly')
     repo.ui.warn(msg)
     # We know this command will find a binary, so don't block on it starting.
     procutil.runbgcommand(cmd, encoding.environ, ensurestart=ensurestart)

 def fullrepack(repo, options=None):
     """If ``packsonly`` is True, stores creating only loose objects are skipped.
     """
     if util.safehasattr(repo, 'shareddatastores'):
         datasource = contentstore.unioncontentstore(
             *repo.shareddatastores)
         historysource = metadatastore.unionmetadatastore(
             *repo.sharedhistorystores,
             allowincomplete=True)

         packpath = shallowutil.getcachepackpath(
             repo,
             constants.FILEPACK_CATEGORY)
         _runrepack(repo, datasource, historysource, packpath,
                    constants.FILEPACK_CATEGORY, options=options)

     if util.safehasattr(repo.manifestlog, 'datastore'):
         localdata, shareddata = _getmanifeststores(repo)
         lpackpath, ldstores, lhstores = localdata
         spackpath, sdstores, shstores = shareddata

         # Repack the shared manifest store
         datasource = contentstore.unioncontentstore(*sdstores)
         historysource = metadatastore.unionmetadatastore(
             *shstores,
             allowincomplete=True)
         _runrepack(repo, datasource, historysource, spackpath,
                    constants.TREEPACK_CATEGORY, options=options)

         # Repack the local manifest store
         datasource = contentstore.unioncontentstore(
             *ldstores,
             allowincomplete=True)
         historysource = metadatastore.unionmetadatastore(
             *lhstores,
             allowincomplete=True)
         _runrepack(repo, datasource, historysource, lpackpath,
                    constants.TREEPACK_CATEGORY, options=options)

 def incrementalrepack(repo, options=None):
     """This repacks the repo by looking at the distribution of pack files in the
     repo and performing the most minimal repack to keep the repo in good shape.
     """
     if util.safehasattr(repo, 'shareddatastores'):
         packpath = shallowutil.getcachepackpath(
             repo,
             constants.FILEPACK_CATEGORY)
         _incrementalrepack(repo,
                            repo.shareddatastores,
                            repo.sharedhistorystores,
                            packpath,
                            constants.FILEPACK_CATEGORY,
                            options=options)

     if util.safehasattr(repo.manifestlog, 'datastore'):
         localdata, shareddata = _getmanifeststores(repo)
         lpackpath, ldstores, lhstores = localdata
         spackpath, sdstores, shstores = shareddata

         # Repack the shared manifest store
         _incrementalrepack(repo,
                            sdstores,
                            shstores,
                            spackpath,
                            constants.TREEPACK_CATEGORY,
                            options=options)

         # Repack the local manifest store
         _incrementalrepack(repo,
                            ldstores,
                            lhstores,
                            lpackpath,
                            constants.TREEPACK_CATEGORY,
                            allowincompletedata=True,
                            options=options)

 def _getmanifeststores(repo):
     shareddatastores = repo.manifestlog.shareddatastores
     localdatastores = repo.manifestlog.localdatastores
     sharedhistorystores = repo.manifestlog.sharedhistorystores
     localhistorystores = repo.manifestlog.localhistorystores

     sharedpackpath = shallowutil.getcachepackpath(repo,
                                                   constants.TREEPACK_CATEGORY)
     localpackpath = shallowutil.getlocalpackpath(repo.svfs.vfs.base,
                                                  constants.TREEPACK_CATEGORY)

     return ((localpackpath, localdatastores, localhistorystores),
             (sharedpackpath, shareddatastores, sharedhistorystores))

 def _topacks(packpath, files, constructor):
     paths = list(os.path.join(packpath, p) for p in files)
     packs = list(constructor(p) for p in paths)
     return packs

 def _deletebigpacks(repo, folder, files):
     """Deletes packfiles that are bigger than ``packs.maxpacksize``.

     Returns ``files` with the removed files omitted."""
     maxsize = repo.ui.configbytes("packs", "maxpacksize")
     if maxsize <= 0:
         return files

     # This only considers datapacks today, but we could broaden it to include
     # historypacks.
     VALIDEXTS = [".datapack", ".dataidx"]

     # Either an oversize index or datapack will trigger cleanup of the whole
     # pack:
     oversized = {os.path.splitext(path)[0] for path, ftype, stat in files
                  if (stat.st_size > maxsize and (os.path.splitext(path)[1]
                                                  in VALIDEXTS))}

     for rootfname in oversized:
         rootpath = os.path.join(folder, rootfname)
         for ext in VALIDEXTS:
             path = rootpath + ext
             repo.ui.debug('removing oversize packfile %s (%s)\n' %
                           (path, util.bytecount(os.stat(path).st_size)))
             os.unlink(path)
     return [row for row in files if os.path.basename(row[0]) not in oversized]

 def _incrementalrepack(repo, datastore, historystore, packpath, category,
                        allowincompletedata=False, options=None):
     shallowutil.mkstickygroupdir(repo.ui, packpath)

     files = osutil.listdir(packpath, stat=True)
     files = _deletebigpacks(repo, packpath, files)
     datapacks = _topacks(packpath,
                          _computeincrementaldatapack(repo.ui, files),
                          datapack.datapack)
     datapacks.extend(s for s in datastore
                      if not isinstance(s, datapack.datapackstore))

     historypacks = _topacks(packpath,
                             _computeincrementalhistorypack(repo.ui, files),
                             historypack.historypack)
     historypacks.extend(s for s in historystore
                         if not isinstance(s, historypack.historypackstore))

     # ``allhistory{files,packs}`` contains all known history packs, even ones we
     # don't plan to repack. They are used during the datapack repack to ensure
     # good ordering of nodes.
     allhistoryfiles = _allpackfileswithsuffix(files, historypack.PACKSUFFIX,
                                               historypack.INDEXSUFFIX)
     allhistorypacks = _topacks(packpath,
                                (f for f, mode, stat in allhistoryfiles),
                                historypack.historypack)
     allhistorypacks.extend(s for s in historystore
                            if not isinstance(s, historypack.historypackstore))
     _runrepack(repo,
                contentstore.unioncontentstore(
                    *datapacks,
                    allowincomplete=allowincompletedata),
                metadatastore.unionmetadatastore(
                    *historypacks,
                    allowincomplete=True),
                packpath, category,
                fullhistory=metadatastore.unionmetadatastore(
                    *allhistorypacks,
                    allowincomplete=True),
                options=options)

 def _computeincrementaldatapack(ui, files):
     opts = {
         'gencountlimit' : ui.configint(
             'remotefilelog', 'data.gencountlimit'),
         'generations' : ui.configlist(
             'remotefilelog', 'data.generations'),
         'maxrepackpacks' : ui.configint(
             'remotefilelog', 'data.maxrepackpacks'),
         'repackmaxpacksize' : ui.configbytes(
             'remotefilelog', 'data.repackmaxpacksize'),
         'repacksizelimit' : ui.configbytes(
             'remotefilelog', 'data.repacksizelimit'),
     }

     packfiles = _allpackfileswithsuffix(
         files, datapack.PACKSUFFIX, datapack.INDEXSUFFIX)
     return _computeincrementalpack(packfiles, opts)

 def _computeincrementalhistorypack(ui, files):
     opts = {
         'gencountlimit' : ui.configint(
             'remotefilelog', 'history.gencountlimit'),
         'generations' : ui.configlist(
             'remotefilelog', 'history.generations', ['100MB']),
         'maxrepackpacks' : ui.configint(
             'remotefilelog', 'history.maxrepackpacks'),
         'repackmaxpacksize' : ui.configbytes(
             'remotefilelog', 'history.repackmaxpacksize', '400MB'),
         'repacksizelimit' : ui.configbytes(
             'remotefilelog', 'history.repacksizelimit'),
     }

     packfiles = _allpackfileswithsuffix(
         files, historypack.PACKSUFFIX, historypack.INDEXSUFFIX)
     return _computeincrementalpack(packfiles, opts)

 def _allpackfileswithsuffix(files, packsuffix, indexsuffix):
     result = []
     fileset = set(fn for fn, mode, stat in files)
     for filename, mode, stat in files:
         if not filename.endswith(packsuffix):
             continue

         prefix = filename[:-len(packsuffix)]

         # Don't process a pack if it doesn't have an index.
         if (prefix + indexsuffix) not in fileset:
             continue
         result.append((prefix, mode, stat))

     return result

 def _computeincrementalpack(files, opts):
     """Given a set of pack files along with the configuration options, this
     function computes the list of files that should be packed as part of an
     incremental repack.

     It tries to strike a balance between keeping incremental repacks cheap (i.e.
     packing small things when possible, and rolling the packs up to the big ones
     over time).
     """

     limits = list(sorted((util.sizetoint(s) for s in opts['generations']),
                          reverse=True))
     limits.append(0)

     # Group the packs by generation (i.e. by size)
     generations = []
     for i in pycompat.xrange(len(limits)):
         generations.append([])

     sizes = {}
     for prefix, mode, stat in files:
         size = stat.st_size
         if size > opts['repackmaxpacksize']:
             continue

         sizes[prefix] = size
         for i, limit in enumerate(limits):
             if size > limit:
                 generations[i].append(prefix)
                 break

     # Steps for picking what packs to repack:
     # 1. Pick the largest generation with > gencountlimit pack files.
     # 2. Take the smallest three packs.
     # 3. While total-size-of-packs < repacksizelimit: add another pack

     # Find the largest generation with more than gencountlimit packs
     genpacks = []
     for i, limit in enumerate(limits):
         if len(generations[i]) > opts['gencountlimit']:
             # Sort to be smallest last, for easy popping later
             genpacks.extend(sorted(generations[i], reverse=True,
                                    key=lambda x: sizes[x]))
             break

     # Take as many packs from the generation as we can
     chosenpacks = genpacks[-3:]
     genpacks = genpacks[:-3]
     repacksize = sum(sizes[n] for n in chosenpacks)
     while (repacksize < opts['repacksizelimit'] and genpacks and
            len(chosenpacks) < opts['maxrepackpacks']):
         chosenpacks.append(genpacks.pop())
         repacksize += sizes[chosenpacks[-1]]

     return chosenpacks

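The selection above buckets packs by size generation and then grows the chosen set until a size or count limit is hit. A small self-contained walk-through of the bucketing step, with made-up generation limits and pack sizes:

# Illustration only.  With generation limits of 1GB/100MB/1MB, limits become
# [1GB, 100MB, 1MB, 0]; each pack lands in the bucket of the largest limit it
# exceeds.  The repack then takes the three smallest packs from the first
# bucket holding more than 'gencountlimit' entries and tops the set up while
# under 'repacksizelimit' and 'maxrepackpacks'.
sizes = {'a': 6 << 20, 'b': 2 << 20, 'c': 3 << 20, 'd': 300 << 20}
limits = [1 << 30, 100 << 20, 1 << 20, 0]
buckets = [[] for _ in limits]
for name, size in sizes.items():
    for i, limit in enumerate(limits):
        if size > limit:
            buckets[i].append(name)
            break
assert buckets[1] == ['d']                    # 300MB exceeds the 100MB limit
assert sorted(buckets[2]) == ['a', 'b', 'c']  # the small packs group together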
321 | def _runrepack(repo, data, history, packpath, category, fullhistory=None, |
|
321 | def _runrepack(repo, data, history, packpath, category, fullhistory=None, | |
322 | options=None): |
|
322 | options=None): | |
323 | shallowutil.mkstickygroupdir(repo.ui, packpath) |
|
323 | shallowutil.mkstickygroupdir(repo.ui, packpath) | |
324 |
|
324 | |||
325 | def isold(repo, filename, node): |
|
325 | def isold(repo, filename, node): | |
326 | """Check if the file node is older than a limit. |
|
326 | """Check if the file node is older than a limit. | |
327 | Unless a limit is specified in the config the default limit is taken. |
|
327 | Unless a limit is specified in the config the default limit is taken. | |
328 | """ |
|
328 | """ | |
329 | filectx = repo.filectx(filename, fileid=node) |
|
329 | filectx = repo.filectx(filename, fileid=node) | |
330 | filetime = repo[filectx.linkrev()].date() |
|
330 | filetime = repo[filectx.linkrev()].date() | |
331 |
|
331 | |||
332 | ttl = repo.ui.configint('remotefilelog', 'nodettl') |
|
332 | ttl = repo.ui.configint('remotefilelog', 'nodettl') | |
333 |
|
333 | |||
334 | limit = time.time() - ttl |
|
334 | limit = time.time() - ttl | |
335 | return filetime[0] < limit |
|
335 | return filetime[0] < limit | |
336 |
|
336 | |||
337 | garbagecollect = repo.ui.configbool('remotefilelog', 'gcrepack') |
|
337 | garbagecollect = repo.ui.configbool('remotefilelog', 'gcrepack') | |
338 | if not fullhistory: |
|
338 | if not fullhistory: | |
339 | fullhistory = history |
|
339 | fullhistory = history | |
340 | packer = repacker(repo, data, history, fullhistory, category, |
|
340 | packer = repacker(repo, data, history, fullhistory, category, | |
341 | gc=garbagecollect, isold=isold, options=options) |
|
341 | gc=garbagecollect, isold=isold, options=options) | |
342 |
|
342 | |||
343 | with datapack.mutabledatapack(repo.ui, packpath) as dpack: |
|
343 | with datapack.mutabledatapack(repo.ui, packpath) as dpack: | |
344 | with historypack.mutablehistorypack(repo.ui, packpath) as hpack: |
|
344 | with historypack.mutablehistorypack(repo.ui, packpath) as hpack: | |
345 | try: |
|
345 | try: | |
346 | packer.run(dpack, hpack) |
|
346 | packer.run(dpack, hpack) | |
347 | except error.LockHeld: |
|
347 | except error.LockHeld: | |
348 | raise RepackAlreadyRunning(_("skipping repack - another repack " |
|
348 | raise RepackAlreadyRunning(_("skipping repack - another repack " | |
349 | "is already running")) |
|
349 | "is already running")) | |
350 |
|
350 | |||
351 | def keepset(repo, keyfn, lastkeepkeys=None): |
|
351 | def keepset(repo, keyfn, lastkeepkeys=None): | |
352 | """Computes a keepset which is not garbage collected. |
|
352 | """Computes a keepset which is not garbage collected. | |
353 | 'keyfn' is a function that maps filename, node to a unique key. |
|
353 | 'keyfn' is a function that maps filename, node to a unique key. | |
354 | 'lastkeepkeys' is an optional argument and if provided the keepset |
|
354 | 'lastkeepkeys' is an optional argument and if provided the keepset | |
355 | function updates lastkeepkeys with more keys and returns the result. |
|
355 | function updates lastkeepkeys with more keys and returns the result. | |
356 | """ |
|
356 | """ | |
357 | if not lastkeepkeys: |
|
357 | if not lastkeepkeys: | |
358 | keepkeys = set() |
|
358 | keepkeys = set() | |
359 | else: |
|
359 | else: | |
360 | keepkeys = lastkeepkeys |
|
360 | keepkeys = lastkeepkeys | |
361 |
|
361 | |||
362 | # We want to keep: |
|
362 | # We want to keep: | |
363 | # 1. Working copy parent |
|
363 | # 1. Working copy parent | |
364 | # 2. Draft commits |
|
364 | # 2. Draft commits | |
365 | # 3. Parents of draft commits |
|
365 | # 3. Parents of draft commits | |
366 | # 4. Pullprefetch and bgprefetchrevs revsets if specified |
|
366 | # 4. Pullprefetch and bgprefetchrevs revsets if specified | |
367 | revs = ['.', 'draft()', 'parents(draft())'] |
|
367 | revs = ['.', 'draft()', 'parents(draft())'] | |
368 | prefetchrevs = repo.ui.config('remotefilelog', 'pullprefetch', None) |
|
368 | prefetchrevs = repo.ui.config('remotefilelog', 'pullprefetch', None) | |
369 | if prefetchrevs: |
|
369 | if prefetchrevs: | |
370 | revs.append('(%s)' % prefetchrevs) |
|
370 | revs.append('(%s)' % prefetchrevs) | |
371 | prefetchrevs = repo.ui.config('remotefilelog', 'bgprefetchrevs', None) |
|
371 | prefetchrevs = repo.ui.config('remotefilelog', 'bgprefetchrevs', None) | |
372 | if prefetchrevs: |
|
372 | if prefetchrevs: | |
373 | revs.append('(%s)' % prefetchrevs) |
|
373 | revs.append('(%s)' % prefetchrevs) | |
374 | revs = '+'.join(revs) |
|
374 | revs = '+'.join(revs) | |
375 |
|
375 | |||
376 | revs = ['sort((%s), "topo")' % revs] |
|
376 | revs = ['sort((%s), "topo")' % revs] | |
377 | keep = scmutil.revrange(repo, revs) |
|
377 | keep = scmutil.revrange(repo, revs) | |
378 |
|
378 | |||
379 | processed = set() |
|
379 | processed = set() | |
380 | lastmanifest = None |
|
380 | lastmanifest = None | |
381 |
|
381 | |||
382 | # process the commits in toposorted order starting from the oldest |
|
382 | # process the commits in toposorted order starting from the oldest | |
383 | for r in reversed(keep._list): |
|
383 | for r in reversed(keep._list): | |
384 | if repo[r].p1().rev() in processed: |
|
384 | if repo[r].p1().rev() in processed: | |
385 | # if the direct parent has already been processed |
|
385 | # if the direct parent has already been processed | |
386 | # then we only need to process the delta |
|
386 | # then we only need to process the delta | |
387 | m = repo[r].manifestctx().readdelta() |
|
387 | m = repo[r].manifestctx().readdelta() | |
388 | else: |
|
388 | else: | |
389 | # otherwise take the manifest and diff it |
|
389 | # otherwise take the manifest and diff it | |
390 | # with the previous manifest if one exists |
|
390 | # with the previous manifest if one exists | |
391 | if lastmanifest: |
|
391 | if lastmanifest: | |
392 | m = repo[r].manifest().diff(lastmanifest) |
|
392 | m = repo[r].manifest().diff(lastmanifest) | |
393 | else: |
|
393 | else: | |
394 | m = repo[r].manifest() |
|
394 | m = repo[r].manifest() | |
395 | lastmanifest = repo[r].manifest() |
|
395 | lastmanifest = repo[r].manifest() | |
396 | processed.add(r) |
|
396 | processed.add(r) | |
397 |
|
397 | |||
398 | # populate keepkeys with keys from the current manifest |
|
398 | # populate keepkeys with keys from the current manifest | |
399 | if type(m) is dict: |
|
399 | if type(m) is dict: | |
400 | # m is a result of diff of two manifests and is a dictionary that |
|
400 | # m is a result of diff of two manifests and is a dictionary that | |
401 | # maps filename to ((newnode, newflag), (oldnode, oldflag)) tuple |
|
401 | # maps filename to ((newnode, newflag), (oldnode, oldflag)) tuple | |
402 | for filename, diff in m.iteritems(): |
|
402 | for filename, diff in m.iteritems(): | |
403 | if diff[0][0] is not None: |
|
403 | if diff[0][0] is not None: | |
404 | keepkeys.add(keyfn(filename, diff[0][0])) |
|
404 | keepkeys.add(keyfn(filename, diff[0][0])) | |
405 | else: |
|
405 | else: | |
406 | # m is a manifest object |
|
406 | # m is a manifest object | |
407 | for filename, filenode in m.iteritems(): |
|
407 | for filename, filenode in m.iteritems(): | |
408 | keepkeys.add(keyfn(filename, filenode)) |
|
408 | keepkeys.add(keyfn(filename, filenode)) | |
409 |
|
409 | |||
410 | return keepkeys |
|
410 | return keepkeys | |
411 |
|
411 | |||
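For illustration only, the keep-set computation above amounts to building one revset string from the working copy parent, draft commits, their parents, and the two optional prefetch configs, then topo-sorting it. A minimal standalone sketch of that string assembly (plain Python, not the extension's API; the config values are passed in directly here):

    def buildkeeprevset(pullprefetch=None, bgprefetchrevs=None):
        # mirror of the revset pieces listed in keepset() above
        revs = ['.', 'draft()', 'parents(draft())']
        for extra in (pullprefetch, bgprefetchrevs):
            if extra:
                revs.append('(%s)' % extra)
        return 'sort((%s), "topo")' % '+'.join(revs)

    # buildkeeprevset(pullprefetch='bookmark()') returns
    # 'sort((.+draft()+parents(draft())+(bookmark())), "topo")'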
412 | class repacker(object): |
|
412 | class repacker(object): | |
413 | """Class for orchestrating the repack of data and history information into a |
|
413 | """Class for orchestrating the repack of data and history information into a | |
414 | new format. |
|
414 | new format. | |
415 | """ |
|
415 | """ | |
416 | def __init__(self, repo, data, history, fullhistory, category, gc=False, |
|
416 | def __init__(self, repo, data, history, fullhistory, category, gc=False, | |
417 | isold=None, options=None): |
|
417 | isold=None, options=None): | |
418 | self.repo = repo |
|
418 | self.repo = repo | |
419 | self.data = data |
|
419 | self.data = data | |
420 | self.history = history |
|
420 | self.history = history | |
421 | self.fullhistory = fullhistory |
|
421 | self.fullhistory = fullhistory | |
422 | self.unit = constants.getunits(category) |
|
422 | self.unit = constants.getunits(category) | |
423 | self.garbagecollect = gc |
|
423 | self.garbagecollect = gc | |
424 | self.options = options |
|
424 | self.options = options | |
425 | if self.garbagecollect: |
|
425 | if self.garbagecollect: | |
426 | if not isold: |
|
426 | if not isold: | |
427 | raise ValueError("Function 'isold' is not properly specified") |
|
427 | raise ValueError("Function 'isold' is not properly specified") | |
428 | # use (filename, node) tuple as a keepset key |
|
428 | # use (filename, node) tuple as a keepset key | |
429 | self.keepkeys = keepset(repo, lambda f, n : (f, n)) |
|
429 | self.keepkeys = keepset(repo, lambda f, n : (f, n)) | |
430 | self.isold = isold |
|
430 | self.isold = isold | |
431 |
|
431 | |||
432 | def run(self, targetdata, targethistory): |
|
432 | def run(self, targetdata, targethistory): | |
433 | ledger = repackledger() |
|
433 | ledger = repackledger() | |
434 |
|
434 | |||
435 | with
|
435 | with lockmod.lock(repacklockvfs(self.repo), "repacklock", desc=None, |
436 |
|
436 | timeout=0): |
437 | self.repo.hook('prerepack') |
|
437 | self.repo.hook('prerepack') | |
438 |
|
438 | |||
439 | # Populate ledger from source |
|
439 | # Populate ledger from source | |
440 | self.data.markledger(ledger, options=self.options) |
|
440 | self.data.markledger(ledger, options=self.options) | |
441 | self.history.markledger(ledger, options=self.options) |
|
441 | self.history.markledger(ledger, options=self.options) | |
442 |
|
442 | |||
443 | # Run repack |
|
443 | # Run repack | |
444 | self.repackdata(ledger, targetdata) |
|
444 | self.repackdata(ledger, targetdata) | |
445 | self.repackhistory(ledger, targethistory) |
|
445 | self.repackhistory(ledger, targethistory) | |
446 |
|
446 | |||
447 | # Call cleanup on each source |
|
447 | # Call cleanup on each source | |
448 | for source in ledger.sources: |
|
448 | for source in ledger.sources: | |
449 | source.cleanup(ledger) |
|
449 | source.cleanup(ledger) | |
450 |
|
450 | |||
451 | def _chainorphans(self, ui, filename, nodes, orphans, deltabases): |
|
451 | def _chainorphans(self, ui, filename, nodes, orphans, deltabases): | |
452 | """Reorderes ``orphans`` into a single chain inside ``nodes`` and |
|
452 | """Reorderes ``orphans`` into a single chain inside ``nodes`` and | |
453 | ``deltabases``. |
|
453 | ``deltabases``. | |
454 |
|
454 | |||
455 | We often have orphan entries (nodes without a base that aren't |
|
455 | We often have orphan entries (nodes without a base that aren't | |
456 | referenced by other nodes -- i.e., part of a chain) due to gaps in |
|
456 | referenced by other nodes -- i.e., part of a chain) due to gaps in | |
457 | history. Rather than store them as individual fulltexts, we prefer to |
|
457 | history. Rather than store them as individual fulltexts, we prefer to | |
458 | insert them as one chain sorted by size. |
|
458 | insert them as one chain sorted by size. | |
459 | """ |
|
459 | """ | |
460 | if not orphans: |
|
460 | if not orphans: | |
461 | return nodes |
|
461 | return nodes | |
462 |
|
462 | |||
463 | def getsize(node, default=0): |
|
463 | def getsize(node, default=0): | |
464 | meta = self.data.getmeta(filename, node) |
|
464 | meta = self.data.getmeta(filename, node) | |
465 | if constants.METAKEYSIZE in meta: |
|
465 | if constants.METAKEYSIZE in meta: | |
466 | return meta[constants.METAKEYSIZE] |
|
466 | return meta[constants.METAKEYSIZE] | |
467 | else: |
|
467 | else: | |
468 | return default |
|
468 | return default | |
469 |
|
469 | |||
470 | # Sort orphans by size; biggest first is preferred, since it's more |
|
470 | # Sort orphans by size; biggest first is preferred, since it's more | |
471 | # likely to be the newest version assuming files grow over time. |
|
471 | # likely to be the newest version assuming files grow over time. | |
472 | # (Sort by node first to ensure the sort is stable.) |
|
472 | # (Sort by node first to ensure the sort is stable.) | |
473 | orphans = sorted(orphans) |
|
473 | orphans = sorted(orphans) | |
474 | orphans = list(sorted(orphans, key=getsize, reverse=True)) |
|
474 | orphans = list(sorted(orphans, key=getsize, reverse=True)) | |
475 | if ui.debugflag: |
|
475 | if ui.debugflag: | |
476 | ui.debug("%s: orphan chain: %s\n" % (filename, |
|
476 | ui.debug("%s: orphan chain: %s\n" % (filename, | |
477 | ", ".join([short(s) for s in orphans]))) |
|
477 | ", ".join([short(s) for s in orphans]))) | |
478 |
|
478 | |||
479 | # Create one contiguous chain and reassign deltabases. |
|
479 | # Create one contiguous chain and reassign deltabases. | |
480 | for i, node in enumerate(orphans): |
|
480 | for i, node in enumerate(orphans): | |
481 | if i == 0: |
|
481 | if i == 0: | |
482 | deltabases[node] = (nullid, 0) |
|
482 | deltabases[node] = (nullid, 0) | |
483 | else: |
|
483 | else: | |
484 | parent = orphans[i - 1] |
|
484 | parent = orphans[i - 1] | |
485 | deltabases[node] = (parent, deltabases[parent][1] + 1) |
|
485 | deltabases[node] = (parent, deltabases[parent][1] + 1) | |
486 | nodes = [n for n in nodes if n not in orphans] |
|
486 | nodes = [n for n in nodes if n not in orphans] | |
487 | nodes += orphans |
|
487 | nodes += orphans | |
488 | return nodes |
|
488 | return nodes | |
489 |
|
489 | |||
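For illustration, the orphan-chaining step can be reduced to the following standalone sketch, with blob sizes passed in as a plain dict instead of coming from a data store (all names here are made up for the example, not the extension's API):

    NULLID = b'\0' * 20

    def chainorphans(orphans, sizes, deltabases):
        orphans = sorted(orphans)                      # stable tie-break by node
        orphans.sort(key=lambda n: sizes.get(n, 0), reverse=True)
        for i, node in enumerate(orphans):
            if i == 0:
                deltabases[node] = (NULLID, 0)         # biggest blob heads the chain
            else:
                parent = orphans[i - 1]
                deltabases[node] = (parent, deltabases[parent][1] + 1)
        return orphans

    # chainorphans([b'aa', b'bb'], {b'aa': 10, b'bb': 99}, {}) puts b'bb'
    # first and deltas b'aa' against it.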
490 | def repackdata(self, ledger, target): |
|
490 | def repackdata(self, ledger, target): | |
491 | ui = self.repo.ui |
|
491 | ui = self.repo.ui | |
492 | maxchainlen = ui.configint('packs', 'maxchainlen', 1000) |
|
492 | maxchainlen = ui.configint('packs', 'maxchainlen', 1000) | |
493 |
|
493 | |||
494 | byfile = {} |
|
494 | byfile = {} | |
495 | for entry in ledger.entries.itervalues(): |
|
495 | for entry in ledger.entries.itervalues(): | |
496 | if entry.datasource: |
|
496 | if entry.datasource: | |
497 | byfile.setdefault(entry.filename, {})[entry.node] = entry |
|
497 | byfile.setdefault(entry.filename, {})[entry.node] = entry | |
498 |
|
498 | |||
499 | count = 0 |
|
499 | count = 0 | |
500 | repackprogress = ui.makeprogress(_("repacking data"), unit=self.unit, |
|
500 | repackprogress = ui.makeprogress(_("repacking data"), unit=self.unit, | |
501 | total=len(byfile)) |
|
501 | total=len(byfile)) | |
502 | for filename, entries in sorted(byfile.iteritems()): |
|
502 | for filename, entries in sorted(byfile.iteritems()): | |
503 | repackprogress.update(count) |
|
503 | repackprogress.update(count) | |
504 |
|
504 | |||
505 | ancestors = {} |
|
505 | ancestors = {} | |
506 | nodes = list(node for node in entries) |
|
506 | nodes = list(node for node in entries) | |
507 | nohistory = [] |
|
507 | nohistory = [] | |
508 | buildprogress = ui.makeprogress(_("building history"), unit='nodes', |
|
508 | buildprogress = ui.makeprogress(_("building history"), unit='nodes', | |
509 | total=len(nodes)) |
|
509 | total=len(nodes)) | |
510 | for i, node in enumerate(nodes): |
|
510 | for i, node in enumerate(nodes): | |
511 | if node in ancestors: |
|
511 | if node in ancestors: | |
512 | continue |
|
512 | continue | |
513 | buildprogress.update(i) |
|
513 | buildprogress.update(i) | |
514 | try: |
|
514 | try: | |
515 | ancestors.update(self.fullhistory.getancestors(filename, |
|
515 | ancestors.update(self.fullhistory.getancestors(filename, | |
516 | node, known=ancestors)) |
|
516 | node, known=ancestors)) | |
517 | except KeyError: |
|
517 | except KeyError: | |
518 | # Since we're packing data entries, we may not have the |
|
518 | # Since we're packing data entries, we may not have the | |
519 | # corresponding history entries for them. It's not a big |
|
519 | # corresponding history entries for them. It's not a big | |
520 | # deal, but the entries won't be delta'd perfectly. |
|
520 | # deal, but the entries won't be delta'd perfectly. | |
521 | nohistory.append(node) |
|
521 | nohistory.append(node) | |
522 | buildprogress.complete() |
|
522 | buildprogress.complete() | |
523 |
|
523 | |||
524 | # Order the nodes children first, so we can produce reverse deltas |
|
524 | # Order the nodes children first, so we can produce reverse deltas | |
525 | orderednodes = list(reversed(self._toposort(ancestors))) |
|
525 | orderednodes = list(reversed(self._toposort(ancestors))) | |
526 | if len(nohistory) > 0: |
|
526 | if len(nohistory) > 0: | |
527 | ui.debug('repackdata: %d nodes without history\n' % |
|
527 | ui.debug('repackdata: %d nodes without history\n' % | |
528 | len(nohistory)) |
|
528 | len(nohistory)) | |
529 | orderednodes.extend(sorted(nohistory)) |
|
529 | orderednodes.extend(sorted(nohistory)) | |
530 |
|
530 | |||
531 | # Filter orderednodes to just the nodes we want to serialize (it |
|
531 | # Filter orderednodes to just the nodes we want to serialize (it | |
532 | # currently also has the edge nodes' ancestors). |
|
532 | # currently also has the edge nodes' ancestors). | |
533 | orderednodes = list(filter(lambda node: node in nodes, |
|
533 | orderednodes = list(filter(lambda node: node in nodes, | |
534 | orderednodes)) |
|
534 | orderednodes)) | |
535 |
|
535 | |||
536 | # Garbage collect old nodes: |
|
536 | # Garbage collect old nodes: | |
537 | if self.garbagecollect: |
|
537 | if self.garbagecollect: | |
538 | neworderednodes = [] |
|
538 | neworderednodes = [] | |
539 | for node in orderednodes: |
|
539 | for node in orderednodes: | |
540 | # If the node is old and is not in the keepset, we skip it, |
|
540 | # If the node is old and is not in the keepset, we skip it, | |
541 | # and mark it as garbage collected |
|
541 | # and mark it as garbage collected | |
542 | if ((filename, node) not in self.keepkeys and |
|
542 | if ((filename, node) not in self.keepkeys and | |
543 | self.isold(self.repo, filename, node)): |
|
543 | self.isold(self.repo, filename, node)): | |
544 | entries[node].gced = True |
|
544 | entries[node].gced = True | |
545 | continue |
|
545 | continue | |
546 | neworderednodes.append(node) |
|
546 | neworderednodes.append(node) | |
547 | orderednodes = neworderednodes |
|
547 | orderednodes = neworderednodes | |
548 |
|
548 | |||
549 | # Compute delta bases for nodes: |
|
549 | # Compute delta bases for nodes: | |
550 | deltabases = {} |
|
550 | deltabases = {} | |
551 | nobase = set() |
|
551 | nobase = set() | |
552 | referenced = set() |
|
552 | referenced = set() | |
553 | nodes = set(nodes) |
|
553 | nodes = set(nodes) | |
554 | processprogress = ui.makeprogress(_("processing nodes"), |
|
554 | processprogress = ui.makeprogress(_("processing nodes"), | |
555 | unit='nodes', |
|
555 | unit='nodes', | |
556 | total=len(orderednodes)) |
|
556 | total=len(orderednodes)) | |
557 | for i, node in enumerate(orderednodes): |
|
557 | for i, node in enumerate(orderednodes): | |
558 | processprogress.update(i) |
|
558 | processprogress.update(i) | |
559 | # Find delta base |
|
559 | # Find delta base | |
560 | # TODO: allow delta'ing against most recent descendant instead |
|
560 | # TODO: allow delta'ing against most recent descendant instead | |
561 | # of immediate child |
|
561 | # of immediate child | |
562 | deltatuple = deltabases.get(node, None) |
|
562 | deltatuple = deltabases.get(node, None) | |
563 | if deltatuple is None: |
|
563 | if deltatuple is None: | |
564 | deltabase, chainlen = nullid, 0 |
|
564 | deltabase, chainlen = nullid, 0 | |
565 | deltabases[node] = (nullid, 0) |
|
565 | deltabases[node] = (nullid, 0) | |
566 | nobase.add(node) |
|
566 | nobase.add(node) | |
567 | else: |
|
567 | else: | |
568 | deltabase, chainlen = deltatuple |
|
568 | deltabase, chainlen = deltatuple | |
569 | referenced.add(deltabase) |
|
569 | referenced.add(deltabase) | |
570 |
|
570 | |||
571 | # Use available ancestor information to inform our delta choices |
|
571 | # Use available ancestor information to inform our delta choices | |
572 | ancestorinfo = ancestors.get(node) |
|
572 | ancestorinfo = ancestors.get(node) | |
573 | if ancestorinfo: |
|
573 | if ancestorinfo: | |
574 | p1, p2, linknode, copyfrom = ancestorinfo |
|
574 | p1, p2, linknode, copyfrom = ancestorinfo | |
575 |
|
575 | |||
576 | # The presence of copyfrom means we're at a point where the |
|
576 | # The presence of copyfrom means we're at a point where the | |
577 | # file was copied from elsewhere. So don't attempt to do any |
|
577 | # file was copied from elsewhere. So don't attempt to do any | |
578 | # deltas with the other file. |
|
578 | # deltas with the other file. | |
579 | if copyfrom: |
|
579 | if copyfrom: | |
580 | p1 = nullid |
|
580 | p1 = nullid | |
581 |
|
581 | |||
582 | if chainlen < maxchainlen: |
|
582 | if chainlen < maxchainlen: | |
583 | # Record this child as the delta base for its parents. |
|
583 | # Record this child as the delta base for its parents. | |
584 | # This may be non-optimal, since the parents may have |
|
584 | # This may be non-optimal, since the parents may have | |
585 | # many children, and this will only choose the last one. |
|
585 | # many children, and this will only choose the last one. | |
586 | # TODO: record all children and try all deltas to find |
|
586 | # TODO: record all children and try all deltas to find | |
587 | # best |
|
587 | # best | |
588 | if p1 != nullid: |
|
588 | if p1 != nullid: | |
589 | deltabases[p1] = (node, chainlen + 1) |
|
589 | deltabases[p1] = (node, chainlen + 1) | |
590 | if p2 != nullid: |
|
590 | if p2 != nullid: | |
591 | deltabases[p2] = (node, chainlen + 1) |
|
591 | deltabases[p2] = (node, chainlen + 1) | |
592 |
|
592 | |||
593 | # experimental config: repack.chainorphansbysize |
|
593 | # experimental config: repack.chainorphansbysize | |
594 | if ui.configbool('repack', 'chainorphansbysize'): |
|
594 | if ui.configbool('repack', 'chainorphansbysize'): | |
595 | orphans = nobase - referenced |
|
595 | orphans = nobase - referenced | |
596 | orderednodes = self._chainorphans(ui, filename, orderednodes, |
|
596 | orderednodes = self._chainorphans(ui, filename, orderednodes, | |
597 | orphans, deltabases) |
|
597 | orphans, deltabases) | |
598 |
|
598 | |||
599 | # Compute deltas and write to the pack |
|
599 | # Compute deltas and write to the pack | |
600 | for i, node in enumerate(orderednodes): |
|
600 | for i, node in enumerate(orderednodes): | |
601 | deltabase, chainlen = deltabases[node] |
|
601 | deltabase, chainlen = deltabases[node] | |
602 | # Compute delta |
|
602 | # Compute delta | |
603 | # TODO: Optimize the deltachain fetching. Since we're |
|
603 | # TODO: Optimize the deltachain fetching. Since we're | |
604 | # iterating over the different versions of the file, we may
|
604 | # iterating over the different versions of the file, we may | |
605 | # be fetching the same deltachain over and over again. |
|
605 | # be fetching the same deltachain over and over again. | |
606 | if deltabase != nullid: |
|
606 | if deltabase != nullid: | |
607 | deltaentry = self.data.getdelta(filename, node) |
|
607 | deltaentry = self.data.getdelta(filename, node) | |
608 | delta, deltabasename, origdeltabase, meta = deltaentry |
|
608 | delta, deltabasename, origdeltabase, meta = deltaentry | |
609 | size = meta.get(constants.METAKEYSIZE) |
|
609 | size = meta.get(constants.METAKEYSIZE) | |
610 | if (deltabasename != filename or origdeltabase != deltabase |
|
610 | if (deltabasename != filename or origdeltabase != deltabase | |
611 | or size is None): |
|
611 | or size is None): | |
612 | deltabasetext = self.data.get(filename, deltabase) |
|
612 | deltabasetext = self.data.get(filename, deltabase) | |
613 | original = self.data.get(filename, node) |
|
613 | original = self.data.get(filename, node) | |
614 | size = len(original) |
|
614 | size = len(original) | |
615 | delta = mdiff.textdiff(deltabasetext, original) |
|
615 | delta = mdiff.textdiff(deltabasetext, original) | |
616 | else: |
|
616 | else: | |
617 | delta = self.data.get(filename, node) |
|
617 | delta = self.data.get(filename, node) | |
618 | size = len(delta) |
|
618 | size = len(delta) | |
619 | meta = self.data.getmeta(filename, node) |
|
619 | meta = self.data.getmeta(filename, node) | |
620 |
|
620 | |||
621 | # TODO: don't use the delta if it's larger than the fulltext |
|
621 | # TODO: don't use the delta if it's larger than the fulltext | |
622 | if constants.METAKEYSIZE not in meta: |
|
622 | if constants.METAKEYSIZE not in meta: | |
623 | meta[constants.METAKEYSIZE] = size |
|
623 | meta[constants.METAKEYSIZE] = size | |
624 | target.add(filename, node, deltabase, delta, meta) |
|
624 | target.add(filename, node, deltabase, delta, meta) | |
625 |
|
625 | |||
626 | entries[node].datarepacked = True |
|
626 | entries[node].datarepacked = True | |
627 |
|
627 | |||
628 | processprogress.complete() |
|
628 | processprogress.complete() | |
629 | count += 1 |
|
629 | count += 1 | |
630 |
|
630 | |||
631 | repackprogress.complete() |
|
631 | repackprogress.complete() | |
632 | target.close(ledger=ledger) |
|
632 | target.close(ledger=ledger) | |
633 |
|
633 | |||
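The delta-base bookkeeping in repackdata() can be hard to follow inline, so here is a simplified, self-contained sketch of just that part, assuming a plain parents mapping instead of the ancestors/copyfrom handling (illustrative names only, not the extension's code):

    NULLID = b'\0' * 20

    def assigndeltabases(orderednodes, parentsof, maxchainlen=1000):
        deltabases = {}
        for node in orderednodes:                  # children-first order
            if node not in deltabases:
                deltabases[node] = (NULLID, 0)     # nothing chose it: store fulltext
            base, chainlen = deltabases[node]
            if chainlen < maxchainlen:
                for p in parentsof.get(node, ()):  # node becomes its parents' base
                    if p != NULLID:
                        deltabases[p] = (node, chainlen + 1)
        return deltabases

Each parent ends up keeping only the last child that claimed it, which is the non-optimal choice the TODO above refers to.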
634 | def repackhistory(self, ledger, target): |
|
634 | def repackhistory(self, ledger, target): | |
635 | ui = self.repo.ui |
|
635 | ui = self.repo.ui | |
636 |
|
636 | |||
637 | byfile = {} |
|
637 | byfile = {} | |
638 | for entry in ledger.entries.itervalues(): |
|
638 | for entry in ledger.entries.itervalues(): | |
639 | if entry.historysource: |
|
639 | if entry.historysource: | |
640 | byfile.setdefault(entry.filename, {})[entry.node] = entry |
|
640 | byfile.setdefault(entry.filename, {})[entry.node] = entry | |
641 |
|
641 | |||
642 | progress = ui.makeprogress(_("repacking history"), unit=self.unit, |
|
642 | progress = ui.makeprogress(_("repacking history"), unit=self.unit, | |
643 | total=len(byfile)) |
|
643 | total=len(byfile)) | |
644 | for filename, entries in sorted(byfile.iteritems()): |
|
644 | for filename, entries in sorted(byfile.iteritems()): | |
645 | ancestors = {} |
|
645 | ancestors = {} | |
646 | nodes = list(node for node in entries) |
|
646 | nodes = list(node for node in entries) | |
647 |
|
647 | |||
648 | for node in nodes: |
|
648 | for node in nodes: | |
649 | if node in ancestors: |
|
649 | if node in ancestors: | |
650 | continue |
|
650 | continue | |
651 | ancestors.update(self.history.getancestors(filename, node, |
|
651 | ancestors.update(self.history.getancestors(filename, node, | |
652 | known=ancestors)) |
|
652 | known=ancestors)) | |
653 |
|
653 | |||
654 | # Order the nodes children first |
|
654 | # Order the nodes children first | |
655 | orderednodes = reversed(self._toposort(ancestors)) |
|
655 | orderednodes = reversed(self._toposort(ancestors)) | |
656 |
|
656 | |||
657 | # Write to the pack |
|
657 | # Write to the pack | |
658 | dontprocess = set() |
|
658 | dontprocess = set() | |
659 | for node in orderednodes: |
|
659 | for node in orderednodes: | |
660 | p1, p2, linknode, copyfrom = ancestors[node] |
|
660 | p1, p2, linknode, copyfrom = ancestors[node] | |
661 |
|
661 | |||
662 | # If the node is marked dontprocess, but it's also in the |
|
662 | # If the node is marked dontprocess, but it's also in the | |
663 | # explicit entries set, that means the node exists both in this |
|
663 | # explicit entries set, that means the node exists both in this | |
664 | # file and in another file that was copied to this file. |
|
664 | # file and in another file that was copied to this file. | |
665 | # Usually this happens if the file was copied to another file, |
|
665 | # Usually this happens if the file was copied to another file, | |
666 | # then the copy was deleted, then reintroduced without copy |
|
666 | # then the copy was deleted, then reintroduced without copy | |
667 | # metadata. The original add and the new add have the same hash |
|
667 | # metadata. The original add and the new add have the same hash | |
668 | # since the content is identical and the parents are null. |
|
668 | # since the content is identical and the parents are null. | |
669 | if node in dontprocess and node not in entries: |
|
669 | if node in dontprocess and node not in entries: | |
670 | # If copyfrom == filename, it means the copy history |
|
670 | # If copyfrom == filename, it means the copy history | |
671 | # went to some other file, then came back to this one, so we
|
671 | # went to some other file, then came back to this one, so we | |
672 | # should continue processing it. |
|
672 | # should continue processing it. | |
673 | if p1 != nullid and copyfrom != filename: |
|
673 | if p1 != nullid and copyfrom != filename: | |
674 | dontprocess.add(p1) |
|
674 | dontprocess.add(p1) | |
675 | if p2 != nullid: |
|
675 | if p2 != nullid: | |
676 | dontprocess.add(p2) |
|
676 | dontprocess.add(p2) | |
677 | continue |
|
677 | continue | |
678 |
|
678 | |||
679 | if copyfrom: |
|
679 | if copyfrom: | |
680 | dontprocess.add(p1) |
|
680 | dontprocess.add(p1) | |
681 |
|
681 | |||
682 | target.add(filename, node, p1, p2, linknode, copyfrom) |
|
682 | target.add(filename, node, p1, p2, linknode, copyfrom) | |
683 |
|
683 | |||
684 | if node in entries: |
|
684 | if node in entries: | |
685 | entries[node].historyrepacked = True |
|
685 | entries[node].historyrepacked = True | |
686 |
|
686 | |||
687 | progress.increment() |
|
687 | progress.increment() | |
688 |
|
688 | |||
689 | progress.complete() |
|
689 | progress.complete() | |
690 | target.close(ledger=ledger) |
|
690 | target.close(ledger=ledger) | |
691 |
|
691 | |||
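The dontprocess rule above is subtle; here is a toy restatement of just the decision logic, detached from the pack-writing loop (the NULLID constant and all argument names are illustrative):

    NULLID = b'\0' * 20

    def shouldskip(node, p1, p2, copyfrom, filename, entries, dontprocess):
        # nodes reached only through copy metadata belong to the source file
        if node in dontprocess and node not in entries:
            if p1 != NULLID and copyfrom != filename:
                dontprocess.add(p1)
            if p2 != NULLID:
                dontprocess.add(p2)
            return True
        if copyfrom:
            # stop following p1: it lives in the file we were copied from
            dontprocess.add(p1)
        return False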
692 | def _toposort(self, ancestors): |
|
692 | def _toposort(self, ancestors): | |
693 | def parentfunc(node): |
|
693 | def parentfunc(node): | |
694 | p1, p2, linknode, copyfrom = ancestors[node] |
|
694 | p1, p2, linknode, copyfrom = ancestors[node] | |
695 | parents = [] |
|
695 | parents = [] | |
696 | if p1 != nullid: |
|
696 | if p1 != nullid: | |
697 | parents.append(p1) |
|
697 | parents.append(p1) | |
698 | if p2 != nullid: |
|
698 | if p2 != nullid: | |
699 | parents.append(p2) |
|
699 | parents.append(p2) | |
700 | return parents |
|
700 | return parents | |
701 |
|
701 | |||
702 | sortednodes = shallowutil.sortnodes(ancestors.keys(), parentfunc) |
|
702 | sortednodes = shallowutil.sortnodes(ancestors.keys(), parentfunc) | |
703 | return sortednodes |
|
703 | return sortednodes | |
704 |
|
704 | |||
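_toposort() only supplies the parent function; the actual ordering lives in shallowutil.sortnodes. A rough standalone equivalent (not the extension's real sortnodes implementation) using a depth-first sort so parents come out before children; reverse the result for the children-first order used above:

    NULLID = b'\0' * 20

    def toposort(ancestors):
        # ancestors maps node -> (p1, p2, linknode, copyfrom)
        def parents(node):
            p1, p2, _linknode, _copyfrom = ancestors[node]
            return [p for p in (p1, p2) if p != NULLID and p in ancestors]

        order, seen = [], set()

        def visit(node):
            if node in seen:
                return
            seen.add(node)
            for p in parents(node):
                visit(p)          # emit parents before the node itself
            order.append(node)

        for node in sorted(ancestors):
            visit(node)
        return order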
705 | class repackledger(object): |
|
705 | class repackledger(object): | |
706 | """Storage for all the bookkeeping that happens during a repack. It contains |
|
706 | """Storage for all the bookkeeping that happens during a repack. It contains | |
707 | the list of revisions being repacked, what happened to each revision, and |
|
707 | the list of revisions being repacked, what happened to each revision, and | |
708 | which source store contained which revision originally (for later cleanup). |
|
708 | which source store contained which revision originally (for later cleanup). | |
709 | """ |
|
709 | """ | |
710 | def __init__(self): |
|
710 | def __init__(self): | |
711 | self.entries = {} |
|
711 | self.entries = {} | |
712 | self.sources = {} |
|
712 | self.sources = {} | |
713 | self.created = set() |
|
713 | self.created = set() | |
714 |
|
714 | |||
715 | def markdataentry(self, source, filename, node): |
|
715 | def markdataentry(self, source, filename, node): | |
716 | """Mark the given filename+node revision as having a data rev in the |
|
716 | """Mark the given filename+node revision as having a data rev in the | |
717 | given source. |
|
717 | given source. | |
718 | """ |
|
718 | """ | |
719 | entry = self._getorcreateentry(filename, node) |
|
719 | entry = self._getorcreateentry(filename, node) | |
720 | entry.datasource = True |
|
720 | entry.datasource = True | |
721 | entries = self.sources.get(source) |
|
721 | entries = self.sources.get(source) | |
722 | if not entries: |
|
722 | if not entries: | |
723 | entries = set() |
|
723 | entries = set() | |
724 | self.sources[source] = entries |
|
724 | self.sources[source] = entries | |
725 | entries.add(entry) |
|
725 | entries.add(entry) | |
726 |
|
726 | |||
727 | def markhistoryentry(self, source, filename, node): |
|
727 | def markhistoryentry(self, source, filename, node): | |
728 | """Mark the given filename+node revision as having a history rev in the |
|
728 | """Mark the given filename+node revision as having a history rev in the | |
729 | given source. |
|
729 | given source. | |
730 | """ |
|
730 | """ | |
731 | entry = self._getorcreateentry(filename, node) |
|
731 | entry = self._getorcreateentry(filename, node) | |
732 | entry.historysource = True |
|
732 | entry.historysource = True | |
733 | entries = self.sources.get(source) |
|
733 | entries = self.sources.get(source) | |
734 | if not entries: |
|
734 | if not entries: | |
735 | entries = set() |
|
735 | entries = set() | |
736 | self.sources[source] = entries |
|
736 | self.sources[source] = entries | |
737 | entries.add(entry) |
|
737 | entries.add(entry) | |
738 |
|
738 | |||
739 | def _getorcreateentry(self, filename, node): |
|
739 | def _getorcreateentry(self, filename, node): | |
740 | key = (filename, node) |
|
740 | key = (filename, node) | |
741 | value = self.entries.get(key) |
|
741 | value = self.entries.get(key) | |
742 | if not value: |
|
742 | if not value: | |
743 | value = repackentry(filename, node) |
|
743 | value = repackentry(filename, node) | |
744 | self.entries[key] = value |
|
744 | self.entries[key] = value | |
745 |
|
745 | |||
746 | return value |
|
746 | return value | |
747 |
|
747 | |||
748 | def addcreated(self, value): |
|
748 | def addcreated(self, value): | |
749 | self.created.add(value) |
|
749 | self.created.add(value) | |
750 |
|
750 | |||
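A small usage sketch for the ledger, assuming the repackledger class defined above is in scope; the source store and the filename/node values are placeholders for the example:

    def summarize(ledger):
        repacked = sum(1 for e in ledger.entries.values()
                       if e.datarepacked or e.historyrepacked)
        gced = sum(1 for e in ledger.entries.values() if e.gced)
        return len(ledger.entries), repacked, gced

    ledger = repackledger()
    store = object()                                 # placeholder source store
    ledger.markdataentry(store, 'foo.txt', b'\x11' * 20)
    ledger.markhistoryentry(store, 'foo.txt', b'\x11' * 20)
    print(summarize(ledger))                         # (1, 0, 0) before any repack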
751 | class repackentry(object): |
|
751 | class repackentry(object): | |
752 | """Simple class representing a single revision entry in the repackledger. |
|
752 | """Simple class representing a single revision entry in the repackledger. | |
753 | """ |
|
753 | """ | |
754 | __slots__ = (r'filename', r'node', r'datasource', r'historysource', |
|
754 | __slots__ = (r'filename', r'node', r'datasource', r'historysource', | |
755 | r'datarepacked', r'historyrepacked', r'gced') |
|
755 | r'datarepacked', r'historyrepacked', r'gced') | |
756 | def __init__(self, filename, node): |
|
756 | def __init__(self, filename, node): | |
757 | self.filename = filename |
|
757 | self.filename = filename | |
758 | self.node = node |
|
758 | self.node = node | |
759 | # If the revision has a data entry in the source |
|
759 | # If the revision has a data entry in the source | |
760 | self.datasource = False |
|
760 | self.datasource = False | |
761 | # If the revision has a history entry in the source |
|
761 | # If the revision has a history entry in the source | |
762 | self.historysource = False |
|
762 | self.historysource = False | |
763 | # If the revision's data entry was repacked into the repack target |
|
763 | # If the revision's data entry was repacked into the repack target | |
764 | self.datarepacked = False |
|
764 | self.datarepacked = False | |
765 | # If the revision's history entry was repacked into the repack target |
|
765 | # If the revision's history entry was repacked into the repack target | |
766 | self.historyrepacked = False |
|
766 | self.historyrepacked = False | |
767 | # If garbage collected |
|
767 | # If garbage collected | |
768 | self.gced = False |
|
768 | self.gced = False | |
769 |
|
769 | |||
770 | def repacklockvfs(repo): |
|
770 | def repacklockvfs(repo): | |
771 | if util.safehasattr(repo, 'name'): |
|
771 | if util.safehasattr(repo, 'name'): | |
772 | # Lock in the shared cache so repacks across multiple copies of the same |
|
772 | # Lock in the shared cache so repacks across multiple copies of the same | |
773 | # repo are coordinated. |
|
773 | # repo are coordinated. | |
774 | sharedcachepath = shallowutil.getcachepackpath( |
|
774 | sharedcachepath = shallowutil.getcachepackpath( | |
775 | repo, |
|
775 | repo, | |
776 | constants.FILEPACK_CATEGORY) |
|
776 | constants.FILEPACK_CATEGORY) | |
777 | return vfs.vfs(sharedcachepath) |
|
777 | return vfs.vfs(sharedcachepath) | |
778 | else: |
|
778 | else: | |
779 | return repo.svfs |
|
779 | return repo.svfs |
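For context, a hedged sketch of how a caller might take this lock without blocking, following the timeout=0 pattern used in run() above; the error handling and warning message are assumptions for the example, not the extension's actual calling convention:

    from mercurial import error, lock as lockmod

    def trybackgroundrepacklock(repo):
        # repacklockvfs is the helper defined above
        try:
            return lockmod.lock(repacklockvfs(repo), "repacklock",
                                desc=None, timeout=0)
        except error.LockHeld:
            # another repack already holds the lock; skip this run
            repo.ui.warn("skipping repack - another repack is running\n")
            return None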
@@ -1,381 +1,376 b'' | |||||
1 | #require no-windows |
|
1 | #require no-windows | |
2 |
|
2 | |||
3 | $ . "$TESTDIR/remotefilelog-library.sh" |
|
3 | $ . "$TESTDIR/remotefilelog-library.sh" | |
4 | # devel.remotefilelog.ensurestart: reduce race condition with |
|
4 | # devel.remotefilelog.ensurestart: reduce race condition with | |
5 | # waiton{repack/prefetch} |
|
5 | # waiton{repack/prefetch} | |
6 | $ cat >> $HGRCPATH <<EOF |
|
6 | $ cat >> $HGRCPATH <<EOF | |
7 | > [devel] |
|
7 | > [devel] | |
8 | > remotefilelog.ensurestart=True |
|
8 | > remotefilelog.ensurestart=True | |
9 | > EOF |
|
9 | > EOF | |
10 |
|
10 | |||
11 | $ hg init master |
|
11 | $ hg init master | |
12 | $ cd master |
|
12 | $ cd master | |
13 | $ cat >> .hg/hgrc <<EOF |
|
13 | $ cat >> .hg/hgrc <<EOF | |
14 | > [remotefilelog] |
|
14 | > [remotefilelog] | |
15 | > server=True |
|
15 | > server=True | |
16 | > EOF |
|
16 | > EOF | |
17 | $ echo x > x |
|
17 | $ echo x > x | |
18 | $ echo z > z |
|
18 | $ echo z > z | |
19 | $ hg commit -qAm x |
|
19 | $ hg commit -qAm x | |
20 | $ echo x2 > x |
|
20 | $ echo x2 > x | |
21 | $ echo y > y |
|
21 | $ echo y > y | |
22 | $ hg commit -qAm y |
|
22 | $ hg commit -qAm y | |
23 | $ echo w > w |
|
23 | $ echo w > w | |
24 | $ rm z |
|
24 | $ rm z | |
25 | $ hg commit -qAm w |
|
25 | $ hg commit -qAm w | |
26 | $ hg bookmark foo |
|
26 | $ hg bookmark foo | |
27 |
|
27 | |||
28 | $ cd .. |
|
28 | $ cd .. | |
29 |
|
29 | |||
30 | # clone the repo |
|
30 | # clone the repo | |
31 |
|
31 | |||
32 | $ hgcloneshallow ssh://user@dummy/master shallow --noupdate |
|
32 | $ hgcloneshallow ssh://user@dummy/master shallow --noupdate | |
33 | streaming all changes |
|
33 | streaming all changes | |
34 | 2 files to transfer, 776 bytes of data |
|
34 | 2 files to transfer, 776 bytes of data | |
35 | transferred 776 bytes in * seconds (*/sec) (glob) |
|
35 | transferred 776 bytes in * seconds (*/sec) (glob) | |
36 | searching for changes |
|
36 | searching for changes | |
37 | no changes found |
|
37 | no changes found | |
38 |
|
38 | |||
39 | # Set the prefetchdays config to zero so that all commits are prefetched |
|
39 | # Set the prefetchdays config to zero so that all commits are prefetched | |
40 | # no matter what their creation date is. Also set prefetchdelay config |
|
40 | # no matter what their creation date is. Also set prefetchdelay config | |
41 | # to zero so that there is no delay between prefetches. |
|
41 | # to zero so that there is no delay between prefetches. | |
42 | $ cd shallow |
|
42 | $ cd shallow | |
43 | $ cat >> .hg/hgrc <<EOF |
|
43 | $ cat >> .hg/hgrc <<EOF | |
44 | > [remotefilelog] |
|
44 | > [remotefilelog] | |
45 | > prefetchdays=0 |
|
45 | > prefetchdays=0 | |
46 | > prefetchdelay=0 |
|
46 | > prefetchdelay=0 | |
47 | > EOF |
|
47 | > EOF | |
48 | $ cd .. |
|
48 | $ cd .. | |
49 |
|
49 | |||
50 | # prefetch a revision |
|
50 | # prefetch a revision | |
51 | $ cd shallow |
|
51 | $ cd shallow | |
52 |
|
52 | |||
53 | $ hg prefetch -r 0 |
|
53 | $ hg prefetch -r 0 | |
54 | 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob) |
|
54 | 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob) | |
55 |
|
55 | |||
56 | $ hg cat -r 0 x |
|
56 | $ hg cat -r 0 x | |
57 | x |
|
57 | x | |
58 |
|
58 | |||
59 | # background prefetch on pull when configured |
|
59 | # background prefetch on pull when configured | |
60 |
|
60 | |||
61 | $ cat >> .hg/hgrc <<EOF |
|
61 | $ cat >> .hg/hgrc <<EOF | |
62 | > [remotefilelog] |
|
62 | > [remotefilelog] | |
63 | > pullprefetch=bookmark() |
|
63 | > pullprefetch=bookmark() | |
64 | > backgroundprefetch=True |
|
64 | > backgroundprefetch=True | |
65 | > EOF |
|
65 | > EOF | |
66 | $ hg strip tip |
|
66 | $ hg strip tip | |
67 | saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/6b4b6f66ef8c-b4b8bdaf-backup.hg (glob) |
|
67 | saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/6b4b6f66ef8c-b4b8bdaf-backup.hg (glob) | |
68 |
|
68 | |||
69 | $ clearcache |
|
69 | $ clearcache | |
70 | $ hg pull |
|
70 | $ hg pull | |
71 | pulling from ssh://user@dummy/master |
|
71 | pulling from ssh://user@dummy/master | |
72 | searching for changes |
|
72 | searching for changes | |
73 | adding changesets |
|
73 | adding changesets | |
74 | adding manifests |
|
74 | adding manifests | |
75 | adding file changes |
|
75 | adding file changes | |
76 | updating bookmark foo |
|
76 | updating bookmark foo | |
77 | added 1 changesets with 0 changes to 0 files |
|
77 | added 1 changesets with 0 changes to 0 files | |
78 | new changesets 6b4b6f66ef8c |
|
78 | new changesets 6b4b6f66ef8c | |
79 | (run 'hg update' to get a working copy) |
|
79 | (run 'hg update' to get a working copy) | |
80 | prefetching file contents |
|
80 | prefetching file contents | |
81 | $ sleep 0.5 |
|
81 | $ sleep 0.5 | |
82 | $ hg debugwaitonprefetch >/dev/null 2>%1 |
|
82 | $ hg debugwaitonprefetch >/dev/null 2>%1 | |
83 | $ find $CACHEDIR -type f | sort |
|
83 | $ find $CACHEDIR -type f | sort | |
84 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/ef95c5376f34698742fe34f315fd82136f8f68c0 |
|
84 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/ef95c5376f34698742fe34f315fd82136f8f68c0 | |
85 | $TESTTMP/hgcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/076f5e2225b3ff0400b98c92aa6cdf403ee24cca |
|
85 | $TESTTMP/hgcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/076f5e2225b3ff0400b98c92aa6cdf403ee24cca | |
86 | $TESTTMP/hgcache/master/af/f024fe4ab0fece4091de044c58c9ae4233383a/bb6ccd5dceaa5e9dc220e0dad65e051b94f69a2c |
|
86 | $TESTTMP/hgcache/master/af/f024fe4ab0fece4091de044c58c9ae4233383a/bb6ccd5dceaa5e9dc220e0dad65e051b94f69a2c | |
87 | $TESTTMP/hgcache/repos |
|
87 | $TESTTMP/hgcache/repos | |
88 |
|
88 | |||
89 | # background prefetch with repack on pull when configured |
|
89 | # background prefetch with repack on pull when configured | |
90 |
|
90 | |||
91 | $ cat >> .hg/hgrc <<EOF |
|
91 | $ cat >> .hg/hgrc <<EOF | |
92 | > [remotefilelog] |
|
92 | > [remotefilelog] | |
93 | > backgroundrepack=True |
|
93 | > backgroundrepack=True | |
94 | > EOF |
|
94 | > EOF | |
95 | $ hg strip tip |
|
95 | $ hg strip tip | |
96 | saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/6b4b6f66ef8c-b4b8bdaf-backup.hg (glob) |
|
96 | saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/6b4b6f66ef8c-b4b8bdaf-backup.hg (glob) | |
97 |
|
97 | |||
98 | $ clearcache |
|
98 | $ clearcache | |
99 | $ hg pull |
|
99 | $ hg pull | |
100 | pulling from ssh://user@dummy/master |
|
100 | pulling from ssh://user@dummy/master | |
101 | searching for changes |
|
101 | searching for changes | |
102 | adding changesets |
|
102 | adding changesets | |
103 | adding manifests |
|
103 | adding manifests | |
104 | adding file changes |
|
104 | adding file changes | |
105 | updating bookmark foo |
|
105 | updating bookmark foo | |
106 | added 1 changesets with 0 changes to 0 files |
|
106 | added 1 changesets with 0 changes to 0 files | |
107 | new changesets 6b4b6f66ef8c |
|
107 | new changesets 6b4b6f66ef8c | |
108 | (run 'hg update' to get a working copy) |
|
108 | (run 'hg update' to get a working copy) | |
109 | prefetching file contents |
|
109 | prefetching file contents | |
110 | $ sleep 0.5 |
|
110 | $ sleep 0.5 | |
111 | $ hg debugwaitonprefetch >/dev/null 2>%1 |
|
111 | $ hg debugwaitonprefetch >/dev/null 2>%1 | |
112 | $ sleep 0.5 |
|
112 | $ sleep 0.5 | |
113 | $ hg debugwaitonrepack >/dev/null 2>%1 |
|
113 | $ hg debugwaitonrepack >/dev/null 2>%1 | |
114 | $ sleep 0.5 |
|
114 | $ sleep 0.5 | |
115 | $ find $CACHEDIR -type f | sort |
|
115 | $ find $CACHEDIR -type f | sort | |
116 | $TESTTMP/hgcache/master/packs/6e8633deba6e544e5f8edbd7b996d6e31a2c42ae.histidx |
|
116 | $TESTTMP/hgcache/master/packs/6e8633deba6e544e5f8edbd7b996d6e31a2c42ae.histidx | |
117 | $TESTTMP/hgcache/master/packs/6e8633deba6e544e5f8edbd7b996d6e31a2c42ae.histpack |
|
117 | $TESTTMP/hgcache/master/packs/6e8633deba6e544e5f8edbd7b996d6e31a2c42ae.histpack | |
118 | $TESTTMP/hgcache/master/packs/8ce5ab3745465ab83bba30a7b9c295e0c8404652.dataidx |
|
118 | $TESTTMP/hgcache/master/packs/8ce5ab3745465ab83bba30a7b9c295e0c8404652.dataidx | |
119 | $TESTTMP/hgcache/master/packs/8ce5ab3745465ab83bba30a7b9c295e0c8404652.datapack |
|
119 | $TESTTMP/hgcache/master/packs/8ce5ab3745465ab83bba30a7b9c295e0c8404652.datapack | |
120 | $TESTTMP/hgcache/master/packs/repacklock |
|
|||
121 | $TESTTMP/hgcache/repos |
|
120 | $TESTTMP/hgcache/repos | |
122 |
|
121 | |||
123 | # background prefetch with repack on update when wcprevset configured |
|
122 | # background prefetch with repack on update when wcprevset configured | |
124 |
|
123 | |||
125 | $ clearcache |
|
124 | $ clearcache | |
126 | $ hg up -r 0 |
|
125 | $ hg up -r 0 | |
127 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
126 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved | |
128 | 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob) |
|
127 | 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob) | |
129 | $ find $CACHEDIR -type f | sort |
|
128 | $ find $CACHEDIR -type f | sort | |
130 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0 |
|
129 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0 | |
131 | $TESTTMP/hgcache/master/39/5df8f7c51f007019cb30201c49e884b46b92fa/69a1b67522704ec122181c0890bd16e9d3e7516a |
|
130 | $TESTTMP/hgcache/master/39/5df8f7c51f007019cb30201c49e884b46b92fa/69a1b67522704ec122181c0890bd16e9d3e7516a | |
132 | $TESTTMP/hgcache/repos |
|
131 | $TESTTMP/hgcache/repos | |
133 |
|
132 | |||
134 | $ hg up -r 1 |
|
133 | $ hg up -r 1 | |
135 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
134 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved | |
136 | 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over *s (glob) |
|
135 | 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over *s (glob) | |
137 |
|
136 | |||
138 | $ cat >> .hg/hgrc <<EOF |
|
137 | $ cat >> .hg/hgrc <<EOF | |
139 | > [remotefilelog] |
|
138 | > [remotefilelog] | |
140 | > bgprefetchrevs=.:: |
|
139 | > bgprefetchrevs=.:: | |
141 | > EOF |
|
140 | > EOF | |
142 |
|
141 | |||
143 | $ clearcache |
|
142 | $ clearcache | |
144 | $ hg up -r 0 |
|
143 | $ hg up -r 0 | |
145 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
144 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved | |
146 | * files fetched over * fetches - (* misses, 0.00% hit ratio) over *s (glob) |
|
145 | * files fetched over * fetches - (* misses, 0.00% hit ratio) over *s (glob) | |
147 | $ sleep 1 |
|
146 | $ sleep 1 | |
148 | $ hg debugwaitonprefetch >/dev/null 2>%1 |
|
147 | $ hg debugwaitonprefetch >/dev/null 2>%1 | |
149 | $ sleep 1 |
|
148 | $ sleep 1 | |
150 | $ hg debugwaitonrepack >/dev/null 2>%1 |
|
149 | $ hg debugwaitonrepack >/dev/null 2>%1 | |
151 | $ sleep 1 |
|
150 | $ sleep 1 | |
152 | $ find $CACHEDIR -type f | sort |
|
151 | $ find $CACHEDIR -type f | sort | |
153 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histidx |
|
152 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histidx | |
154 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack |
|
153 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack | |
155 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.dataidx |
|
154 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.dataidx | |
156 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.datapack |
|
155 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.datapack | |
157 | $TESTTMP/hgcache/master/packs/repacklock |
|
|||
158 | $TESTTMP/hgcache/repos |
|
156 | $TESTTMP/hgcache/repos | |
159 |
|
157 | |||
160 | # Ensure that file 'w' was prefetched - it was not part of the update operation and therefore |
|
158 | # Ensure that file 'w' was prefetched - it was not part of the update operation and therefore | |
161 | # could only be downloaded by the background prefetch |
|
159 | # could only be downloaded by the background prefetch | |
162 |
|
160 | |||
163 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` |
|
161 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` | |
164 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407: |
|
162 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407: | |
165 | w: |
|
163 | w: | |
166 | Node Delta Base Delta Length Blob Size |
|
164 | Node Delta Base Delta Length Blob Size | |
167 | bb6ccd5dceaa 000000000000 2 2 |
|
165 | bb6ccd5dceaa 000000000000 2 2 | |
168 |
|
166 | |||
169 | Total: 2 2 (0.0% bigger) |
|
167 | Total: 2 2 (0.0% bigger) | |
170 | x: |
|
168 | x: | |
171 | Node Delta Base Delta Length Blob Size |
|
169 | Node Delta Base Delta Length Blob Size | |
172 | ef95c5376f34 000000000000 3 3 |
|
170 | ef95c5376f34 000000000000 3 3 | |
173 | 1406e7411862 ef95c5376f34 14 2 |
|
171 | 1406e7411862 ef95c5376f34 14 2 | |
174 |
|
172 | |||
175 | Total: 17 5 (240.0% bigger) |
|
173 | Total: 17 5 (240.0% bigger) | |
176 | y: |
|
174 | y: | |
177 | Node Delta Base Delta Length Blob Size |
|
175 | Node Delta Base Delta Length Blob Size | |
178 | 076f5e2225b3 000000000000 2 2 |
|
176 | 076f5e2225b3 000000000000 2 2 | |
179 |
|
177 | |||
180 | Total: 2 2 (0.0% bigger) |
|
178 | Total: 2 2 (0.0% bigger) | |
181 | z: |
|
179 | z: | |
182 | Node Delta Base Delta Length Blob Size |
|
180 | Node Delta Base Delta Length Blob Size | |
183 | 69a1b6752270 000000000000 2 2 |
|
181 | 69a1b6752270 000000000000 2 2 | |
184 |
|
182 | |||
185 | Total: 2 2 (0.0% bigger) |
|
183 | Total: 2 2 (0.0% bigger) | |
186 |
|
184 | |||
187 | # background prefetch with repack on commit when wcprevset configured |
|
185 | # background prefetch with repack on commit when wcprevset configured | |
188 |
|
186 | |||
189 | $ cat >> .hg/hgrc <<EOF |
|
187 | $ cat >> .hg/hgrc <<EOF | |
190 | > [remotefilelog] |
|
188 | > [remotefilelog] | |
191 | > bgprefetchrevs=0:: |
|
189 | > bgprefetchrevs=0:: | |
192 | > EOF |
|
190 | > EOF | |
193 |
|
191 | |||
194 | $ clearcache |
|
192 | $ clearcache | |
195 | $ find $CACHEDIR -type f | sort |
|
193 | $ find $CACHEDIR -type f | sort | |
196 | $ echo b > b |
|
194 | $ echo b > b | |
197 | $ hg commit -qAm b |
|
195 | $ hg commit -qAm b | |
198 | * files fetched over 1 fetches - (* misses, 0.00% hit ratio) over *s (glob) |
|
196 | * files fetched over 1 fetches - (* misses, 0.00% hit ratio) over *s (glob) | |
199 | $ hg bookmark temporary |
|
197 | $ hg bookmark temporary | |
200 | $ sleep 1 |
|
198 | $ sleep 1 | |
201 | $ hg debugwaitonprefetch >/dev/null 2>%1 |
|
199 | $ hg debugwaitonprefetch >/dev/null 2>%1 | |
202 | $ sleep 1 |
|
200 | $ sleep 1 | |
203 | $ hg debugwaitonrepack >/dev/null 2>%1 |
|
201 | $ hg debugwaitonrepack >/dev/null 2>%1 | |
204 | $ sleep 1 |
|
202 | $ sleep 1 | |
205 | $ find $CACHEDIR -type f | sort |
|
203 | $ find $CACHEDIR -type f | sort | |
206 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histidx |
|
204 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histidx | |
207 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack |
|
205 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack | |
208 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.dataidx |
|
206 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.dataidx | |
209 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.datapack |
|
207 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.datapack | |
210 | $TESTTMP/hgcache/master/packs/repacklock |
|
|||
211 | $TESTTMP/hgcache/repos |
|
208 | $TESTTMP/hgcache/repos | |
212 |
|
209 | |||
213 | # Ensure that file 'w' was prefetched - it was not part of the commit operation and therefore |
|
210 | # Ensure that file 'w' was prefetched - it was not part of the commit operation and therefore | |
214 | # could only be downloaded by the background prefetch |
|
211 | # could only be downloaded by the background prefetch | |
215 |
|
212 | |||
216 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` |
|
213 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` | |
217 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407: |
|
214 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407: | |
218 | w: |
|
215 | w: | |
219 | Node Delta Base Delta Length Blob Size |
|
216 | Node Delta Base Delta Length Blob Size | |
220 | bb6ccd5dceaa 000000000000 2 2 |
|
217 | bb6ccd5dceaa 000000000000 2 2 | |
221 |
|
218 | |||
222 | Total: 2 2 (0.0% bigger) |
|
219 | Total: 2 2 (0.0% bigger) | |
223 | x: |
|
220 | x: | |
224 | Node Delta Base Delta Length Blob Size |
|
221 | Node Delta Base Delta Length Blob Size | |
225 | ef95c5376f34 000000000000 3 3 |
|
222 | ef95c5376f34 000000000000 3 3 | |
226 | 1406e7411862 ef95c5376f34 14 2 |
|
223 | 1406e7411862 ef95c5376f34 14 2 | |
227 |
|
224 | |||
228 | Total: 17 5 (240.0% bigger) |
|
225 | Total: 17 5 (240.0% bigger) | |
229 | y: |
|
226 | y: | |
230 | Node Delta Base Delta Length Blob Size |
|
227 | Node Delta Base Delta Length Blob Size | |
231 | 076f5e2225b3 000000000000 2 2 |
|
228 | 076f5e2225b3 000000000000 2 2 | |
232 |
|
229 | |||
233 | Total: 2 2 (0.0% bigger) |
|
230 | Total: 2 2 (0.0% bigger) | |
234 | z: |
|
231 | z: | |
235 | Node Delta Base Delta Length Blob Size |
|
232 | Node Delta Base Delta Length Blob Size | |
236 | 69a1b6752270 000000000000 2 2 |
|
233 | 69a1b6752270 000000000000 2 2 | |
237 |
|
234 | |||
238 | Total: 2 2 (0.0% bigger) |
|
235 | Total: 2 2 (0.0% bigger) | |
239 |
|
236 | |||
240 | # background prefetch with repack on rebase when wcprevset configured |
|
237 | # background prefetch with repack on rebase when wcprevset configured | |
241 |
|
238 | |||
242 | $ hg up -r 2 |
|
239 | $ hg up -r 2 | |
243 | 3 files updated, 0 files merged, 3 files removed, 0 files unresolved |
|
240 | 3 files updated, 0 files merged, 3 files removed, 0 files unresolved | |
244 | (leaving bookmark temporary) |
|
241 | (leaving bookmark temporary) | |
245 | $ clearcache |
|
242 | $ clearcache | |
246 | $ find $CACHEDIR -type f | sort |
|
243 | $ find $CACHEDIR -type f | sort | |
247 | $ hg rebase -s temporary -d foo |
|
244 | $ hg rebase -s temporary -d foo | |
248 | rebasing 3:58147a5b5242 "b" (temporary tip) |
|
245 | rebasing 3:58147a5b5242 "b" (temporary tip) | |
249 | saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/58147a5b5242-c3678817-rebase.hg
|
246 | saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/58147a5b5242-c3678817-rebase.hg | |
250 | 3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over *s (glob) |
|
247 | 3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over *s (glob) | |
251 | $ sleep 1 |
|
248 | $ sleep 1 | |
252 | $ hg debugwaitonprefetch >/dev/null 2>%1 |
|
249 | $ hg debugwaitonprefetch >/dev/null 2>%1 | |
253 | $ sleep 1 |
|
250 | $ sleep 1 | |
254 | $ hg debugwaitonrepack >/dev/null 2>%1 |
|
251 | $ hg debugwaitonrepack >/dev/null 2>%1 | |
255 | $ sleep 1 |
|
252 | $ sleep 1 | |
256 |
|
253 | |||
257 | # Ensure that file 'y' was prefetched - it was not part of the rebase operation and therefore |
|
254 | # Ensure that file 'y' was prefetched - it was not part of the rebase operation and therefore | |
258 | # could only be downloaded by the background prefetch |
|
255 | # could only be downloaded by the background prefetch | |
259 |
|
256 | |||
260 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` |
|
257 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` | |
261 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407: |
|
258 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407: | |
262 | w: |
|
259 | w: | |
263 | Node Delta Base Delta Length Blob Size |
|
260 | Node Delta Base Delta Length Blob Size | |
264 | bb6ccd5dceaa 000000000000 2 2 |
|
261 | bb6ccd5dceaa 000000000000 2 2 | |
265 |
|
262 | |||
266 | Total: 2 2 (0.0% bigger) |
|
263 | Total: 2 2 (0.0% bigger) | |
267 | x: |
|
264 | x: | |
268 | Node Delta Base Delta Length Blob Size |
|
265 | Node Delta Base Delta Length Blob Size | |
269 | ef95c5376f34 000000000000 3 3 |
|
266 | ef95c5376f34 000000000000 3 3 | |
270 | 1406e7411862 ef95c5376f34 14 2 |
|
267 | 1406e7411862 ef95c5376f34 14 2 | |
271 |
|
268 | |||
272 | Total: 17 5 (240.0% bigger) |
|
269 | Total: 17 5 (240.0% bigger) | |
273 | y: |
|
270 | y: | |
274 | Node Delta Base Delta Length Blob Size |
|
271 | Node Delta Base Delta Length Blob Size | |
275 | 076f5e2225b3 000000000000 2 2 |
|
272 | 076f5e2225b3 000000000000 2 2 | |
276 |
|
273 | |||
277 | Total: 2 2 (0.0% bigger) |
|
274 | Total: 2 2 (0.0% bigger) | |
278 | z: |
|
275 | z: | |
279 | Node Delta Base Delta Length Blob Size |
|
276 | Node Delta Base Delta Length Blob Size | |
280 | 69a1b6752270 000000000000 2 2 |
|
277 | 69a1b6752270 000000000000 2 2 | |
281 |
|
278 | |||
282 | Total: 2 2 (0.0% bigger) |
|
279 | Total: 2 2 (0.0% bigger) | |
283 |
|
280 | |||
284 | # Check that foreground prefetch with no arguments blocks until background prefetches finish |
|
281 | # Check that foreground prefetch with no arguments blocks until background prefetches finish | |
285 |
|
282 | |||
286 | $ hg up -r 3 |
|
283 | $ hg up -r 3 | |
287 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
284 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved | |
288 | $ clearcache |
|
285 | $ clearcache | |
289 | $ hg prefetch --repack |
|
286 | $ hg prefetch --repack | |
290 | waiting for lock on prefetching in $TESTTMP/shallow held by process * on host * (glob) (?) |
|
287 | waiting for lock on prefetching in $TESTTMP/shallow held by process * on host * (glob) (?) | |
291 | got lock after * seconds (glob) (?) |
|
288 | got lock after * seconds (glob) (?) | |
292 | (running background incremental repack) |
|
289 | (running background incremental repack) | |
293 | * files fetched over 1 fetches - (* misses, 0.00% hit ratio) over *s (glob) (?) |
|
290 | * files fetched over 1 fetches - (* misses, 0.00% hit ratio) over *s (glob) (?) | |
294 |
|
291 | |||
295 | $ sleep 0.5 |
|
292 | $ sleep 0.5 | |
296 | $ hg debugwaitonrepack >/dev/null 2>%1 |
|
293 | $ hg debugwaitonrepack >/dev/null 2>%1 | |
297 | $ sleep 0.5 |
|
294 | $ sleep 0.5 | |
298 |
|
295 | |||
299 | $ find $CACHEDIR -type f | sort |
|
296 | $ find $CACHEDIR -type f | sort | |
300 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histidx |
|
297 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histidx | |
301 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack |
|
298 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack | |
302 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.dataidx |
|
299 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.dataidx | |
303 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.datapack |
|
300 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.datapack | |
304 | $TESTTMP/hgcache/master/packs/repacklock |
|
|||
305 | $TESTTMP/hgcache/repos |
|
301 | $TESTTMP/hgcache/repos | |
306 |
|
302 | |||
307 | # Ensure that files were prefetched |
|
303 | # Ensure that files were prefetched | |
308 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` |
|
304 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` | |
309 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407: |
|
305 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407: | |
310 | w: |
|
306 | w: | |
311 | Node Delta Base Delta Length Blob Size |
|
307 | Node Delta Base Delta Length Blob Size | |
312 | bb6ccd5dceaa 000000000000 2 2 |
|
308 | bb6ccd5dceaa 000000000000 2 2 | |
313 |
|
309 | |||
314 | Total: 2 2 (0.0% bigger) |
|
310 | Total: 2 2 (0.0% bigger) | |
315 | x: |
|
311 | x: | |
316 | Node Delta Base Delta Length Blob Size |
|
312 | Node Delta Base Delta Length Blob Size | |
317 | ef95c5376f34 000000000000 3 3 |
|
313 | ef95c5376f34 000000000000 3 3 | |
318 | 1406e7411862 ef95c5376f34 14 2 |
|
314 | 1406e7411862 ef95c5376f34 14 2 | |
319 |
|
315 | |||
320 | Total: 17 5 (240.0% bigger) |
|
316 | Total: 17 5 (240.0% bigger) | |
321 | y: |
|
317 | y: | |
322 | Node Delta Base Delta Length Blob Size |
|
318 | Node Delta Base Delta Length Blob Size | |
323 | 076f5e2225b3 000000000000 2 2 |
|
319 | 076f5e2225b3 000000000000 2 2 | |
324 |
|
320 | |||
325 | Total: 2 2 (0.0% bigger) |
|
321 | Total: 2 2 (0.0% bigger) | |
326 | z: |
|
322 | z: | |
327 | Node Delta Base Delta Length Blob Size |
|
323 | Node Delta Base Delta Length Blob Size | |
328 | 69a1b6752270 000000000000 2 2 |
|
324 | 69a1b6752270 000000000000 2 2 | |
329 |
|
325 | |||
330 | Total: 2 2 (0.0% bigger) |
|
326 | Total: 2 2 (0.0% bigger) | |
331 |
|
327 | |||
332 | # Check that foreground prefetch fetches revs specified by '. + draft() + bgprefetchrevs + pullprefetch' |
|
328 | # Check that foreground prefetch fetches revs specified by '. + draft() + bgprefetchrevs + pullprefetch' | |
333 |
|
329 | |||
334 | $ clearcache |
|
330 | $ clearcache | |
335 | $ hg prefetch --repack |
|
331 | $ hg prefetch --repack | |
336 | waiting for lock on prefetching in $TESTTMP/shallow held by process * on host * (glob) (?) |
|
332 | waiting for lock on prefetching in $TESTTMP/shallow held by process * on host * (glob) (?) | |
337 | got lock after * seconds (glob) (?) |
|
333 | got lock after * seconds (glob) (?) | |
338 | (running background incremental repack) |
|
334 | (running background incremental repack) | |
339 | * files fetched over 1 fetches - (* misses, 0.00% hit ratio) over *s (glob) (?) |
|
335 | * files fetched over 1 fetches - (* misses, 0.00% hit ratio) over *s (glob) (?) | |
340 | $ sleep 0.5 |
|
336 | $ sleep 0.5 | |
341 | $ hg debugwaitonrepack >/dev/null 2>%1 |
|
337 | $ hg debugwaitonrepack >/dev/null 2>%1 | |
342 | $ sleep 0.5 |
|
338 | $ sleep 0.5 | |
343 |
|
339 | |||
344 | $ find $CACHEDIR -type f | sort |
|
340 | $ find $CACHEDIR -type f | sort | |
345 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histidx |
|
341 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histidx | |
346 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack |
|
342 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack | |
347 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.dataidx |
|
343 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.dataidx | |
348 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.datapack |
|
344 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.datapack | |
349 | $TESTTMP/hgcache/master/packs/repacklock |
|
|||
350 | $TESTTMP/hgcache/repos |
|
345 | $TESTTMP/hgcache/repos | |
351 |
|
346 | |||
352 | # Ensure that files were prefetched |
|
347 | # Ensure that files were prefetched | |
353 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` |
|
348 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` | |
354 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407: |
|
349 | $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407: | |
355 | w: |
|
350 | w: | |
356 | Node Delta Base Delta Length Blob Size |
|
351 | Node Delta Base Delta Length Blob Size | |
357 | bb6ccd5dceaa 000000000000 2 2 |
|
352 | bb6ccd5dceaa 000000000000 2 2 | |
358 |
|
353 | |||
359 | Total: 2 2 (0.0% bigger) |
|
354 | Total: 2 2 (0.0% bigger) | |
360 | x: |
|
355 | x: | |
361 | Node Delta Base Delta Length Blob Size |
|
356 | Node Delta Base Delta Length Blob Size | |
362 | ef95c5376f34 000000000000 3 3 |
|
357 | ef95c5376f34 000000000000 3 3 | |
363 | 1406e7411862 ef95c5376f34 14 2 |
|
358 | 1406e7411862 ef95c5376f34 14 2 | |
364 |
|
359 | |||
365 | Total: 17 5 (240.0% bigger) |
|
360 | Total: 17 5 (240.0% bigger) | |
366 | y: |
|
361 | y: | |
367 | Node Delta Base Delta Length Blob Size |
|
362 | Node Delta Base Delta Length Blob Size | |
368 | 076f5e2225b3 000000000000 2 2 |
|
363 | 076f5e2225b3 000000000000 2 2 | |
369 |
|
364 | |||
370 | Total: 2 2 (0.0% bigger) |
|
365 | Total: 2 2 (0.0% bigger) | |
371 | z: |
|
366 | z: | |
372 | Node Delta Base Delta Length Blob Size |
|
367 | Node Delta Base Delta Length Blob Size | |
373 | 69a1b6752270 000000000000 2 2 |
|
368 | 69a1b6752270 000000000000 2 2 | |
374 |
|
369 | |||
375 | Total: 2 2 (0.0% bigger) |
|
370 | Total: 2 2 (0.0% bigger) | |
376 |
|
371 | |||
377 | # Test that if data was prefetched and repacked we don't need to prefetch it again |
|
372 | # Test that if data was prefetched and repacked we don't need to prefetch it again | |
378 | # It ensures that Mercurial looks not only in loose files but in packs as well |
|
373 | # It ensures that Mercurial looks not only in loose files but in packs as well | |
379 |
|
374 | |||
380 | $ hg prefetch --repack |
|
375 | $ hg prefetch --repack | |
381 | (running background incremental repack) |
|
376 | (running background incremental repack) |
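The updated expectations above drop $TESTTMP/hgcache/master/packs/repacklock from the "find $CACHEDIR -type f" listings: after a prefetch-triggered repack the shared cache is expected to contain only pack files, their indexes, and the repos file. A minimal sketch of that invariant in Python, using a hypothetical helper that is not part of the patch:

  import os

  PACK_SUFFIXES = ('.histidx', '.histpack', '.dataidx', '.datapack')

  def unexpected_cache_files(cachedir):
      """Return cache files that are neither pack/index files nor the repos file."""
      leftovers = []
      for root, _dirs, files in os.walk(cachedir):
          for name in files:
              if name == 'repos' or name.endswith(PACK_SUFFIXES):
                  continue
              leftovers.append(os.path.join(root, name))
      return leftovers  # an empty list matches the expected listings above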
@@ -1,112 +1,111 @@ | |||
1 | #require no-windows |
|
1 | #require no-windows | |
2 |
|
2 | |||
3 | $ . "$TESTDIR/remotefilelog-library.sh" |
|
3 | $ . "$TESTDIR/remotefilelog-library.sh" | |
4 |
|
4 | |||
5 | $ hg init master |
|
5 | $ hg init master | |
6 | $ cd master |
|
6 | $ cd master | |
7 | $ cat >> .hg/hgrc <<EOF |
|
7 | $ cat >> .hg/hgrc <<EOF | |
8 | > [remotefilelog] |
|
8 | > [remotefilelog] | |
9 | > server=True |
|
9 | > server=True | |
10 | > serverexpiration=-1 |
|
10 | > serverexpiration=-1 | |
11 | > EOF |
|
11 | > EOF | |
12 | $ echo x > x |
|
12 | $ echo x > x | |
13 | $ hg commit -qAm x |
|
13 | $ hg commit -qAm x | |
14 | $ cd .. |
|
14 | $ cd .. | |
15 |
|
15 | |||
16 | $ hgcloneshallow ssh://user@dummy/master shallow -q |
|
16 | $ hgcloneshallow ssh://user@dummy/master shallow -q | |
17 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) |
|
17 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) | |
18 |
|
18 | |||
19 | # Set the prefetchdays config to zero so that all commits are prefetched |
|
19 | # Set the prefetchdays config to zero so that all commits are prefetched | |
20 | # no matter what their creation date is. |
|
20 | # no matter what their creation date is. | |
21 | $ cd shallow |
|
21 | $ cd shallow | |
22 | $ cat >> .hg/hgrc <<EOF |
|
22 | $ cat >> .hg/hgrc <<EOF | |
23 | > [remotefilelog] |
|
23 | > [remotefilelog] | |
24 | > prefetchdays=0 |
|
24 | > prefetchdays=0 | |
25 | > EOF |
|
25 | > EOF | |
26 | $ cd .. |
|
26 | $ cd .. | |
27 |
|
27 | |||
28 | # commit a new version of x so we can gc the old one |
|
28 | # commit a new version of x so we can gc the old one | |
29 |
|
29 | |||
30 | $ cd master |
|
30 | $ cd master | |
31 | $ echo y > x |
|
31 | $ echo y > x | |
32 | $ hg commit -qAm y |
|
32 | $ hg commit -qAm y | |
33 | $ cd .. |
|
33 | $ cd .. | |
34 |
|
34 | |||
35 | $ cd shallow |
|
35 | $ cd shallow | |
36 | $ hg pull -q |
|
36 | $ hg pull -q | |
37 | $ hg update -q |
|
37 | $ hg update -q | |
38 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) |
|
38 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) | |
39 | $ cd .. |
|
39 | $ cd .. | |
40 |
|
40 | |||
41 | # gc client cache |
|
41 | # gc client cache | |
42 |
|
42 | |||
43 | $ lastweek=`$PYTHON -c 'import datetime,time; print(datetime.datetime.fromtimestamp(time.time() - (86400 * 7)).strftime("%y%m%d%H%M"))'` |
|
43 | $ lastweek=`$PYTHON -c 'import datetime,time; print(datetime.datetime.fromtimestamp(time.time() - (86400 * 7)).strftime("%y%m%d%H%M"))'` | |
44 | $ find $CACHEDIR -type f -exec touch -t $lastweek {} \; |
|
44 | $ find $CACHEDIR -type f -exec touch -t $lastweek {} \; | |
45 |
|
45 | |||
46 | $ find $CACHEDIR -type f | sort |
|
46 | $ find $CACHEDIR -type f | sort | |
47 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0 (glob) |
|
47 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0 (glob) | |
48 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob) |
|
48 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob) | |
49 | $TESTTMP/hgcache/repos (glob) |
|
49 | $TESTTMP/hgcache/repos (glob) | |
50 | $ hg gc |
|
50 | $ hg gc | |
51 | finished: removed 1 of 2 files (0.00 GB to 0.00 GB) |
|
51 | finished: removed 1 of 2 files (0.00 GB to 0.00 GB) | |
52 | $ find $CACHEDIR -type f | sort |
|
52 | $ find $CACHEDIR -type f | sort | |
53 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob) |
|
53 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob) | |
54 | $TESTTMP/hgcache/repos |
|
54 | $TESTTMP/hgcache/repos | |
55 |
|
55 | |||
56 | # gc server cache |
|
56 | # gc server cache | |
57 |
|
57 | |||
58 | $ find master/.hg/remotefilelogcache -type f | sort |
|
58 | $ find master/.hg/remotefilelogcache -type f | sort | |
59 | master/.hg/remotefilelogcache/x/1406e74118627694268417491f018a4a883152f0 (glob) |
|
59 | master/.hg/remotefilelogcache/x/1406e74118627694268417491f018a4a883152f0 (glob) | |
60 | master/.hg/remotefilelogcache/x/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob) |
|
60 | master/.hg/remotefilelogcache/x/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob) | |
61 | $ hg gc master |
|
61 | $ hg gc master | |
62 | finished: removed 0 of 1 files (0.00 GB to 0.00 GB) |
|
62 | finished: removed 0 of 1 files (0.00 GB to 0.00 GB) | |
63 | $ find master/.hg/remotefilelogcache -type f | sort |
|
63 | $ find master/.hg/remotefilelogcache -type f | sort | |
64 | master/.hg/remotefilelogcache/x/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob) |
|
64 | master/.hg/remotefilelogcache/x/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob) | |
65 |
|
65 | |||
66 | # Test that GC keepset includes pullprefetch revset if it is configured |
|
66 | # Test that GC keepset includes pullprefetch revset if it is configured | |
67 |
|
67 | |||
68 | $ cd shallow |
|
68 | $ cd shallow | |
69 | $ cat >> .hg/hgrc <<EOF |
|
69 | $ cat >> .hg/hgrc <<EOF | |
70 | > [remotefilelog] |
|
70 | > [remotefilelog] | |
71 | > pullprefetch=all() |
|
71 | > pullprefetch=all() | |
72 | > EOF |
|
72 | > EOF | |
73 | $ hg prefetch |
|
73 | $ hg prefetch | |
74 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) |
|
74 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) | |
75 |
|
75 | |||
76 | $ cd .. |
|
76 | $ cd .. | |
77 | $ hg gc |
|
77 | $ hg gc | |
78 | finished: removed 0 of 2 files (0.00 GB to 0.00 GB) |
|
78 | finished: removed 0 of 2 files (0.00 GB to 0.00 GB) | |
79 |
|
79 | |||
80 | # Ensure that there are 2 versions of the file in cache |
|
80 | # Ensure that there are 2 versions of the file in cache | |
81 | $ find $CACHEDIR -type f | sort |
|
81 | $ find $CACHEDIR -type f | sort | |
82 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0 (glob) |
|
82 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0 (glob) | |
83 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob) |
|
83 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob) | |
84 | $TESTTMP/hgcache/repos (glob) |
|
84 | $TESTTMP/hgcache/repos (glob) | |
85 |
|
85 | |||
86 | # Test that if garbage collection on repack and repack on hg gc flags are set then incremental repack with garbage collector is run |
|
86 | # Test that if garbage collection on repack and repack on hg gc flags are set then incremental repack with garbage collector is run | |
87 |
|
87 | |||
88 | $ hg gc --config remotefilelog.gcrepack=True --config remotefilelog.repackonhggc=True |
|
88 | $ hg gc --config remotefilelog.gcrepack=True --config remotefilelog.repackonhggc=True | |
89 |
|
89 | |||
90 | # Ensure that loose files are repacked |
|
90 | # Ensure that loose files are repacked | |
91 | $ find $CACHEDIR -type f | sort |
|
91 | $ find $CACHEDIR -type f | sort | |
92 | $TESTTMP/hgcache/master/packs/320dab99b7e3f60512b97f347689625263d22cf5.dataidx |
|
92 | $TESTTMP/hgcache/master/packs/320dab99b7e3f60512b97f347689625263d22cf5.dataidx | |
93 | $TESTTMP/hgcache/master/packs/320dab99b7e3f60512b97f347689625263d22cf5.datapack |
|
93 | $TESTTMP/hgcache/master/packs/320dab99b7e3f60512b97f347689625263d22cf5.datapack | |
94 | $TESTTMP/hgcache/master/packs/837b83c1ef6485a336eb4421ac5973c0ec130fbb.histidx |
|
94 | $TESTTMP/hgcache/master/packs/837b83c1ef6485a336eb4421ac5973c0ec130fbb.histidx | |
95 | $TESTTMP/hgcache/master/packs/837b83c1ef6485a336eb4421ac5973c0ec130fbb.histpack |
|
95 | $TESTTMP/hgcache/master/packs/837b83c1ef6485a336eb4421ac5973c0ec130fbb.histpack | |
96 | $TESTTMP/hgcache/master/packs/repacklock |
|
|||
97 | $TESTTMP/hgcache/repos |
|
96 | $TESTTMP/hgcache/repos | |
98 |
|
97 | |||
99 | # Test that warning is displayed when there are no valid repos in repofile |
|
98 | # Test that warning is displayed when there are no valid repos in repofile | |
100 |
|
99 | |||
101 | $ cp $CACHEDIR/repos $CACHEDIR/repos.bak |
|
100 | $ cp $CACHEDIR/repos $CACHEDIR/repos.bak | |
102 | $ echo " " > $CACHEDIR/repos |
|
101 | $ echo " " > $CACHEDIR/repos | |
103 | $ hg gc |
|
102 | $ hg gc | |
104 | warning: no valid repos in repofile |
|
103 | warning: no valid repos in repofile | |
105 | $ mv $CACHEDIR/repos.bak $CACHEDIR/repos |
|
104 | $ mv $CACHEDIR/repos.bak $CACHEDIR/repos | |
106 |
|
105 | |||
107 | # Test that warning is displayed when the repo path is malformed |
|
106 | # Test that warning is displayed when the repo path is malformed | |
108 |
|
107 | |||
109 | $ printf "asdas\0das" >> $CACHEDIR/repos |
|
108 | $ printf "asdas\0das" >> $CACHEDIR/repos | |
110 | $ hg gc |
|
109 | $ hg gc | |
111 | abort: invalid path asdas\x00da: .*(null|NULL).* (re) |
|
110 | abort: invalid path asdas\x00da: .*(null|NULL).* (re) | |
112 | [255] |
|
111 | [255] |
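The gc runs above behave as a keepset-driven sweep: cached blobs still reachable from the configured revsets (for example pullprefetch=all()) are kept, stale blobs are unlinked, and the summary reports how many files were removed. A loose illustration of that sweep in Python, with hypothetical helper names rather than remotefilelog's actual implementation:

  import os

  def sweep_cache(cachedir, kept_paths):
      """Unlink cached blobs that are not in the keepset; return (removed, total)."""
      removed = total = 0
      for root, _dirs, files in os.walk(cachedir):
          for name in files:
              if name == 'repos':  # the repo index file is never collected
                  continue
              path = os.path.join(root, name)
              total += 1
              if path not in kept_paths:
                  os.unlink(path)
                  removed += 1
      return removed, total  # compare with the "removed N of M files" lines above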
@@ -1,387 +1,379 @@ | |||
1 | #require no-windows |
|
1 | #require no-windows | |
2 |
|
2 | |||
3 | $ . "$TESTDIR/remotefilelog-library.sh" |
|
3 | $ . "$TESTDIR/remotefilelog-library.sh" | |
4 | # devel.remotefilelog.ensurestart: reduce race condition with |
|
4 | # devel.remotefilelog.ensurestart: reduce race condition with | |
5 | # waiton{repack/prefetch} |
|
5 | # waiton{repack/prefetch} | |
6 | $ cat >> $HGRCPATH <<EOF |
|
6 | $ cat >> $HGRCPATH <<EOF | |
7 | > [remotefilelog] |
|
7 | > [remotefilelog] | |
8 | > fastdatapack=True |
|
8 | > fastdatapack=True | |
9 | > [devel] |
|
9 | > [devel] | |
10 | > remotefilelog.ensurestart=True |
|
10 | > remotefilelog.ensurestart=True | |
11 | > EOF |
|
11 | > EOF | |
12 |
|
12 | |||
13 | $ hg init master |
|
13 | $ hg init master | |
14 | $ cd master |
|
14 | $ cd master | |
15 | $ cat >> .hg/hgrc <<EOF |
|
15 | $ cat >> .hg/hgrc <<EOF | |
16 | > [remotefilelog] |
|
16 | > [remotefilelog] | |
17 | > server=True |
|
17 | > server=True | |
18 | > serverexpiration=-1 |
|
18 | > serverexpiration=-1 | |
19 | > EOF |
|
19 | > EOF | |
20 | $ echo x > x |
|
20 | $ echo x > x | |
21 | $ hg commit -qAm x |
|
21 | $ hg commit -qAm x | |
22 | $ echo x >> x |
|
22 | $ echo x >> x | |
23 | $ hg commit -qAm x2 |
|
23 | $ hg commit -qAm x2 | |
24 | $ cd .. |
|
24 | $ cd .. | |
25 |
|
25 | |||
26 | $ hgcloneshallow ssh://user@dummy/master shallow -q |
|
26 | $ hgcloneshallow ssh://user@dummy/master shallow -q | |
27 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) |
|
27 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) | |
28 |
|
28 | |||
29 | # Set the prefetchdays config to zero so that all commits are prefetched |
|
29 | # Set the prefetchdays config to zero so that all commits are prefetched | |
30 | # no matter what their creation date is. |
|
30 | # no matter what their creation date is. | |
31 | $ cd shallow |
|
31 | $ cd shallow | |
32 | $ cat >> .hg/hgrc <<EOF |
|
32 | $ cat >> .hg/hgrc <<EOF | |
33 | > [remotefilelog] |
|
33 | > [remotefilelog] | |
34 | > prefetchdays=0 |
|
34 | > prefetchdays=0 | |
35 | > EOF |
|
35 | > EOF | |
36 | $ cd .. |
|
36 | $ cd .. | |
37 |
|
37 | |||
38 | # Test that repack cleans up the old files and creates new packs |
|
38 | # Test that repack cleans up the old files and creates new packs | |
39 |
|
39 | |||
40 | $ cd shallow |
|
40 | $ cd shallow | |
41 | $ find $CACHEDIR | sort |
|
41 | $ find $CACHEDIR | sort | |
42 | $TESTTMP/hgcache |
|
42 | $TESTTMP/hgcache | |
43 | $TESTTMP/hgcache/master |
|
43 | $TESTTMP/hgcache/master | |
44 | $TESTTMP/hgcache/master/11 |
|
44 | $TESTTMP/hgcache/master/11 | |
45 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072 |
|
45 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072 | |
46 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/aee31534993a501858fb6dd96a065671922e7d51 |
|
46 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/aee31534993a501858fb6dd96a065671922e7d51 | |
47 | $TESTTMP/hgcache/repos |
|
47 | $TESTTMP/hgcache/repos | |
48 |
|
48 | |||
49 | $ hg repack |
|
49 | $ hg repack | |
50 |
|
50 | |||
51 | $ find $CACHEDIR | sort |
|
51 | $ find $CACHEDIR | sort | |
52 | $TESTTMP/hgcache |
|
52 | $TESTTMP/hgcache | |
53 | $TESTTMP/hgcache/master |
|
53 | $TESTTMP/hgcache/master | |
54 | $TESTTMP/hgcache/master/packs |
|
54 | $TESTTMP/hgcache/master/packs | |
55 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx |
|
55 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx | |
56 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack |
|
56 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack | |
57 | $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx |
|
57 | $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx | |
58 | $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack |
|
58 | $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack | |
59 | $TESTTMP/hgcache/master/packs/repacklock |
|
|||
60 | $TESTTMP/hgcache/repos |
|
59 | $TESTTMP/hgcache/repos | |
61 |
|
60 | |||
62 | # Test that the packs are readonly |
|
61 | # Test that the packs are readonly | |
63 | $ ls_l $CACHEDIR/master/packs |
|
62 | $ ls_l $CACHEDIR/master/packs | |
64 | -r--r--r-- 1145 1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx |
|
63 | -r--r--r-- 1145 1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx | |
65 | -r--r--r-- 172 1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack |
|
64 | -r--r--r-- 172 1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack | |
66 | -r--r--r-- 1074 b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx |
|
65 | -r--r--r-- 1074 b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx | |
67 | -r--r--r-- 72 b1e0cfc7f345e408a7825e3081501959488d59ce.datapack |
|
66 | -r--r--r-- 72 b1e0cfc7f345e408a7825e3081501959488d59ce.datapack | |
68 | -rw-r--r-- 0 repacklock |
|
|||
69 |
|
67 | |||
70 | # Test that the data in the new packs is accessible |
|
68 | # Test that the data in the new packs is accessible | |
71 | $ hg cat -r . x |
|
69 | $ hg cat -r . x | |
72 | x |
|
70 | x | |
73 | x |
|
71 | x | |
74 |
|
72 | |||
75 | # Test that adding new data and repacking it results in the loose data and the |
|
73 | # Test that adding new data and repacking it results in the loose data and the | |
76 | # old packs being combined. |
|
74 | # old packs being combined. | |
77 |
|
75 | |||
78 | $ cd ../master |
|
76 | $ cd ../master | |
79 | $ echo x >> x |
|
77 | $ echo x >> x | |
80 | $ hg commit -m x3 |
|
78 | $ hg commit -m x3 | |
81 | $ cd ../shallow |
|
79 | $ cd ../shallow | |
82 | $ hg pull -q |
|
80 | $ hg pull -q | |
83 | $ hg up -q tip |
|
81 | $ hg up -q tip | |
84 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
82 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
85 |
|
83 | |||
86 | $ find $CACHEDIR -type f | sort |
|
84 | $ find $CACHEDIR -type f | sort | |
87 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/d4a3ed9310e5bd9887e3bf779da5077efab28216 |
|
85 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/d4a3ed9310e5bd9887e3bf779da5077efab28216 | |
88 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx |
|
86 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx | |
89 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack |
|
87 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack | |
90 | $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx |
|
88 | $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx | |
91 | $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack |
|
89 | $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack | |
92 | $TESTTMP/hgcache/master/packs/repacklock |
|
|||
93 | $TESTTMP/hgcache/repos |
|
90 | $TESTTMP/hgcache/repos | |
94 |
|
91 | |||
95 | $ hg repack --traceback |
|
92 | $ hg repack --traceback | |
96 |
|
93 | |||
97 | $ find $CACHEDIR -type f | sort |
|
94 | $ find $CACHEDIR -type f | sort | |
98 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx |
|
95 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx | |
99 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack |
|
96 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack | |
100 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx |
|
97 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx | |
101 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack |
|
98 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack | |
102 | $TESTTMP/hgcache/master/packs/repacklock |
|
|||
103 | $TESTTMP/hgcache/repos |
|
99 | $TESTTMP/hgcache/repos | |
104 |
|
100 | |||
105 | # Verify all the file data is still available |
|
101 | # Verify all the file data is still available | |
106 | $ hg cat -r . x |
|
102 | $ hg cat -r . x | |
107 | x |
|
103 | x | |
108 | x |
|
104 | x | |
109 | x |
|
105 | x | |
110 | $ hg cat -r '.^' x |
|
106 | $ hg cat -r '.^' x | |
111 | x |
|
107 | x | |
112 | x |
|
108 | x | |
113 |
|
109 | |||
114 | # Test that repacking again without new data does not delete the pack files |
|
110 | # Test that repacking again without new data does not delete the pack files | |
115 | # and does not change the pack names |
|
111 | # and does not change the pack names | |
116 | $ hg repack |
|
112 | $ hg repack | |
117 | $ find $CACHEDIR -type f | sort |
|
113 | $ find $CACHEDIR -type f | sort | |
118 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx |
|
114 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx | |
119 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack |
|
115 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack | |
120 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx |
|
116 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx | |
121 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack |
|
117 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack | |
122 | $TESTTMP/hgcache/master/packs/repacklock |
|
|||
123 | $TESTTMP/hgcache/repos |
|
118 | $TESTTMP/hgcache/repos | |
124 |
|
119 | |||
125 | # Run two repacks at once |
|
120 | # Run two repacks at once | |
126 | $ hg repack --config "hooks.prerepack=sleep 3" & |
|
121 | $ hg repack --config "hooks.prerepack=sleep 3" & | |
127 | $ sleep 1 |
|
122 | $ sleep 1 | |
128 | $ hg repack |
|
123 | $ hg repack | |
129 | skipping repack - another repack is already running |
|
124 | skipping repack - another repack is already running | |
130 | $ hg debugwaitonrepack >/dev/null 2>&1 |
|
125 | $ hg debugwaitonrepack >/dev/null 2>&1 | |
131 |
|
126 | |||
132 | # Run repack in the background |
|
127 | # Run repack in the background | |
133 | $ cd ../master |
|
128 | $ cd ../master | |
134 | $ echo x >> x |
|
129 | $ echo x >> x | |
135 | $ hg commit -m x4 |
|
130 | $ hg commit -m x4 | |
136 | $ cd ../shallow |
|
131 | $ cd ../shallow | |
137 | $ hg pull -q |
|
132 | $ hg pull -q | |
138 | $ hg up -q tip |
|
133 | $ hg up -q tip | |
139 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
134 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
140 | $ find $CACHEDIR -type f | sort |
|
135 | $ find $CACHEDIR -type f | sort | |
141 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1bb2e6237e035c8f8ef508e281f1ce075bc6db72 |
|
136 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1bb2e6237e035c8f8ef508e281f1ce075bc6db72 | |
142 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx |
|
137 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx | |
143 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack |
|
138 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack | |
144 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx |
|
139 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx | |
145 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack |
|
140 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack | |
146 | $TESTTMP/hgcache/master/packs/repacklock |
|
|||
147 | $TESTTMP/hgcache/repos |
|
141 | $TESTTMP/hgcache/repos | |
148 |
|
142 | |||
149 | $ hg repack --background |
|
143 | $ hg repack --background | |
150 | (running background repack) |
|
144 | (running background repack) | |
151 | $ sleep 0.5 |
|
145 | $ sleep 0.5 | |
152 | $ hg debugwaitonrepack >/dev/null 2>&1 |
|
146 | $ hg debugwaitonrepack >/dev/null 2>&1 | |
153 | $ find $CACHEDIR -type f | sort |
|
147 | $ find $CACHEDIR -type f | sort | |
154 | $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0.dataidx |
|
148 | $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0.dataidx | |
155 | $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0.datapack |
|
149 | $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0.datapack | |
156 | $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histidx |
|
150 | $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histidx | |
157 | $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histpack |
|
151 | $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histpack | |
158 | $TESTTMP/hgcache/master/packs/repacklock |
|
|||
159 | $TESTTMP/hgcache/repos |
|
152 | $TESTTMP/hgcache/repos | |
160 |
|
153 | |||
161 | # Test debug commands |
|
154 | # Test debug commands | |
162 |
|
155 | |||
163 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack |
|
156 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack | |
164 | $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0: |
|
157 | $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0: | |
165 | x: |
|
158 | x: | |
166 | Node Delta Base Delta Length Blob Size |
|
159 | Node Delta Base Delta Length Blob Size | |
167 | 1bb2e6237e03 000000000000 8 8 |
|
160 | 1bb2e6237e03 000000000000 8 8 | |
168 | d4a3ed9310e5 1bb2e6237e03 12 6 |
|
161 | d4a3ed9310e5 1bb2e6237e03 12 6 | |
169 | aee31534993a d4a3ed9310e5 12 4 |
|
162 | aee31534993a d4a3ed9310e5 12 4 | |
170 |
|
163 | |||
171 | Total: 32 18 (77.8% bigger) |
|
164 | Total: 32 18 (77.8% bigger) | |
172 | $ hg debugdatapack --long $TESTTMP/hgcache/master/packs/*.datapack |
|
165 | $ hg debugdatapack --long $TESTTMP/hgcache/master/packs/*.datapack | |
173 | $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0: |
|
166 | $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0: | |
174 | x: |
|
167 | x: | |
175 | Node Delta Base Delta Length Blob Size |
|
168 | Node Delta Base Delta Length Blob Size | |
176 | 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 8 8 |
|
169 | 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 8 8 | |
177 | d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 12 6 |
|
170 | d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 12 6 | |
178 | aee31534993a501858fb6dd96a065671922e7d51 d4a3ed9310e5bd9887e3bf779da5077efab28216 12 4 |
|
171 | aee31534993a501858fb6dd96a065671922e7d51 d4a3ed9310e5bd9887e3bf779da5077efab28216 12 4 | |
179 |
|
172 | |||
180 | Total: 32 18 (77.8% bigger) |
|
173 | Total: 32 18 (77.8% bigger) | |
181 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack --node d4a3ed9310e5bd9887e3bf779da5077efab28216 |
|
174 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack --node d4a3ed9310e5bd9887e3bf779da5077efab28216 | |
182 | $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0: |
|
175 | $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0: | |
183 |
|
176 | |||
184 | x |
|
177 | x | |
185 | Node Delta Base Delta SHA1 Delta Length |
|
178 | Node Delta Base Delta SHA1 Delta Length | |
186 | d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 77029ab56e83ea2115dd53ff87483682abe5d7ca 12 |
|
179 | d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 77029ab56e83ea2115dd53ff87483682abe5d7ca 12 | |
187 | Node Delta Base Delta SHA1 Delta Length |
|
180 | Node Delta Base Delta SHA1 Delta Length | |
188 | 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 7ca8c71a64f7b56380e77573da2f7a5fdd2ecdb5 8 |
|
181 | 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 7ca8c71a64f7b56380e77573da2f7a5fdd2ecdb5 8 | |
189 | $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx |
|
182 | $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx | |
190 |
|
183 | |||
191 | x |
|
184 | x | |
192 | Node P1 Node P2 Node Link Node Copy From |
|
185 | Node P1 Node P2 Node Link Node Copy From | |
193 | 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7 |
|
186 | 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7 | |
194 | d4a3ed9310e5 aee31534993a 000000000000 421535db10b6 |
|
187 | d4a3ed9310e5 aee31534993a 000000000000 421535db10b6 | |
195 | aee31534993a 1406e7411862 000000000000 a89d614e2364 |
|
188 | aee31534993a 1406e7411862 000000000000 a89d614e2364 | |
196 | 1406e7411862 000000000000 000000000000 b292c1e3311f |
|
189 | 1406e7411862 000000000000 000000000000 b292c1e3311f | |
197 |
|
190 | |||
198 | # Test copy tracing from a pack |
|
191 | # Test copy tracing from a pack | |
199 | $ cd ../master |
|
192 | $ cd ../master | |
200 | $ hg mv x y |
|
193 | $ hg mv x y | |
201 | $ hg commit -m 'move x to y' |
|
194 | $ hg commit -m 'move x to y' | |
202 | $ cd ../shallow |
|
195 | $ cd ../shallow | |
203 | $ hg pull -q |
|
196 | $ hg pull -q | |
204 | $ hg up -q tip |
|
197 | $ hg up -q tip | |
205 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
198 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
206 | $ hg repack |
|
199 | $ hg repack | |
207 | $ hg log -f y -T '{desc}\n' |
|
200 | $ hg log -f y -T '{desc}\n' | |
208 | move x to y |
|
201 | move x to y | |
209 | x4 |
|
202 | x4 | |
210 | x3 |
|
203 | x3 | |
211 | x2 |
|
204 | x2 | |
212 | x |
|
205 | x | |
213 |
|
206 | |||
214 | # Test copy trace across rename and back |
|
207 | # Test copy trace across rename and back | |
215 | $ cp -R $TESTTMP/hgcache/master/packs $TESTTMP/backuppacks |
|
208 | $ cp -R $TESTTMP/hgcache/master/packs $TESTTMP/backuppacks | |
216 | $ cd ../master |
|
209 | $ cd ../master | |
217 | $ hg mv y x |
|
210 | $ hg mv y x | |
218 | $ hg commit -m 'move y back to x' |
|
211 | $ hg commit -m 'move y back to x' | |
219 | $ hg revert -r 0 x |
|
212 | $ hg revert -r 0 x | |
220 | $ mv x y |
|
213 | $ mv x y | |
221 | $ hg add y |
|
214 | $ hg add y | |
222 | $ echo >> y |
|
215 | $ echo >> y | |
223 | $ hg revert x |
|
216 | $ hg revert x | |
224 | $ hg commit -m 'add y back without metadata' |
|
217 | $ hg commit -m 'add y back without metadata' | |
225 | $ cd ../shallow |
|
218 | $ cd ../shallow | |
226 | $ hg pull -q |
|
219 | $ hg pull -q | |
227 | $ hg up -q tip |
|
220 | $ hg up -q tip | |
228 | 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over * (glob) |
|
221 | 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over * (glob) | |
229 | $ hg repack |
|
222 | $ hg repack | |
230 | $ ls $TESTTMP/hgcache/master/packs |
|
223 | $ ls $TESTTMP/hgcache/master/packs | |
231 | bfd60adb76018bb952e27cd23fc151bf94865d7d.histidx |
|
224 | bfd60adb76018bb952e27cd23fc151bf94865d7d.histidx | |
232 | bfd60adb76018bb952e27cd23fc151bf94865d7d.histpack |
|
225 | bfd60adb76018bb952e27cd23fc151bf94865d7d.histpack | |
233 | fb3aa57b22789ebcc45706c352e2d6af099c5816.dataidx |
|
226 | fb3aa57b22789ebcc45706c352e2d6af099c5816.dataidx | |
234 | fb3aa57b22789ebcc45706c352e2d6af099c5816.datapack |
|
227 | fb3aa57b22789ebcc45706c352e2d6af099c5816.datapack | |
235 | repacklock |
|
|||
236 | $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx |
|
228 | $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx | |
237 |
|
229 | |||
238 | x |
|
230 | x | |
239 | Node P1 Node P2 Node Link Node Copy From |
|
231 | Node P1 Node P2 Node Link Node Copy From | |
240 | cd410a44d584 577959738234 000000000000 609547eda446 y |
|
232 | cd410a44d584 577959738234 000000000000 609547eda446 y | |
241 | 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7 |
|
233 | 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7 | |
242 | d4a3ed9310e5 aee31534993a 000000000000 421535db10b6 |
|
234 | d4a3ed9310e5 aee31534993a 000000000000 421535db10b6 | |
243 | aee31534993a 1406e7411862 000000000000 a89d614e2364 |
|
235 | aee31534993a 1406e7411862 000000000000 a89d614e2364 | |
244 | 1406e7411862 000000000000 000000000000 b292c1e3311f |
|
236 | 1406e7411862 000000000000 000000000000 b292c1e3311f | |
245 |
|
237 | |||
246 | y |
|
238 | y | |
247 | Node P1 Node P2 Node Link Node Copy From |
|
239 | Node P1 Node P2 Node Link Node Copy From | |
248 | 577959738234 1bb2e6237e03 000000000000 c7faf2fc439a x |
|
240 | 577959738234 1bb2e6237e03 000000000000 c7faf2fc439a x | |
249 | 21f46f2721e7 000000000000 000000000000 d6868642b790 |
|
241 | 21f46f2721e7 000000000000 000000000000 d6868642b790 | |
250 | $ hg strip -r '.^' |
|
242 | $ hg strip -r '.^' | |
251 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
243 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved | |
252 | saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob) |
|
244 | saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob) | |
253 | $ hg -R ../master strip -r '.^' |
|
245 | $ hg -R ../master strip -r '.^' | |
254 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
246 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved | |
255 | saved backup bundle to $TESTTMP/master/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob) |
|
247 | saved backup bundle to $TESTTMP/master/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob) | |
256 |
|
248 | |||
257 | $ rm -rf $TESTTMP/hgcache/master/packs |
|
249 | $ rm -rf $TESTTMP/hgcache/master/packs | |
258 | $ cp -R $TESTTMP/backuppacks $TESTTMP/hgcache/master/packs |
|
250 | $ cp -R $TESTTMP/backuppacks $TESTTMP/hgcache/master/packs | |
259 |
|
251 | |||
260 | # Test repacking datapack without history |
|
252 | # Test repacking datapack without history | |
261 | $ rm -rf $CACHEDIR/master/packs/*hist* |
|
253 | $ rm -rf $CACHEDIR/master/packs/*hist* | |
262 | $ hg repack |
|
254 | $ hg repack | |
263 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack |
|
255 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack | |
264 | $TESTTMP/hgcache/master/packs/922aca43dbbeda4d250565372e8892ec7b08da6a: |
|
256 | $TESTTMP/hgcache/master/packs/922aca43dbbeda4d250565372e8892ec7b08da6a: | |
265 | x: |
|
257 | x: | |
266 | Node Delta Base Delta Length Blob Size |
|
258 | Node Delta Base Delta Length Blob Size | |
267 | 1bb2e6237e03 000000000000 8 8 |
|
259 | 1bb2e6237e03 000000000000 8 8 | |
268 | d4a3ed9310e5 1bb2e6237e03 12 6 |
|
260 | d4a3ed9310e5 1bb2e6237e03 12 6 | |
269 | aee31534993a d4a3ed9310e5 12 4 |
|
261 | aee31534993a d4a3ed9310e5 12 4 | |
270 |
|
262 | |||
271 | Total: 32 18 (77.8% bigger) |
|
263 | Total: 32 18 (77.8% bigger) | |
272 | y: |
|
264 | y: | |
273 | Node Delta Base Delta Length Blob Size |
|
265 | Node Delta Base Delta Length Blob Size | |
274 | 577959738234 000000000000 70 8 |
|
266 | 577959738234 000000000000 70 8 | |
275 |
|
267 | |||
276 | Total: 70 8 (775.0% bigger) |
|
268 | Total: 70 8 (775.0% bigger) | |
277 |
|
269 | |||
278 | $ hg cat -r ".^" x |
|
270 | $ hg cat -r ".^" x | |
279 | x |
|
271 | x | |
280 | x |
|
272 | x | |
281 | x |
|
273 | x | |
282 | x |
|
274 | x | |
283 |
|
275 | |||
284 | Incremental repack |
|
276 | Incremental repack | |
285 | $ rm -rf $CACHEDIR/master/packs/* |
|
277 | $ rm -rf $CACHEDIR/master/packs/* | |
286 | $ cat >> .hg/hgrc <<EOF |
|
278 | $ cat >> .hg/hgrc <<EOF | |
287 | > [remotefilelog] |
|
279 | > [remotefilelog] | |
288 | > data.generations=60 |
|
280 | > data.generations=60 | |
289 | > 150 |
|
281 | > 150 | |
290 | > EOF |
|
282 | > EOF | |
291 |
|
283 | |||
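The multi-line data.generations value above (60 and 150) sets size boundaries for incremental repack; the steps that follow show a size class holding several small packs being folded into one pack while classes holding a single pack are left untouched. A hedged sketch of that bucketing in Python, using hypothetical helpers rather than the real repack policy:

  import bisect

  def size_bucket(size, boundaries=(60, 150)):
      """Map a pack size in bytes to a size-class index."""
      return bisect.bisect_left(sorted(boundaries), size)

  def buckets_to_merge(pack_sizes, boundaries=(60, 150)):
      """Group packs by size class; only classes with several packs get merged."""
      classes = {}
      for name, size in pack_sizes.items():
          classes.setdefault(size_bucket(size, boundaries), []).append(name)
      return [names for names in classes.values() if len(names) > 1]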
292 | Single pack - repack does nothing |
|
284 | Single pack - repack does nothing | |
293 | $ hg prefetch -r 0 |
|
285 | $ hg prefetch -r 0 | |
294 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
286 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
295 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
287 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
296 | [1] |
|
288 | [1] | |
297 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
289 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
298 | [1] |
|
290 | [1] | |
299 | $ hg repack --incremental |
|
291 | $ hg repack --incremental | |
300 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
292 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
301 | -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack |
|
293 | -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack | |
302 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
294 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
303 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack |
|
295 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack | |
304 |
|
296 | |||
305 | 3 gen1 packs, 1 gen0 pack - packs 3 gen1 into 1 |
|
297 | 3 gen1 packs, 1 gen0 pack - packs 3 gen1 into 1 | |
306 | $ hg prefetch -r 1 |
|
298 | $ hg prefetch -r 1 | |
307 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
299 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
308 | $ hg prefetch -r 2 |
|
300 | $ hg prefetch -r 2 | |
309 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
301 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
310 | $ hg prefetch -r 3 |
|
302 | $ hg prefetch -r 3 | |
311 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
303 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
312 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
304 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
313 | -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack |
|
305 | -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack | |
314 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
306 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
315 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack |
|
307 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack | |
316 | $ hg repack --incremental |
|
308 | $ hg repack --incremental | |
317 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
309 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
318 | -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack |
|
310 | -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack | |
319 | -r--r--r-- 226 39443fa1064182e93d968b5cba292eb5283260d0.datapack |
|
311 | -r--r--r-- 226 39443fa1064182e93d968b5cba292eb5283260d0.datapack | |
320 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
312 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
321 | -r--r--r-- 336 604552d403a1381749faf656feca0ca265a6d52c.histpack |
|
313 | -r--r--r-- 336 604552d403a1381749faf656feca0ca265a6d52c.histpack | |
322 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack |
|
314 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack | |
323 |
|
315 | |||
324 | 1 gen3 pack, 1 gen0 pack - does nothing |
|
316 | 1 gen3 pack, 1 gen0 pack - does nothing | |
325 | $ hg repack --incremental |
|
317 | $ hg repack --incremental | |
326 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
318 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
327 | -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack |
|
319 | -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack | |
328 | -r--r--r-- 226 39443fa1064182e93d968b5cba292eb5283260d0.datapack |
|
320 | -r--r--r-- 226 39443fa1064182e93d968b5cba292eb5283260d0.datapack | |
329 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
321 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
330 | -r--r--r-- 336 604552d403a1381749faf656feca0ca265a6d52c.histpack |
|
322 | -r--r--r-- 336 604552d403a1381749faf656feca0ca265a6d52c.histpack | |
331 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack |
|
323 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack | |
332 |
|
324 | |||
333 | Pull should run background repack |
|
325 | Pull should run background repack | |
334 | $ cat >> .hg/hgrc <<EOF |
|
326 | $ cat >> .hg/hgrc <<EOF | |
335 | > [remotefilelog] |
|
327 | > [remotefilelog] | |
336 | > backgroundrepack=True |
|
328 | > backgroundrepack=True | |
337 | > EOF |
|
329 | > EOF | |
338 | $ clearcache |
|
330 | $ clearcache | |
339 | $ hg prefetch -r 0 |
|
331 | $ hg prefetch -r 0 | |
340 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
332 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
341 | $ hg prefetch -r 1 |
|
333 | $ hg prefetch -r 1 | |
342 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
334 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
343 | $ hg prefetch -r 2 |
|
335 | $ hg prefetch -r 2 | |
344 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
336 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
345 | $ hg prefetch -r 3 |
|
337 | $ hg prefetch -r 3 | |
346 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
338 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
347 |
|
339 | |||
348 | $ hg pull |
|
340 | $ hg pull | |
349 | pulling from ssh://user@dummy/master |
|
341 | pulling from ssh://user@dummy/master | |
350 | searching for changes |
|
342 | searching for changes | |
351 | no changes found |
|
343 | no changes found | |
352 | (running background incremental repack) |
|
344 | (running background incremental repack) | |
353 | $ sleep 0.5 |
|
345 | $ sleep 0.5 | |
354 | $ hg debugwaitonrepack >/dev/null 2>&1 |
|
346 | $ hg debugwaitonrepack >/dev/null 2>&1 | |
355 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
347 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
356 | -r--r--r-- 303 156a6c1c83aeb69422d7936e0a46ba9bc06a71c0.datapack |
|
348 | -r--r--r-- 303 156a6c1c83aeb69422d7936e0a46ba9bc06a71c0.datapack | |
357 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
349 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
358 | -r--r--r-- 336 604552d403a1381749faf656feca0ca265a6d52c.histpack |
|
350 | -r--r--r-- 336 604552d403a1381749faf656feca0ca265a6d52c.histpack | |
359 |
|
351 | |||
360 | Test environment variable resolution |
|
352 | Test environment variable resolution | |
361 | $ CACHEPATH=$TESTTMP/envcache hg prefetch --config 'remotefilelog.cachepath=$CACHEPATH' |
|
353 | $ CACHEPATH=$TESTTMP/envcache hg prefetch --config 'remotefilelog.cachepath=$CACHEPATH' | |
362 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
354 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
363 | $ find $TESTTMP/envcache | sort |
|
355 | $ find $TESTTMP/envcache | sort | |
364 | $TESTTMP/envcache |
|
356 | $TESTTMP/envcache | |
365 | $TESTTMP/envcache/master |
|
357 | $TESTTMP/envcache/master | |
366 | $TESTTMP/envcache/master/95 |
|
358 | $TESTTMP/envcache/master/95 | |
367 | $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a |
|
359 | $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a | |
368 | $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/577959738234a1eb241ed3ed4b22a575833f56e0 |
|
360 | $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/577959738234a1eb241ed3ed4b22a575833f56e0 | |
369 | $TESTTMP/envcache/repos |
|
361 | $TESTTMP/envcache/repos | |
370 |
|
362 | |||
371 | Test local remotefilelog blob is correct when based on a pack |
|
363 | Test local remotefilelog blob is correct when based on a pack | |
372 | $ hg prefetch -r . |
|
364 | $ hg prefetch -r . | |
373 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
365 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
374 | $ echo >> y |
|
366 | $ echo >> y | |
375 | $ hg commit -m y2 |
|
367 | $ hg commit -m y2 | |
376 | $ hg debugremotefilelog .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808 |
|
368 | $ hg debugremotefilelog .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808 | |
377 | size: 9 bytes |
|
369 | size: 9 bytes | |
378 | path: .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808 |
|
370 | path: .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808 | |
379 | key: b70860edba4f |
|
371 | key: b70860edba4f | |
380 |
|
372 | |||
381 | node => p1 p2 linknode copyfrom |
|
373 | node => p1 p2 linknode copyfrom | |
382 | b70860edba4f => 577959738234 000000000000 08d3fbc98c48 |
|
374 | b70860edba4f => 577959738234 000000000000 08d3fbc98c48 | |
383 | 577959738234 => 1bb2e6237e03 000000000000 c7faf2fc439a x |
|
375 | 577959738234 => 1bb2e6237e03 000000000000 c7faf2fc439a x | |
384 | 1bb2e6237e03 => d4a3ed9310e5 000000000000 0b03bbc9e1e7 |
|
376 | 1bb2e6237e03 => d4a3ed9310e5 000000000000 0b03bbc9e1e7 | |
385 | d4a3ed9310e5 => aee31534993a 000000000000 421535db10b6 |
|
377 | d4a3ed9310e5 => aee31534993a 000000000000 421535db10b6 | |
386 | aee31534993a => 1406e7411862 000000000000 a89d614e2364 |
|
378 | aee31534993a => 1406e7411862 000000000000 a89d614e2364 | |
387 | 1406e7411862 => 000000000000 000000000000 b292c1e3311f |
|
379 | 1406e7411862 => 000000000000 000000000000 b292c1e3311f |
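In the hg debugdatapack tables above, the Total line compares the bytes stored as deltas with the full-text blob sizes, and the percentage is the relative overhead: 32 delta bytes against 18 blob bytes is 77.8% bigger, and 70 against 8 is 775.0% bigger. A small verification snippet (not part of the patch):

  def pct_bigger(delta_total, blob_total):
      """Relative size of stored deltas versus full-text blobs, in percent."""
      return (delta_total - blob_total) * 100.0 / blob_total

  # x: deltas 8 + 12 + 12 = 32 bytes, blobs 8 + 6 + 4 = 18 bytes
  assert round(pct_bigger(32, 18), 1) == 77.8
  # y: a single 70-byte delta for an 8-byte blob
  assert round(pct_bigger(70, 8), 1) == 775.0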
@@ -1,468 +1,459 @@ | |||
1 | #require no-windows |
|
1 | #require no-windows | |
2 |
|
2 | |||
3 | $ . "$TESTDIR/remotefilelog-library.sh" |
|
3 | $ . "$TESTDIR/remotefilelog-library.sh" | |
4 | # devel.remotefilelog.ensurestart: reduce race condition with |
|
4 | # devel.remotefilelog.ensurestart: reduce race condition with | |
5 | # waiton{repack/prefetch} |
|
5 | # waiton{repack/prefetch} | |
6 | $ cat >> $HGRCPATH <<EOF |
|
6 | $ cat >> $HGRCPATH <<EOF | |
7 | > [devel] |
|
7 | > [devel] | |
8 | > remotefilelog.ensurestart=True |
|
8 | > remotefilelog.ensurestart=True | |
9 | > EOF |
|
9 | > EOF | |
10 |
|
10 | |||
11 | $ hg init master |
|
11 | $ hg init master | |
12 | $ cd master |
|
12 | $ cd master | |
13 | $ cat >> .hg/hgrc <<EOF |
|
13 | $ cat >> .hg/hgrc <<EOF | |
14 | > [remotefilelog] |
|
14 | > [remotefilelog] | |
15 | > server=True |
|
15 | > server=True | |
16 | > serverexpiration=-1 |
|
16 | > serverexpiration=-1 | |
17 | > EOF |
|
17 | > EOF | |
18 | $ echo x > x |
|
18 | $ echo x > x | |
19 | $ hg commit -qAm x |
|
19 | $ hg commit -qAm x | |
20 | $ echo x >> x |
|
20 | $ echo x >> x | |
21 | $ hg commit -qAm x2 |
|
21 | $ hg commit -qAm x2 | |
22 | $ cd .. |
|
22 | $ cd .. | |
23 |
|
23 | |||
24 | $ hgcloneshallow ssh://user@dummy/master shallow -q |
|
24 | $ hgcloneshallow ssh://user@dummy/master shallow -q | |
25 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) |
|
25 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) | |
26 |
|
26 | |||
27 | # Set the prefetchdays config to zero so that all commits are prefetched |
|
27 | # Set the prefetchdays config to zero so that all commits are prefetched | |
28 | # no matter what their creation date is. |
|
28 | # no matter what their creation date is. | |
29 | $ cd shallow |
|
29 | $ cd shallow | |
30 | $ cat >> .hg/hgrc <<EOF |
|
30 | $ cat >> .hg/hgrc <<EOF | |
31 | > [remotefilelog] |
|
31 | > [remotefilelog] | |
32 | > prefetchdays=0 |
|
32 | > prefetchdays=0 | |
33 | > EOF |
|
33 | > EOF | |
34 | $ cd .. |
|
34 | $ cd .. | |
35 |
|
35 | |||
36 | # Test that repack cleans up the old files and creates new packs |
|
36 | # Test that repack cleans up the old files and creates new packs | |
37 |
|
37 | |||
38 | $ cd shallow |
|
38 | $ cd shallow | |
39 | $ find $CACHEDIR | sort |
|
39 | $ find $CACHEDIR | sort | |
40 | $TESTTMP/hgcache |
|
40 | $TESTTMP/hgcache | |
41 | $TESTTMP/hgcache/master |
|
41 | $TESTTMP/hgcache/master | |
42 | $TESTTMP/hgcache/master/11 |
|
42 | $TESTTMP/hgcache/master/11 | |
43 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072 |
|
43 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072 | |
44 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/aee31534993a501858fb6dd96a065671922e7d51 |
|
44 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/aee31534993a501858fb6dd96a065671922e7d51 | |
45 | $TESTTMP/hgcache/repos |
|
45 | $TESTTMP/hgcache/repos | |
46 |
|
46 | |||
47 | $ hg repack |
|
47 | $ hg repack | |
48 |
|
48 | |||
49 | $ find $CACHEDIR | sort |
|
49 | $ find $CACHEDIR | sort | |
50 | $TESTTMP/hgcache |
|
50 | $TESTTMP/hgcache | |
51 | $TESTTMP/hgcache/master |
|
51 | $TESTTMP/hgcache/master | |
52 | $TESTTMP/hgcache/master/packs |
|
52 | $TESTTMP/hgcache/master/packs | |
53 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx |
|
53 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx | |
54 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack |
|
54 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack | |
55 | $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx |
|
55 | $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx | |
56 | $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack |
|
56 | $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack | |
57 | $TESTTMP/hgcache/master/packs/repacklock |
|
|||
58 | $TESTTMP/hgcache/repos |
|
57 | $TESTTMP/hgcache/repos | |
59 |
|
58 | |||
60 | # Test that the packs are readonly |
|
59 | # Test that the packs are readonly | |
61 | $ ls_l $CACHEDIR/master/packs |
|
60 | $ ls_l $CACHEDIR/master/packs | |
62 | -r--r--r-- 1145 1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx |
|
61 | -r--r--r-- 1145 1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx | |
63 | -r--r--r-- 172 1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack |
|
62 | -r--r--r-- 172 1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack | |
64 | -r--r--r-- 1074 b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx |
|
63 | -r--r--r-- 1074 b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx | |
65 | -r--r--r-- 72 b1e0cfc7f345e408a7825e3081501959488d59ce.datapack |
|
64 | -r--r--r-- 72 b1e0cfc7f345e408a7825e3081501959488d59ce.datapack | |
66 | -rw-r--r-- 0 repacklock |
|
|||
67 |
|
65 | |||
68 | # Test that the data in the new packs is accessible |
|
66 | # Test that the data in the new packs is accessible | |
69 | $ hg cat -r . x |
|
67 | $ hg cat -r . x | |
70 | x |
|
68 | x | |
71 | x |
|
69 | x | |
72 |
|
70 | |||
73 | # Test that adding new data and repacking it results in the loose data and the |
|
71 | # Test that adding new data and repacking it results in the loose data and the | |
74 | # old packs being combined. |
|
72 | # old packs being combined. | |
75 |
|
73 | |||
76 | $ cd ../master |
|
74 | $ cd ../master | |
77 | $ echo x >> x |
|
75 | $ echo x >> x | |
78 | $ hg commit -m x3 |
|
76 | $ hg commit -m x3 | |
79 | $ cd ../shallow |
|
77 | $ cd ../shallow | |
80 | $ hg pull -q |
|
78 | $ hg pull -q | |
81 | $ hg up -q tip |
|
79 | $ hg up -q tip | |
82 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
80 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
83 |
|
81 | |||
84 | $ find $CACHEDIR -type f | sort |
|
82 | $ find $CACHEDIR -type f | sort | |
85 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/d4a3ed9310e5bd9887e3bf779da5077efab28216 |
|
83 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/d4a3ed9310e5bd9887e3bf779da5077efab28216 | |
86 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx |
|
84 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx | |
87 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack |
|
85 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack | |
88 | $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx |
|
86 | $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx | |
89 | $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack |
|
87 | $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack | |
90 | $TESTTMP/hgcache/master/packs/repacklock |
|
|||
91 | $TESTTMP/hgcache/repos |
|
88 | $TESTTMP/hgcache/repos | |
92 |
|
89 | |||
93 | # First assert that with --packsonly, the loose object will be ignored: |
|
90 | # First assert that with --packsonly, the loose object will be ignored: | |
94 |
|
91 | |||
95 | $ hg repack --packsonly |
|
92 | $ hg repack --packsonly | |
96 |
|
93 | |||
97 | $ find $CACHEDIR -type f | sort |
|
94 | $ find $CACHEDIR -type f | sort | |
98 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/d4a3ed9310e5bd9887e3bf779da5077efab28216 |
|
95 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/d4a3ed9310e5bd9887e3bf779da5077efab28216 | |
99 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx |
|
96 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx | |
100 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack |
|
97 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack | |
101 | $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx |
|
98 | $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx | |
102 | $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack |
|
99 | $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack | |
103 | $TESTTMP/hgcache/master/packs/repacklock |
|
|||
104 | $TESTTMP/hgcache/repos |
|
100 | $TESTTMP/hgcache/repos | |
105 |
|
101 | |||
106 | $ hg repack --traceback |
|
102 | $ hg repack --traceback | |
107 |
|
103 | |||
108 | $ find $CACHEDIR -type f | sort |
|
104 | $ find $CACHEDIR -type f | sort | |
109 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx |
|
105 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx | |
110 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack |
|
106 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack | |
111 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx |
|
107 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx | |
112 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack |
|
108 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack | |
113 | $TESTTMP/hgcache/master/packs/repacklock |
|
|||
114 | $TESTTMP/hgcache/repos |
|
109 | $TESTTMP/hgcache/repos | |
115 |
|
110 | |||
116 | # Verify all the file data is still available |
|
111 | # Verify all the file data is still available | |
117 | $ hg cat -r . x |
|
112 | $ hg cat -r . x | |
118 | x |
|
113 | x | |
119 | x |
|
114 | x | |
120 | x |
|
115 | x | |
121 | $ hg cat -r '.^' x |
|
116 | $ hg cat -r '.^' x | |
122 | x |
|
117 | x | |
123 | x |
|
118 | x | |
124 |
|
119 | |||
125 | # Test that repacking again without new data does not delete the pack files |
|
120 | # Test that repacking again without new data does not delete the pack files | |
126 | # and does not change the pack names |
|
121 | # and does not change the pack names | |
127 | $ hg repack |
|
122 | $ hg repack | |
128 | $ find $CACHEDIR -type f | sort |
|
123 | $ find $CACHEDIR -type f | sort | |
129 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx |
|
124 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx | |
130 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack |
|
125 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack | |
131 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx |
|
126 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx | |
132 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack |
|
127 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack | |
133 | $TESTTMP/hgcache/master/packs/repacklock |
|
|||
134 | $TESTTMP/hgcache/repos |
|
128 | $TESTTMP/hgcache/repos | |
135 |
|
129 | |||
136 | # Run two repacks at once |
|
130 | # Run two repacks at once | |
137 | $ hg repack --config "hooks.prerepack=sleep 3" & |
|
131 | $ hg repack --config "hooks.prerepack=sleep 3" & | |
138 | $ sleep 1 |
|
132 | $ sleep 1 | |
139 | $ hg repack |
|
133 | $ hg repack | |
140 | skipping repack - another repack is already running |
|
134 | skipping repack - another repack is already running | |
141 | $ hg debugwaitonrepack >/dev/null 2>&1 |
|
135 | $ hg debugwaitonrepack >/dev/null 2>&1 | |
142 |
|
136 | |||
143 | # Run repack in the background |
|
137 | # Run repack in the background | |
144 | $ cd ../master |
|
138 | $ cd ../master | |
145 | $ echo x >> x |
|
139 | $ echo x >> x | |
146 | $ hg commit -m x4 |
|
140 | $ hg commit -m x4 | |
147 | $ cd ../shallow |
|
141 | $ cd ../shallow | |
148 | $ hg pull -q |
|
142 | $ hg pull -q | |
149 | $ hg up -q tip |
|
143 | $ hg up -q tip | |
150 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
144 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
151 | $ find $CACHEDIR -type f | sort |
|
145 | $ find $CACHEDIR -type f | sort | |
152 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1bb2e6237e035c8f8ef508e281f1ce075bc6db72 |
|
146 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1bb2e6237e035c8f8ef508e281f1ce075bc6db72 | |
153 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx |
|
147 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx | |
154 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack |
|
148 | $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack | |
155 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx |
|
149 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx | |
156 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack |
|
150 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack | |
157 | $TESTTMP/hgcache/master/packs/repacklock |
|
|||
158 | $TESTTMP/hgcache/repos |
|
151 | $TESTTMP/hgcache/repos | |
159 |
|
152 | |||
160 | $ hg repack --background |
|
153 | $ hg repack --background | |
161 | (running background repack) |
|
154 | (running background repack) | |
162 | $ sleep 0.5 |
|
155 | $ sleep 0.5 | |
163 | $ hg debugwaitonrepack >/dev/null 2>&1 |
|
156 | $ hg debugwaitonrepack >/dev/null 2>&1 | |
164 | $ find $CACHEDIR -type f | sort |
|
157 | $ find $CACHEDIR -type f | sort | |
165 | $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0.dataidx |
|
158 | $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0.dataidx | |
166 | $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0.datapack |
|
159 | $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0.datapack | |
167 | $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histidx |
|
160 | $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histidx | |
168 | $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histpack |
|
161 | $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histpack | |
169 | $TESTTMP/hgcache/master/packs/repacklock |
|
|||
170 | $TESTTMP/hgcache/repos |
|
162 | $TESTTMP/hgcache/repos | |
171 |
|
163 | |||
172 | # Test debug commands |
|
164 | # Test debug commands | |
173 |
|
165 | |||
174 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack |
|
166 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack | |
175 | $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0: |
|
167 | $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0: | |
176 | x: |
|
168 | x: | |
177 | Node Delta Base Delta Length Blob Size |
|
169 | Node Delta Base Delta Length Blob Size | |
178 | 1bb2e6237e03 000000000000 8 8 |
|
170 | 1bb2e6237e03 000000000000 8 8 | |
179 | d4a3ed9310e5 1bb2e6237e03 12 6 |
|
171 | d4a3ed9310e5 1bb2e6237e03 12 6 | |
180 | aee31534993a d4a3ed9310e5 12 4 |
|
172 | aee31534993a d4a3ed9310e5 12 4 | |
181 |
|
173 | |||
182 | Total: 32 18 (77.8% bigger) |
|
174 | Total: 32 18 (77.8% bigger) | |
183 | $ hg debugdatapack --long $TESTTMP/hgcache/master/packs/*.datapack |
|
175 | $ hg debugdatapack --long $TESTTMP/hgcache/master/packs/*.datapack | |
184 | $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0: |
|
176 | $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0: | |
185 | x: |
|
177 | x: | |
186 | Node Delta Base Delta Length Blob Size |
|
178 | Node Delta Base Delta Length Blob Size | |
187 | 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 8 8 |
|
179 | 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 8 8 | |
188 | d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 12 6 |
|
180 | d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 12 6 | |
189 | aee31534993a501858fb6dd96a065671922e7d51 d4a3ed9310e5bd9887e3bf779da5077efab28216 12 4 |
|
181 | aee31534993a501858fb6dd96a065671922e7d51 d4a3ed9310e5bd9887e3bf779da5077efab28216 12 4 | |
190 |
|
182 | |||
191 | Total: 32 18 (77.8% bigger) |
|
183 | Total: 32 18 (77.8% bigger) | |
192 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack --node d4a3ed9310e5bd9887e3bf779da5077efab28216 |
|
184 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack --node d4a3ed9310e5bd9887e3bf779da5077efab28216 | |
193 | $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0: |
|
185 | $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0: | |
194 |
|
186 | |||
195 | x |
|
187 | x | |
196 | Node Delta Base Delta SHA1 Delta Length |
|
188 | Node Delta Base Delta SHA1 Delta Length | |
197 | d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 77029ab56e83ea2115dd53ff87483682abe5d7ca 12 |
|
189 | d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 77029ab56e83ea2115dd53ff87483682abe5d7ca 12 | |
198 | Node Delta Base Delta SHA1 Delta Length |
|
190 | Node Delta Base Delta SHA1 Delta Length | |
199 | 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 7ca8c71a64f7b56380e77573da2f7a5fdd2ecdb5 8 |
|
191 | 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 7ca8c71a64f7b56380e77573da2f7a5fdd2ecdb5 8 | |
200 | $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx |
|
192 | $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx | |
201 |
|
193 | |||
202 | x |
|
194 | x | |
203 | Node P1 Node P2 Node Link Node Copy From |
|
195 | Node P1 Node P2 Node Link Node Copy From | |
204 | 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7 |
|
196 | 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7 | |
205 | d4a3ed9310e5 aee31534993a 000000000000 421535db10b6 |
|
197 | d4a3ed9310e5 aee31534993a 000000000000 421535db10b6 | |
206 | aee31534993a 1406e7411862 000000000000 a89d614e2364 |
|
198 | aee31534993a 1406e7411862 000000000000 a89d614e2364 | |
207 | 1406e7411862 000000000000 000000000000 b292c1e3311f |
|
199 | 1406e7411862 000000000000 000000000000 b292c1e3311f | |
208 |
|
200 | |||
209 | # Test copy tracing from a pack |
|
201 | # Test copy tracing from a pack | |
210 | $ cd ../master |
|
202 | $ cd ../master | |
211 | $ hg mv x y |
|
203 | $ hg mv x y | |
212 | $ hg commit -m 'move x to y' |
|
204 | $ hg commit -m 'move x to y' | |
213 | $ cd ../shallow |
|
205 | $ cd ../shallow | |
214 | $ hg pull -q |
|
206 | $ hg pull -q | |
215 | $ hg up -q tip |
|
207 | $ hg up -q tip | |
216 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
208 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
217 | $ hg repack |
|
209 | $ hg repack | |
218 | $ hg log -f y -T '{desc}\n' |
|
210 | $ hg log -f y -T '{desc}\n' | |
219 | move x to y |
|
211 | move x to y | |
220 | x4 |
|
212 | x4 | |
221 | x3 |
|
213 | x3 | |
222 | x2 |
|
214 | x2 | |
223 | x |
|
215 | x | |
224 |
|
216 | |||
225 | # Test copy trace across rename and back |
|
217 | # Test copy trace across rename and back | |
226 | $ cp -R $TESTTMP/hgcache/master/packs $TESTTMP/backuppacks |
|
218 | $ cp -R $TESTTMP/hgcache/master/packs $TESTTMP/backuppacks | |
227 | $ cd ../master |
|
219 | $ cd ../master | |
228 | $ hg mv y x |
|
220 | $ hg mv y x | |
229 | $ hg commit -m 'move y back to x' |
|
221 | $ hg commit -m 'move y back to x' | |
230 | $ hg revert -r 0 x |
|
222 | $ hg revert -r 0 x | |
231 | $ mv x y |
|
223 | $ mv x y | |
232 | $ hg add y |
|
224 | $ hg add y | |
233 | $ echo >> y |
|
225 | $ echo >> y | |
234 | $ hg revert x |
|
226 | $ hg revert x | |
235 | $ hg commit -m 'add y back without metadata' |
|
227 | $ hg commit -m 'add y back without metadata' | |
236 | $ cd ../shallow |
|
228 | $ cd ../shallow | |
237 | $ hg pull -q |
|
229 | $ hg pull -q | |
238 | $ hg up -q tip |
|
230 | $ hg up -q tip | |
239 | 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over * (glob) |
|
231 | 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over * (glob) | |
240 | $ hg repack |
|
232 | $ hg repack | |
241 | $ ls $TESTTMP/hgcache/master/packs |
|
233 | $ ls $TESTTMP/hgcache/master/packs | |
242 | bfd60adb76018bb952e27cd23fc151bf94865d7d.histidx |
|
234 | bfd60adb76018bb952e27cd23fc151bf94865d7d.histidx | |
243 | bfd60adb76018bb952e27cd23fc151bf94865d7d.histpack |
|
235 | bfd60adb76018bb952e27cd23fc151bf94865d7d.histpack | |
244 | fb3aa57b22789ebcc45706c352e2d6af099c5816.dataidx |
|
236 | fb3aa57b22789ebcc45706c352e2d6af099c5816.dataidx | |
245 | fb3aa57b22789ebcc45706c352e2d6af099c5816.datapack |
|
237 | fb3aa57b22789ebcc45706c352e2d6af099c5816.datapack | |
246 | repacklock |
|
|||
247 | $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx |
|
238 | $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx | |
248 |
|
239 | |||
249 | x |
|
240 | x | |
250 | Node P1 Node P2 Node Link Node Copy From |
|
241 | Node P1 Node P2 Node Link Node Copy From | |
251 | cd410a44d584 577959738234 000000000000 609547eda446 y |
|
242 | cd410a44d584 577959738234 000000000000 609547eda446 y | |
252 | 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7 |
|
243 | 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7 | |
253 | d4a3ed9310e5 aee31534993a 000000000000 421535db10b6 |
|
244 | d4a3ed9310e5 aee31534993a 000000000000 421535db10b6 | |
254 | aee31534993a 1406e7411862 000000000000 a89d614e2364 |
|
245 | aee31534993a 1406e7411862 000000000000 a89d614e2364 | |
255 | 1406e7411862 000000000000 000000000000 b292c1e3311f |
|
246 | 1406e7411862 000000000000 000000000000 b292c1e3311f | |
256 |
|
247 | |||
257 | y |
|
248 | y | |
258 | Node P1 Node P2 Node Link Node Copy From |
|
249 | Node P1 Node P2 Node Link Node Copy From | |
259 | 577959738234 1bb2e6237e03 000000000000 c7faf2fc439a x |
|
250 | 577959738234 1bb2e6237e03 000000000000 c7faf2fc439a x | |
260 | 21f46f2721e7 000000000000 000000000000 d6868642b790 |
|
251 | 21f46f2721e7 000000000000 000000000000 d6868642b790 | |
261 | $ hg strip -r '.^' |
|
252 | $ hg strip -r '.^' | |
262 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
253 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved | |
263 | saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob) |
|
254 | saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob) | |
264 | $ hg -R ../master strip -r '.^' |
|
255 | $ hg -R ../master strip -r '.^' | |
265 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
256 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved | |
266 | saved backup bundle to $TESTTMP/master/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob) |
|
257 | saved backup bundle to $TESTTMP/master/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob) | |
267 |
|
258 | |||
268 | $ rm -rf $TESTTMP/hgcache/master/packs |
|
259 | $ rm -rf $TESTTMP/hgcache/master/packs | |
269 | $ cp -R $TESTTMP/backuppacks $TESTTMP/hgcache/master/packs |
|
260 | $ cp -R $TESTTMP/backuppacks $TESTTMP/hgcache/master/packs | |
270 |
|
261 | |||
271 | # Test repacking datapack without history |
|
262 | # Test repacking datapack without history | |
272 | $ rm -rf $CACHEDIR/master/packs/*hist* |
|
263 | $ rm -rf $CACHEDIR/master/packs/*hist* | |
273 | $ hg repack |
|
264 | $ hg repack | |
274 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack |
|
265 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack | |
275 | $TESTTMP/hgcache/master/packs/922aca43dbbeda4d250565372e8892ec7b08da6a: |
|
266 | $TESTTMP/hgcache/master/packs/922aca43dbbeda4d250565372e8892ec7b08da6a: | |
276 | x: |
|
267 | x: | |
277 | Node Delta Base Delta Length Blob Size |
|
268 | Node Delta Base Delta Length Blob Size | |
278 | 1bb2e6237e03 000000000000 8 8 |
|
269 | 1bb2e6237e03 000000000000 8 8 | |
279 | d4a3ed9310e5 1bb2e6237e03 12 6 |
|
270 | d4a3ed9310e5 1bb2e6237e03 12 6 | |
280 | aee31534993a d4a3ed9310e5 12 4 |
|
271 | aee31534993a d4a3ed9310e5 12 4 | |
281 |
|
272 | |||
282 | Total: 32 18 (77.8% bigger) |
|
273 | Total: 32 18 (77.8% bigger) | |
283 | y: |
|
274 | y: | |
284 | Node Delta Base Delta Length Blob Size |
|
275 | Node Delta Base Delta Length Blob Size | |
285 | 577959738234 000000000000 70 8 |
|
276 | 577959738234 000000000000 70 8 | |
286 |
|
277 | |||
287 | Total: 70 8 (775.0% bigger) |
|
278 | Total: 70 8 (775.0% bigger) | |
288 |
|
279 | |||
289 | $ hg cat -r ".^" x |
|
280 | $ hg cat -r ".^" x | |
290 | x |
|
281 | x | |
291 | x |
|
282 | x | |
292 | x |
|
283 | x | |
293 | x |
|
284 | x | |
294 |
|
285 | |||
295 | Incremental repack |
|
286 | Incremental repack | |
296 | $ rm -rf $CACHEDIR/master/packs/* |
|
287 | $ rm -rf $CACHEDIR/master/packs/* | |
297 | $ cat >> .hg/hgrc <<EOF |
|
288 | $ cat >> .hg/hgrc <<EOF | |
298 | > [remotefilelog] |
|
289 | > [remotefilelog] | |
299 | > data.generations=60 |
|
290 | > data.generations=60 | |
300 | > 150 |
|
291 | > 150 | |
301 | > EOF |
|
292 | > EOF | |
302 |
|
293 | |||
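As configured above, remotefilelog.data.generations lists size boundaries (60 and 150 here) that bucket data packs into generations for the incremental repack, and the steps below suggest a generation is only repacked once it collects about three packs. The tiny values only exist so that the test packs cross the boundaries; as a rough sketch, a more realistic stanza might look like the following, reusing the same multi-line list syntax (the 1MB/100MB/1GB figures are purely illustrative):

    [remotefilelog]
    data.generations=1GB
      100MB
      1MB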
303 | Single pack - repack does nothing |
|
294 | Single pack - repack does nothing | |
304 | $ hg prefetch -r 0 |
|
295 | $ hg prefetch -r 0 | |
305 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
296 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
306 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
297 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
307 | [1] |
|
298 | [1] | |
308 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
299 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
309 | [1] |
|
300 | [1] | |
310 | $ hg repack --incremental |
|
301 | $ hg repack --incremental | |
311 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
302 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
312 | -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack |
|
303 | -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack | |
313 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
304 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
314 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack |
|
305 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack | |
315 |
|
306 | |||
316 | 3 gen1 packs, 1 gen0 pack - packs 3 gen1 into 1 |
|
307 | 3 gen1 packs, 1 gen0 pack - packs 3 gen1 into 1 | |
317 | $ hg prefetch -r 1 |
|
308 | $ hg prefetch -r 1 | |
318 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
309 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
319 | $ hg prefetch -r 2 |
|
310 | $ hg prefetch -r 2 | |
320 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
311 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
321 | $ hg prefetch -r 38 |
|
312 | $ hg prefetch -r 38 | |
322 | abort: unknown revision '38'! |
|
313 | abort: unknown revision '38'! | |
323 | [255] |
|
314 | [255] | |
324 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
315 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
325 | -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack |
|
316 | -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack | |
326 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
317 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
327 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack |
|
318 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack | |
328 |
|
319 | |||
329 | For the data packs, the repackmaxpacksize limit is set to 64 so that a data |
|
320 | For the data packs, the repackmaxpacksize limit is set to 64 so that a data | |
330 | pack of size 65 is over the limit. This effectively ensures that no generation |
|
321 | pack of size 65 is over the limit. This effectively ensures that no generation | |
331 | has 3 packs and therefore no packs are chosen for the incremental repacking. |
|
322 | has 3 packs and therefore no packs are chosen for the incremental repacking. | |
332 | For the history packs, repackmaxpacksize is set to 0, which should always |
|
323 | For the history packs, repackmaxpacksize is set to 0, which should always | |
333 | result in no repacking. |
|
324 | result in no repacking. | |
334 | $ hg repack --incremental --config remotefilelog.data.repackmaxpacksize=64 \ |
|
325 | $ hg repack --incremental --config remotefilelog.data.repackmaxpacksize=64 \ | |
335 | > --config remotefilelog.history.repackmaxpacksize=0 |
|
326 | > --config remotefilelog.history.repackmaxpacksize=0 | |
336 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
327 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
337 | -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack |
|
328 | -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack | |
338 | -r--r--r-- 149 78840d69389c7404327f7477e3931c89945c37d1.datapack |
|
329 | -r--r--r-- 149 78840d69389c7404327f7477e3931c89945c37d1.datapack | |
339 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
330 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
340 | -r--r--r-- 254 8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack |
|
331 | -r--r--r-- 254 8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack | |
341 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack |
|
332 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack | |
342 |
|
333 | |||
343 | Setting the repackmaxpacksize limit to the size of the biggest pack file |
|
334 | Setting the repackmaxpacksize limit to the size of the biggest pack file | |
344 | ensures that it is effectively ignored in the incremental repacking. |
|
335 | ensures that it is effectively ignored in the incremental repacking. | |
345 | $ hg repack --incremental --config remotefilelog.data.repackmaxpacksize=65 \ |
|
336 | $ hg repack --incremental --config remotefilelog.data.repackmaxpacksize=65 \ | |
346 | > --config remotefilelog.history.repackmaxpacksize=336 |
|
337 | > --config remotefilelog.history.repackmaxpacksize=336 | |
347 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
338 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
348 | -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack |
|
339 | -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack | |
349 | -r--r--r-- 149 78840d69389c7404327f7477e3931c89945c37d1.datapack |
|
340 | -r--r--r-- 149 78840d69389c7404327f7477e3931c89945c37d1.datapack | |
350 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
341 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
351 | -r--r--r-- 254 8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack |
|
342 | -r--r--r-- 254 8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack | |
352 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack |
|
343 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack | |
353 |
|
344 | |||
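Both repackmaxpacksize experiments above pass the limits on the command line with --config. If the limits were meant to be permanent they could equally live in the repository hgrc; a minimal sketch using the same option names (the values are illustrative):

    [remotefilelog]
    data.repackmaxpacksize=4GB
    history.repackmaxpacksize=400MB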
354 | 1 gen3 pack, 1 gen0 pack - does nothing |
|
345 | 1 gen3 pack, 1 gen0 pack - does nothing | |
355 | $ hg repack --incremental |
|
346 | $ hg repack --incremental | |
356 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
347 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
357 | -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack |
|
348 | -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack | |
358 | -r--r--r-- 149 78840d69389c7404327f7477e3931c89945c37d1.datapack |
|
349 | -r--r--r-- 149 78840d69389c7404327f7477e3931c89945c37d1.datapack | |
359 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
350 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
360 | -r--r--r-- 254 8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack |
|
351 | -r--r--r-- 254 8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack | |
361 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack |
|
352 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack | |
362 |
|
353 | |||
363 | Pull should run background repack |
|
354 | Pull should run background repack | |
364 | $ cat >> .hg/hgrc <<EOF |
|
355 | $ cat >> .hg/hgrc <<EOF | |
365 | > [remotefilelog] |
|
356 | > [remotefilelog] | |
366 | > backgroundrepack=True |
|
357 | > backgroundrepack=True | |
367 | > EOF |
|
358 | > EOF | |
368 | $ clearcache |
|
359 | $ clearcache | |
369 | $ hg prefetch -r 0 |
|
360 | $ hg prefetch -r 0 | |
370 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
361 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
371 | $ hg prefetch -r 1 |
|
362 | $ hg prefetch -r 1 | |
372 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
363 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
373 | $ hg prefetch -r 2 |
|
364 | $ hg prefetch -r 2 | |
374 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
365 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
375 | $ hg prefetch -r 3 |
|
366 | $ hg prefetch -r 3 | |
376 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
367 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
377 |
|
368 | |||
378 | $ hg pull |
|
369 | $ hg pull | |
379 | pulling from ssh://user@dummy/master |
|
370 | pulling from ssh://user@dummy/master | |
380 | searching for changes |
|
371 | searching for changes | |
381 | no changes found |
|
372 | no changes found | |
382 | (running background incremental repack) |
|
373 | (running background incremental repack) | |
383 | $ sleep 0.5 |
|
374 | $ sleep 0.5 | |
384 | $ hg debugwaitonrepack >/dev/null 2>&1 |
|
375 | $ hg debugwaitonrepack >/dev/null 2>&1 | |
385 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
376 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
386 | -r--r--r-- 303 156a6c1c83aeb69422d7936e0a46ba9bc06a71c0.datapack |
|
377 | -r--r--r-- 303 156a6c1c83aeb69422d7936e0a46ba9bc06a71c0.datapack | |
387 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
378 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
388 | -r--r--r-- 336 604552d403a1381749faf656feca0ca265a6d52c.histpack |
|
379 | -r--r--r-- 336 604552d403a1381749faf656feca0ca265a6d52c.histpack | |
389 |
|
380 | |||
390 | Test environment variable resolution |
|
381 | Test environment variable resolution | |
391 | $ CACHEPATH=$TESTTMP/envcache hg prefetch --config 'remotefilelog.cachepath=$CACHEPATH' |
|
382 | $ CACHEPATH=$TESTTMP/envcache hg prefetch --config 'remotefilelog.cachepath=$CACHEPATH' | |
392 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
383 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
393 | $ find $TESTTMP/envcache | sort |
|
384 | $ find $TESTTMP/envcache | sort | |
394 | $TESTTMP/envcache |
|
385 | $TESTTMP/envcache | |
395 | $TESTTMP/envcache/master |
|
386 | $TESTTMP/envcache/master | |
396 | $TESTTMP/envcache/master/95 |
|
387 | $TESTTMP/envcache/master/95 | |
397 | $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a |
|
388 | $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a | |
398 | $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/577959738234a1eb241ed3ed4b22a575833f56e0 |
|
389 | $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/577959738234a1eb241ed3ed4b22a575833f56e0 | |
399 | $TESTTMP/envcache/repos |
|
390 | $TESTTMP/envcache/repos | |
400 |
|
391 | |||
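The run above shows that remotefilelog.cachepath is passed through environment-variable expansion before it is used, so the cache location can be pointed at a per-user or per-host directory. A minimal sketch of the equivalent persistent setting, assuming $CACHEPATH is exported by the environment exactly as in the test:

    [remotefilelog]
    cachepath=$CACHEPATH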
401 | Test local remotefilelog blob is correct when based on a pack |
|
392 | Test local remotefilelog blob is correct when based on a pack | |
402 | $ hg prefetch -r . |
|
393 | $ hg prefetch -r . | |
403 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
394 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
404 | $ echo >> y |
|
395 | $ echo >> y | |
405 | $ hg commit -m y2 |
|
396 | $ hg commit -m y2 | |
406 | $ hg debugremotefilelog .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808 |
|
397 | $ hg debugremotefilelog .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808 | |
407 | size: 9 bytes |
|
398 | size: 9 bytes | |
408 | path: .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808 |
|
399 | path: .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808 | |
409 | key: b70860edba4f |
|
400 | key: b70860edba4f | |
410 |
|
401 | |||
411 | node => p1 p2 linknode copyfrom |
|
402 | node => p1 p2 linknode copyfrom | |
412 | b70860edba4f => 577959738234 000000000000 08d3fbc98c48 |
|
403 | b70860edba4f => 577959738234 000000000000 08d3fbc98c48 | |
413 | 577959738234 => 1bb2e6237e03 000000000000 c7faf2fc439a x |
|
404 | 577959738234 => 1bb2e6237e03 000000000000 c7faf2fc439a x | |
414 | 1bb2e6237e03 => d4a3ed9310e5 000000000000 0b03bbc9e1e7 |
|
405 | 1bb2e6237e03 => d4a3ed9310e5 000000000000 0b03bbc9e1e7 | |
415 | d4a3ed9310e5 => aee31534993a 000000000000 421535db10b6 |
|
406 | d4a3ed9310e5 => aee31534993a 000000000000 421535db10b6 | |
416 | aee31534993a => 1406e7411862 000000000000 a89d614e2364 |
|
407 | aee31534993a => 1406e7411862 000000000000 a89d614e2364 | |
417 | 1406e7411862 => 000000000000 000000000000 b292c1e3311f |
|
408 | 1406e7411862 => 000000000000 000000000000 b292c1e3311f | |
418 |
|
409 | |||
419 | Test limiting the max delta chain length |
|
410 | Test limiting the max delta chain length | |
420 | $ hg repack --config packs.maxchainlen=1 |
|
411 | $ hg repack --config packs.maxchainlen=1 | |
421 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.dataidx |
|
412 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.dataidx | |
422 | $TESTTMP/hgcache/master/packs/f258af4c033dd5cd32b4dbc42a1efcd8e4c7d909: |
|
413 | $TESTTMP/hgcache/master/packs/f258af4c033dd5cd32b4dbc42a1efcd8e4c7d909: | |
423 | x: |
|
414 | x: | |
424 | Node Delta Base Delta Length Blob Size |
|
415 | Node Delta Base Delta Length Blob Size | |
425 | 1bb2e6237e03 000000000000 8 8 |
|
416 | 1bb2e6237e03 000000000000 8 8 | |
426 | d4a3ed9310e5 1bb2e6237e03 12 6 |
|
417 | d4a3ed9310e5 1bb2e6237e03 12 6 | |
427 | aee31534993a 000000000000 4 4 |
|
418 | aee31534993a 000000000000 4 4 | |
428 | 1406e7411862 aee31534993a 12 2 |
|
419 | 1406e7411862 aee31534993a 12 2 | |
429 |
|
420 | |||
430 | Total: 36 20 (80.0% bigger) |
|
421 | Total: 36 20 (80.0% bigger) | |
431 | y: |
|
422 | y: | |
432 | Node Delta Base Delta Length Blob Size |
|
423 | Node Delta Base Delta Length Blob Size | |
433 | 577959738234 000000000000 70 8 |
|
424 | 577959738234 000000000000 70 8 | |
434 |
|
425 | |||
435 | Total: 70 8 (775.0% bigger) |
|
426 | Total: 70 8 (775.0% bigger) | |
436 |
|
427 | |||
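In the debugdatapack output above, packs.maxchainlen=1 shows up as delta chains of at most one delta: 1bb2e6237e03 and aee31534993a are stored against the null base, and each is followed by a single delta before the chain restarts. A minimal sketch of making such a limit permanent, assuming the same option name (the value of 1000 is illustrative):

    [packs]
    maxchainlen=1000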
437 | Test huge pack cleanup using different values of packs.maxpacksize: |
|
428 | Test huge pack cleanup using different values of packs.maxpacksize: | |
438 | $ hg repack --incremental --debug |
|
429 | $ hg repack --incremental --debug | |
439 | $ hg repack --incremental --debug --config packs.maxpacksize=512 |
|
430 | $ hg repack --incremental --debug --config packs.maxpacksize=512 | |
440 | removing oversize packfile $TESTTMP/hgcache/master/packs/f258af4c033dd5cd32b4dbc42a1efcd8e4c7d909.datapack (425 bytes) |
|
431 | removing oversize packfile $TESTTMP/hgcache/master/packs/f258af4c033dd5cd32b4dbc42a1efcd8e4c7d909.datapack (425 bytes) | |
441 | removing oversize packfile $TESTTMP/hgcache/master/packs/f258af4c033dd5cd32b4dbc42a1efcd8e4c7d909.dataidx (1.21 KB) |
|
432 | removing oversize packfile $TESTTMP/hgcache/master/packs/f258af4c033dd5cd32b4dbc42a1efcd8e4c7d909.dataidx (1.21 KB) | |
442 |
|
433 | |||
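The debug output above shows packs.maxpacksize acting as a hard cap during incremental repack: with the cap lowered to 512, the existing data pack and its index are reported as oversize and removed outright rather than repacked. A minimal sketch of setting the cap in hgrc, assuming the same option name (the value is illustrative):

    [packs]
    maxpacksize=100GB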
443 | Do a repack where the new pack reuses a delta from the old pack |
|
434 | Do a repack where the new pack reuses a delta from the old pack | |
444 | $ clearcache |
|
435 | $ clearcache | |
445 | $ hg prefetch -r '2::3' |
|
436 | $ hg prefetch -r '2::3' | |
446 | 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over * (glob) |
|
437 | 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over * (glob) | |
447 | $ hg repack |
|
438 | $ hg repack | |
448 | $ hg debugdatapack $CACHEDIR/master/packs/*.datapack |
|
439 | $ hg debugdatapack $CACHEDIR/master/packs/*.datapack | |
449 | $TESTTMP/hgcache/master/packs/9ec6b30891bd851320acb7c66b69a2bdf41c8df3: |
|
440 | $TESTTMP/hgcache/master/packs/9ec6b30891bd851320acb7c66b69a2bdf41c8df3: | |
450 | x: |
|
441 | x: | |
451 | Node Delta Base Delta Length Blob Size |
|
442 | Node Delta Base Delta Length Blob Size | |
452 | 1bb2e6237e03 000000000000 8 8 |
|
443 | 1bb2e6237e03 000000000000 8 8 | |
453 | d4a3ed9310e5 1bb2e6237e03 12 6 |
|
444 | d4a3ed9310e5 1bb2e6237e03 12 6 | |
454 |
|
445 | |||
455 | Total: 20 14 (42.9% bigger) |
|
446 | Total: 20 14 (42.9% bigger) | |
456 | $ hg prefetch -r '0::1' |
|
447 | $ hg prefetch -r '0::1' | |
457 | 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over * (glob) |
|
448 | 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over * (glob) | |
458 | $ hg repack |
|
449 | $ hg repack | |
459 | $ hg debugdatapack $CACHEDIR/master/packs/*.datapack |
|
450 | $ hg debugdatapack $CACHEDIR/master/packs/*.datapack | |
460 | $TESTTMP/hgcache/master/packs/156a6c1c83aeb69422d7936e0a46ba9bc06a71c0: |
|
451 | $TESTTMP/hgcache/master/packs/156a6c1c83aeb69422d7936e0a46ba9bc06a71c0: | |
461 | x: |
|
452 | x: | |
462 | Node Delta Base Delta Length Blob Size |
|
453 | Node Delta Base Delta Length Blob Size | |
463 | 1bb2e6237e03 000000000000 8 8 |
|
454 | 1bb2e6237e03 000000000000 8 8 | |
464 | d4a3ed9310e5 1bb2e6237e03 12 6 |
|
455 | d4a3ed9310e5 1bb2e6237e03 12 6 | |
465 | aee31534993a d4a3ed9310e5 12 4 |
|
456 | aee31534993a d4a3ed9310e5 12 4 | |
466 | 1406e7411862 aee31534993a 12 2 |
|
457 | 1406e7411862 aee31534993a 12 2 | |
467 |
|
458 | |||
468 | Total: 44 20 (120.0% bigger) |
|
459 | Total: 44 20 (120.0% bigger) |