@@ -1,513 +1,516 @@
|
1 | 1 | # remotefilelog.py - filelog implementation where filelog history is stored |
|
2 | 2 | # remotely |
|
3 | 3 | # |
|
4 | 4 | # Copyright 2013 Facebook, Inc. |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import collections |
|
11 | 11 | import os |
|
12 | 12 | |
|
13 | 13 | from mercurial.node import ( |
|
14 | 14 | bin, |
|
15 | 15 | nullid, |
|
16 | 16 | wdirfilenodeids, |
|
17 | 17 | wdirid, |
|
18 | 18 | ) |
|
19 | 19 | from mercurial.i18n import _ |
|
20 | 20 | from mercurial import ( |
|
21 | 21 | ancestor, |
|
22 | 22 | error, |
|
23 | 23 | mdiff, |
|
24 | 24 | pycompat, |
|
25 | 25 | revlog, |
|
26 | 26 | util, |
|
27 | 27 | ) |
|
28 | 28 | from mercurial.utils import storageutil |
|
29 | 29 | from mercurial.revlogutils import flagutil |
|
30 | 30 | |
|
31 | 31 | from . import ( |
|
32 | 32 | constants, |
|
33 | 33 | fileserverclient, |
|
34 | 34 | shallowutil, |
|
35 | 35 | ) |
|
36 | 36 | |
|
37 | 37 | |
|
38 | 38 | class remotefilelognodemap(object): |
|
39 | 39 | def __init__(self, filename, store): |
|
40 | 40 | self._filename = filename |
|
41 | 41 | self._store = store |
|
42 | 42 | |
|
43 | 43 | def __contains__(self, node): |
|
44 | 44 | missing = self._store.getmissing([(self._filename, node)]) |
|
45 | 45 | return not bool(missing) |
|
46 | 46 | |
|
47 | 47 | def __get__(self, node): |
|
48 | 48 | if node not in self: |
|
49 | 49 | raise KeyError(node) |
|
50 | 50 | return node |
|
51 | 51 | |
|
52 | 52 | |
|
53 | 53 | class remotefilelog(object): |
|
54 | 54 | |
|
55 | 55 | _generaldelta = True |
|
56 | 56 | _flagserrorclass = error.RevlogError |
|
57 | 57 | |
|
58 | 58 | def __init__(self, opener, path, repo): |
|
59 | 59 | self.opener = opener |
|
60 | 60 | self.filename = path |
|
61 | 61 | self.repo = repo |
|
62 | 62 | self.nodemap = remotefilelognodemap(self.filename, repo.contentstore) |
|
63 | 63 | |
|
64 | 64 | self.version = 1 |
|
65 | 65 | |
|
66 | 66 | self._flagprocessors = dict(flagutil.flagprocessors) |
|
67 | 67 | |
|
68 | 68 | def read(self, node): |
|
69 | 69 | """returns the file contents at this node""" |
|
70 | 70 | t = self.revision(node) |
|
71 | 71 | if not t.startswith(b'\1\n'): |
|
72 | 72 | return t |
|
73 | 73 | s = t.index(b'\1\n', 2) |
|
74 | 74 | return t[s + 2 :] |
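
The b'\1\n' test above is the standard filelog metadata envelope: texts that carry copy metadata start with \1\n, then "key: value" lines, then a closing \1\n before the real content. A minimal sketch of that framing (splitmeta is a hypothetical helper shown only for illustration; the extension itself relies on storageutil.parsemeta):

    def splitmeta(t):
        # Texts without the envelope carry no metadata.
        if not t.startswith(b'\x01\n'):
            return {}, t
        # Envelope: "\x01\n", "key: value" lines, "\x01\n", content.
        end = t.index(b'\x01\n', 2)
        meta = {}
        for line in t[2:end].splitlines():
            key, value = line.split(b': ', 1)
            meta[key] = value
        return meta, t[end + 2:]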
|
75 | 75 | |
|
76 | 76 | def add(self, text, meta, transaction, linknode, p1=None, p2=None): |
|
77 | 77 | # hash with the metadata, like in vanilla filelogs |
|
78 | 78 | hashtext = shallowutil.createrevlogtext( |
|
79 | 79 | text, meta.get(b'copy'), meta.get(b'copyrev') |
|
80 | 80 | ) |
|
81 | 81 | node = storageutil.hashrevisionsha1(hashtext, p1, p2) |
|
82 | 82 | return self.addrevision( |
|
83 | 83 | hashtext, transaction, linknode, p1, p2, node=node |
|
84 | 84 | ) |
|
85 | 85 | |
|
86 | 86 | def _createfileblob(self, text, meta, flags, p1, p2, node, linknode): |
|
87 | 87 | # text passed to "_createfileblob" does not include filelog metadata |
|
88 | 88 | header = shallowutil.buildfileblobheader(len(text), flags) |
|
89 | 89 | data = b"%s\0%s" % (header, text) |
|
90 | 90 | |
|
91 | 91 | realp1 = p1 |
|
92 | 92 | copyfrom = b"" |
|
93 | 93 | if meta and b'copy' in meta: |
|
94 | 94 | copyfrom = meta[b'copy'] |
|
95 | 95 | realp1 = bin(meta[b'copyrev']) |
|
96 | 96 | |
|
97 | 97 | data += b"%s%s%s%s%s\0" % (node, realp1, p2, linknode, copyfrom) |
|
98 | 98 | |
|
99 | 99 | visited = set() |
|
100 | 100 | |
|
101 | 101 | pancestors = {} |
|
102 | 102 | queue = [] |
|
103 | 103 | if realp1 != nullid: |
|
104 | 104 | p1flog = self |
|
105 | 105 | if copyfrom: |
|
106 | 106 | p1flog = remotefilelog(self.opener, copyfrom, self.repo) |
|
107 | 107 | |
|
108 | 108 | pancestors.update(p1flog.ancestormap(realp1)) |
|
109 | 109 | queue.append(realp1) |
|
110 | 110 | visited.add(realp1) |
|
111 | 111 | if p2 != nullid: |
|
112 | 112 | pancestors.update(self.ancestormap(p2)) |
|
113 | 113 | queue.append(p2) |
|
114 | 114 | visited.add(p2) |
|
115 | 115 | |
|
116 | 116 | ancestortext = b"" |
|
117 | 117 | |
|
118 | 118 | # add the ancestors in topological order |
|
119 | 119 | while queue: |
|
120 | 120 | c = queue.pop(0) |
|
121 | 121 | pa1, pa2, ancestorlinknode, pacopyfrom = pancestors[c] |
|
122 | 122 | |
|
123 | 123 | pacopyfrom = pacopyfrom or b'' |
|
124 | 124 | ancestortext += b"%s%s%s%s%s\0" % ( |
|
125 | 125 | c, |
|
126 | 126 | pa1, |
|
127 | 127 | pa2, |
|
128 | 128 | ancestorlinknode, |
|
129 | 129 | pacopyfrom, |
|
130 | 130 | ) |
|
131 | 131 | |
|
132 | 132 | if pa1 != nullid and pa1 not in visited: |
|
133 | 133 | queue.append(pa1) |
|
134 | 134 | visited.add(pa1) |
|
135 | 135 | if pa2 != nullid and pa2 not in visited: |
|
136 | 136 | queue.append(pa2) |
|
137 | 137 | visited.add(pa2) |
|
138 | 138 | |
|
139 | 139 | data += ancestortext |
|
140 | 140 | |
|
141 | 141 | return data |
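
For orientation, the blob built above is: a header, a NUL, the raw text, then one entry per ancestor, each entry being four 20-byte binary nodes (node, p1, p2, linknode) followed by a NUL-terminated copy source path. A toy builder showing that layout (a bare size field stands in for shallowutil.buildfileblobheader, which also encodes flags):

    def toyfileblob(text, node, p1, p2, linknode, copyfrom=b''):
        # Header, NUL, raw file text.
        data = b'%d\0%s' % (len(text), text)
        # One ancestor entry: 20-byte nodes back to back, then the
        # copy source path (empty when not a copy) and a NUL.
        data += b'%s%s%s%s%s\0' % (node, p1, p2, linknode, copyfrom)
        return data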
|
142 | 142 | |
|
143 | 143 | def addrevision( |
|
144 | 144 | self, |
|
145 | 145 | text, |
|
146 | 146 | transaction, |
|
147 | 147 | linknode, |
|
148 | 148 | p1, |
|
149 | 149 | p2, |
|
150 | 150 | cachedelta=None, |
|
151 | 151 | node=None, |
|
152 | 152 | flags=revlog.REVIDX_DEFAULT_FLAGS, |
|
153 | 153 | sidedata=None, |
|
154 | 154 | ): |
|
155 | 155 | # text passed to "addrevision" includes hg filelog metadata header |
|
156 | 156 | if node is None: |
|
157 | 157 | node = storageutil.hashrevisionsha1(text, p1, p2) |
|
158 | 158 | |
|
159 | 159 | meta, metaoffset = storageutil.parsemeta(text) |
|
160 | 160 | rawtext, validatehash = flagutil.processflagswrite( |
|
161 | 161 | self, |
|
162 | 162 | text, |
|
163 | 163 | flags, |
|
164 | 164 | ) |
|
165 | 165 | return self.addrawrevision( |
|
166 | 166 | rawtext, |
|
167 | 167 | transaction, |
|
168 | 168 | linknode, |
|
169 | 169 | p1, |
|
170 | 170 | p2, |
|
171 | 171 | node, |
|
172 | 172 | flags, |
|
173 | 173 | cachedelta, |
|
174 | 174 | _metatuple=(meta, metaoffset), |
|
175 | 175 | ) |
|
176 | 176 | |
|
177 | 177 | def addrawrevision( |
|
178 | 178 | self, |
|
179 | 179 | rawtext, |
|
180 | 180 | transaction, |
|
181 | 181 | linknode, |
|
182 | 182 | p1, |
|
183 | 183 | p2, |
|
184 | 184 | node, |
|
185 | 185 | flags, |
|
186 | 186 | cachedelta=None, |
|
187 | 187 | _metatuple=None, |
|
188 | 188 | ): |
|
189 | 189 | if _metatuple: |
|
190 | 190 | # _metatuple: passed when remotefilelog's own "addrevision" calls |

191 | 191 | # us internally; in that case meta was parsed reliably |
|
192 | 192 | meta, metaoffset = _metatuple |
|
193 | 193 | else: |
|
194 | 194 | # not from self.addrevision, but something else (repo._filecommit) |
|
195 | 195 | # calls addrawrevision directly. remotefilelog needs to get and |
|
196 | 196 | # strip filelog metadata. |
|
197 | 197 | # we don't have confidence about whether rawtext contains filelog |
|
198 | 198 | # metadata or not (flag processor could replace it), so we just |
|
199 | 199 | # parse it on a best-effort basis. |

200 | 200 | # in the LFS case (flags != 0), the best way is to call LFS code to |
|
201 | 201 | # get the meta information, instead of storageutil.parsemeta. |
|
202 | 202 | meta, metaoffset = storageutil.parsemeta(rawtext) |
|
203 | 203 | if flags != 0: |
|
204 | 204 | # when flags != 0, be conservative and do not mangle rawtext, since |
|
205 | 205 | # a read flag processor expects the text not being mangled at all. |
|
206 | 206 | metaoffset = 0 |
|
207 | 207 | if metaoffset: |
|
208 | 208 | # remotefilelog fileblob stores copy metadata in its ancestortext, |
|
209 | 209 | # not its main blob. so we need to remove filelog metadata |
|
210 | 210 | # (containing copy information) from text. |
|
211 | 211 | blobtext = rawtext[metaoffset:] |
|
212 | 212 | else: |
|
213 | 213 | blobtext = rawtext |
|
214 | 214 | data = self._createfileblob( |
|
215 | 215 | blobtext, meta, flags, p1, p2, node, linknode |
|
216 | 216 | ) |
|
217 | 217 | self.repo.contentstore.addremotefilelognode(self.filename, node, data) |
|
218 | 218 | |
|
219 | 219 | return node |
|
220 | 220 | |
|
221 | 221 | def renamed(self, node): |
|
222 | 222 | ancestors = self.repo.metadatastore.getancestors(self.filename, node) |
|
223 | 223 | p1, p2, linknode, copyfrom = ancestors[node] |
|
224 | 224 | if copyfrom: |
|
225 | 225 | return (copyfrom, p1) |
|
226 | 226 | |
|
227 | 227 | return False |
|
228 | 228 | |
|
229 | 229 | def size(self, node): |
|
230 | 230 | """return the size of a given revision""" |
|
231 | 231 | return len(self.read(node)) |
|
232 | 232 | |
|
233 | 233 | rawsize = size |
|
234 | 234 | |
|
235 | 235 | def cmp(self, node, text): |
|
236 | 236 | """compare text with a given file revision |
|
237 | 237 | |
|
238 | 238 | returns True if text is different from what is stored. |
|
239 | 239 | """ |
|
240 | 240 | |
|
241 | 241 | if node == nullid: |
|
242 | 242 | return True |
|
243 | 243 | |
|
244 | 244 | nodetext = self.read(node) |
|
245 | 245 | return nodetext != text |
|
246 | 246 | |
|
247 | 247 | def __nonzero__(self): |
|
248 | 248 | return True |
|
249 | 249 | |
|
250 | 250 | __bool__ = __nonzero__ |
|
251 | 251 | |
|
252 | 252 | def __len__(self): |
|
253 | 253 | if self.filename == b'.hgtags': |
|
254 | 254 | # The length of .hgtags is used to fast path tag checking. |
|
255 | 255 | # remotefilelog doesn't support .hgtags since the entire .hgtags |
|
256 | 256 | # history is needed. Use the excludepattern setting to make |
|
257 | 257 | # .hgtags a normal filelog. |
|
258 | 258 | return 0 |
|
259 | 259 | |
|
260 | 260 | raise RuntimeError(b"len not supported") |
|
261 | 261 | |
|
262 | 262 | def heads(self): |
|
263 | 263 | # Fake heads of the filelog to satisfy hgweb. |
|
264 | 264 | return [] |
|
265 | 265 | |
|
266 | 266 | def empty(self): |
|
267 | 267 | return False |
|
268 | 268 | |
|
269 | 269 | def flags(self, node): |
|
270 | 270 | if isinstance(node, int): |
|
271 | 271 | raise error.ProgrammingError( |
|
272 | 272 | b'remotefilelog does not accept integer rev for flags' |
|
273 | 273 | ) |
|
274 | 274 | store = self.repo.contentstore |
|
275 | 275 | return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0) |
|
276 | 276 | |
|
277 | 277 | def parents(self, node): |
|
278 | 278 | if node == nullid: |
|
279 | 279 | return nullid, nullid |
|
280 | 280 | |
|
281 | 281 | ancestormap = self.repo.metadatastore.getancestors(self.filename, node) |
|
282 | 282 | p1, p2, linknode, copyfrom = ancestormap[node] |
|
283 | 283 | if copyfrom: |
|
284 | 284 | p1 = nullid |
|
285 | 285 | |
|
286 | 286 | return p1, p2 |
|
287 | 287 | |
|
288 | 288 | def parentrevs(self, rev): |
|
289 | 289 | # TODO(augie): this is a node and should be a rev, but for now |
|
290 | 290 | # nothing in core seems to actually break. |
|
291 | 291 | return self.parents(rev) |
|
292 | 292 | |
|
293 | 293 | def linknode(self, node): |
|
294 | 294 | ancestormap = self.repo.metadatastore.getancestors(self.filename, node) |
|
295 | 295 | p1, p2, linknode, copyfrom = ancestormap[node] |
|
296 | 296 | return linknode |
|
297 | 297 | |
|
298 | 298 | def linkrev(self, node): |
|
299 | 299 | return self.repo.unfiltered().changelog.rev(self.linknode(node)) |
|
300 | 300 | |
|
301 | 301 | def emitrevisions( |
|
302 | 302 | self, |
|
303 | 303 | nodes, |
|
304 | 304 | nodesorder=None, |
|
305 | 305 | revisiondata=False, |
|
306 | 306 | assumehaveparentrevisions=False, |
|
307 | 307 | deltaprevious=False, |
|
308 | 308 | deltamode=None, |
|
309 | sidedata_helpers=None, | |
|
309 | 310 | ): |
|
310 | 311 | # we don't use any of these parameters here |
|
311 | 312 | del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious |
|
312 | 313 | del deltamode |
|
313 | 314 | prevnode = None |
|
314 | 315 | for node in nodes: |
|
315 | 316 | p1, p2 = self.parents(node) |
|
316 | 317 | if prevnode is None: |
|
317 | 318 | basenode = prevnode = p1 |
|
318 | 319 | if basenode == node: |
|
319 | 320 | basenode = nullid |
|
320 | 321 | if basenode != nullid: |
|
321 | 322 | revision = None |
|
322 | 323 | delta = self.revdiff(basenode, node) |
|
323 | 324 | else: |
|
324 | 325 | revision = self.rawdata(node) |
|
325 | 326 | delta = None |
|
326 | 327 | yield revlog.revlogrevisiondelta( |
|
327 | 328 | node=node, |
|
328 | 329 | p1node=p1, |
|
329 | 330 | p2node=p2, |
|
330 | 331 | linknode=self.linknode(node), |
|
331 | 332 | basenode=basenode, |
|
332 | 333 | flags=self.flags(node), |
|
333 | 334 | baserevisionsize=None, |
|
334 | 335 | revision=revision, |
|
335 | 336 | delta=delta, |
|
337 | # Sidedata is not supported yet | |
|
338 | sidedata=None, | |
|
336 | 339 | ) |
|
337 | 340 | |
|
338 | 341 | def revdiff(self, node1, node2): |
|
339 | 342 | return mdiff.textdiff(self.rawdata(node1), self.rawdata(node2)) |
|
340 | 343 | |
|
341 | 344 | def lookup(self, node): |
|
342 | 345 | if len(node) == 40: |
|
343 | 346 | node = bin(node) |
|
344 | 347 | if len(node) != 20: |
|
345 | 348 | raise error.LookupError( |
|
346 | 349 | node, self.filename, _(b'invalid lookup input') |
|
347 | 350 | ) |
|
348 | 351 | |
|
349 | 352 | return node |
|
350 | 353 | |
|
351 | 354 | def rev(self, node): |
|
352 | 355 | # This is a hack to make TortoiseHG work. |
|
353 | 356 | return node |
|
354 | 357 | |
|
355 | 358 | def node(self, rev): |
|
356 | 359 | # This is a hack. |
|
357 | 360 | if isinstance(rev, int): |
|
358 | 361 | raise error.ProgrammingError( |
|
359 | 362 | b'remotefilelog does not convert integer rev to node' |
|
360 | 363 | ) |
|
361 | 364 | return rev |
|
362 | 365 | |
|
363 | 366 | def _processflags(self, text, flags, operation, raw=False): |
|
364 | 367 | """deprecated entry point to access flag processors""" |
|
365 | 368 | msg = b'_processflags(...) use the specialized variant' |
|
366 | 369 | util.nouideprecwarn(msg, b'5.2', stacklevel=2) |
|
367 | 370 | if raw: |
|
368 | 371 | return text, flagutil.processflagsraw(self, text, flags) |
|
369 | 372 | elif operation == b'read': |
|
370 | 373 | return flagutil.processflagsread(self, text, flags) |
|
371 | 374 | else: # write operation |
|
372 | 375 | return flagutil.processflagswrite(self, text, flags) |
|
373 | 376 | |
|
374 | 377 | def revision(self, node, raw=False): |
|
375 | 378 | """returns the revlog contents at this node. |
|
376 | 379 | this includes the meta data traditionally included in file revlogs. |
|
377 | 380 | this is generally only used for bundling and communicating with vanilla |
|
378 | 381 | hg clients. |
|
379 | 382 | """ |
|
380 | 383 | if node == nullid: |
|
381 | 384 | return b"" |
|
382 | 385 | if len(node) != 20: |
|
383 | 386 | raise error.LookupError( |
|
384 | 387 | node, self.filename, _(b'invalid revision input') |
|
385 | 388 | ) |
|
386 | 389 | if node == wdirid or node in wdirfilenodeids: |
|
387 | 390 | raise error.WdirUnsupported |
|
388 | 391 | |
|
389 | 392 | store = self.repo.contentstore |
|
390 | 393 | rawtext = store.get(self.filename, node) |
|
391 | 394 | if raw: |
|
392 | 395 | return rawtext |
|
393 | 396 | flags = store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0) |
|
394 | 397 | if flags == 0: |
|
395 | 398 | return rawtext |
|
396 | 399 | return flagutil.processflagsread(self, rawtext, flags)[0] |
|
397 | 400 | |
|
398 | 401 | def rawdata(self, node): |
|
399 | 402 | return self.revision(node, raw=False) |
|
400 | 403 | |
|
401 | 404 | def _read(self, id): |
|
402 | 405 | """reads the raw file blob from disk, cache, or server""" |
|
403 | 406 | fileservice = self.repo.fileservice |
|
404 | 407 | localcache = fileservice.localcache |
|
405 | 408 | cachekey = fileserverclient.getcachekey( |
|
406 | 409 | self.repo.name, self.filename, id |
|
407 | 410 | ) |
|
408 | 411 | try: |
|
409 | 412 | return localcache.read(cachekey) |
|
410 | 413 | except KeyError: |
|
411 | 414 | pass |
|
412 | 415 | |
|
413 | 416 | localkey = fileserverclient.getlocalkey(self.filename, id) |
|
414 | 417 | localpath = os.path.join(self.localpath, localkey) |
|
415 | 418 | try: |
|
416 | 419 | return shallowutil.readfile(localpath) |
|
417 | 420 | except IOError: |
|
418 | 421 | pass |
|
419 | 422 | |
|
420 | 423 | fileservice.prefetch([(self.filename, id)]) |
|
421 | 424 | try: |
|
422 | 425 | return localcache.read(cachekey) |
|
423 | 426 | except KeyError: |
|
424 | 427 | pass |
|
425 | 428 | |
|
426 | 429 | raise error.LookupError(id, self.filename, _(b'no node')) |
|
427 | 430 | |
|
428 | 431 | def ancestormap(self, node): |
|
429 | 432 | return self.repo.metadatastore.getancestors(self.filename, node) |
|
430 | 433 | |
|
431 | 434 | def ancestor(self, a, b): |
|
432 | 435 | if a == nullid or b == nullid: |
|
433 | 436 | return nullid |
|
434 | 437 | |
|
435 | 438 | revmap, parentfunc = self._buildrevgraph(a, b) |
|
436 | 439 | nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)} |
|
437 | 440 | |
|
438 | 441 | ancs = ancestor.ancestors(parentfunc, revmap[a], revmap[b]) |
|
439 | 442 | if ancs: |
|
440 | 443 | # choose a consistent winner when there's a tie |
|
441 | 444 | return min(map(nodemap.__getitem__, ancs)) |
|
442 | 445 | return nullid |
|
443 | 446 | |
|
444 | 447 | def commonancestorsheads(self, a, b): |
|
445 | 448 | """calculate all the heads of the common ancestors of nodes a and b""" |
|
446 | 449 | |
|
447 | 450 | if a == nullid or b == nullid: |
|
448 | 451 | return nullid |
|
449 | 452 | |
|
450 | 453 | revmap, parentfunc = self._buildrevgraph(a, b) |
|
451 | 454 | nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)} |
|
452 | 455 | |
|
453 | 456 | ancs = ancestor.commonancestorsheads(parentfunc, revmap[a], revmap[b]) |
|
454 | 457 | return map(nodemap.__getitem__, ancs) |
|
455 | 458 | |
|
456 | 459 | def _buildrevgraph(self, a, b): |
|
457 | 460 | """Builds a numeric revision graph for the given two nodes. |
|
458 | 461 | Returns a node->rev map and a rev->[revs] parent function. |
|
459 | 462 | """ |
|
460 | 463 | amap = self.ancestormap(a) |
|
461 | 464 | bmap = self.ancestormap(b) |
|
462 | 465 | |
|
463 | 466 | # Union the two maps |
|
464 | 467 | parentsmap = collections.defaultdict(list) |
|
465 | 468 | allparents = set() |
|
466 | 469 | for mapping in (amap, bmap): |
|
467 | 470 | for node, pdata in pycompat.iteritems(mapping): |
|
468 | 471 | parents = parentsmap[node] |
|
469 | 472 | p1, p2, linknode, copyfrom = pdata |
|
470 | 473 | # Don't follow renames (copyfrom). |
|
471 | 474 | # remotefilectx.ancestor does that. |
|
472 | 475 | if p1 != nullid and not copyfrom: |
|
473 | 476 | parents.append(p1) |
|
474 | 477 | allparents.add(p1) |
|
475 | 478 | if p2 != nullid: |
|
476 | 479 | parents.append(p2) |
|
477 | 480 | allparents.add(p2) |
|
478 | 481 | |
|
479 | 482 | # Breadth first traversal to build linkrev graph |
|
480 | 483 | parentrevs = collections.defaultdict(list) |
|
481 | 484 | revmap = {} |
|
482 | 485 | queue = collections.deque( |
|
483 | 486 | ((None, n) for n in parentsmap if n not in allparents) |
|
484 | 487 | ) |
|
485 | 488 | while queue: |
|
486 | 489 | prevrev, current = queue.pop() |
|
487 | 490 | if current in revmap: |
|
488 | 491 | if prevrev: |
|
489 | 492 | parentrevs[prevrev].append(revmap[current]) |
|
490 | 493 | continue |
|
491 | 494 | |
|
492 | 495 | # Assign linkrevs in reverse order, so start at |
|
493 | 496 | # len(parentsmap) and work backwards. |
|
494 | 497 | currentrev = len(parentsmap) - len(revmap) - 1 |
|
495 | 498 | revmap[current] = currentrev |
|
496 | 499 | |
|
497 | 500 | if prevrev: |
|
498 | 501 | parentrevs[prevrev].append(currentrev) |
|
499 | 502 | |
|
500 | 503 | for parent in parentsmap.get(current): |
|
501 | 504 | queue.appendleft((currentrev, parent)) |
|
502 | 505 | |
|
503 | 506 | return revmap, parentrevs.__getitem__ |
|
504 | 507 | |
|
505 | 508 | def strip(self, minlink, transaction): |
|
506 | 509 | pass |
|
507 | 510 | |
|
508 | 511 | # misc unused things |
|
509 | 512 | def files(self): |
|
510 | 513 | return [] |
|
511 | 514 | |
|
512 | 515 | def checksize(self): |
|
513 | 516 | return 0, 0 |
@@ -1,303 +1,307 @@
|
1 | 1 | # shallowbundle.py - bundle10 implementation for use with shallow repositories |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2013 Facebook, Inc. |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | from __future__ import absolute_import |
|
8 | 8 | |
|
9 | 9 | from mercurial.i18n import _ |
|
10 | 10 | from mercurial.node import bin, hex, nullid |
|
11 | 11 | from mercurial import ( |
|
12 | 12 | bundlerepo, |
|
13 | 13 | changegroup, |
|
14 | 14 | error, |
|
15 | 15 | match, |
|
16 | 16 | mdiff, |
|
17 | 17 | pycompat, |
|
18 | 18 | ) |
|
19 | 19 | from . import ( |
|
20 | 20 | constants, |
|
21 | 21 | remotefilelog, |
|
22 | 22 | shallowutil, |
|
23 | 23 | ) |
|
24 | 24 | |
|
25 | 25 | NoFiles = 0 |
|
26 | 26 | LocalFiles = 1 |
|
27 | 27 | AllFiles = 2 |
|
28 | 28 | |
|
29 | 29 | |
|
30 | 30 | def shallowgroup(cls, self, nodelist, rlog, lookup, units=None, reorder=None): |
|
31 | 31 | if not isinstance(rlog, remotefilelog.remotefilelog): |
|
32 | 32 | for c in super(cls, self).group(nodelist, rlog, lookup, units=units): |
|
33 | 33 | yield c |
|
34 | 34 | return |
|
35 | 35 | |
|
36 | 36 | if len(nodelist) == 0: |
|
37 | 37 | yield self.close() |
|
38 | 38 | return |
|
39 | 39 | |
|
40 | 40 | nodelist = shallowutil.sortnodes(nodelist, rlog.parents) |
|
41 | 41 | |
|
42 | 42 | # add the parent of the first rev |
|
43 | 43 | p = rlog.parents(nodelist[0])[0] |
|
44 | 44 | nodelist.insert(0, p) |
|
45 | 45 | |
|
46 | 46 | # build deltas |
|
47 | 47 | for i in pycompat.xrange(len(nodelist) - 1): |
|
48 | 48 | prev, curr = nodelist[i], nodelist[i + 1] |
|
49 | 49 | linknode = lookup(curr) |
|
50 | 50 | for c in self.nodechunk(rlog, curr, prev, linknode): |
|
51 | 51 | yield c |
|
52 | 52 | |
|
53 | 53 | yield self.close() |
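
shallowutil.sortnodes (used above) orders the nodes parents-first so that every delta's base precedes it in the stream. A self-contained sketch of that ordering, with parentsfn standing in for rlog.parents (illustrative only, not the real helper):

    def topsort(nodes, parentsfn):
        nodes = set(nodes)
        order = []
        seen = set()

        def visit(n):
            # Depth-first: emit a node only after any parents that are
            # themselves in the requested set have been emitted.
            if n in seen or n not in nodes:
                return
            seen.add(n)
            for p in parentsfn(n):
                visit(p)
            order.append(n)

        for n in sorted(nodes):
            visit(n)
        return order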
|
54 | 54 | |
|
55 | 55 | |
|
56 | 56 | class shallowcg1packer(changegroup.cgpacker): |
|
57 | 57 | def generate(self, commonrevs, clnodes, fastpathlinkrev, source, **kwargs): |
|
58 | 58 | if shallowutil.isenabled(self._repo): |
|
59 | 59 | fastpathlinkrev = False |
|
60 | 60 | |
|
61 | 61 | return super(shallowcg1packer, self).generate( |
|
62 | 62 | commonrevs, clnodes, fastpathlinkrev, source, **kwargs |
|
63 | 63 | ) |
|
64 | 64 | |
|
65 | 65 | def group(self, nodelist, rlog, lookup, units=None, reorder=None): |
|
66 | 66 | return shallowgroup( |
|
67 | 67 | shallowcg1packer, self, nodelist, rlog, lookup, units=units |
|
68 | 68 | ) |
|
69 | 69 | |
|
70 | def generatefiles(self, changedfiles, *args): | |
|
70 | def generatefiles(self, changedfiles, *args, **kwargs): | |
|
71 | 71 | try: |
|
72 | 72 | linknodes, commonrevs, source = args |
|
73 | 73 | except ValueError: |
|
74 | 74 | commonrevs, source, mfdicts, fastpathlinkrev, fnodes, clrevs = args |
|
75 | 75 | if shallowutil.isenabled(self._repo): |
|
76 | 76 | repo = self._repo |
|
77 | 77 | if isinstance(repo, bundlerepo.bundlerepository): |
|
78 | 78 | # If the bundle contains filelogs, we can't pull from it, since |
|
79 | 79 | # bundlerepo is heavily tied to revlogs. Require that the |

80 | 80 | # user run `hg unbundle` instead. |
|
81 | 81 | # Force load the filelog data. |
|
82 | 82 | bundlerepo.bundlerepository.file(repo, b'foo') |
|
83 | 83 | if repo._cgfilespos: |
|
84 | 84 | raise error.Abort( |
|
85 | 85 | b"cannot pull from full bundles", |
|
86 | 86 | hint=b"use `hg unbundle` instead", |
|
87 | 87 | ) |
|
88 | 88 | return [] |
|
89 | 89 | filestosend = self.shouldaddfilegroups(source) |
|
90 | 90 | if filestosend == NoFiles: |
|
91 | 91 | changedfiles = list( |
|
92 | 92 | [f for f in changedfiles if not repo.shallowmatch(f)] |
|
93 | 93 | ) |
|
94 | 94 | |
|
95 | return super(shallowcg1packer, self).generatefiles(changedfiles, *args) | |

95 | return super(shallowcg1packer, self).generatefiles( | |

96 | changedfiles, *args, **kwargs | |

97 | ) | |
|
96 | 98 | |
|
97 | 99 | def shouldaddfilegroups(self, source): |
|
98 | 100 | repo = self._repo |
|
99 | 101 | if not shallowutil.isenabled(repo): |
|
100 | 102 | return AllFiles |
|
101 | 103 | |
|
102 | 104 | if source == b"push" or source == b"bundle": |
|
103 | 105 | return AllFiles |
|
104 | 106 | |
|
105 | 107 | caps = self._bundlecaps or [] |
|
106 | 108 | if source == b"serve" or source == b"pull": |
|
107 | 109 | if constants.BUNDLE2_CAPABLITY in caps: |
|
108 | 110 | return LocalFiles |
|
109 | 111 | else: |
|
110 | 112 | # Serving to a full repo requires us to serve everything |
|
111 | 113 | repo.ui.warn(_(b"pulling from a shallow repo\n")) |
|
112 | 114 | return AllFiles |
|
113 | 115 | |
|
114 | 116 | return NoFiles |
|
115 | 117 | |
|
116 | 118 | def prune(self, rlog, missing, commonrevs): |
|
117 | 119 | if not isinstance(rlog, remotefilelog.remotefilelog): |
|
118 | 120 | return super(shallowcg1packer, self).prune( |
|
119 | 121 | rlog, missing, commonrevs |
|
120 | 122 | ) |
|
121 | 123 | |
|
122 | 124 | repo = self._repo |
|
123 | 125 | results = [] |
|
124 | 126 | for fnode in missing: |
|
125 | 127 | fctx = repo.filectx(rlog.filename, fileid=fnode) |
|
126 | 128 | if fctx.linkrev() not in commonrevs: |
|
127 | 129 | results.append(fnode) |
|
128 | 130 | return results |
|
129 | 131 | |
|
130 | 132 | def nodechunk(self, revlog, node, prevnode, linknode): |
|
131 | 133 | prefix = b'' |
|
132 | 134 | if prevnode == nullid: |
|
133 | 135 | delta = revlog.rawdata(node) |
|
134 | 136 | prefix = mdiff.trivialdiffheader(len(delta)) |
|
135 | 137 | else: |
|
136 | 138 | # Actually uses remotefilelog.revdiff which works on nodes, not revs |
|
137 | 139 | delta = revlog.revdiff(prevnode, node) |
|
138 | 140 | p1, p2 = revlog.parents(node) |
|
139 | 141 | flags = revlog.flags(node) |
|
140 | 142 | meta = self.builddeltaheader(node, p1, p2, prevnode, linknode, flags) |
|
141 | 143 | meta += prefix |
|
142 | 144 | l = len(meta) + len(delta) |
|
143 | 145 | yield changegroup.chunkheader(l) |
|
144 | 146 | yield meta |
|
145 | 147 | yield delta |
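
The framing emitted here is the standard changegroup one: changegroup.chunkheader is a 4-byte big-endian length prefix that counts itself, followed by the delta header and the delta payload. The equivalent framing spelled out by hand (a sketch; real code should keep calling chunkheader):

    import struct

    def framechunk(meta, delta):
        payload = meta + delta
        # The ">l" length prefix includes its own 4 bytes.
        return struct.pack('>l', len(payload) + 4) + payload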
|
146 | 148 | |
|
147 | 149 | |
|
148 | 150 | def makechangegroup(orig, repo, outgoing, version, source, *args, **kwargs): |
|
149 | 151 | if not shallowutil.isenabled(repo): |
|
150 | 152 | return orig(repo, outgoing, version, source, *args, **kwargs) |
|
151 | 153 | |
|
152 | 154 | original = repo.shallowmatch |
|
153 | 155 | try: |
|
154 | 156 | # if serving, only send files the client has patterns for |
|
155 | 157 | if source == b'serve': |
|
156 | 158 | bundlecaps = kwargs.get('bundlecaps') |
|
157 | 159 | includepattern = None |
|
158 | 160 | excludepattern = None |
|
159 | 161 | for cap in bundlecaps or []: |
|
160 | 162 | if cap.startswith(b"includepattern="): |
|
161 | 163 | raw = cap[len(b"includepattern=") :] |
|
162 | 164 | if raw: |
|
163 | 165 | includepattern = raw.split(b'\0') |
|
164 | 166 | elif cap.startswith(b"excludepattern="): |
|
165 | 167 | raw = cap[len(b"excludepattern=") :] |
|
166 | 168 | if raw: |
|
167 | 169 | excludepattern = raw.split(b'\0') |
|
168 | 170 | if includepattern or excludepattern: |
|
169 | 171 | repo.shallowmatch = match.match( |
|
170 | 172 | repo.root, b'', None, includepattern, excludepattern |
|
171 | 173 | ) |
|
172 | 174 | else: |
|
173 | 175 | repo.shallowmatch = match.always() |
|
174 | 176 | return orig(repo, outgoing, version, source, *args, **kwargs) |
|
175 | 177 | finally: |
|
176 | 178 | repo.shallowmatch = original |
|
177 | 179 | |
|
178 | 180 | |
|
179 | def addchangegroupfiles(orig, repo, source, revmap, trp, expectedfiles, *args): | |
|
181 | def addchangegroupfiles( | |
|
182 | orig, repo, source, revmap, trp, expectedfiles, *args, **kwargs | |
|
183 | ): | |
|
180 | 184 | if not shallowutil.isenabled(repo): |
|
181 | return orig(repo, source, revmap, trp, expectedfiles, *args) | |
|
185 | return orig(repo, source, revmap, trp, expectedfiles, *args, **kwargs) | |
|
182 | 186 | |
|
183 | 187 | newfiles = 0 |
|
184 | 188 | visited = set() |
|
185 | 189 | revisiondatas = {} |
|
186 | 190 | queue = [] |
|
187 | 191 | |
|
188 | 192 | # Normal Mercurial processes each file one at a time, adding all |
|
189 | 193 | # the new revisions for that file at once. In remotefilelog a file |
|
190 | 194 | # revision may depend on a different file's revision (in the case |
|
191 | 195 | # of a rename/copy), so we must lay all revisions down across all |
|
192 | 196 | # files in topological order. |
|
193 | 197 | |
|
194 | 198 | # read all the file chunks but don't add them |
|
195 | 199 | progress = repo.ui.makeprogress(_(b'files'), total=expectedfiles) |
|
196 | 200 | while True: |
|
197 | 201 | chunkdata = source.filelogheader() |
|
198 | 202 | if not chunkdata: |
|
199 | 203 | break |
|
200 | 204 | f = chunkdata[b"filename"] |
|
201 | 205 | repo.ui.debug(b"adding %s revisions\n" % f) |
|
202 | 206 | progress.increment() |
|
203 | 207 | |
|
204 | 208 | if not repo.shallowmatch(f): |
|
205 | 209 | fl = repo.file(f) |
|
206 | 210 | deltas = source.deltaiter() |
|
207 | 211 | fl.addgroup(deltas, revmap, trp) |
|
208 | 212 | continue |
|
209 | 213 | |
|
210 | 214 | chain = None |
|
211 | 215 | while True: |
|
212 | 216 | # returns: (node, p1, p2, cs, deltabase, delta, flags) or None |
|
213 | 217 | revisiondata = source.deltachunk(chain) |
|
214 | 218 | if not revisiondata: |
|
215 | 219 | break |
|
216 | 220 | |
|
217 | 221 | chain = revisiondata[0] |
|
218 | 222 | |
|
219 | 223 | revisiondatas[(f, chain)] = revisiondata |
|
220 | 224 | queue.append((f, chain)) |
|
221 | 225 | |
|
222 | 226 | if f not in visited: |
|
223 | 227 | newfiles += 1 |
|
224 | 228 | visited.add(f) |
|
225 | 229 | |
|
226 | 230 | if chain is None: |
|
227 | 231 | raise error.Abort(_(b"received file revlog group is empty")) |
|
228 | 232 | |
|
229 | 233 | processed = set() |
|
230 | 234 | |
|
231 | 235 | def available(f, node, depf, depnode): |
|
232 | 236 | if depnode != nullid and (depf, depnode) not in processed: |
|
233 | 237 | if (depf, depnode) not in revisiondatas: |
|
234 | 238 | # It's not in the changegroup, assume it's already |
|
235 | 239 | # in the repo |
|
236 | 240 | return True |
|
237 | 241 | # re-add self to queue |
|
238 | 242 | queue.insert(0, (f, node)) |
|
239 | 243 | # add dependency in front |
|
240 | 244 | queue.insert(0, (depf, depnode)) |
|
241 | 245 | return False |
|
242 | 246 | return True |
|
243 | 247 | |
|
244 | 248 | skipcount = 0 |
|
245 | 249 | |
|
246 | 250 | # Prefetch the non-bundled revisions that we will need |
|
247 | 251 | prefetchfiles = [] |
|
248 | 252 | for f, node in queue: |
|
249 | 253 | revisiondata = revisiondatas[(f, node)] |
|
250 | 254 | # revisiondata: (node, p1, p2, cs, deltabase, delta, flags) |
|
251 | 255 | dependents = [revisiondata[1], revisiondata[2], revisiondata[4]] |
|
252 | 256 | |
|
253 | 257 | for dependent in dependents: |
|
254 | 258 | if dependent == nullid or (f, dependent) in revisiondatas: |
|
255 | 259 | continue |
|
256 | 260 | prefetchfiles.append((f, hex(dependent))) |
|
257 | 261 | |
|
258 | 262 | repo.fileservice.prefetch(prefetchfiles) |
|
259 | 263 | |
|
260 | 264 | # Apply the revisions in topological order such that a revision |
|
261 | 265 | # is only written once its deltabase and parents have been written. |
|
262 | 266 | while queue: |
|
263 | 267 | f, node = queue.pop(0) |
|
264 | 268 | if (f, node) in processed: |
|
265 | 269 | continue |
|
266 | 270 | |
|
267 | 271 | skipcount += 1 |
|
268 | 272 | if skipcount > len(queue) + 1: |
|
269 | 273 | raise error.Abort(_(b"circular node dependency")) |
|
270 | 274 | |
|
271 | 275 | fl = repo.file(f) |
|
272 | 276 | |
|
273 | 277 | revisiondata = revisiondatas[(f, node)] |
|
274 | 278 | # revisiondata: (node, p1, p2, cs, deltabase, delta, flags) |
|
275 | node, p1, p2, linknode, deltabase, delta, flags = revisiondata | |
|
279 | node, p1, p2, linknode, deltabase, delta, flags, sidedata = revisiondata | |
|
276 | 280 | |
|
277 | 281 | if not available(f, node, f, deltabase): |
|
278 | 282 | continue |
|
279 | 283 | |
|
280 | 284 | base = fl.rawdata(deltabase) |
|
281 | 285 | text = mdiff.patch(base, delta) |
|
282 | 286 | if not isinstance(text, bytes): |
|
283 | 287 | text = bytes(text) |
|
284 | 288 | |
|
285 | 289 | meta, text = shallowutil.parsemeta(text) |
|
286 | 290 | if b'copy' in meta: |
|
287 | 291 | copyfrom = meta[b'copy'] |
|
288 | 292 | copynode = bin(meta[b'copyrev']) |
|
289 | 293 | if not available(f, node, copyfrom, copynode): |
|
290 | 294 | continue |
|
291 | 295 | |
|
292 | 296 | for p in [p1, p2]: |
|
293 | 297 | if p != nullid: |
|
294 | 298 | if not available(f, node, f, p): |
|
295 | 299 | continue |
|
296 | 300 | |
|
297 | 301 | fl.add(text, meta, trp, linknode, p1, p2) |
|
298 | 302 | processed.add((f, node)) |
|
299 | 303 | skipcount = 0 |
|
300 | 304 | |
|
301 | 305 | progress.complete() |
|
302 | 306 | |
|
303 | 307 | return len(revisiondatas), newfiles |
@@ -1,1301 +1,1310 @@
|
1 | 1 | # sqlitestore.py - Storage backend that uses SQLite |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | """store repository data in SQLite (EXPERIMENTAL) |
|
9 | 9 | |
|
10 | 10 | The sqlitestore extension enables the storage of repository data in SQLite. |
|
11 | 11 | |
|
12 | 12 | This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY |
|
13 | 13 | GUARANTEES. This means that repositories created with this extension may |
|
14 | 14 | only be usable with the exact version of this extension/Mercurial that was |
|
15 | 15 | used. The extension attempts to enforce this in order to prevent repository |
|
16 | 16 | corruption. |
|
17 | 17 | |
|
18 | 18 | In addition, several features are not yet supported or have known bugs: |
|
19 | 19 | |
|
20 | 20 | * Only some data is stored in SQLite. Changeset, manifest, and other repository |
|
21 | 21 | data is not yet stored in SQLite. |
|
22 | 22 | * Transactions are not robust. If the process is aborted at the right time |
|
23 | 23 | during transaction close/rollback, the repository could be in an inconsistent |
|
24 | 24 | state. This problem will diminish once all repository data is tracked by |
|
25 | 25 | SQLite. |
|
26 | 26 | * Bundle repositories do not work (the ability to use e.g. |
|
27 | 27 | `hg -R <bundle-file> log` to automatically overlay a bundle on top of the |
|
28 | 28 | existing repository). |
|
29 | 29 | * Various other features don't work. |
|
30 | 30 | |
|
31 | 31 | This extension should work for basic clone/pull, update, and commit workflows. |
|
32 | 32 | Some history rewriting operations may fail due to lack of support for bundle |
|
33 | 33 | repositories. |
|
34 | 34 | |
|
35 | 35 | To use, activate the extension and set the ``storage.new-repo-backend`` config |
|
36 | 36 | option to ``sqlite`` to enable new repositories to use SQLite for storage. |
|
37 | 37 | """ |
|
38 | 38 | |
|
39 | 39 | # To run the test suite with repos using SQLite by default, execute the |
|
40 | 40 | # following: |
|
41 | 41 | # |
|
42 | 42 | # HGREPOFEATURES="sqlitestore" run-tests.py \ |
|
43 | 43 | # --extra-config-opt extensions.sqlitestore= \ |
|
44 | 44 | # --extra-config-opt storage.new-repo-backend=sqlite |
|
45 | 45 | |
|
46 | 46 | from __future__ import absolute_import |
|
47 | 47 | |
|
48 | 48 | import sqlite3 |
|
49 | 49 | import struct |
|
50 | 50 | import threading |
|
51 | 51 | import zlib |
|
52 | 52 | |
|
53 | 53 | from mercurial.i18n import _ |
|
54 | 54 | from mercurial.node import ( |
|
55 | 55 | nullid, |
|
56 | 56 | nullrev, |
|
57 | 57 | short, |
|
58 | 58 | ) |
|
59 | 59 | from mercurial.thirdparty import attr |
|
60 | 60 | from mercurial import ( |
|
61 | 61 | ancestor, |
|
62 | 62 | dagop, |
|
63 | 63 | encoding, |
|
64 | 64 | error, |
|
65 | 65 | extensions, |
|
66 | 66 | localrepo, |
|
67 | 67 | mdiff, |
|
68 | 68 | pycompat, |
|
69 | 69 | registrar, |
|
70 | 70 | requirements, |
|
71 | 71 | util, |
|
72 | 72 | verify, |
|
73 | 73 | ) |
|
74 | 74 | from mercurial.interfaces import ( |
|
75 | 75 | repository, |
|
76 | 76 | util as interfaceutil, |
|
77 | 77 | ) |
|
78 | 78 | from mercurial.utils import ( |
|
79 | 79 | hashutil, |
|
80 | 80 | storageutil, |
|
81 | 81 | ) |
|
82 | 82 | |
|
83 | 83 | try: |
|
84 | 84 | from mercurial import zstd |
|
85 | 85 | |
|
86 | 86 | zstd.__version__ |
|
87 | 87 | except ImportError: |
|
88 | 88 | zstd = None |
|
89 | 89 | |
|
90 | 90 | configtable = {} |
|
91 | 91 | configitem = registrar.configitem(configtable) |
|
92 | 92 | |
|
93 | 93 | # experimental config: storage.sqlite.compression |
|
94 | 94 | configitem( |
|
95 | 95 | b'storage', |
|
96 | 96 | b'sqlite.compression', |
|
97 | 97 | default=b'zstd' if zstd else b'zlib', |
|
98 | 98 | experimental=True, |
|
99 | 99 | ) |
|
100 | 100 | |
|
101 | 101 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
102 | 102 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
103 | 103 | # be specifying the version(s) of Mercurial they are tested with, or |
|
104 | 104 | # leave the attribute unspecified. |
|
105 | 105 | testedwith = b'ships-with-hg-core' |
|
106 | 106 | |
|
107 | 107 | REQUIREMENT = b'exp-sqlite-001' |
|
108 | 108 | REQUIREMENT_ZSTD = b'exp-sqlite-comp-001=zstd' |
|
109 | 109 | REQUIREMENT_ZLIB = b'exp-sqlite-comp-001=zlib' |
|
110 | 110 | REQUIREMENT_NONE = b'exp-sqlite-comp-001=none' |
|
111 | 111 | REQUIREMENT_SHALLOW_FILES = b'exp-sqlite-shallow-files' |
|
112 | 112 | |
|
113 | 113 | CURRENT_SCHEMA_VERSION = 1 |
|
114 | 114 | |
|
115 | 115 | COMPRESSION_NONE = 1 |
|
116 | 116 | COMPRESSION_ZSTD = 2 |
|
117 | 117 | COMPRESSION_ZLIB = 3 |
|
118 | 118 | |
|
119 | 119 | FLAG_CENSORED = 1 |
|
120 | 120 | FLAG_MISSING_P1 = 2 |
|
121 | 121 | FLAG_MISSING_P2 = 4 |
|
122 | 122 | |
|
123 | 123 | CREATE_SCHEMA = [ |
|
124 | 124 | # Deltas are stored as content-indexed blobs. |
|
125 | 125 | # compression column holds COMPRESSION_* constant for how the |
|
126 | 126 | # delta is encoded. |
|
127 | 127 | 'CREATE TABLE delta (' |
|
128 | 128 | ' id INTEGER PRIMARY KEY, ' |
|
129 | 129 | ' compression INTEGER NOT NULL, ' |
|
130 | 130 | ' hash BLOB UNIQUE ON CONFLICT ABORT, ' |
|
131 | 131 | ' delta BLOB NOT NULL ' |
|
132 | 132 | ')', |
|
133 | 133 | # Tracked paths are denormalized to integers to avoid redundant |
|
134 | 134 | # storage of the path name. |
|
135 | 135 | 'CREATE TABLE filepath (' |
|
136 | 136 | ' id INTEGER PRIMARY KEY, ' |
|
137 | 137 | ' path BLOB NOT NULL ' |
|
138 | 138 | ')', |
|
139 | 139 | 'CREATE UNIQUE INDEX filepath_path ON filepath (path)', |
|
140 | 140 | # We have a single table for all file revision data. |
|
141 | 141 | # Each file revision is uniquely described by a (path, rev) and |
|
142 | 142 | # (path, node). |
|
143 | 143 | # |
|
144 | 144 | # Revision data is stored as a pointer to the delta producing this |
|
145 | 145 | # revision and the file revision whose delta should be applied before |
|
146 | 146 | # that one. One can reconstruct the delta chain by recursively following |
|
147 | 147 | # the delta base revision pointers until one encounters NULL. |
|
148 | 148 | # |
|
149 | 149 | # flags column holds bitwise integer flags controlling storage options. |
|
150 | 150 | # These flags are defined by the FLAG_* constants. |
|
151 | 151 | 'CREATE TABLE fileindex (' |
|
152 | 152 | ' id INTEGER PRIMARY KEY, ' |
|
153 | 153 | ' pathid INTEGER REFERENCES filepath(id), ' |
|
154 | 154 | ' revnum INTEGER NOT NULL, ' |
|
155 | 155 | ' p1rev INTEGER NOT NULL, ' |
|
156 | 156 | ' p2rev INTEGER NOT NULL, ' |
|
157 | 157 | ' linkrev INTEGER NOT NULL, ' |
|
158 | 158 | ' flags INTEGER NOT NULL, ' |
|
159 | 159 | ' deltaid INTEGER REFERENCES delta(id), ' |
|
160 | 160 | ' deltabaseid INTEGER REFERENCES fileindex(id), ' |
|
161 | 161 | ' node BLOB NOT NULL ' |
|
162 | 162 | ')', |
|
163 | 163 | 'CREATE UNIQUE INDEX fileindex_pathrevnum ' |
|
164 | 164 | ' ON fileindex (pathid, revnum)', |
|
165 | 165 | 'CREATE UNIQUE INDEX fileindex_pathnode ON fileindex (pathid, node)', |
|
166 | 166 | # Provide a view over all file data for convenience. |
|
167 | 167 | 'CREATE VIEW filedata AS ' |
|
168 | 168 | 'SELECT ' |
|
169 | 169 | ' fileindex.id AS id, ' |
|
170 | 170 | ' filepath.id AS pathid, ' |
|
171 | 171 | ' filepath.path AS path, ' |
|
172 | 172 | ' fileindex.revnum AS revnum, ' |
|
173 | 173 | ' fileindex.node AS node, ' |
|
174 | 174 | ' fileindex.p1rev AS p1rev, ' |
|
175 | 175 | ' fileindex.p2rev AS p2rev, ' |
|
176 | 176 | ' fileindex.linkrev AS linkrev, ' |
|
177 | 177 | ' fileindex.flags AS flags, ' |
|
178 | 178 | ' fileindex.deltaid AS deltaid, ' |
|
179 | 179 | ' fileindex.deltabaseid AS deltabaseid ' |
|
180 | 180 | 'FROM filepath, fileindex ' |
|
181 | 181 | 'WHERE fileindex.pathid=filepath.id', |
|
182 | 182 | 'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION, |
|
183 | 183 | ] |
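
To make the fileindex/delta relationship concrete: rebuilding a fulltext means following deltabaseid pointers until NULL and then applying the collected deltas oldest-first, which is what resolvedeltachain below does in SQL. The same walk in plain Python (entries is a hypothetical id -> (deltaid, deltabaseid) map and patchfn stands in for mdiff.patches):

    def chainfulltext(entries, deltablobs, rid, patchfn):
        chain = []
        while rid is not None:
            deltaid, rid = entries[rid]
            chain.append(deltablobs[deltaid])
        # The bottom of the chain is a full snapshot, not a delta.
        base = chain.pop()
        chain.reverse()
        return patchfn(base, chain)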
|
184 | 184 | |
|
185 | 185 | |
|
186 | 186 | def resolvedeltachain(db, pathid, node, revisioncache, stoprids, zstddctx=None): |
|
187 | 187 | """Resolve a delta chain for a file node.""" |
|
188 | 188 | |
|
189 | 189 | # TODO the "not in ({stops})" here is possibly slowing down the query |
|
190 | 190 | # because it needs to perform the lookup on every recursive invocation. |
|
191 | 191 | # This could possibly be faster if we created a temporary query with |
|
192 | 192 | # baseid "poisoned" to null and limited the recursive filter to |
|
193 | 193 | # "is not null". |
|
194 | 194 | res = db.execute( |
|
195 | 195 | 'WITH RECURSIVE ' |
|
196 | 196 | ' deltachain(deltaid, baseid) AS (' |
|
197 | 197 | ' SELECT deltaid, deltabaseid FROM fileindex ' |
|
198 | 198 | ' WHERE pathid=? AND node=? ' |
|
199 | 199 | ' UNION ALL ' |
|
200 | 200 | ' SELECT fileindex.deltaid, deltabaseid ' |
|
201 | 201 | ' FROM fileindex, deltachain ' |
|
202 | 202 | ' WHERE ' |
|
203 | 203 | ' fileindex.id=deltachain.baseid ' |
|
204 | 204 | ' AND deltachain.baseid IS NOT NULL ' |
|
205 | 205 | ' AND fileindex.id NOT IN ({stops}) ' |
|
206 | 206 | ' ) ' |
|
207 | 207 | 'SELECT deltachain.baseid, compression, delta ' |
|
208 | 208 | 'FROM deltachain, delta ' |
|
209 | 209 | 'WHERE delta.id=deltachain.deltaid'.format( |
|
210 | 210 | stops=','.join(['?'] * len(stoprids)) |
|
211 | 211 | ), |
|
212 | 212 | tuple([pathid, node] + list(stoprids.keys())), |
|
213 | 213 | ) |
|
214 | 214 | |
|
215 | 215 | deltas = [] |
|
216 | 216 | lastdeltabaseid = None |
|
217 | 217 | |
|
218 | 218 | for deltabaseid, compression, delta in res: |
|
219 | 219 | lastdeltabaseid = deltabaseid |
|
220 | 220 | |
|
221 | 221 | if compression == COMPRESSION_ZSTD: |
|
222 | 222 | delta = zstddctx.decompress(delta) |
|
223 | 223 | elif compression == COMPRESSION_NONE: |
|
224 | 224 | delta = delta |
|
225 | 225 | elif compression == COMPRESSION_ZLIB: |
|
226 | 226 | delta = zlib.decompress(delta) |
|
227 | 227 | else: |
|
228 | 228 | raise SQLiteStoreError( |
|
229 | 229 | b'unhandled compression type: %d' % compression |
|
230 | 230 | ) |
|
231 | 231 | |
|
232 | 232 | deltas.append(delta) |
|
233 | 233 | |
|
234 | 234 | if lastdeltabaseid in stoprids: |
|
235 | 235 | basetext = revisioncache[stoprids[lastdeltabaseid]] |
|
236 | 236 | else: |
|
237 | 237 | basetext = deltas.pop() |
|
238 | 238 | |
|
239 | 239 | deltas.reverse() |
|
240 | 240 | fulltext = mdiff.patches(basetext, deltas) |
|
241 | 241 | |
|
242 | 242 | # SQLite returns buffer instances for blob columns on Python 2. This |
|
243 | 243 | # type can propagate through the delta application layer. Because |
|
244 | 244 | # downstream callers assume revisions are bytes, cast as needed. |
|
245 | 245 | if not isinstance(fulltext, bytes): |
|
246 | 246 | fulltext = bytes(fulltext) |
|
247 | 247 | |
|
248 | 248 | return fulltext |
|
249 | 249 | |
|
250 | 250 | |
|
251 | 251 | def insertdelta(db, compression, hash, delta): |
|
252 | 252 | try: |
|
253 | 253 | return db.execute( |
|
254 | 254 | 'INSERT INTO delta (compression, hash, delta) VALUES (?, ?, ?)', |
|
255 | 255 | (compression, hash, delta), |
|
256 | 256 | ).lastrowid |
|
257 | 257 | except sqlite3.IntegrityError: |
|
258 | 258 | return db.execute( |
|
259 | 259 | 'SELECT id FROM delta WHERE hash=?', (hash,) |
|
260 | 260 | ).fetchone()[0] |
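
Because the hash column is UNIQUE, re-inserting an identical delta degrades into a lookup, so equal deltas are stored once and shared by id. A quick illustration against an in-memory database (reusing the delta table definition from CREATE_SCHEMA above):

    import sqlite3

    db = sqlite3.connect(':memory:')
    db.execute(
        'CREATE TABLE delta ('
        ' id INTEGER PRIMARY KEY, '
        ' compression INTEGER NOT NULL, '
        ' hash BLOB UNIQUE ON CONFLICT ABORT, '
        ' delta BLOB NOT NULL '
        ')'
    )
    first = insertdelta(db, COMPRESSION_NONE, b'h1', b'payload')
    second = insertdelta(db, COMPRESSION_NONE, b'h1', b'payload')
    assert first == second  # deduplicated to the same row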
|
261 | 261 | |
|
262 | 262 | |
|
263 | 263 | class SQLiteStoreError(error.StorageError): |
|
264 | 264 | pass |
|
265 | 265 | |
|
266 | 266 | |
|
267 | 267 | @attr.s |
|
268 | 268 | class revisionentry(object): |
|
269 | 269 | rid = attr.ib() |
|
270 | 270 | rev = attr.ib() |
|
271 | 271 | node = attr.ib() |
|
272 | 272 | p1rev = attr.ib() |
|
273 | 273 | p2rev = attr.ib() |
|
274 | 274 | p1node = attr.ib() |
|
275 | 275 | p2node = attr.ib() |
|
276 | 276 | linkrev = attr.ib() |
|
277 | 277 | flags = attr.ib() |
|
278 | 278 | |
|
279 | 279 | |
|
280 | 280 | @interfaceutil.implementer(repository.irevisiondelta) |
|
281 | 281 | @attr.s(slots=True) |
|
282 | 282 | class sqliterevisiondelta(object): |
|
283 | 283 | node = attr.ib() |
|
284 | 284 | p1node = attr.ib() |
|
285 | 285 | p2node = attr.ib() |
|
286 | 286 | basenode = attr.ib() |
|
287 | 287 | flags = attr.ib() |
|
288 | 288 | baserevisionsize = attr.ib() |
|
289 | 289 | revision = attr.ib() |
|
290 | 290 | delta = attr.ib() |
|
291 | 291 | linknode = attr.ib(default=None) |
|
292 | 292 | |
|
293 | 293 | |
|
294 | 294 | @interfaceutil.implementer(repository.iverifyproblem) |
|
295 | 295 | @attr.s(frozen=True) |
|
296 | 296 | class sqliteproblem(object): |
|
297 | 297 | warning = attr.ib(default=None) |
|
298 | 298 | error = attr.ib(default=None) |
|
299 | 299 | node = attr.ib(default=None) |
|
300 | 300 | |
|
301 | 301 | |
|
302 | 302 | @interfaceutil.implementer(repository.ifilestorage) |
|
303 | 303 | class sqlitefilestore(object): |
|
304 | 304 | """Implements storage for an individual tracked path.""" |
|
305 | 305 | |
|
306 | 306 | def __init__(self, db, path, compression): |
|
307 | 307 | self._db = db |
|
308 | 308 | self._path = path |
|
309 | 309 | |
|
310 | 310 | self._pathid = None |
|
311 | 311 | |
|
312 | 312 | # revnum -> node |
|
313 | 313 | self._revtonode = {} |
|
314 | 314 | # node -> revnum |
|
315 | 315 | self._nodetorev = {} |
|
316 | 316 | # node -> data structure |
|
317 | 317 | self._revisions = {} |
|
318 | 318 | |
|
319 | 319 | self._revisioncache = util.lrucachedict(10) |
|
320 | 320 | |
|
321 | 321 | self._compengine = compression |
|
322 | 322 | |
|
323 | 323 | if compression == b'zstd': |
|
324 | 324 | self._cctx = zstd.ZstdCompressor(level=3) |
|
325 | 325 | self._dctx = zstd.ZstdDecompressor() |
|
326 | 326 | else: |
|
327 | 327 | self._cctx = None |
|
328 | 328 | self._dctx = None |
|
329 | 329 | |
|
330 | 330 | self._refreshindex() |
|
331 | 331 | |
|
332 | 332 | def _refreshindex(self): |
|
333 | 333 | self._revtonode = {} |
|
334 | 334 | self._nodetorev = {} |
|
335 | 335 | self._revisions = {} |
|
336 | 336 | |
|
337 | 337 | res = list( |
|
338 | 338 | self._db.execute( |
|
339 | 339 | 'SELECT id FROM filepath WHERE path=?', (self._path,) |
|
340 | 340 | ) |
|
341 | 341 | ) |
|
342 | 342 | |
|
343 | 343 | if not res: |
|
344 | 344 | self._pathid = None |
|
345 | 345 | return |
|
346 | 346 | |
|
347 | 347 | self._pathid = res[0][0] |
|
348 | 348 | |
|
349 | 349 | res = self._db.execute( |
|
350 | 350 | 'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags ' |
|
351 | 351 | 'FROM fileindex ' |
|
352 | 352 | 'WHERE pathid=? ' |
|
353 | 353 | 'ORDER BY revnum ASC', |
|
354 | 354 | (self._pathid,), |
|
355 | 355 | ) |
|
356 | 356 | |
|
357 | 357 | for i, row in enumerate(res): |
|
358 | 358 | rid, rev, node, p1rev, p2rev, linkrev, flags = row |
|
359 | 359 | |
|
360 | 360 | if i != rev: |
|
361 | 361 | raise SQLiteStoreError( |
|
362 | 362 | _(b'sqlite database has inconsistent revision numbers') |
|
363 | 363 | ) |
|
364 | 364 | |
|
365 | 365 | if p1rev == nullrev: |
|
366 | 366 | p1node = nullid |
|
367 | 367 | else: |
|
368 | 368 | p1node = self._revtonode[p1rev] |
|
369 | 369 | |
|
370 | 370 | if p2rev == nullrev: |
|
371 | 371 | p2node = nullid |
|
372 | 372 | else: |
|
373 | 373 | p2node = self._revtonode[p2rev] |
|
374 | 374 | |
|
375 | 375 | entry = revisionentry( |
|
376 | 376 | rid=rid, |
|
377 | 377 | rev=rev, |
|
378 | 378 | node=node, |
|
379 | 379 | p1rev=p1rev, |
|
380 | 380 | p2rev=p2rev, |
|
381 | 381 | p1node=p1node, |
|
382 | 382 | p2node=p2node, |
|
383 | 383 | linkrev=linkrev, |
|
384 | 384 | flags=flags, |
|
385 | 385 | ) |
|
386 | 386 | |
|
387 | 387 | self._revtonode[rev] = node |
|
388 | 388 | self._nodetorev[node] = rev |
|
389 | 389 | self._revisions[node] = entry |
|
390 | 390 | |
|
391 | 391 | # Start of ifileindex interface. |
|
392 | 392 | |
|
393 | 393 | def __len__(self): |
|
394 | 394 | return len(self._revisions) |
|
395 | 395 | |
|
396 | 396 | def __iter__(self): |
|
397 | 397 | return iter(pycompat.xrange(len(self._revisions))) |
|
398 | 398 | |
|
399 | 399 | def hasnode(self, node): |
|
400 | 400 | if node == nullid: |
|
401 | 401 | return False |
|
402 | 402 | |
|
403 | 403 | return node in self._nodetorev |
|
404 | 404 | |
|
405 | 405 | def revs(self, start=0, stop=None): |
|
406 | 406 | return storageutil.iterrevs( |
|
407 | 407 | len(self._revisions), start=start, stop=stop |
|
408 | 408 | ) |
|
409 | 409 | |
|
410 | 410 | def parents(self, node): |
|
411 | 411 | if node == nullid: |
|
412 | 412 | return nullid, nullid |
|
413 | 413 | |
|
414 | 414 | if node not in self._revisions: |
|
415 | 415 | raise error.LookupError(node, self._path, _(b'no node')) |
|
416 | 416 | |
|
417 | 417 | entry = self._revisions[node] |
|
418 | 418 | return entry.p1node, entry.p2node |
|
419 | 419 | |
|
420 | 420 | def parentrevs(self, rev): |
|
421 | 421 | if rev == nullrev: |
|
422 | 422 | return nullrev, nullrev |
|
423 | 423 | |
|
424 | 424 | if rev not in self._revtonode: |
|
425 | 425 | raise IndexError(rev) |
|
426 | 426 | |
|
427 | 427 | entry = self._revisions[self._revtonode[rev]] |
|
428 | 428 | return entry.p1rev, entry.p2rev |
|
429 | 429 | |
|
430 | 430 | def rev(self, node): |
|
431 | 431 | if node == nullid: |
|
432 | 432 | return nullrev |
|
433 | 433 | |
|
434 | 434 | if node not in self._nodetorev: |
|
435 | 435 | raise error.LookupError(node, self._path, _(b'no node')) |
|
436 | 436 | |
|
437 | 437 | return self._nodetorev[node] |
|
438 | 438 | |
|
439 | 439 | def node(self, rev): |
|
440 | 440 | if rev == nullrev: |
|
441 | 441 | return nullid |
|
442 | 442 | |
|
443 | 443 | if rev not in self._revtonode: |
|
444 | 444 | raise IndexError(rev) |
|
445 | 445 | |
|
446 | 446 | return self._revtonode[rev] |
|
447 | 447 | |
|
448 | 448 | def lookup(self, node): |
|
449 | 449 | return storageutil.fileidlookup(self, node, self._path) |
|
450 | 450 | |
|
451 | 451 | def linkrev(self, rev): |
|
452 | 452 | if rev == nullrev: |
|
453 | 453 | return nullrev |
|
454 | 454 | |
|
455 | 455 | if rev not in self._revtonode: |
|
456 | 456 | raise IndexError(rev) |
|
457 | 457 | |
|
458 | 458 | entry = self._revisions[self._revtonode[rev]] |
|
459 | 459 | return entry.linkrev |
|
460 | 460 | |
|
461 | 461 | def iscensored(self, rev): |
|
462 | 462 | if rev == nullrev: |
|
463 | 463 | return False |
|
464 | 464 | |
|
465 | 465 | if rev not in self._revtonode: |
|
466 | 466 | raise IndexError(rev) |
|
467 | 467 | |
|
468 | 468 | return self._revisions[self._revtonode[rev]].flags & FLAG_CENSORED |
|
469 | 469 | |
|
470 | 470 | def commonancestorsheads(self, node1, node2): |
|
471 | 471 | rev1 = self.rev(node1) |
|
472 | 472 | rev2 = self.rev(node2) |
|
473 | 473 | |
|
474 | 474 | ancestors = ancestor.commonancestorsheads(self.parentrevs, rev1, rev2) |
|
475 | 475 | return pycompat.maplist(self.node, ancestors) |
|
476 | 476 | |
|
477 | 477 | def descendants(self, revs): |
|
478 | 478 | # TODO we could implement this using a recursive SQL query, which |
|
479 | 479 | # might be faster. |
|
480 | 480 | return dagop.descendantrevs(revs, self.revs, self.parentrevs) |
|
481 | 481 | |
|
482 | 482 | def heads(self, start=None, stop=None): |
|
483 | 483 | if start is None and stop is None: |
|
484 | 484 | if not len(self): |
|
485 | 485 | return [nullid] |
|
486 | 486 | |
|
487 | 487 | startrev = self.rev(start) if start is not None else nullrev |
|
488 | 488 | stoprevs = {self.rev(n) for n in stop or []} |
|
489 | 489 | |
|
490 | 490 | revs = dagop.headrevssubset( |
|
491 | 491 | self.revs, self.parentrevs, startrev=startrev, stoprevs=stoprevs |
|
492 | 492 | ) |
|
493 | 493 | |
|
494 | 494 | return [self.node(rev) for rev in revs] |
|
495 | 495 | |
|
496 | 496 | def children(self, node): |
|
497 | 497 | rev = self.rev(node) |
|
498 | 498 | |
|
499 | 499 | res = self._db.execute( |
|
500 | 500 | 'SELECT' |
|
501 | 501 | ' node ' |
|
502 | 502 | ' FROM filedata ' |
|
503 | 503 | ' WHERE path=? AND (p1rev=? OR p2rev=?) ' |
|
504 | 504 | ' ORDER BY revnum ASC', |
|
505 | 505 | (self._path, rev, rev), |
|
506 | 506 | ) |
|
507 | 507 | |
|
508 | 508 | return [row[0] for row in res] |
|
509 | 509 | |
|
510 | 510 | # End of ifileindex interface. |
|
511 | 511 | |
|
512 | 512 | # Start of ifiledata interface. |
|
513 | 513 | |
|
514 | 514 | def size(self, rev): |
|
515 | 515 | if rev == nullrev: |
|
516 | 516 | return 0 |
|
517 | 517 | |
|
518 | 518 | if rev not in self._revtonode: |
|
519 | 519 | raise IndexError(rev) |
|
520 | 520 | |
|
521 | 521 | node = self._revtonode[rev] |
|
522 | 522 | |
|
523 | 523 | if self.renamed(node): |
|
524 | 524 | return len(self.read(node)) |
|
525 | 525 | |
|
526 | 526 | return len(self.revision(node)) |
|
527 | 527 | |
|
528 | 528 | def revision(self, node, raw=False, _verifyhash=True): |
|
529 | 529 | if node in (nullid, nullrev): |
|
530 | 530 | return b'' |
|
531 | 531 | |
|
532 | 532 | if isinstance(node, int): |
|
533 | 533 | node = self.node(node) |
|
534 | 534 | |
|
535 | 535 | if node not in self._nodetorev: |
|
536 | 536 | raise error.LookupError(node, self._path, _(b'no node')) |
|
537 | 537 | |
|
538 | 538 | if node in self._revisioncache: |
|
539 | 539 | return self._revisioncache[node] |
|
540 | 540 | |
|
541 | 541 | # Because we have a fulltext revision cache, we are able to |
|
542 | 542 | # short-circuit delta chain traversal and decompression as soon as |
|
543 | 543 | # we encounter a revision in the cache. |
|
544 | 544 | |
|
545 | 545 | stoprids = {self._revisions[n].rid: n for n in self._revisioncache} |
|
546 | 546 | |
|
547 | 547 | if not stoprids: |
|
548 | 548 | stoprids[-1] = None |
|
549 | 549 | |
|
550 | 550 | fulltext = resolvedeltachain( |
|
551 | 551 | self._db, |
|
552 | 552 | self._pathid, |
|
553 | 553 | node, |
|
554 | 554 | self._revisioncache, |
|
555 | 555 | stoprids, |
|
556 | 556 | zstddctx=self._dctx, |
|
557 | 557 | ) |
|
558 | 558 | |
|
559 | 559 | # Don't verify hashes if parent nodes were rewritten, as the hash |
|
560 | 560 | # wouldn't verify. |
|
561 | 561 | if self._revisions[node].flags & (FLAG_MISSING_P1 | FLAG_MISSING_P2): |
|
562 | 562 | _verifyhash = False |
|
563 | 563 | |
|
564 | 564 | if _verifyhash: |
|
565 | 565 | self._checkhash(fulltext, node) |
|
566 | 566 | self._revisioncache[node] = fulltext |
|
567 | 567 | |
|
568 | 568 | return fulltext |
|
569 | 569 | |
|
570 | 570 | def rawdata(self, *args, **kwargs): |
|
571 | 571 | return self.revision(*args, **kwargs) |
|
572 | 572 | |
|
573 | 573 | def read(self, node): |
|
574 | 574 | return storageutil.filtermetadata(self.revision(node)) |
|
575 | 575 | |
|
576 | 576 | def renamed(self, node): |
|
577 | 577 | return storageutil.filerevisioncopied(self, node) |
|
578 | 578 | |
|
579 | 579 | def cmp(self, node, fulltext): |
|
580 | 580 | return not storageutil.filedataequivalent(self, node, fulltext) |
|
581 | 581 | |
|
582 | 582 | def emitrevisions( |
|
583 | 583 | self, |
|
584 | 584 | nodes, |
|
585 | 585 | nodesorder=None, |
|
586 | 586 | revisiondata=False, |
|
587 | 587 | assumehaveparentrevisions=False, |
|
588 | 588 | deltamode=repository.CG_DELTAMODE_STD, |
|
589 | 589 | ): |
|
590 | 590 | if nodesorder not in (b'nodes', b'storage', b'linear', None): |
|
591 | 591 | raise error.ProgrammingError( |
|
592 | 592 | b'unhandled value for nodesorder: %s' % nodesorder |
|
593 | 593 | ) |
|
594 | 594 | |
|
595 | 595 | nodes = [n for n in nodes if n != nullid] |
|
596 | 596 | |
|
597 | 597 | if not nodes: |
|
598 | 598 | return |
|
599 | 599 | |
|
600 | 600 | # TODO perform in a single query. |
|
601 | 601 | res = self._db.execute( |
|
602 | 602 | 'SELECT revnum, deltaid FROM fileindex ' |
|
603 | 603 | 'WHERE pathid=? ' |
|
604 | 604 | ' AND node in (%s)' % (','.join(['?'] * len(nodes))), |
|
605 | 605 | tuple([self._pathid] + nodes), |
|
606 | 606 | ) |
|
607 | 607 | |
|
608 | 608 | deltabases = {} |
|
609 | 609 | |
|
610 | 610 | for rev, deltaid in res: |
|
611 | 611 | res = self._db.execute( |
|
612 | 612 | 'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?', |
|
613 | 613 | (self._pathid, deltaid), |
|
614 | 614 | ) |
|
615 | 615 | deltabases[rev] = res.fetchone()[0] |
|
616 | 616 | |
|
617 | 617 | # TODO define revdifffn so we can use delta from storage. |
|
618 | 618 | for delta in storageutil.emitrevisions( |
|
619 | 619 | self, |
|
620 | 620 | nodes, |
|
621 | 621 | nodesorder, |
|
622 | 622 | sqliterevisiondelta, |
|
623 | 623 | deltaparentfn=deltabases.__getitem__, |
|
624 | 624 | revisiondata=revisiondata, |
|
625 | 625 | assumehaveparentrevisions=assumehaveparentrevisions, |
|
626 | 626 | deltamode=deltamode, |
|
627 | 627 | ): |
|
628 | 628 | |
|
629 | 629 | yield delta |
|
630 | 630 | |
|
631 | 631 | # End of ifiledata interface. |
|
632 | 632 | |
|
633 | 633 | # Start of ifilemutation interface. |
|
634 | 634 | |
|
635 | 635 | def add(self, filedata, meta, transaction, linkrev, p1, p2): |
|
636 | 636 | if meta or filedata.startswith(b'\x01\n'): |
|
637 | 637 | filedata = storageutil.packmeta(meta, filedata) |
|
638 | 638 | |
|
639 | 639 | rev = self.addrevision(filedata, transaction, linkrev, p1, p2) |
|
640 | 640 | return self.node(rev) |
|
641 | 641 | |
|
642 | 642 | def addrevision( |
|
643 | 643 | self, |
|
644 | 644 | revisiondata, |
|
645 | 645 | transaction, |
|
646 | 646 | linkrev, |
|
647 | 647 | p1, |
|
648 | 648 | p2, |
|
649 | 649 | node=None, |
|
650 | 650 | flags=0, |
|
651 | 651 | cachedelta=None, |
|
652 | 652 | ): |
|
653 | 653 | if flags: |
|
654 | 654 | raise SQLiteStoreError(_(b'flags not supported on revisions')) |
|
655 | 655 | |
|
656 | 656 | validatehash = node is not None |
|
657 | 657 | node = node or storageutil.hashrevisionsha1(revisiondata, p1, p2) |
|
658 | 658 | |
|
659 | 659 | if validatehash: |
|
660 | 660 | self._checkhash(revisiondata, node, p1, p2) |
|
661 | 661 | |
|
662 | 662 | rev = self._nodetorev.get(node) |
|
663 | 663 | if rev is not None: |
|
664 | 664 | return rev |
|
665 | 665 | |
|
666 | 666 | rev = self._addrawrevision( |
|
667 | 667 | node, revisiondata, transaction, linkrev, p1, p2 |
|
668 | 668 | ) |
|
669 | 669 | |
|
670 | 670 | self._revisioncache[node] = revisiondata |
|
671 | 671 | return rev |
|
672 | 672 | |
|
673 | 673 | def addgroup( |
|
674 | 674 | self, |
|
675 | 675 | deltas, |
|
676 | 676 | linkmapper, |
|
677 | 677 | transaction, |
|
678 | 678 | addrevisioncb=None, |
|
679 | 679 | duplicaterevisioncb=None, |
|
680 | 680 | maybemissingparents=False, |
|
681 | 681 | ): |
|
682 | 682 | empty = True |
|
683 | 683 | |
|
684 | for node, p1, p2, linknode, deltabase, delta, wireflags in deltas: | |
|
684 | for ( | |
|
685 | node, | |
|
686 | p1, | |
|
687 | p2, | |
|
688 | linknode, | |
|
689 | deltabase, | |
|
690 | delta, | |
|
691 | wireflags, | |
|
692 | sidedata, | |
|
693 | ) in deltas: | |
|
685 | 694 | storeflags = 0 |
|
686 | 695 | |
|
687 | 696 | if wireflags & repository.REVISION_FLAG_CENSORED: |
|
688 | 697 | storeflags |= FLAG_CENSORED |
|
689 | 698 | |
|
690 | 699 | if wireflags & ~repository.REVISION_FLAG_CENSORED: |
|
691 | 700 | raise SQLiteStoreError(b'unhandled revision flag') |
|
692 | 701 | |
|
693 | 702 | if maybemissingparents: |
|
694 | 703 | if p1 != nullid and not self.hasnode(p1): |
|
695 | 704 | p1 = nullid |
|
696 | 705 | storeflags |= FLAG_MISSING_P1 |
|
697 | 706 | |
|
698 | 707 | if p2 != nullid and not self.hasnode(p2): |
|
699 | 708 | p2 = nullid |
|
700 | 709 | storeflags |= FLAG_MISSING_P2 |
|
701 | 710 | |
|
702 | 711 | baserev = self.rev(deltabase) |
|
703 | 712 | |
|
704 | 713 | # If base is censored, delta must be full replacement in a single |
|
705 | 714 | # patch operation. |
|
706 | 715 | if baserev != nullrev and self.iscensored(baserev): |
|
707 | 716 | hlen = struct.calcsize(b'>lll') |
|
708 | 717 | oldlen = len(self.rawdata(deltabase, _verifyhash=False)) |
|
709 | 718 | newlen = len(delta) - hlen |
|
710 | 719 | |
|
711 | 720 | if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen): |
|
712 | 721 | raise error.CensoredBaseError(self._path, deltabase) |
|
713 | 722 | |
|
714 | 723 | if not (storeflags & FLAG_CENSORED) and storageutil.deltaiscensored( |
|
715 | 724 | delta, baserev, lambda x: len(self.rawdata(x)) |
|
716 | 725 | ): |
|
717 | 726 | storeflags |= FLAG_CENSORED |
|
718 | 727 | |
|
719 | 728 | linkrev = linkmapper(linknode) |
|
720 | 729 | |
|
721 | 730 | if node in self._revisions: |
|
722 | 731 | # Possibly reset parents to make them proper. |
|
723 | 732 | entry = self._revisions[node] |
|
724 | 733 | |
|
725 | 734 | if entry.flags & FLAG_MISSING_P1 and p1 != nullid: |
|
726 | 735 | entry.p1node = p1 |
|
727 | 736 | entry.p1rev = self._nodetorev[p1] |
|
728 | 737 | entry.flags &= ~FLAG_MISSING_P1 |
|
729 | 738 | |
|
730 | 739 | self._db.execute( |
|
731 | 740 | 'UPDATE fileindex SET p1rev=?, flags=? WHERE id=?', |
|
732 | 741 | (self._nodetorev[p1], entry.flags, entry.rid), |
|
733 | 742 | ) |
|
734 | 743 | |
|
735 | 744 | if entry.flags & FLAG_MISSING_P2 and p2 != nullid: |
|
736 | 745 | entry.p2node = p2 |
|
737 | 746 | entry.p2rev = self._nodetorev[p2] |
|
738 | 747 | entry.flags &= ~FLAG_MISSING_P2 |
|
739 | 748 | |
|
740 | 749 | self._db.execute( |
|
741 | 750 | 'UPDATE fileindex SET p2rev=?, flags=? WHERE id=?', |
|
742 | 751 | (self._nodetorev[p2], entry.flags, entry.rid), 
|
743 | 752 | ) |
|
744 | 753 | |
|
745 | 754 | if duplicaterevisioncb: |
|
746 | 755 | duplicaterevisioncb(self, self.rev(node)) |
|
747 | 756 | empty = False |
|
748 | 757 | continue |
|
749 | 758 | |
|
750 | 759 | if deltabase == nullid: |
|
751 | 760 | text = mdiff.patch(b'', delta) |
|
752 | 761 | storedelta = None |
|
753 | 762 | else: |
|
754 | 763 | text = None |
|
755 | 764 | storedelta = (deltabase, delta) |
|
756 | 765 | |
|
757 | 766 | rev = self._addrawrevision( |
|
758 | 767 | node, |
|
759 | 768 | text, |
|
760 | 769 | transaction, |
|
761 | 770 | linkrev, |
|
762 | 771 | p1, |
|
763 | 772 | p2, |
|
764 | 773 | storedelta=storedelta, |
|
765 | 774 | flags=storeflags, |
|
766 | 775 | ) |
|
767 | 776 | |
|
768 | 777 | if addrevisioncb: |
|
769 | 778 | addrevisioncb(self, rev) |
|
770 | 779 | empty = False |
|
771 | 780 | |
|
772 | 781 | return not empty |
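
The censored-base check in addgroup() above leans on the shape of Mercurial's binary delta format: a delta is a sequence of fragments, each a 12-byte `>lll` header (start, end, length of new data) followed by that data, and a legal delta on top of a censored base must be a single fragment replacing the whole old text, which is what `mdiff.replacediffheader(oldlen, newlen)` encodes. A standalone sketch of that format, with a toy full-replacement delta:

```python
import struct

def apply_bdiff_patch(base, delta):
    # Apply fragments of the form (start, end, newlen) + newlen bytes,
    # where start/end index into the original base text.
    out, pos, off = [], 0, 0
    while off < len(delta):
        start, end, newlen = struct.unpack('>lll', delta[off:off + 12])
        off += 12
        out.append(base[pos:start])           # unchanged prefix
        out.append(delta[off:off + newlen])   # replacement bytes
        off += newlen
        pos = end
    out.append(base[pos:])                    # unchanged suffix
    return b''.join(out)

old = b'secret data'
new = b'tombstone'
# A full replacement is one fragment covering all of `old`:
delta = struct.pack('>lll', 0, len(old), len(new)) + new
assert apply_bdiff_patch(old, delta) == new
```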
|
773 | 782 | |
|
774 | 783 | def censorrevision(self, tr, censornode, tombstone=b''): |
|
775 | 784 | tombstone = storageutil.packmeta({b'censored': tombstone}, b'') |
|
776 | 785 | |
|
777 | 786 | # This restriction is cargo culted from revlogs and makes no sense for |
|
778 | 787 | # SQLite, since columns can be resized at will. |
|
779 | 788 | if len(tombstone) > len(self.rawdata(censornode)): |
|
780 | 789 | raise error.Abort( |
|
781 | 790 | _(b'censor tombstone must be no longer than censored data') |
|
782 | 791 | ) |
|
783 | 792 | |
|
784 | 793 | # We need to replace the censored revision's data with the tombstone. |
|
785 | 794 | # But replacing that data will have implications for delta chains that |
|
786 | 795 | # reference it. |
|
787 | 796 | # |
|
788 | 797 | # While "better," more complex strategies are possible, we do something |
|
789 | 798 | # simple: we find delta chain children of the censored revision and we |
|
790 | 799 | # replace those incremental deltas with fulltexts of their corresponding |
|
791 | 800 | # revision. Then we delete the now-unreferenced delta and original |
|
792 | 801 | # revision and insert a replacement. |
|
793 | 802 | |
|
794 | 803 | # Find the delta to be censored. |
|
795 | 804 | censoreddeltaid = self._db.execute( |
|
796 | 805 | 'SELECT deltaid FROM fileindex WHERE id=?', |
|
797 | 806 | (self._revisions[censornode].rid,), |
|
798 | 807 | ).fetchone()[0] |
|
799 | 808 | |
|
800 | 809 | # Find all its delta chain children. |
|
801 | 810 | # TODO once we support storing deltas for !files, we'll need to look |
|
802 | 811 | # for those delta chains too. |
|
803 | 812 | rows = list( |
|
804 | 813 | self._db.execute( |
|
805 | 814 | 'SELECT id, pathid, node FROM fileindex ' |
|
806 | 815 | 'WHERE deltabaseid=? OR deltaid=?', |
|
807 | 816 | (censoreddeltaid, censoreddeltaid), |
|
808 | 817 | ) |
|
809 | 818 | ) |
|
810 | 819 | |
|
811 | 820 | for row in rows: |
|
812 | 821 | rid, pathid, node = row |
|
813 | 822 | |
|
814 | 823 | fulltext = resolvedeltachain( |
|
815 | 824 | self._db, pathid, node, {}, {-1: None}, zstddctx=self._dctx |
|
816 | 825 | ) |
|
817 | 826 | |
|
818 | 827 | deltahash = hashutil.sha1(fulltext).digest() |
|
819 | 828 | |
|
820 | 829 | if self._compengine == b'zstd': |
|
821 | 830 | deltablob = self._cctx.compress(fulltext) |
|
822 | 831 | compression = COMPRESSION_ZSTD |
|
823 | 832 | elif self._compengine == b'zlib': |
|
824 | 833 | deltablob = zlib.compress(fulltext) |
|
825 | 834 | compression = COMPRESSION_ZLIB |
|
826 | 835 | elif self._compengine == b'none': |
|
827 | 836 | deltablob = fulltext |
|
828 | 837 | compression = COMPRESSION_NONE |
|
829 | 838 | else: |
|
830 | 839 | raise error.ProgrammingError( |
|
831 | 840 | b'unhandled compression engine: %s' % self._compengine |
|
832 | 841 | ) |
|
833 | 842 | |
|
834 | 843 | if len(deltablob) >= len(fulltext): |
|
835 | 844 | deltablob = fulltext |
|
836 | 845 | compression = COMPRESSION_NONE |
|
837 | 846 | |
|
838 | 847 | deltaid = insertdelta(self._db, compression, deltahash, deltablob) |
|
839 | 848 | |
|
840 | 849 | self._db.execute( |
|
841 | 850 | 'UPDATE fileindex SET deltaid=?, deltabaseid=NULL ' |
|
842 | 851 | 'WHERE id=?', |
|
843 | 852 | (deltaid, rid), |
|
844 | 853 | ) |
|
845 | 854 | |
|
846 | 855 | # Now create the tombstone delta and replace the delta on the censored |
|
847 | 856 | # node. |
|
848 | 857 | deltahash = hashutil.sha1(tombstone).digest() |
|
849 | 858 | tombstonedeltaid = insertdelta( |
|
850 | 859 | self._db, COMPRESSION_NONE, deltahash, tombstone |
|
851 | 860 | ) |
|
852 | 861 | |
|
853 | 862 | flags = self._revisions[censornode].flags |
|
854 | 863 | flags |= FLAG_CENSORED |
|
855 | 864 | |
|
856 | 865 | self._db.execute( |
|
857 | 866 | 'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL ' |
|
858 | 867 | 'WHERE pathid=? AND node=?', |
|
859 | 868 | (flags, tombstonedeltaid, self._pathid, censornode), |
|
860 | 869 | ) |
|
861 | 870 | |
|
862 | 871 | self._db.execute('DELETE FROM delta WHERE id=?', (censoreddeltaid,)) |
|
863 | 872 | |
|
864 | 873 | self._refreshindex() |
|
865 | 874 | self._revisioncache.clear() |
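
The promote-children-to-fulltext strategy described in the comments above, reduced to an in-memory toy; the real method does the same thing via SQL UPDATEs plus compression, so this is only an illustration of the ordering:

```python
from mercurial import mdiff

def censor(store, victim, tombstone):
    # store maps node -> ('full', text) or ('delta', basenode, delta)
    def fulltext(node):
        entry = store[node]
        if entry[0] == 'full':
            return entry[1]
        _kind, base, delta = entry
        return mdiff.patch(fulltext(base), delta)

    # First make every delta-chain child of the victim self-contained...
    for node, entry in list(store.items()):
        if entry[0] == 'delta' and entry[1] == victim:
            store[node] = ('full', fulltext(node))
    # ...then the victim's data can be swapped for the tombstone safely.
    store[victim] = ('full', tombstone)
```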
|
866 | 875 | |
|
867 | 876 | def getstrippoint(self, minlink): |
|
868 | 877 | return storageutil.resolvestripinfo( |
|
869 | 878 | minlink, |
|
870 | 879 | len(self) - 1, |
|
871 | 880 | [self.rev(n) for n in self.heads()], |
|
872 | 881 | self.linkrev, |
|
873 | 882 | self.parentrevs, |
|
874 | 883 | ) |
|
875 | 884 | |
|
876 | 885 | def strip(self, minlink, transaction): |
|
877 | 886 | if not len(self): |
|
878 | 887 | return |
|
879 | 888 | |
|
880 | 889 | rev, _ignored = self.getstrippoint(minlink) |
|
881 | 890 | |
|
882 | 891 | if rev == len(self): |
|
883 | 892 | return |
|
884 | 893 | |
|
885 | 894 | for rev in self.revs(rev): |
|
886 | 895 | self._db.execute( |
|
887 | 896 | 'DELETE FROM fileindex WHERE pathid=? AND node=?', |
|
888 | 897 | (self._pathid, self.node(rev)), |
|
889 | 898 | ) |
|
890 | 899 | |
|
891 | 900 | # TODO how should we garbage collect data in delta table? |
|
892 | 901 | |
|
893 | 902 | self._refreshindex() |
|
894 | 903 | |
|
895 | 904 | # End of ifilemutation interface. |
|
896 | 905 | |
|
897 | 906 | # Start of ifilestorage interface. |
|
898 | 907 | |
|
899 | 908 | def files(self): |
|
900 | 909 | return [] |
|
901 | 910 | |
|
902 | 911 | def storageinfo( |
|
903 | 912 | self, |
|
904 | 913 | exclusivefiles=False, |
|
905 | 914 | sharedfiles=False, |
|
906 | 915 | revisionscount=False, |
|
907 | 916 | trackedsize=False, |
|
908 | 917 | storedsize=False, |
|
909 | 918 | ): |
|
910 | 919 | d = {} |
|
911 | 920 | |
|
912 | 921 | if exclusivefiles: |
|
913 | 922 | d[b'exclusivefiles'] = [] |
|
914 | 923 | |
|
915 | 924 | if sharedfiles: |
|
916 | 925 | # TODO list sqlite file(s) here. |
|
917 | 926 | d[b'sharedfiles'] = [] |
|
918 | 927 | |
|
919 | 928 | if revisionscount: |
|
920 | 929 | d[b'revisionscount'] = len(self) |
|
921 | 930 | |
|
922 | 931 | if trackedsize: |
|
923 | 932 | d[b'trackedsize'] = sum( |
|
924 | 933 | len(self.revision(node)) for node in self._nodetorev |
|
925 | 934 | ) |
|
926 | 935 | |
|
927 | 936 | if storedsize: |
|
928 | 937 | # TODO implement this? |
|
929 | 938 | d[b'storedsize'] = None |
|
930 | 939 | |
|
931 | 940 | return d |
|
932 | 941 | |
|
933 | 942 | def verifyintegrity(self, state): |
|
934 | 943 | state[b'skipread'] = set() |
|
935 | 944 | |
|
936 | 945 | for rev in self: |
|
937 | 946 | node = self.node(rev) |
|
938 | 947 | |
|
939 | 948 | try: |
|
940 | 949 | self.revision(node) |
|
941 | 950 | except Exception as e: |
|
942 | 951 | yield sqliteproblem( |
|
943 | 952 | error=_(b'unpacking %s: %s') % (short(node), e), node=node |
|
944 | 953 | ) |
|
945 | 954 | |
|
946 | 955 | state[b'skipread'].add(node) |
|
947 | 956 | |
|
948 | 957 | # End of ifilestorage interface. |
|
949 | 958 | |
|
950 | 959 | def _checkhash(self, fulltext, node, p1=None, p2=None): |
|
951 | 960 | if p1 is None and p2 is None: |
|
952 | 961 | p1, p2 = self.parents(node) |
|
953 | 962 | |
|
954 | 963 | if node == storageutil.hashrevisionsha1(fulltext, p1, p2): |
|
955 | 964 | return |
|
956 | 965 | |
|
957 | 966 | try: |
|
958 | 967 | del self._revisioncache[node] |
|
959 | 968 | except KeyError: |
|
960 | 969 | pass |
|
961 | 970 | |
|
962 | 971 | if storageutil.iscensoredtext(fulltext): |
|
963 | 972 | raise error.CensoredNodeError(self._path, node, fulltext) |
|
964 | 973 | |
|
965 | 974 | raise SQLiteStoreError(_(b'integrity check failed on %s') % self._path) |
|
966 | 975 | |
|
967 | 976 | def _addrawrevision( |
|
968 | 977 | self, |
|
969 | 978 | node, |
|
970 | 979 | revisiondata, |
|
971 | 980 | transaction, |
|
972 | 981 | linkrev, |
|
973 | 982 | p1, |
|
974 | 983 | p2, |
|
975 | 984 | storedelta=None, |
|
976 | 985 | flags=0, |
|
977 | 986 | ): |
|
978 | 987 | if self._pathid is None: |
|
979 | 988 | res = self._db.execute( |
|
980 | 989 | 'INSERT INTO filepath (path) VALUES (?)', (self._path,) |
|
981 | 990 | ) |
|
982 | 991 | self._pathid = res.lastrowid |
|
983 | 992 | |
|
984 | 993 | # For simplicity, always store a delta against p1. |
|
985 | 994 | # TODO we need a lot more logic here to make behavior reasonable. |
|
986 | 995 | |
|
987 | 996 | if storedelta: |
|
988 | 997 | deltabase, delta = storedelta |
|
989 | 998 | |
|
990 | 999 | if isinstance(deltabase, int): |
|
991 | 1000 | deltabase = self.node(deltabase) |
|
992 | 1001 | |
|
993 | 1002 | else: |
|
994 | 1003 | assert revisiondata is not None |
|
995 | 1004 | deltabase = p1 |
|
996 | 1005 | |
|
997 | 1006 | if deltabase == nullid: |
|
998 | 1007 | delta = revisiondata |
|
999 | 1008 | else: |
|
1000 | 1009 | delta = mdiff.textdiff( |
|
1001 | 1010 | self.revision(self.rev(deltabase)), revisiondata |
|
1002 | 1011 | ) |
|
1003 | 1012 | |
|
1004 | 1013 | # File index stores a pointer to its delta and the parent delta. |
|
1005 | 1014 | # The parent delta is stored via a pointer to the fileindex PK. |
|
1006 | 1015 | if deltabase == nullid: |
|
1007 | 1016 | baseid = None |
|
1008 | 1017 | else: |
|
1009 | 1018 | baseid = self._revisions[deltabase].rid |
|
1010 | 1019 | |
|
1011 | 1020 | # Deltas are stored with a hash of their content. This allows |
|
1012 | 1021 | # us to de-duplicate. The table is configured to ignore conflicts |
|
1013 | 1022 | # and it is faster to just insert and silently noop than to look |
|
1014 | 1023 | # first. |
|
1015 | 1024 | deltahash = hashutil.sha1(delta).digest() |
|
1016 | 1025 | |
|
1017 | 1026 | if self._compengine == b'zstd': |
|
1018 | 1027 | deltablob = self._cctx.compress(delta) |
|
1019 | 1028 | compression = COMPRESSION_ZSTD |
|
1020 | 1029 | elif self._compengine == b'zlib': |
|
1021 | 1030 | deltablob = zlib.compress(delta) |
|
1022 | 1031 | compression = COMPRESSION_ZLIB |
|
1023 | 1032 | elif self._compengine == b'none': |
|
1024 | 1033 | deltablob = delta |
|
1025 | 1034 | compression = COMPRESSION_NONE |
|
1026 | 1035 | else: |
|
1027 | 1036 | raise error.ProgrammingError( |
|
1028 | 1037 | b'unhandled compression engine: %s' % self._compengine |
|
1029 | 1038 | ) |
|
1030 | 1039 | |
|
1031 | 1040 | # Don't store compressed data if it isn't practical. |
|
1032 | 1041 | if len(deltablob) >= len(delta): |
|
1033 | 1042 | deltablob = delta |
|
1034 | 1043 | compression = COMPRESSION_NONE |
|
1035 | 1044 | |
|
1036 | 1045 | deltaid = insertdelta(self._db, compression, deltahash, deltablob) |
|
1037 | 1046 | |
|
1038 | 1047 | rev = len(self) |
|
1039 | 1048 | |
|
1040 | 1049 | if p1 == nullid: |
|
1041 | 1050 | p1rev = nullrev |
|
1042 | 1051 | else: |
|
1043 | 1052 | p1rev = self._nodetorev[p1] |
|
1044 | 1053 | |
|
1045 | 1054 | if p2 == nullid: |
|
1046 | 1055 | p2rev = nullrev |
|
1047 | 1056 | else: |
|
1048 | 1057 | p2rev = self._nodetorev[p2] |
|
1049 | 1058 | |
|
1050 | 1059 | rid = self._db.execute( |
|
1051 | 1060 | 'INSERT INTO fileindex (' |
|
1052 | 1061 | ' pathid, revnum, node, p1rev, p2rev, linkrev, flags, ' |
|
1053 | 1062 | ' deltaid, deltabaseid) ' |
|
1054 | 1063 | ' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)', |
|
1055 | 1064 | ( |
|
1056 | 1065 | self._pathid, |
|
1057 | 1066 | rev, |
|
1058 | 1067 | node, |
|
1059 | 1068 | p1rev, |
|
1060 | 1069 | p2rev, |
|
1061 | 1070 | linkrev, |
|
1062 | 1071 | flags, |
|
1063 | 1072 | deltaid, |
|
1064 | 1073 | baseid, |
|
1065 | 1074 | ), |
|
1066 | 1075 | ).lastrowid |
|
1067 | 1076 | |
|
1068 | 1077 | entry = revisionentry( |
|
1069 | 1078 | rid=rid, |
|
1070 | 1079 | rev=rev, |
|
1071 | 1080 | node=node, |
|
1072 | 1081 | p1rev=p1rev, |
|
1073 | 1082 | p2rev=p2rev, |
|
1074 | 1083 | p1node=p1, |
|
1075 | 1084 | p2node=p2, |
|
1076 | 1085 | linkrev=linkrev, |
|
1077 | 1086 | flags=flags, |
|
1078 | 1087 | ) |
|
1079 | 1088 | |
|
1080 | 1089 | self._nodetorev[node] = rev |
|
1081 | 1090 | self._revtonode[rev] = node |
|
1082 | 1091 | self._revisions[node] = entry |
|
1083 | 1092 | |
|
1084 | 1093 | return rev |
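
The insert-and-silently-noop de-duplication described in the comment above can be reproduced with plain sqlite3. The toy schema below is not the extension's real one; the `UNIQUE ON CONFLICT IGNORE` trick is the point:

```python
import hashlib
import sqlite3

db = sqlite3.connect(':memory:')
db.execute(
    'CREATE TABLE delta ('
    '  id INTEGER PRIMARY KEY, '
    '  hash BLOB UNIQUE ON CONFLICT IGNORE, '
    '  data BLOB)'
)

def insertdelta_demo(db, data):
    # Insert blindly; a duplicate hash becomes a no-op, so fetch the id
    # afterwards rather than probing for existence first.
    h = hashlib.sha1(data).digest()
    db.execute('INSERT INTO delta (hash, data) VALUES (?, ?)', (h, data))
    return db.execute('SELECT id FROM delta WHERE hash=?', (h,)).fetchone()[0]

assert insertdelta_demo(db, b'payload') == insertdelta_demo(db, b'payload')
```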
|
1085 | 1094 | |
|
1086 | 1095 | |
|
1087 | 1096 | class sqliterepository(localrepo.localrepository): |
|
1088 | 1097 | def cancopy(self): |
|
1089 | 1098 | return False |
|
1090 | 1099 | |
|
1091 | 1100 | def transaction(self, *args, **kwargs): |
|
1092 | 1101 | current = self.currenttransaction() |
|
1093 | 1102 | |
|
1094 | 1103 | tr = super(sqliterepository, self).transaction(*args, **kwargs) |
|
1095 | 1104 | |
|
1096 | 1105 | if current: |
|
1097 | 1106 | return tr |
|
1098 | 1107 | |
|
1099 | 1108 | self._dbconn.execute('BEGIN TRANSACTION') |
|
1100 | 1109 | |
|
1101 | 1110 | def committransaction(_): |
|
1102 | 1111 | self._dbconn.commit() |
|
1103 | 1112 | |
|
1104 | 1113 | tr.addfinalize(b'sqlitestore', committransaction) |
|
1105 | 1114 | |
|
1106 | 1115 | return tr |
|
1107 | 1116 | |
|
1108 | 1117 | @property |
|
1109 | 1118 | def _dbconn(self): |
|
1110 | 1119 | # SQLite connections can only be used on the thread that created |
|
1111 | 1120 | # them. In most cases, this "just works." However, hgweb uses |
|
1112 | 1121 | # multiple threads. |
|
1113 | 1122 | tid = threading.current_thread().ident |
|
1114 | 1123 | |
|
1115 | 1124 | if self._db: |
|
1116 | 1125 | if self._db[0] == tid: |
|
1117 | 1126 | return self._db[1] |
|
1118 | 1127 | |
|
1119 | 1128 | db = makedb(self.svfs.join(b'db.sqlite')) |
|
1120 | 1129 | self._db = (tid, db) |
|
1121 | 1130 | |
|
1122 | 1131 | return db |
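
The thread-affinity dance in `_dbconn`, isolated into a sketch. It is simplified: no schema setup, and the previous thread's connection is simply dropped rather than closed, matching the property above:

```python
import sqlite3
import threading

class connectioncache(object):
    def __init__(self, path):
        self._path = path
        self._db = None                    # (thread ident, connection)

    @property
    def conn(self):
        tid = threading.current_thread().ident
        if self._db and self._db[0] == tid:
            return self._db[1]             # reuse: same thread created it
        db = sqlite3.connect(self._path)   # otherwise open a fresh one
        self._db = (tid, db)
        return db
```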
|
1123 | 1132 | |
|
1124 | 1133 | |
|
1125 | 1134 | def makedb(path): |
|
1126 | 1135 | """Construct a database handle for a database at path.""" |
|
1127 | 1136 | |
|
1128 | 1137 | db = sqlite3.connect(encoding.strfromlocal(path)) |
|
1129 | 1138 | db.text_factory = bytes |
|
1130 | 1139 | |
|
1131 | 1140 | res = db.execute('PRAGMA user_version').fetchone()[0] |
|
1132 | 1141 | |
|
1133 | 1142 | # New database. |
|
1134 | 1143 | if res == 0: |
|
1135 | 1144 | for statement in CREATE_SCHEMA: |
|
1136 | 1145 | db.execute(statement) |
|
1137 | 1146 | |
|
1138 | 1147 | db.commit() |
|
1139 | 1148 | |
|
1140 | 1149 | elif res == CURRENT_SCHEMA_VERSION: |
|
1141 | 1150 | pass |
|
1142 | 1151 | |
|
1143 | 1152 | else: |
|
1144 | 1153 | raise error.Abort(_(b'sqlite database has unrecognized version')) |
|
1145 | 1154 | |
|
1146 | 1155 | db.execute('PRAGMA journal_mode=WAL') |
|
1147 | 1156 | |
|
1148 | 1157 | return db |
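
The `PRAGMA user_version` scheme used by makedb(), as a self-contained sketch with a one-table stand-in schema (a fresh SQLite database always reports version 0):

```python
import sqlite3

CURRENT_SCHEMA_VERSION = 1
CREATE_SCHEMA = [
    'CREATE TABLE filepath (id INTEGER PRIMARY KEY, path BLOB)',
    'PRAGMA user_version=1',   # stamp the schema we just created
]

def opendb(path):
    db = sqlite3.connect(path)
    version = db.execute('PRAGMA user_version').fetchone()[0]
    if version == 0:
        for stmt in CREATE_SCHEMA:
            db.execute(stmt)
        db.commit()
    elif version != CURRENT_SCHEMA_VERSION:
        raise RuntimeError('unrecognized schema version %d' % version)
    return db
```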
|
1149 | 1158 | |
|
1150 | 1159 | |
|
1151 | 1160 | def featuresetup(ui, supported): |
|
1152 | 1161 | supported.add(REQUIREMENT) |
|
1153 | 1162 | |
|
1154 | 1163 | if zstd: |
|
1155 | 1164 | supported.add(REQUIREMENT_ZSTD) |
|
1156 | 1165 | |
|
1157 | 1166 | supported.add(REQUIREMENT_ZLIB) |
|
1158 | 1167 | supported.add(REQUIREMENT_NONE) |
|
1159 | 1168 | supported.add(REQUIREMENT_SHALLOW_FILES) |
|
1160 | 1169 | supported.add(requirements.NARROW_REQUIREMENT) |
|
1161 | 1170 | |
|
1162 | 1171 | |
|
1163 | 1172 | def newreporequirements(orig, ui, createopts): |
|
1164 | 1173 | if createopts[b'backend'] != b'sqlite': |
|
1165 | 1174 | return orig(ui, createopts) |
|
1166 | 1175 | |
|
1167 | 1176 | # This restriction can be lifted once we have more confidence. |
|
1168 | 1177 | if b'sharedrepo' in createopts: |
|
1169 | 1178 | raise error.Abort( |
|
1170 | 1179 | _(b'shared repositories not supported with SQLite store') |
|
1171 | 1180 | ) |
|
1172 | 1181 | |
|
1173 | 1182 | # This filtering is out of an abundance of caution: we want to ensure |
|
1174 | 1183 | # we honor creation options and we do that by annotating exactly the |
|
1175 | 1184 | # creation options we recognize. |
|
1176 | 1185 | known = { |
|
1177 | 1186 | b'narrowfiles', |
|
1178 | 1187 | b'backend', |
|
1179 | 1188 | b'shallowfilestore', |
|
1180 | 1189 | } |
|
1181 | 1190 | |
|
1182 | 1191 | unsupported = set(createopts) - known |
|
1183 | 1192 | if unsupported: |
|
1184 | 1193 | raise error.Abort( |
|
1185 | 1194 | _(b'SQLite store does not support repo creation option: %s') |
|
1186 | 1195 | % b', '.join(sorted(unsupported)) |
|
1187 | 1196 | ) |
|
1188 | 1197 | |
|
1189 | 1198 | # Since we're a hybrid store that still relies on revlogs, we fall back |
|
1190 | 1199 | # to using the revlogv1 backend's storage requirements then adding our |
|
1191 | 1200 | # own requirement. |
|
1192 | 1201 | createopts[b'backend'] = b'revlogv1' |
|
1193 | 1202 | requirements = orig(ui, createopts) |
|
1194 | 1203 | requirements.add(REQUIREMENT) |
|
1195 | 1204 | |
|
1196 | 1205 | compression = ui.config(b'storage', b'sqlite.compression') |
|
1197 | 1206 | |
|
1198 | 1207 | if compression == b'zstd' and not zstd: |
|
1199 | 1208 | raise error.Abort( |
|
1200 | 1209 | _( |
|
1201 | 1210 | b'storage.sqlite.compression set to "zstd" but ' |
|
1202 | 1211 | b'zstandard compression not available to this ' |
|
1203 | 1212 | b'Mercurial install' |
|
1204 | 1213 | ) |
|
1205 | 1214 | ) |
|
1206 | 1215 | |
|
1207 | 1216 | if compression == b'zstd': |
|
1208 | 1217 | requirements.add(REQUIREMENT_ZSTD) |
|
1209 | 1218 | elif compression == b'zlib': |
|
1210 | 1219 | requirements.add(REQUIREMENT_ZLIB) |
|
1211 | 1220 | elif compression == b'none': |
|
1212 | 1221 | requirements.add(REQUIREMENT_NONE) |
|
1213 | 1222 | else: |
|
1214 | 1223 | raise error.Abort( |
|
1215 | 1224 | _( |
|
1216 | 1225 | b'unknown compression engine defined in ' |
|
1217 | 1226 | b'storage.sqlite.compression: %s' |
|
1218 | 1227 | ) |
|
1219 | 1228 | % compression |
|
1220 | 1229 | ) |
|
1221 | 1230 | |
|
1222 | 1231 | if createopts.get(b'shallowfilestore'): |
|
1223 | 1232 | requirements.add(REQUIREMENT_SHALLOW_FILES) |
|
1224 | 1233 | |
|
1225 | 1234 | return requirements |
|
1226 | 1235 | |
|
1227 | 1236 | |
|
1228 | 1237 | @interfaceutil.implementer(repository.ilocalrepositoryfilestorage) |
|
1229 | 1238 | class sqlitefilestorage(object): |
|
1230 | 1239 | """Repository file storage backed by SQLite.""" |
|
1231 | 1240 | |
|
1232 | 1241 | def file(self, path): |
|
1233 | 1242 | if path[0] == b'/': |
|
1234 | 1243 | path = path[1:] |
|
1235 | 1244 | |
|
1236 | 1245 | if REQUIREMENT_ZSTD in self.requirements: |
|
1237 | 1246 | compression = b'zstd' |
|
1238 | 1247 | elif REQUIREMENT_ZLIB in self.requirements: |
|
1239 | 1248 | compression = b'zlib' |
|
1240 | 1249 | elif REQUIREMENT_NONE in self.requirements: |
|
1241 | 1250 | compression = b'none' |
|
1242 | 1251 | else: |
|
1243 | 1252 | raise error.Abort( |
|
1244 | 1253 | _( |
|
1245 | 1254 | b'unable to determine what compression engine ' |
|
1246 | 1255 | b'to use for SQLite storage' |
|
1247 | 1256 | ) |
|
1248 | 1257 | ) |
|
1249 | 1258 | |
|
1250 | 1259 | return sqlitefilestore(self._dbconn, path, compression) |
|
1251 | 1260 | |
|
1252 | 1261 | |
|
1253 | 1262 | def makefilestorage(orig, requirements, features, **kwargs): |
|
1254 | 1263 | """Produce a type conforming to ``ilocalrepositoryfilestorage``.""" |
|
1255 | 1264 | if REQUIREMENT in requirements: |
|
1256 | 1265 | if REQUIREMENT_SHALLOW_FILES in requirements: |
|
1257 | 1266 | features.add(repository.REPO_FEATURE_SHALLOW_FILE_STORAGE) |
|
1258 | 1267 | |
|
1259 | 1268 | return sqlitefilestorage |
|
1260 | 1269 | else: |
|
1261 | 1270 | return orig(requirements=requirements, features=features, **kwargs) |
|
1262 | 1271 | |
|
1263 | 1272 | |
|
1264 | 1273 | def makemain(orig, ui, requirements, **kwargs): |
|
1265 | 1274 | if REQUIREMENT in requirements: |
|
1266 | 1275 | if REQUIREMENT_ZSTD in requirements and not zstd: |
|
1267 | 1276 | raise error.Abort( |
|
1268 | 1277 | _( |
|
1269 | 1278 | b'repository uses zstandard compression, which ' |
|
1270 | 1279 | b'is not available to this Mercurial install' |
|
1271 | 1280 | ) |
|
1272 | 1281 | ) |
|
1273 | 1282 | |
|
1274 | 1283 | return sqliterepository |
|
1275 | 1284 | |
|
1276 | 1285 | return orig(requirements=requirements, **kwargs) |
|
1277 | 1286 | |
|
1278 | 1287 | |
|
1279 | 1288 | def verifierinit(orig, self, *args, **kwargs): |
|
1280 | 1289 | orig(self, *args, **kwargs) |
|
1281 | 1290 | |
|
1282 | 1291 | # We don't care that files in the store don't align with what is |
|
1283 | 1292 | # advertised. So suppress these warnings. |
|
1284 | 1293 | self.warnorphanstorefiles = False |
|
1285 | 1294 | |
|
1286 | 1295 | |
|
1287 | 1296 | def extsetup(ui): |
|
1288 | 1297 | localrepo.featuresetupfuncs.add(featuresetup) |
|
1289 | 1298 | extensions.wrapfunction( |
|
1290 | 1299 | localrepo, b'newreporequirements', newreporequirements |
|
1291 | 1300 | ) |
|
1292 | 1301 | extensions.wrapfunction(localrepo, b'makefilestorage', makefilestorage) |
|
1293 | 1302 | extensions.wrapfunction(localrepo, b'makemain', makemain) |
|
1294 | 1303 | extensions.wrapfunction(verify.verifier, b'__init__', verifierinit) |
|
1295 | 1304 | |
|
1296 | 1305 | |
|
1297 | 1306 | def reposetup(ui, repo): |
|
1298 | 1307 | if isinstance(repo, sqliterepository): |
|
1299 | 1308 | repo._db = None |
|
1300 | 1309 | |
|
1301 | 1310 | # TODO check for bundlerepository? |
@@ -1,680 +1,680 @@
|
1 | 1 | # bundlerepo.py - repository class for viewing uncompressed bundles |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | """Repository class for viewing uncompressed bundles. |
|
9 | 9 | |
|
10 | 10 | This provides a read-only repository interface to bundles as if they |
|
11 | 11 | were part of the actual repository. |
|
12 | 12 | """ |
|
13 | 13 | |
|
14 | 14 | from __future__ import absolute_import |
|
15 | 15 | |
|
16 | 16 | import os |
|
17 | 17 | import shutil |
|
18 | 18 | |
|
19 | 19 | from .i18n import _ |
|
20 | 20 | from .node import ( |
|
21 | 21 | hex, |
|
22 | 22 | nullid, |
|
23 | 23 | nullrev, |
|
24 | 24 | ) |
|
25 | 25 | |
|
26 | 26 | from . import ( |
|
27 | 27 | bundle2, |
|
28 | 28 | changegroup, |
|
29 | 29 | changelog, |
|
30 | 30 | cmdutil, |
|
31 | 31 | discovery, |
|
32 | 32 | encoding, |
|
33 | 33 | error, |
|
34 | 34 | exchange, |
|
35 | 35 | filelog, |
|
36 | 36 | localrepo, |
|
37 | 37 | manifest, |
|
38 | 38 | mdiff, |
|
39 | 39 | pathutil, |
|
40 | 40 | phases, |
|
41 | 41 | pycompat, |
|
42 | 42 | revlog, |
|
43 | 43 | util, |
|
44 | 44 | vfs as vfsmod, |
|
45 | 45 | ) |
|
46 | 46 | |
|
47 | 47 | |
|
48 | 48 | class bundlerevlog(revlog.revlog): |
|
49 | 49 | def __init__(self, opener, indexfile, cgunpacker, linkmapper): |
|
50 | 50 | # How it works: |
|
51 | 51 | # To retrieve a revision, we need to know the offset of the revision in |
|
52 | 52 | # the bundle (an unbundle object). We store this offset in the index |
|
53 | 53 | # (start). The base of the delta is stored in the base field. |
|
54 | 54 | # |
|
55 | 55 | # To differentiate a rev in the bundle from a rev in the revlog, we |
|
56 | 56 | # check revision against repotiprev. |
|
57 | 57 | opener = vfsmod.readonlyvfs(opener) |
|
58 | 58 | revlog.revlog.__init__(self, opener, indexfile) |
|
59 | 59 | self.bundle = cgunpacker |
|
60 | 60 | n = len(self) |
|
61 | 61 | self.repotiprev = n - 1 |
|
62 | 62 | self.bundlerevs = set() # used by 'bundle()' revset expression |
|
63 | 63 | for deltadata in cgunpacker.deltaiter(): |
|
64 | node, p1, p2, cs, deltabase, delta, flags = deltadata | |
|
64 | node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata | |
|
65 | 65 | |
|
66 | 66 | size = len(delta) |
|
67 | 67 | start = cgunpacker.tell() - size |
|
68 | 68 | |
|
69 | 69 | if self.index.has_node(node): |
|
70 | 70 | # this can happen if two branches make the same change |
|
71 | 71 | self.bundlerevs.add(self.index.rev(node)) |
|
72 | 72 | continue |
|
73 | 73 | if cs == node: |
|
74 | 74 | linkrev = nullrev |
|
75 | 75 | else: |
|
76 | 76 | linkrev = linkmapper(cs) |
|
77 | 77 | |
|
78 | 78 | for p in (p1, p2): |
|
79 | 79 | if not self.index.has_node(p): |
|
80 | 80 | raise error.LookupError( |
|
81 | 81 | p, self.indexfile, _(b"unknown parent") |
|
82 | 82 | ) |
|
83 | 83 | |
|
84 | 84 | if not self.index.has_node(deltabase): |
|
85 | 85 | raise LookupError( |
|
86 | 86 | deltabase, self.indexfile, _(b'unknown delta base') |
|
87 | 87 | ) |
|
88 | 88 | |
|
89 | 89 | baserev = self.rev(deltabase) |
|
90 | 90 | # start, size, full unc. size, base (delta base rev), link, p1, p2, node 
|
91 | 91 | e = ( |
|
92 | 92 | revlog.offset_type(start, flags), |
|
93 | 93 | size, |
|
94 | 94 | -1, |
|
95 | 95 | baserev, |
|
96 | 96 | linkrev, |
|
97 | 97 | self.rev(p1), |
|
98 | 98 | self.rev(p2), |
|
99 | 99 | node, |
|
100 | 100 | ) |
|
101 | 101 | self.index.append(e) |
|
102 | 102 | self.bundlerevs.add(n) |
|
103 | 103 | n += 1 |
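
The "How it works" comment at the top of bundlerevlog boils down to a two-tier lookup keyed on repotiprev. A sketch of just that tier split, with hypothetical names (`localchunk` stands in for the base revlog's chunk reader):

```python
class hybridchunks(object):
    def __init__(self, localchunk, bundle, repotiprev):
        self._local = localchunk   # rev -> bytes, for on-disk revisions
        self._bundle = bundle      # seekable changegroup stream
        self._offsets = {}         # rev -> (start, size) inside the bundle
        self._tip = repotiprev

    def record(self, rev, start, size):
        self._offsets[rev] = (start, size)

    def chunk(self, rev):
        if rev <= self._tip:
            return self._local(rev)        # revision lives in the revlog
        start, size = self._offsets[rev]
        self._bundle.seek(start)           # revision lives in the bundle
        return self._bundle.read(size)
```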
|
104 | 104 | |
|
105 | 105 | def _chunk(self, rev, df=None): |
|
106 | 106 | # Warning: in case of bundle, the diff is against what we stored as |
|
107 | 107 | # delta base, not against rev - 1 |
|
108 | 108 | # XXX: could use some caching |
|
109 | 109 | if rev <= self.repotiprev: |
|
110 | 110 | return revlog.revlog._chunk(self, rev) |
|
111 | 111 | self.bundle.seek(self.start(rev)) |
|
112 | 112 | return self.bundle.read(self.length(rev)) |
|
113 | 113 | |
|
114 | 114 | def revdiff(self, rev1, rev2): |
|
115 | 115 | """return or calculate a delta between two revisions""" |
|
116 | 116 | if rev1 > self.repotiprev and rev2 > self.repotiprev: |
|
117 | 117 | # hot path for bundle |
|
118 | 118 | revb = self.index[rev2][3] |
|
119 | 119 | if revb == rev1: |
|
120 | 120 | return self._chunk(rev2) |
|
121 | 121 | elif rev1 <= self.repotiprev and rev2 <= self.repotiprev: |
|
122 | 122 | return revlog.revlog.revdiff(self, rev1, rev2) |
|
123 | 123 | |
|
124 | 124 | return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2)) |
|
125 | 125 | |
|
126 | 126 | def _rawtext(self, node, rev, _df=None): |
|
127 | 127 | if rev is None: |
|
128 | 128 | rev = self.rev(node) |
|
129 | 129 | validated = False |
|
130 | 130 | rawtext = None |
|
131 | 131 | chain = [] |
|
132 | 132 | iterrev = rev |
|
133 | 133 | # reconstruct the revision if it is from a changegroup |
|
134 | 134 | while iterrev > self.repotiprev: |
|
135 | 135 | if self._revisioncache and self._revisioncache[1] == iterrev: |
|
136 | 136 | rawtext = self._revisioncache[2] |
|
137 | 137 | break |
|
138 | 138 | chain.append(iterrev) |
|
139 | 139 | iterrev = self.index[iterrev][3] |
|
140 | 140 | if iterrev == nullrev: |
|
141 | 141 | rawtext = b'' |
|
142 | 142 | elif rawtext is None: |
|
143 | 143 | r = super(bundlerevlog, self)._rawtext( |
|
144 | 144 | self.node(iterrev), iterrev, _df=_df |
|
145 | 145 | ) |
|
146 | 146 | __, rawtext, validated = r |
|
147 | 147 | if chain: |
|
148 | 148 | validated = False |
|
149 | 149 | while chain: |
|
150 | 150 | delta = self._chunk(chain.pop()) |
|
151 | 151 | rawtext = mdiff.patches(rawtext, [delta]) |
|
152 | 152 | return rev, rawtext, validated |
|
153 | 153 | |
|
154 | 154 | def addrevision(self, *args, **kwargs): |
|
155 | 155 | raise NotImplementedError |
|
156 | 156 | |
|
157 | 157 | def addgroup(self, *args, **kwargs): |
|
158 | 158 | raise NotImplementedError |
|
159 | 159 | |
|
160 | 160 | def strip(self, *args, **kwargs): |
|
161 | 161 | raise NotImplementedError |
|
162 | 162 | |
|
163 | 163 | def checksize(self): |
|
164 | 164 | raise NotImplementedError |
|
165 | 165 | |
|
166 | 166 | |
|
167 | 167 | class bundlechangelog(bundlerevlog, changelog.changelog): |
|
168 | 168 | def __init__(self, opener, cgunpacker): |
|
169 | 169 | changelog.changelog.__init__(self, opener) |
|
170 | 170 | linkmapper = lambda x: x |
|
171 | 171 | bundlerevlog.__init__( |
|
172 | 172 | self, opener, self.indexfile, cgunpacker, linkmapper |
|
173 | 173 | ) |
|
174 | 174 | |
|
175 | 175 | |
|
176 | 176 | class bundlemanifest(bundlerevlog, manifest.manifestrevlog): |
|
177 | 177 | def __init__( |
|
178 | 178 | self, opener, cgunpacker, linkmapper, dirlogstarts=None, dir=b'' |
|
179 | 179 | ): |
|
180 | 180 | manifest.manifestrevlog.__init__(self, opener, tree=dir) |
|
181 | 181 | bundlerevlog.__init__( |
|
182 | 182 | self, opener, self.indexfile, cgunpacker, linkmapper |
|
183 | 183 | ) |
|
184 | 184 | if dirlogstarts is None: |
|
185 | 185 | dirlogstarts = {} |
|
186 | 186 | if self.bundle.version == b"03": |
|
187 | 187 | dirlogstarts = _getfilestarts(self.bundle) |
|
188 | 188 | self._dirlogstarts = dirlogstarts |
|
189 | 189 | self._linkmapper = linkmapper |
|
190 | 190 | |
|
191 | 191 | def dirlog(self, d): |
|
192 | 192 | if d in self._dirlogstarts: |
|
193 | 193 | self.bundle.seek(self._dirlogstarts[d]) |
|
194 | 194 | return bundlemanifest( |
|
195 | 195 | self.opener, |
|
196 | 196 | self.bundle, |
|
197 | 197 | self._linkmapper, |
|
198 | 198 | self._dirlogstarts, |
|
199 | 199 | dir=d, |
|
200 | 200 | ) |
|
201 | 201 | return super(bundlemanifest, self).dirlog(d) |
|
202 | 202 | |
|
203 | 203 | |
|
204 | 204 | class bundlefilelog(filelog.filelog): |
|
205 | 205 | def __init__(self, opener, path, cgunpacker, linkmapper): |
|
206 | 206 | filelog.filelog.__init__(self, opener, path) |
|
207 | 207 | self._revlog = bundlerevlog( |
|
208 | 208 | opener, self.indexfile, cgunpacker, linkmapper |
|
209 | 209 | ) |
|
210 | 210 | |
|
211 | 211 | |
|
212 | 212 | class bundlepeer(localrepo.localpeer): |
|
213 | 213 | def canpush(self): |
|
214 | 214 | return False |
|
215 | 215 | |
|
216 | 216 | |
|
217 | 217 | class bundlephasecache(phases.phasecache): |
|
218 | 218 | def __init__(self, *args, **kwargs): |
|
219 | 219 | super(bundlephasecache, self).__init__(*args, **kwargs) |
|
220 | 220 | if util.safehasattr(self, 'opener'): |
|
221 | 221 | self.opener = vfsmod.readonlyvfs(self.opener) |
|
222 | 222 | |
|
223 | 223 | def write(self): |
|
224 | 224 | raise NotImplementedError |
|
225 | 225 | |
|
226 | 226 | def _write(self, fp): |
|
227 | 227 | raise NotImplementedError |
|
228 | 228 | |
|
229 | 229 | def _updateroots(self, phase, newroots, tr): |
|
230 | 230 | self.phaseroots[phase] = newroots |
|
231 | 231 | self.invalidate() |
|
232 | 232 | self.dirty = True |
|
233 | 233 | |
|
234 | 234 | |
|
235 | 235 | def _getfilestarts(cgunpacker): |
|
236 | 236 | filespos = {} |
|
237 | 237 | for chunkdata in iter(cgunpacker.filelogheader, {}): |
|
238 | 238 | fname = chunkdata[b'filename'] |
|
239 | 239 | filespos[fname] = cgunpacker.tell() |
|
240 | 240 | for chunk in iter(lambda: cgunpacker.deltachunk(None), {}): |
|
241 | 241 | pass |
|
242 | 242 | return filespos |
|
243 | 243 | |
|
244 | 244 | |
|
245 | 245 | class bundlerepository(object): |
|
246 | 246 | """A repository instance that is a union of a local repo and a bundle. |
|
247 | 247 | |
|
248 | 248 | Instances represent a read-only repository composed of a local repository |
|
249 | 249 | with the contents of a bundle file applied. The repository instance is |
|
250 | 250 | conceptually similar to the state of a repository after an |
|
251 | 251 | ``hg unbundle`` operation. However, the contents of the bundle are never |
|
252 | 252 | applied to the actual base repository. |
|
253 | 253 | |
|
254 | 254 | Instances constructed directly are not usable as repository objects. |
|
255 | 255 | Use instance() or makebundlerepository() to create instances. |
|
256 | 256 | """ |
|
257 | 257 | |
|
258 | 258 | def __init__(self, bundlepath, url, tempparent): |
|
259 | 259 | self._tempparent = tempparent |
|
260 | 260 | self._url = url |
|
261 | 261 | |
|
262 | 262 | self.ui.setconfig(b'phases', b'publish', False, b'bundlerepo') |
|
263 | 263 | |
|
264 | 264 | self.tempfile = None |
|
265 | 265 | f = util.posixfile(bundlepath, b"rb") |
|
266 | 266 | bundle = exchange.readbundle(self.ui, f, bundlepath) |
|
267 | 267 | |
|
268 | 268 | if isinstance(bundle, bundle2.unbundle20): |
|
269 | 269 | self._bundlefile = bundle |
|
270 | 270 | self._cgunpacker = None |
|
271 | 271 | |
|
272 | 272 | cgpart = None |
|
273 | 273 | for part in bundle.iterparts(seekable=True): |
|
274 | 274 | if part.type == b'changegroup': |
|
275 | 275 | if cgpart: |
|
276 | 276 | raise NotImplementedError( |
|
277 | 277 | b"can't process multiple changegroups" |
|
278 | 278 | ) |
|
279 | 279 | cgpart = part |
|
280 | 280 | |
|
281 | 281 | self._handlebundle2part(bundle, part) |
|
282 | 282 | |
|
283 | 283 | if not cgpart: |
|
284 | 284 | raise error.Abort(_(b"No changegroups found")) |
|
285 | 285 | |
|
286 | 286 | # This is required to placate a later consumer, which expects |
|
287 | 287 | # the payload offset to be at the beginning of the changegroup. |
|
288 | 288 | # We need to do this after the iterparts() generator advances |
|
289 | 289 | # because iterparts() will seek to end of payload after the |
|
290 | 290 | # generator returns control to iterparts(). |
|
291 | 291 | cgpart.seek(0, os.SEEK_SET) |
|
292 | 292 | |
|
293 | 293 | elif isinstance(bundle, changegroup.cg1unpacker): |
|
294 | 294 | if bundle.compressed(): |
|
295 | 295 | f = self._writetempbundle( |
|
296 | 296 | bundle.read, b'.hg10un', header=b'HG10UN' |
|
297 | 297 | ) |
|
298 | 298 | bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs) |
|
299 | 299 | |
|
300 | 300 | self._bundlefile = bundle |
|
301 | 301 | self._cgunpacker = bundle |
|
302 | 302 | else: |
|
303 | 303 | raise error.Abort( |
|
304 | 304 | _(b'bundle type %s cannot be read') % type(bundle) |
|
305 | 305 | ) |
|
306 | 306 | |
|
307 | 307 | # dict with the mapping 'filename' -> position in the changegroup. |
|
308 | 308 | self._cgfilespos = {} |
|
309 | 309 | |
|
310 | 310 | self.firstnewrev = self.changelog.repotiprev + 1 |
|
311 | 311 | phases.retractboundary( |
|
312 | 312 | self, |
|
313 | 313 | None, |
|
314 | 314 | phases.draft, |
|
315 | 315 | [ctx.node() for ctx in self[self.firstnewrev :]], |
|
316 | 316 | ) |
|
317 | 317 | |
|
318 | 318 | def _handlebundle2part(self, bundle, part): |
|
319 | 319 | if part.type != b'changegroup': |
|
320 | 320 | return |
|
321 | 321 | |
|
322 | 322 | cgstream = part |
|
323 | 323 | version = part.params.get(b'version', b'01') |
|
324 | 324 | legalcgvers = changegroup.supportedincomingversions(self) |
|
325 | 325 | if version not in legalcgvers: |
|
326 | 326 | msg = _(b'Unsupported changegroup version: %s') |
|
327 | 327 | raise error.Abort(msg % version) |
|
328 | 328 | if bundle.compressed(): |
|
329 | 329 | cgstream = self._writetempbundle(part.read, b'.cg%sun' % version) |
|
330 | 330 | |
|
331 | 331 | self._cgunpacker = changegroup.getunbundler(version, cgstream, b'UN') |
|
332 | 332 | |
|
333 | 333 | def _writetempbundle(self, readfn, suffix, header=b''): |
|
334 | 334 | """Write a temporary file to disk""" |
|
335 | 335 | fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix) |
|
336 | 336 | self.tempfile = temp |
|
337 | 337 | |
|
338 | 338 | with os.fdopen(fdtemp, 'wb') as fptemp: |
|
339 | 339 | fptemp.write(header) |
|
340 | 340 | while True: |
|
341 | 341 | chunk = readfn(2 ** 18) |
|
342 | 342 | if not chunk: |
|
343 | 343 | break |
|
344 | 344 | fptemp.write(chunk) |
|
345 | 345 | |
|
346 | 346 | return self.vfs.open(self.tempfile, mode=b"rb") |
|
347 | 347 | |
|
348 | 348 | @localrepo.unfilteredpropertycache |
|
349 | 349 | def _phasecache(self): |
|
350 | 350 | return bundlephasecache(self, self._phasedefaults) |
|
351 | 351 | |
|
352 | 352 | @localrepo.unfilteredpropertycache |
|
353 | 353 | def changelog(self): |
|
354 | 354 | # consume the header if it exists |
|
355 | 355 | self._cgunpacker.changelogheader() |
|
356 | 356 | c = bundlechangelog(self.svfs, self._cgunpacker) |
|
357 | 357 | self.manstart = self._cgunpacker.tell() |
|
358 | 358 | return c |
|
359 | 359 | |
|
360 | 360 | def _refreshchangelog(self): |
|
361 | 361 | # the changelog for a bundle repo is not a filecache, so this method is not 
|
362 | 362 | # applicable. |
|
363 | 363 | pass |
|
364 | 364 | |
|
365 | 365 | @localrepo.unfilteredpropertycache |
|
366 | 366 | def manifestlog(self): |
|
367 | 367 | self._cgunpacker.seek(self.manstart) |
|
368 | 368 | # consume the header if it exists |
|
369 | 369 | self._cgunpacker.manifestheader() |
|
370 | 370 | linkmapper = self.unfiltered().changelog.rev |
|
371 | 371 | rootstore = bundlemanifest(self.svfs, self._cgunpacker, linkmapper) |
|
372 | 372 | self.filestart = self._cgunpacker.tell() |
|
373 | 373 | |
|
374 | 374 | return manifest.manifestlog( |
|
375 | 375 | self.svfs, self, rootstore, self.narrowmatch() |
|
376 | 376 | ) |
|
377 | 377 | |
|
378 | 378 | def _consumemanifest(self): |
|
379 | 379 | """Consumes the manifest portion of the bundle, setting filestart so the |
|
380 | 380 | file portion can be read.""" |
|
381 | 381 | self._cgunpacker.seek(self.manstart) |
|
382 | 382 | self._cgunpacker.manifestheader() |
|
383 | 383 | for delta in self._cgunpacker.deltaiter(): |
|
384 | 384 | pass |
|
385 | 385 | self.filestart = self._cgunpacker.tell() |
|
386 | 386 | |
|
387 | 387 | @localrepo.unfilteredpropertycache |
|
388 | 388 | def manstart(self): |
|
389 | 389 | self.changelog |
|
390 | 390 | return self.manstart |
|
391 | 391 | |
|
392 | 392 | @localrepo.unfilteredpropertycache |
|
393 | 393 | def filestart(self): |
|
394 | 394 | self.manifestlog |
|
395 | 395 | |
|
396 | 396 | # If filestart was not set by self.manifestlog, that means the |
|
397 | 397 | # manifestlog implementation did not consume the manifests from the |
|
398 | 398 | # changegroup (ex: it might be consuming trees from a separate bundle2 |
|
399 | 399 | # part instead). So we need to manually consume it. |
|
400 | 400 | if 'filestart' not in self.__dict__: |
|
401 | 401 | self._consumemanifest() |
|
402 | 402 | |
|
403 | 403 | return self.filestart |
|
404 | 404 | |
|
405 | 405 | def url(self): |
|
406 | 406 | return self._url |
|
407 | 407 | |
|
408 | 408 | def file(self, f): |
|
409 | 409 | if not self._cgfilespos: |
|
410 | 410 | self._cgunpacker.seek(self.filestart) |
|
411 | 411 | self._cgfilespos = _getfilestarts(self._cgunpacker) |
|
412 | 412 | |
|
413 | 413 | if f in self._cgfilespos: |
|
414 | 414 | self._cgunpacker.seek(self._cgfilespos[f]) |
|
415 | 415 | linkmapper = self.unfiltered().changelog.rev |
|
416 | 416 | return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper) |
|
417 | 417 | else: |
|
418 | 418 | return super(bundlerepository, self).file(f) |
|
419 | 419 | |
|
420 | 420 | def close(self): |
|
421 | 421 | """Close assigned bundle file immediately.""" |
|
422 | 422 | self._bundlefile.close() |
|
423 | 423 | if self.tempfile is not None: |
|
424 | 424 | self.vfs.unlink(self.tempfile) |
|
425 | 425 | if self._tempparent: |
|
426 | 426 | shutil.rmtree(self._tempparent, True) |
|
427 | 427 | |
|
428 | 428 | def cancopy(self): |
|
429 | 429 | return False |
|
430 | 430 | |
|
431 | 431 | def peer(self): |
|
432 | 432 | return bundlepeer(self) |
|
433 | 433 | |
|
434 | 434 | def getcwd(self): |
|
435 | 435 | return encoding.getcwd() # always outside the repo |
|
436 | 436 | |
|
437 | 437 | # Check if parents exist in localrepo before setting |
|
438 | 438 | def setparents(self, p1, p2=nullid): |
|
439 | 439 | p1rev = self.changelog.rev(p1) |
|
440 | 440 | p2rev = self.changelog.rev(p2) |
|
441 | 441 | msg = _(b"setting parent to node %s that only exists in the bundle\n") |
|
442 | 442 | if self.changelog.repotiprev < p1rev: |
|
443 | 443 | self.ui.warn(msg % hex(p1)) |
|
444 | 444 | if self.changelog.repotiprev < p2rev: |
|
445 | 445 | self.ui.warn(msg % hex(p2)) |
|
446 | 446 | return super(bundlerepository, self).setparents(p1, p2) |
|
447 | 447 | |
|
448 | 448 | |
|
449 | 449 | def instance(ui, path, create, intents=None, createopts=None): |
|
450 | 450 | if create: |
|
451 | 451 | raise error.Abort(_(b'cannot create new bundle repository')) |
|
452 | 452 | # internal config: bundle.mainreporoot |
|
453 | 453 | parentpath = ui.config(b"bundle", b"mainreporoot") |
|
454 | 454 | if not parentpath: |
|
455 | 455 | # try to find the correct path to the working directory repo |
|
456 | 456 | parentpath = cmdutil.findrepo(encoding.getcwd()) |
|
457 | 457 | if parentpath is None: |
|
458 | 458 | parentpath = b'' |
|
459 | 459 | if parentpath: |
|
460 | 460 | # Try to make the full path relative so we get a nice, short URL. |
|
461 | 461 | # In particular, we don't want temp dir names in test outputs. |
|
462 | 462 | cwd = encoding.getcwd() |
|
463 | 463 | if parentpath == cwd: |
|
464 | 464 | parentpath = b'' |
|
465 | 465 | else: |
|
466 | 466 | cwd = pathutil.normasprefix(cwd) |
|
467 | 467 | if parentpath.startswith(cwd): |
|
468 | 468 | parentpath = parentpath[len(cwd) :] |
|
469 | 469 | u = util.url(path) |
|
470 | 470 | path = u.localpath() |
|
471 | 471 | if u.scheme == b'bundle': |
|
472 | 472 | s = path.split(b"+", 1) |
|
473 | 473 | if len(s) == 1: |
|
474 | 474 | repopath, bundlename = parentpath, s[0] |
|
475 | 475 | else: |
|
476 | 476 | repopath, bundlename = s |
|
477 | 477 | else: |
|
478 | 478 | repopath, bundlename = parentpath, path |
|
479 | 479 | |
|
480 | 480 | return makebundlerepository(ui, repopath, bundlename) |
|
481 | 481 | |
|
482 | 482 | |
|
483 | 483 | def makebundlerepository(ui, repopath, bundlepath): |
|
484 | 484 | """Make a bundle repository object based on repo and bundle paths.""" |
|
485 | 485 | if repopath: |
|
486 | 486 | url = b'bundle:%s+%s' % (util.expandpath(repopath), bundlepath) |
|
487 | 487 | else: |
|
488 | 488 | url = b'bundle:%s' % bundlepath |
|
489 | 489 | |
|
490 | 490 | # Because we can't make any guarantees about the type of the base |
|
491 | 491 | # repository, we can't have a static class representing the bundle |
|
492 | 492 | # repository. We also can't make any guarantees about how to even |
|
493 | 493 | # call the base repository's constructor! |
|
494 | 494 | # |
|
495 | 495 | # So, our strategy is to go through ``localrepo.instance()`` to construct |
|
496 | 496 | # a repo instance. Then, we dynamically create a new type derived from |
|
497 | 497 | # both it and our ``bundlerepository`` class which overrides some |
|
498 | 498 | # functionality. We then change the type of the constructed repository |
|
499 | 499 | # to this new type and initialize the bundle-specific bits of it. |
|
500 | 500 | |
|
501 | 501 | try: |
|
502 | 502 | repo = localrepo.instance(ui, repopath, create=False) |
|
503 | 503 | tempparent = None |
|
504 | 504 | except error.RepoError: |
|
505 | 505 | tempparent = pycompat.mkdtemp() |
|
506 | 506 | try: |
|
507 | 507 | repo = localrepo.instance(ui, tempparent, create=True) |
|
508 | 508 | except Exception: |
|
509 | 509 | shutil.rmtree(tempparent) |
|
510 | 510 | raise |
|
511 | 511 | |
|
512 | 512 | class derivedbundlerepository(bundlerepository, repo.__class__): |
|
513 | 513 | pass |
|
514 | 514 | |
|
515 | 515 | repo.__class__ = derivedbundlerepository |
|
516 | 516 | bundlerepository.__init__(repo, bundlepath, url, tempparent) |
|
517 | 517 | |
|
518 | 518 | return repo |
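
The class-rebinding trick above works for any object whose type allows `__class__` assignment; stripped of the repository details it is just this:

```python
class readonlymixin(object):
    def cancopy(self):
        return False

def makereadonly(obj):
    # Derive from the instance's concrete class, whatever it is, and
    # rebind the instance's type in place.
    class derived(readonlymixin, obj.__class__):
        pass
    obj.__class__ = derived
    return obj

class repo(object):
    def cancopy(self):
        return True

assert makereadonly(repo()).cancopy() is False
```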
|
519 | 519 | |
|
520 | 520 | |
|
521 | 521 | class bundletransactionmanager(object): |
|
522 | 522 | def transaction(self): |
|
523 | 523 | return None |
|
524 | 524 | |
|
525 | 525 | def close(self): |
|
526 | 526 | raise NotImplementedError |
|
527 | 527 | |
|
528 | 528 | def release(self): |
|
529 | 529 | raise NotImplementedError |
|
530 | 530 | |
|
531 | 531 | |
|
532 | 532 | def getremotechanges( |
|
533 | 533 | ui, repo, peer, onlyheads=None, bundlename=None, force=False |
|
534 | 534 | ): |
|
535 | 535 | """obtains a bundle of changes incoming from peer |
|
536 | 536 | |
|
537 | 537 | "onlyheads" restricts the returned changes to those reachable from the |
|
538 | 538 | specified heads. |
|
539 | 539 | "bundlename", if given, stores the bundle to this file path permanently; |
|
540 | 540 | otherwise it's stored to a temp file and gets deleted again when you call |
|
541 | 541 | the returned "cleanupfn". |
|
542 | 542 | "force" indicates whether to proceed on unrelated repos. |
|
543 | 543 | |
|
544 | 544 | Returns a tuple (local, csets, cleanupfn): |
|
545 | 545 | |
|
546 | 546 | "local" is a local repo from which to obtain the actual incoming |
|
547 | 547 | changesets; it is a bundlerepo for the obtained bundle when the |
|
548 | 548 | original "peer" is remote. |
|
549 | 549 | "csets" lists the incoming changeset node ids. |
|
550 | 550 | "cleanupfn" must be called without arguments when you're done processing |
|
551 | 551 | the changes; it closes both the original "peer" and the one returned |
|
552 | 552 | here. |
|
553 | 553 | """ |
|
554 | 554 | tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads, force=force) |
|
555 | 555 | common, incoming, rheads = tmp |
|
556 | 556 | if not incoming: |
|
557 | 557 | try: |
|
558 | 558 | if bundlename: |
|
559 | 559 | os.unlink(bundlename) |
|
560 | 560 | except OSError: |
|
561 | 561 | pass |
|
562 | 562 | return repo, [], peer.close |
|
563 | 563 | |
|
564 | 564 | commonset = set(common) |
|
565 | 565 | rheads = [x for x in rheads if x not in commonset] |
|
566 | 566 | |
|
567 | 567 | bundle = None |
|
568 | 568 | bundlerepo = None |
|
569 | 569 | localrepo = peer.local() |
|
570 | 570 | if bundlename or not localrepo: |
|
571 | 571 | # create a bundle (uncompressed if peer repo is not local) |
|
572 | 572 | |
|
573 | 573 | # developer config: devel.legacy.exchange |
|
574 | 574 | legexc = ui.configlist(b'devel', b'legacy.exchange') |
|
575 | 575 | forcebundle1 = b'bundle2' not in legexc and b'bundle1' in legexc |
|
576 | 576 | canbundle2 = ( |
|
577 | 577 | not forcebundle1 |
|
578 | 578 | and peer.capable(b'getbundle') |
|
579 | 579 | and peer.capable(b'bundle2') |
|
580 | 580 | ) |
|
581 | 581 | if canbundle2: |
|
582 | 582 | with peer.commandexecutor() as e: |
|
583 | 583 | b2 = e.callcommand( |
|
584 | 584 | b'getbundle', |
|
585 | 585 | { |
|
586 | 586 | b'source': b'incoming', |
|
587 | 587 | b'common': common, |
|
588 | 588 | b'heads': rheads, |
|
589 | 589 | b'bundlecaps': exchange.caps20to10( |
|
590 | 590 | repo, role=b'client' |
|
591 | 591 | ), |
|
592 | 592 | b'cg': True, |
|
593 | 593 | }, |
|
594 | 594 | ).result() |
|
595 | 595 | |
|
596 | 596 | fname = bundle = changegroup.writechunks( |
|
597 | 597 | ui, b2._forwardchunks(), bundlename |
|
598 | 598 | ) |
|
599 | 599 | else: |
|
600 | 600 | if peer.capable(b'getbundle'): |
|
601 | 601 | with peer.commandexecutor() as e: |
|
602 | 602 | cg = e.callcommand( |
|
603 | 603 | b'getbundle', |
|
604 | 604 | { |
|
605 | 605 | b'source': b'incoming', |
|
606 | 606 | b'common': common, |
|
607 | 607 | b'heads': rheads, |
|
608 | 608 | }, |
|
609 | 609 | ).result() |
|
610 | 610 | elif onlyheads is None and not peer.capable(b'changegroupsubset'): |
|
611 | 611 | # compat with older servers when pulling all remote heads |
|
612 | 612 | |
|
613 | 613 | with peer.commandexecutor() as e: |
|
614 | 614 | cg = e.callcommand( |
|
615 | 615 | b'changegroup', |
|
616 | 616 | { |
|
617 | 617 | b'nodes': incoming, |
|
618 | 618 | b'source': b'incoming', |
|
619 | 619 | }, |
|
620 | 620 | ).result() |
|
621 | 621 | |
|
622 | 622 | rheads = None |
|
623 | 623 | else: |
|
624 | 624 | with peer.commandexecutor() as e: |
|
625 | 625 | cg = e.callcommand( |
|
626 | 626 | b'changegroupsubset', |
|
627 | 627 | { |
|
628 | 628 | b'bases': incoming, |
|
629 | 629 | b'heads': rheads, |
|
630 | 630 | b'source': b'incoming', |
|
631 | 631 | }, |
|
632 | 632 | ).result() |
|
633 | 633 | |
|
634 | 634 | if localrepo: |
|
635 | 635 | bundletype = b"HG10BZ" |
|
636 | 636 | else: |
|
637 | 637 | bundletype = b"HG10UN" |
|
638 | 638 | fname = bundle = bundle2.writebundle(ui, cg, bundlename, bundletype) |
|
639 | 639 | # keep written bundle? |
|
640 | 640 | if bundlename: |
|
641 | 641 | bundle = None |
|
642 | 642 | if not localrepo: |
|
643 | 643 | # use the created uncompressed bundlerepo |
|
644 | 644 | localrepo = bundlerepo = makebundlerepository( |
|
645 | 645 | repo.baseui, repo.root, fname |
|
646 | 646 | ) |
|
647 | 647 | |
|
648 | 648 | # this repo contains local and peer now, so filter out local again |
|
649 | 649 | common = repo.heads() |
|
650 | 650 | if localrepo: |
|
651 | 651 | # Part of common may be remotely filtered |
|
652 | 652 | # So use an unfiltered version |
|
653 | 653 | # The discovery process probably needs cleanup to avoid that 
|
654 | 654 | localrepo = localrepo.unfiltered() |
|
655 | 655 | |
|
656 | 656 | csets = localrepo.changelog.findmissing(common, rheads) |
|
657 | 657 | |
|
658 | 658 | if bundlerepo: |
|
659 | 659 | reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev :]] |
|
660 | 660 | |
|
661 | 661 | with peer.commandexecutor() as e: |
|
662 | 662 | remotephases = e.callcommand( |
|
663 | 663 | b'listkeys', |
|
664 | 664 | { |
|
665 | 665 | b'namespace': b'phases', |
|
666 | 666 | }, |
|
667 | 667 | ).result() |
|
668 | 668 | |
|
669 | 669 | pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes) |
|
670 | 670 | pullop.trmanager = bundletransactionmanager() |
|
671 | 671 | exchange._pullapplyphases(pullop, remotephases) |
|
672 | 672 | |
|
673 | 673 | def cleanup(): |
|
674 | 674 | if bundlerepo: |
|
675 | 675 | bundlerepo.close() |
|
676 | 676 | if bundle: |
|
677 | 677 | os.unlink(bundle) |
|
678 | 678 | peer.close() |
|
679 | 679 | |
|
680 | 680 | return (localrepo, csets, cleanup) |
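
A hedged caller sketch matching the documented contract of getremotechanges(): whatever happens, resources are released through the returned cleanup function, which closes both the peer and any temporary bundle repo. `countincoming` is a hypothetical helper, not part of the module:

```python
def countincoming(ui, repo, peer):
    local, csets, cleanupfn = getremotechanges(ui, repo, peer)
    try:
        return len(csets)   # csets lists the incoming changeset node ids
    finally:
        cleanupfn()
```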
@@ -1,1710 +1,1784 @@
|
1 | 1 | # changegroup.py - Mercurial changegroup manipulation functions |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import os |
|
11 | 11 | import struct |
|
12 | 12 | import weakref |
|
13 | 13 | |
|
14 | 14 | from .i18n import _ |
|
15 | 15 | from .node import ( |
|
16 | 16 | hex, |
|
17 | 17 | nullid, |
|
18 | 18 | nullrev, |
|
19 | 19 | short, |
|
20 | 20 | ) |
|
21 | 21 | from .pycompat import open |
|
22 | 22 | |
|
23 | 23 | from . import ( |
|
24 | 24 | error, |
|
25 | 25 | match as matchmod, |
|
26 | 26 | mdiff, |
|
27 | 27 | phases, |
|
28 | 28 | pycompat, |
|
29 | 29 | requirements, |
|
30 | 30 | scmutil, |
|
31 | 31 | util, |
|
32 | 32 | ) |
|
33 | 33 | |
|
34 | 34 | from .interfaces import repository |
|
35 | from .revlogutils import sidedata as sidedatamod | |
|
35 | 36 | |
|
36 | 37 | _CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s") |
|
37 | 38 | _CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s") |
|
38 | 39 | _CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH") |
|
39 | 40 | |
|
40 | 41 | LFS_REQUIREMENT = b'lfs' |
|
41 | 42 | |
|
42 | 43 | readexactly = util.readexactly |
|
43 | 44 | |
|
44 | 45 | |
|
45 | 46 | def getchunk(stream): |
|
46 | 47 | """return the next chunk from stream as a string""" |
|
47 | 48 | d = readexactly(stream, 4) |
|
48 | 49 | l = struct.unpack(b">l", d)[0] |
|
49 | 50 | if l <= 4: |
|
50 | 51 | if l: |
|
51 | 52 | raise error.Abort(_(b"invalid chunk length %d") % l) |
|
52 | 53 | return b"" |
|
53 | 54 | return readexactly(stream, l - 4) |
|
54 | 55 | |
|
55 | 56 | |
|
56 | 57 | def chunkheader(length): |
|
57 | 58 | """return a changegroup chunk header (string)""" |
|
58 | 59 | return struct.pack(b">l", length + 4) |
|
59 | 60 | |
|
60 | 61 | |
|
61 | 62 | def closechunk(): |
|
62 | 63 | """return a changegroup chunk header (string) for a zero-length chunk""" |
|
63 | 64 | return struct.pack(b">l", 0) |
|
64 | 65 | |
|
65 | 66 | |
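
`getchunk`, `chunkheader`, and `closechunk` together define the whole framing: each chunk is a 4-byte big-endian length that counts itself, followed by the payload, and a zero length terminates a group. A self-contained round-trip sketch of that invariant:

```python
# Round-trip sketch of the chunk framing: length field includes its own
# 4 bytes; a length of 0 (what closechunk() emits) ends a group.
import io
import struct

def read_chunk(stream):
    l = struct.unpack(b">l", stream.read(4))[0]
    if l <= 4:
        return b""          # empty chunk ends the group
    return stream.read(l - 4)

payload = b"hello"
buf = io.BytesIO()
buf.write(struct.pack(b">l", len(payload) + 4))  # what chunkheader() emits
buf.write(payload)
buf.write(struct.pack(b">l", 0))                 # what closechunk() emits
buf.seek(0)

assert read_chunk(buf) == payload
assert read_chunk(buf) == b""
```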
|
66 | 67 | def _fileheader(path): |
|
67 | 68 | """Obtain a changegroup chunk header for a named path.""" |
|
68 | 69 | return chunkheader(len(path)) + path |
|
69 | 70 | |
|
70 | 71 | |
|
71 | 72 | def writechunks(ui, chunks, filename, vfs=None): |
|
72 | 73 | """Write chunks to a file and return its filename. |
|
73 | 74 | |
|
74 | 75 | The stream is assumed to be a bundle file. |
|
75 | 76 | Existing files will not be overwritten. |
|
76 | 77 | If no filename is specified, a temporary file is created. |
|
77 | 78 | """ |
|
78 | 79 | fh = None |
|
79 | 80 | cleanup = None |
|
80 | 81 | try: |
|
81 | 82 | if filename: |
|
82 | 83 | if vfs: |
|
83 | 84 | fh = vfs.open(filename, b"wb") |
|
84 | 85 | else: |
|
85 | 86 | # Increase default buffer size because default is usually |
|
86 | 87 | # small (4k is common on Linux). |
|
87 | 88 | fh = open(filename, b"wb", 131072) |
|
88 | 89 | else: |
|
89 | 90 | fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg") |
|
90 | 91 | fh = os.fdopen(fd, "wb") |
|
91 | 92 | cleanup = filename |
|
92 | 93 | for c in chunks: |
|
93 | 94 | fh.write(c) |
|
94 | 95 | cleanup = None |
|
95 | 96 | return filename |
|
96 | 97 | finally: |
|
97 | 98 | if fh is not None: |
|
98 | 99 | fh.close() |
|
99 | 100 | if cleanup is not None: |
|
100 | 101 | if filename and vfs: |
|
101 | 102 | vfs.unlink(cleanup) |
|
102 | 103 | else: |
|
103 | 104 | os.unlink(cleanup) |
|
104 | 105 | |
|
105 | 106 | |
|
106 | 107 | class cg1unpacker(object): |
|
107 | 108 | """Unpacker for cg1 changegroup streams. |
|
108 | 109 | |
|
109 | 110 | A changegroup unpacker handles the framing of the revision data in |
|
110 | 111 | the wire format. Most consumers will want to use the apply() |
|
111 | 112 | method to add the changes from the changegroup to a repository. |
|
112 | 113 | |
|
113 | 114 | If you're forwarding a changegroup unmodified to another consumer, |
|
114 | 115 | use getchunks(), which returns an iterator of changegroup |
|
115 | 116 | chunks. This is mostly useful for cases where you need to know the |
|
116 | 117 | data stream has ended by observing the end of the changegroup. |
|
117 | 118 | |
|
118 | 119 | deltachunk() is useful only if you're applying delta data. Most |
|
119 | 120 | consumers should prefer apply() instead. |
|
120 | 121 | |
|
121 | 122 | A few other public methods exist. Those are used only for |
|
122 | 123 | bundlerepo and some debug commands - their use is discouraged. |
|
123 | 124 | """ |
|
124 | 125 | |
|
125 | 126 | deltaheader = _CHANGEGROUPV1_DELTA_HEADER |
|
126 | 127 | deltaheadersize = deltaheader.size |
|
127 | 128 | version = b'01' |
|
128 | 129 | _grouplistcount = 1 # One list of files after the manifests |
|
129 | 130 | |
|
130 | 131 | def __init__(self, fh, alg, extras=None): |
|
131 | 132 | if alg is None: |
|
132 | 133 | alg = b'UN' |
|
133 | 134 | if alg not in util.compengines.supportedbundletypes: |
|
134 | 135 | raise error.Abort(_(b'unknown stream compression type: %s') % alg) |
|
135 | 136 | if alg == b'BZ': |
|
136 | 137 | alg = b'_truncatedBZ' |
|
137 | 138 | |
|
138 | 139 | compengine = util.compengines.forbundletype(alg) |
|
139 | 140 | self._stream = compengine.decompressorreader(fh) |
|
140 | 141 | self._type = alg |
|
141 | 142 | self.extras = extras or {} |
|
142 | 143 | self.callback = None |
|
143 | 144 | |
|
144 | 145 | # These methods (compressed, read, seek, tell) all appear to only |
|
145 | 146 | # be used by bundlerepo, but it's a little hard to tell. |
|
146 | 147 | def compressed(self): |
|
147 | 148 | return self._type is not None and self._type != b'UN' |
|
148 | 149 | |
|
149 | 150 | def read(self, l): |
|
150 | 151 | return self._stream.read(l) |
|
151 | 152 | |
|
152 | 153 | def seek(self, pos): |
|
153 | 154 | return self._stream.seek(pos) |
|
154 | 155 | |
|
155 | 156 | def tell(self): |
|
156 | 157 | return self._stream.tell() |
|
157 | 158 | |
|
158 | 159 | def close(self): |
|
159 | 160 | return self._stream.close() |
|
160 | 161 | |
|
161 | 162 | def _chunklength(self): |
|
162 | 163 | d = readexactly(self._stream, 4) |
|
163 | 164 | l = struct.unpack(b">l", d)[0] |
|
164 | 165 | if l <= 4: |
|
165 | 166 | if l: |
|
166 | 167 | raise error.Abort(_(b"invalid chunk length %d") % l) |
|
167 | 168 | return 0 |
|
168 | 169 | if self.callback: |
|
169 | 170 | self.callback() |
|
170 | 171 | return l - 4 |
|
171 | 172 | |
|
172 | 173 | def changelogheader(self): |
|
173 | 174 | """v10 does not have a changelog header chunk""" |
|
174 | 175 | return {} |
|
175 | 176 | |
|
176 | 177 | def manifestheader(self): |
|
177 | 178 | """v10 does not have a manifest header chunk""" |
|
178 | 179 | return {} |
|
179 | 180 | |
|
180 | 181 | def filelogheader(self): |
|
181 | 182 | """return the header of the filelogs chunk, v10 only has the filename""" |
|
182 | 183 | l = self._chunklength() |
|
183 | 184 | if not l: |
|
184 | 185 | return {} |
|
185 | 186 | fname = readexactly(self._stream, l) |
|
186 | 187 | return {b'filename': fname} |
|
187 | 188 | |
|
188 | 189 | def _deltaheader(self, headertuple, prevnode): |
|
189 | 190 | node, p1, p2, cs = headertuple |
|
190 | 191 | if prevnode is None: |
|
191 | 192 | deltabase = p1 |
|
192 | 193 | else: |
|
193 | 194 | deltabase = prevnode |
|
194 | 195 | flags = 0 |
|
195 | 196 | return node, p1, p2, deltabase, cs, flags |
|
196 | 197 | |
|
197 | 198 | def deltachunk(self, prevnode): |
|
198 | 199 | l = self._chunklength() |
|
199 | 200 | if not l: |
|
200 | 201 | return {} |
|
201 | 202 | headerdata = readexactly(self._stream, self.deltaheadersize) |
|
202 | 203 | header = self.deltaheader.unpack(headerdata) |
|
203 | 204 | delta = readexactly(self._stream, l - self.deltaheadersize) |
|
204 | 205 | node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode) |
|
205 | return (node, p1, p2, cs, deltabase, delta, flags) | |
|
206 | # cg4 forward-compat | |
|
207 | sidedata = {} | |
|
208 | return (node, p1, p2, cs, deltabase, delta, flags, sidedata) | |
|
206 | 209 | |
|
207 | 210 | def getchunks(self): |
|
208 | 211 | """returns all the chunks contained in the bundle
|
209 | 212 | |
|
210 | 213 | Used when you need to forward the binary stream to a file or another |
|
211 | 214 | network API. To do so, it parse the changegroup data, otherwise it will |
|
212 | 215 | block in case of sshrepo because it don't know the end of the stream. |
|
213 | 216 | """ |
|
214 | 217 | # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog, |
|
215 | 218 | # and a list of filelogs. For changegroup 3, we expect 4 parts: |
|
216 | 219 | # changelog, manifestlog, a list of tree manifestlogs, and a list of |
|
217 | 220 | # filelogs. |
|
218 | 221 | # |
|
219 | 222 | # Changelog and manifestlog parts are terminated with empty chunks. The |
|
220 | 223 | # tree and file parts are a list of entry sections. Each entry section |
|
221 | 224 | # is a series of chunks terminating in an empty chunk. The list of these |
|
222 | 225 | # entry sections is terminated in yet another empty chunk, so we know |
|
223 | 226 | # we've reached the end of the tree/file list when we reach an empty |
|
224 | 227 | # chunk that was preceded by no non-empty chunks.
|
225 | 228 | |
|
226 | 229 | parts = 0 |
|
227 | 230 | while parts < 2 + self._grouplistcount: |
|
228 | 231 | noentries = True |
|
229 | 232 | while True: |
|
230 | 233 | chunk = getchunk(self) |
|
231 | 234 | if not chunk: |
|
232 | 235 | # The first two empty chunks represent the end of the |
|
233 | 236 | # changelog and the manifestlog portions. The remaining |
|
234 | 237 | # empty chunks represent either A) the end of individual |
|
235 | 238 | # tree or file entries in the file list, or B) the end of |
|
236 | 239 | # the entire list. It's the end of the entire list if there |
|
237 | 240 | # were no entries (i.e. noentries is True). |
|
238 | 241 | if parts < 2: |
|
239 | 242 | parts += 1 |
|
240 | 243 | elif noentries: |
|
241 | 244 | parts += 1 |
|
242 | 245 | break |
|
243 | 246 | noentries = False |
|
244 | 247 | yield chunkheader(len(chunk)) |
|
245 | 248 | pos = 0 |
|
246 | 249 | while pos < len(chunk): |
|
247 | 250 | next = pos + 2 ** 20 |
|
248 | 251 | yield chunk[pos:next] |
|
249 | 252 | pos = next |
|
250 | 253 | yield closechunk() |
|
251 | 254 | |
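
The termination rule in the loop above is subtle: the changelog and manifest sections each end at their first empty chunk, while each tree/file list only ends at an empty chunk that follows no non-empty chunk, i.e. two empty chunks in a row. A condensed sketch of that counting logic, assuming a well-formed in-memory stream:

```python
# Condensed sketch of the section-counting rule (assumes a well-formed
# stream; the real code reads chunks lazily from the wire).
def count_sections(chunks, grouplistcount=1):
    parts = 0
    it = iter(chunks)
    while parts < 2 + grouplistcount:
        noentries = True
        for chunk in it:
            if not chunk:
                if parts < 2 or noentries:
                    parts += 1
                break
            noentries = False
    return parts

# cg1/cg2: changelog, manifests, one file entry, then end-of-list marker.
assert count_sections([b"c", b"", b"m", b"", b"f", b"", b""]) == 3
```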
|
252 | 255 | def _unpackmanifests(self, repo, revmap, trp, prog): |
|
253 | 256 | self.callback = prog.increment |
|
254 | 257 | # no need to check for empty manifest group here: |
|
255 | 258 | # if the result of the merge of 1 and 2 is the same in 3 and 4, |
|
256 | 259 | # no new manifest will be created and the manifest group will |
|
257 | 260 | # be empty during the pull |
|
258 | 261 | self.manifestheader() |
|
259 | 262 | deltas = self.deltaiter() |
|
260 | 263 | repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp) |
|
261 | 264 | prog.complete() |
|
262 | 265 | self.callback = None |
|
263 | 266 | |
|
264 | 267 | def apply( |
|
265 | 268 | self, |
|
266 | 269 | repo, |
|
267 | 270 | tr, |
|
268 | 271 | srctype, |
|
269 | 272 | url, |
|
270 | 273 | targetphase=phases.draft, |
|
271 | 274 | expectedtotal=None, |
|
272 | 275 | ): |
|
273 | 276 | """Add the changegroup returned by source.read() to this repo. |
|
274 | 277 | srctype is a string like 'push', 'pull', or 'unbundle'. url is |
|
275 | 278 | the URL of the repo where this changegroup is coming from. |
|
276 | 279 | |
|
277 | 280 | Return an integer summarizing the change to this repo: |
|
278 | 281 | - nothing changed or no source: 0 |
|
279 | 282 | - more heads than before: 1+added heads (2..n) |
|
280 | 283 | - fewer heads than before: -1-removed heads (-2..-n) |
|
281 | 284 | - number of heads stays the same: 1 |
|
282 | 285 | """ |
|
283 | 286 | repo = repo.unfiltered() |
|
284 | 287 | |
|
285 | 288 | def csmap(x): |
|
286 | 289 | repo.ui.debug(b"add changeset %s\n" % short(x)) |
|
287 | 290 | return len(cl) |
|
288 | 291 | |
|
289 | 292 | def revmap(x): |
|
290 | 293 | return cl.rev(x) |
|
291 | 294 | |
|
292 | 295 | try: |
|
293 | 296 | # The transaction may already carry source information. In this |
|
294 | 297 | # case we use the top level data. We overwrite the argument |
|
295 | 298 | # because we need to use the top level value (if they exist) |
|
296 | 299 | # in this function. |
|
297 | 300 | srctype = tr.hookargs.setdefault(b'source', srctype) |
|
298 | 301 | tr.hookargs.setdefault(b'url', url) |
|
299 | 302 | repo.hook( |
|
300 | 303 | b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs) |
|
301 | 304 | ) |
|
302 | 305 | |
|
303 | 306 | # write changelog data to temp files so concurrent readers |
|
304 | 307 | # will not see an inconsistent view |
|
305 | 308 | cl = repo.changelog |
|
306 | 309 | cl.delayupdate(tr) |
|
307 | 310 | oldheads = set(cl.heads()) |
|
308 | 311 | |
|
309 | 312 | trp = weakref.proxy(tr) |
|
310 | 313 | # pull off the changeset group |
|
311 | 314 | repo.ui.status(_(b"adding changesets\n")) |
|
312 | 315 | clstart = len(cl) |
|
313 | 316 | progress = repo.ui.makeprogress( |
|
314 | 317 | _(b'changesets'), unit=_(b'chunks'), total=expectedtotal |
|
315 | 318 | ) |
|
316 | 319 | self.callback = progress.increment |
|
317 | 320 | |
|
318 | 321 | efilesset = set() |
|
319 | 322 | duprevs = [] |
|
320 | 323 | |
|
321 | 324 | def ondupchangelog(cl, rev): |
|
322 | 325 | if rev < clstart: |
|
323 | 326 | duprevs.append(rev) |
|
324 | 327 | |
|
325 | 328 | def onchangelog(cl, rev): |
|
326 | 329 | ctx = cl.changelogrevision(rev) |
|
327 | 330 | efilesset.update(ctx.files) |
|
328 | 331 | repo.register_changeset(rev, ctx) |
|
329 | 332 | |
|
330 | 333 | self.changelogheader() |
|
331 | 334 | deltas = self.deltaiter() |
|
332 | 335 | if not cl.addgroup( |
|
333 | 336 | deltas, |
|
334 | 337 | csmap, |
|
335 | 338 | trp, |
|
336 | 339 | alwayscache=True, |
|
337 | 340 | addrevisioncb=onchangelog, |
|
338 | 341 | duplicaterevisioncb=ondupchangelog, |
|
339 | 342 | ): |
|
340 | 343 | repo.ui.develwarn( |
|
341 | 344 | b'applied empty changelog from changegroup', |
|
342 | 345 | config=b'warn-empty-changegroup', |
|
343 | 346 | ) |
|
344 | 347 | efiles = len(efilesset) |
|
345 | 348 | clend = len(cl) |
|
346 | 349 | changesets = clend - clstart |
|
347 | 350 | progress.complete() |
|
348 | 351 | del deltas |
|
349 | 352 | # TODO Python 2.7 removal |
|
350 | 353 | # del efilesset |
|
351 | 354 | efilesset = None |
|
352 | 355 | self.callback = None |
|
353 | 356 | |
|
354 | 357 | # pull off the manifest group |
|
355 | 358 | repo.ui.status(_(b"adding manifests\n")) |
|
356 | 359 | # We know that we'll never have more manifests than we had |
|
357 | 360 | # changesets. |
|
358 | 361 | progress = repo.ui.makeprogress( |
|
359 | 362 | _(b'manifests'), unit=_(b'chunks'), total=changesets |
|
360 | 363 | ) |
|
361 | 364 | self._unpackmanifests(repo, revmap, trp, progress) |
|
362 | 365 | |
|
363 | 366 | needfiles = {} |
|
364 | 367 | if repo.ui.configbool(b'server', b'validate'): |
|
365 | 368 | cl = repo.changelog |
|
366 | 369 | ml = repo.manifestlog |
|
367 | 370 | # validate incoming csets have their manifests |
|
368 | 371 | for cset in pycompat.xrange(clstart, clend): |
|
369 | 372 | mfnode = cl.changelogrevision(cset).manifest |
|
370 | 373 | mfest = ml[mfnode].readdelta() |
|
371 | 374 | # store file nodes we must see |
|
372 | 375 | for f, n in pycompat.iteritems(mfest): |
|
373 | 376 | needfiles.setdefault(f, set()).add(n) |
|
374 | 377 | |
|
375 | 378 | # process the files |
|
376 | 379 | repo.ui.status(_(b"adding file changes\n")) |
|
377 | 380 | newrevs, newfiles = _addchangegroupfiles( |
|
378 | 381 | repo, self, revmap, trp, efiles, needfiles |
|
379 | 382 | ) |
|
380 | 383 | |
|
381 | 384 | # making sure the value exists |
|
382 | 385 | tr.changes.setdefault(b'changegroup-count-changesets', 0) |
|
383 | 386 | tr.changes.setdefault(b'changegroup-count-revisions', 0) |
|
384 | 387 | tr.changes.setdefault(b'changegroup-count-files', 0) |
|
385 | 388 | tr.changes.setdefault(b'changegroup-count-heads', 0) |
|
386 | 389 | |
|
387 | 390 | # some code uses bundle operations for internal purposes. They usually

388 | 391 | # set `ui.quiet` to do this outside of user sight. Since the report

389 | 392 | # of such operations now happens at the end of the transaction,

390 | 393 | # ui.quiet has no direct effect on the output.

391 | 394 | #

392 | 395 | # To preserve this intent we use an inelegant hack: we fail to report
|
393 | 396 | # the change if `quiet` is set. We should probably move to |
|
394 | 397 | # something better, but this is a good first step to allow the "end |
|
395 | 398 | # of transaction report" to pass tests. |
|
396 | 399 | if not repo.ui.quiet: |
|
397 | 400 | tr.changes[b'changegroup-count-changesets'] += changesets |
|
398 | 401 | tr.changes[b'changegroup-count-revisions'] += newrevs |
|
399 | 402 | tr.changes[b'changegroup-count-files'] += newfiles |
|
400 | 403 | |
|
401 | 404 | deltaheads = 0 |
|
402 | 405 | if oldheads: |
|
403 | 406 | heads = cl.heads() |
|
404 | 407 | deltaheads += len(heads) - len(oldheads) |
|
405 | 408 | for h in heads: |
|
406 | 409 | if h not in oldheads and repo[h].closesbranch(): |
|
407 | 410 | deltaheads -= 1 |
|
408 | 411 | |
|
409 | 412 | # see previous comment about checking ui.quiet |
|
410 | 413 | if not repo.ui.quiet: |
|
411 | 414 | tr.changes[b'changegroup-count-heads'] += deltaheads |
|
412 | 415 | repo.invalidatevolatilesets() |
|
413 | 416 | |
|
414 | 417 | if changesets > 0: |
|
415 | 418 | if b'node' not in tr.hookargs: |
|
416 | 419 | tr.hookargs[b'node'] = hex(cl.node(clstart)) |
|
417 | 420 | tr.hookargs[b'node_last'] = hex(cl.node(clend - 1)) |
|
418 | 421 | hookargs = dict(tr.hookargs) |
|
419 | 422 | else: |
|
420 | 423 | hookargs = dict(tr.hookargs) |
|
421 | 424 | hookargs[b'node'] = hex(cl.node(clstart)) |
|
422 | 425 | hookargs[b'node_last'] = hex(cl.node(clend - 1)) |
|
423 | 426 | repo.hook( |
|
424 | 427 | b'pretxnchangegroup', |
|
425 | 428 | throw=True, |
|
426 | 429 | **pycompat.strkwargs(hookargs) |
|
427 | 430 | ) |
|
428 | 431 | |
|
429 | 432 | added = pycompat.xrange(clstart, clend) |
|
430 | 433 | phaseall = None |
|
431 | 434 | if srctype in (b'push', b'serve'): |
|
432 | 435 | # Old servers cannot push the boundary themselves.
|
433 | 436 | # New servers won't push the boundary if changeset already |
|
434 | 437 | # exists locally as secret |
|
435 | 438 | # |
|
436 | 439 | # We should not use added here but the list of all change in |
|
437 | 440 | # the bundle |
|
438 | 441 | if repo.publishing(): |
|
439 | 442 | targetphase = phaseall = phases.public |
|
440 | 443 | else: |
|
441 | 444 | # closer target phase computation |
|
442 | 445 | |
|
443 | 446 | # Those changesets have been pushed from the |
|
444 | 447 | # outside, their phases are going to be pushed |
|
445 | 448 | # alongside. Therefore `targetphase` is
|
446 | 449 | # ignored. |
|
447 | 450 | targetphase = phaseall = phases.draft |
|
448 | 451 | if added: |
|
449 | 452 | phases.registernew(repo, tr, targetphase, added) |
|
450 | 453 | if phaseall is not None: |
|
451 | 454 | if duprevs: |
|
452 | 455 | duprevs.extend(added) |
|
453 | 456 | else: |
|
454 | 457 | duprevs = added |
|
455 | 458 | phases.advanceboundary(repo, tr, phaseall, [], revs=duprevs) |
|
456 | 459 | duprevs = [] |
|
457 | 460 | |
|
458 | 461 | if changesets > 0: |
|
459 | 462 | |
|
460 | 463 | def runhooks(unused_success): |
|
461 | 464 | # These hooks run when the lock releases, not when the |
|
462 | 465 | # transaction closes. So it's possible for the changelog |
|
463 | 466 | # to have changed since we last saw it. |
|
464 | 467 | if clstart >= len(repo): |
|
465 | 468 | return |
|
466 | 469 | |
|
467 | 470 | repo.hook(b"changegroup", **pycompat.strkwargs(hookargs)) |
|
468 | 471 | |
|
469 | 472 | for rev in added: |
|
470 | 473 | args = hookargs.copy() |
|
471 | 474 | args[b'node'] = hex(cl.node(rev)) |
|
472 | 475 | del args[b'node_last'] |
|
473 | 476 | repo.hook(b"incoming", **pycompat.strkwargs(args)) |
|
474 | 477 | |
|
475 | 478 | newheads = [h for h in repo.heads() if h not in oldheads] |
|
476 | 479 | repo.ui.log( |
|
477 | 480 | b"incoming", |
|
478 | 481 | b"%d incoming changes - new heads: %s\n", |
|
479 | 482 | len(added), |
|
480 | 483 | b', '.join([hex(c[:6]) for c in newheads]), |
|
481 | 484 | ) |
|
482 | 485 | |
|
483 | 486 | tr.addpostclose( |
|
484 | 487 | b'changegroup-runhooks-%020i' % clstart, |
|
485 | 488 | lambda tr: repo._afterlock(runhooks), |
|
486 | 489 | ) |
|
487 | 490 | finally: |
|
488 | 491 | repo.ui.flush() |
|
489 | 492 | # never return 0 here: |
|
490 | 493 | if deltaheads < 0: |
|
491 | 494 | ret = deltaheads - 1 |
|
492 | 495 | else: |
|
493 | 496 | ret = deltaheads + 1 |
|
494 | 497 | return ret |
|
495 | 498 | |
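
The `finally` block above implements the return-value contract stated in the docstring: 0 is never returned, and the sign encodes whether heads were added or removed. Restated compactly:

```python
# Compact restatement of the return-code mapping in apply()'s finally
# block: never 0; positive means heads added/unchanged, negative removed.
def summarize(deltaheads):
    return deltaheads - 1 if deltaheads < 0 else deltaheads + 1

assert summarize(0) == 1    # head count unchanged
assert summarize(2) == 3    # two new heads
assert summarize(-1) == -2  # one head removed
```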
|
496 | 499 | def deltaiter(self): |
|
497 | 500 | """ |
|
498 | 501 | returns an iterator of the deltas in this changegroup |
|
499 | 502 | |
|
500 | 503 | Useful for passing to the underlying storage system to be stored. |
|
501 | 504 | """ |
|
502 | 505 | chain = None |
|
503 | 506 | for chunkdata in iter(lambda: self.deltachunk(chain), {}): |
|
504 | 507 | # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags, sidedata)
|
505 | 508 | yield chunkdata |
|
506 | 509 | chain = chunkdata[0] |
|
507 | 510 | |
|
508 | 511 | |
|
509 | 512 | class cg2unpacker(cg1unpacker): |
|
510 | 513 | """Unpacker for cg2 streams. |
|
511 | 514 | |
|
512 | 515 | cg2 streams add support for generaldelta, so the delta header |
|
513 | 516 | format is slightly different. All other features about the data |
|
514 | 517 | remain the same. |
|
515 | 518 | """ |
|
516 | 519 | |
|
517 | 520 | deltaheader = _CHANGEGROUPV2_DELTA_HEADER |
|
518 | 521 | deltaheadersize = deltaheader.size |
|
519 | 522 | version = b'02' |
|
520 | 523 | |
|
521 | 524 | def _deltaheader(self, headertuple, prevnode): |
|
522 | 525 | node, p1, p2, deltabase, cs = headertuple |
|
523 | 526 | flags = 0 |
|
524 | 527 | return node, p1, p2, deltabase, cs, flags |
|
525 | 528 | |
|
526 | 529 | |
|
527 | 530 | class cg3unpacker(cg2unpacker): |
|
528 | 531 | """Unpacker for cg3 streams. |
|
529 | 532 | |
|
530 | 533 | cg3 streams add support for exchanging treemanifests and revlog |
|
531 | 534 | flags. It adds the revlog flags to the delta header and an empty chunk |
|
532 | 535 | separating manifests and files. |
|
533 | 536 | """ |
|
534 | 537 | |
|
535 | 538 | deltaheader = _CHANGEGROUPV3_DELTA_HEADER |
|
536 | 539 | deltaheadersize = deltaheader.size |
|
537 | 540 | version = b'03' |
|
538 | 541 | _grouplistcount = 2 # One list of manifests and one list of files |
|
539 | 542 | |
|
540 | 543 | def _deltaheader(self, headertuple, prevnode): |
|
541 | 544 | node, p1, p2, deltabase, cs, flags = headertuple |
|
542 | 545 | return node, p1, p2, deltabase, cs, flags |
|
543 | 546 | |
|
544 | 547 | def _unpackmanifests(self, repo, revmap, trp, prog): |
|
545 | 548 | super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog) |
|
546 | 549 | for chunkdata in iter(self.filelogheader, {}): |
|
547 | 550 | # If we get here, there are directory manifests in the changegroup |
|
548 | 551 | d = chunkdata[b"filename"] |
|
549 | 552 | repo.ui.debug(b"adding %s revisions\n" % d) |
|
550 | 553 | deltas = self.deltaiter() |
|
551 | 554 | if not repo.manifestlog.getstorage(d).addgroup(deltas, revmap, trp): |
|
552 | 555 | raise error.Abort(_(b"received dir revlog group is empty")) |
|
553 | 556 | |
|
554 | 557 | |
|
558 | class cg4unpacker(cg3unpacker): | |
|
559 | """Unpacker for cg4 streams. | |
|
560 | ||
|
561 | cg4 streams add support for exchanging sidedata. | |
|
562 | """ | |
|
563 | ||
|
564 | version = b'04' | |
|
565 | ||
|
566 | def deltachunk(self, prevnode): | |
|
567 | res = super(cg4unpacker, self).deltachunk(prevnode) | |
|
568 | if not res: | |
|
569 | return res | |
|
570 | ||
|
571 | (node, p1, p2, cs, deltabase, delta, flags, _sidedata) = res | |
|
572 | ||
|
573 | sidedata_raw = getchunk(self._stream) | |
|
574 | sidedata = {} | |
|
575 | if len(sidedata_raw) > 0: | |
|
576 | sidedata = sidedatamod.deserialize_sidedata(sidedata_raw) | |
|
577 | ||
|
578 | return node, p1, p2, cs, deltabase, delta, flags, sidedata | |
|
579 | ||
|
580 | ||
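
The cg4 override changes the per-delta framing: after every delta chunk the stream now carries one additional chunk holding that revision's serialized sidedata, with an empty chunk meaning no sidedata. A simplified read-side sketch; `unpack_delta`, `getchunk`, and `deserialize_sidedata` are passed in as stand-ins for the real implementations:

```python
# Simplified read-side sketch of the cg4 framing (helpers are passed in
# as stand-ins; deserialize_sidedata's encoding is defined elsewhere).
def read_cg4_entry(stream, unpack_delta, getchunk, deserialize_sidedata):
    delta = unpack_delta(stream)  # (node, p1, p2, cs, deltabase, delta, flags)
    if not delta:
        return delta              # end of group: no sidedata chunk follows
    sidedata_raw = getchunk(stream)
    sidedata = deserialize_sidedata(sidedata_raw) if sidedata_raw else {}
    return delta + (sidedata,)
```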
|
555 | 581 | class headerlessfixup(object): |
|
556 | 582 | def __init__(self, fh, h): |
|
557 | 583 | self._h = h |
|
558 | 584 | self._fh = fh |
|
559 | 585 | |
|
560 | 586 | def read(self, n): |
|
561 | 587 | if self._h: |
|
562 | 588 | d, self._h = self._h[:n], self._h[n:] |
|
563 | 589 | if len(d) < n: |
|
564 | 590 | d += readexactly(self._fh, n - len(d)) |
|
565 | 591 | return d |
|
566 | 592 | return readexactly(self._fh, n) |
|
567 | 593 | |
|
568 | 594 | |
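
`headerlessfixup` exists because bundle-type sniffing consumes the first few header bytes before the unpacker ever sees the stream, so the wrapper re-prepends them transparently. A standalone sketch of the same idea:

```python
# Standalone sketch of the headerlessfixup idea: re-prepend header bytes
# that were already read off the stream to sniff the bundle type.
import io

class Prepend:
    def __init__(self, fh, head):
        self._fh, self._head = fh, head

    def read(self, n):
        if self._head:
            d, self._head = self._head[:n], self._head[n:]
            if len(d) < n:
                d += self._fh.read(n - len(d))
            return d
        return self._fh.read(n)

fh = Prepend(io.BytesIO(b"rest"), b"HG10")
assert fh.read(6) == b"HG10re"
```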
|
569 | 595 | def _revisiondeltatochunks(delta, headerfn): |
|
570 | 596 | """Serialize a revisiondelta to changegroup chunks.""" |
|
571 | 597 | |
|
572 | 598 | # The captured revision delta may be encoded as a delta against |
|
573 | 599 | # a base revision or as a full revision. The changegroup format |
|
574 | 600 | # requires that everything on the wire be deltas. So for full |
|
575 | 601 | # revisions, we need to invent a header that says to rewrite |
|
576 | 602 | # data. |
|
577 | 603 | |
|
578 | 604 | if delta.delta is not None: |
|
579 | 605 | prefix, data = b'', delta.delta |
|
580 | 606 | elif delta.basenode == nullid: |
|
581 | 607 | data = delta.revision |
|
582 | 608 | prefix = mdiff.trivialdiffheader(len(data)) |
|
583 | 609 | else: |
|
584 | 610 | data = delta.revision |
|
585 | 611 | prefix = mdiff.replacediffheader(delta.baserevisionsize, len(data)) |
|
586 | 612 | |
|
587 | 613 | meta = headerfn(delta) |
|
588 | 614 | |
|
589 | 615 | yield chunkheader(len(meta) + len(prefix) + len(data)) |
|
590 | 616 | yield meta |
|
591 | 617 | if prefix: |
|
592 | 618 | yield prefix |
|
593 | 619 | yield data |
|
594 | 620 | |
|
595 | 621 | |
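
The full-revision branches above rely on a property of the bdiff delta format: a delta is a series of (start, end, newlength) hunks, so a single hunk replacing bytes 0..baselen turns any full revision into a valid delta. That is what `trivialdiffheader`/`replacediffheader` produce; a sketch of the header they emit:

```python
# Sketch of the "full revision as a delta" trick. A bdiff hunk header is
# three big-endian 32-bit ints: start, end, and length of replacement.
import struct

def replace_diff_header(oldlen, newlen):
    return struct.pack(b">lll", 0, oldlen, newlen)

data = b"new full text"
# trivialdiffheader(len(data)) is the oldlen == 0 special case.
prefix = replace_diff_header(0, len(data))
assert len(prefix) == 12
```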
|
596 | 622 | def _sortnodesellipsis(store, nodes, cl, lookup): |
|
597 | 623 | """Sort nodes for changegroup generation.""" |
|
598 | 624 | # Ellipses serving mode. |
|
599 | 625 | # |
|
600 | 626 | # In a perfect world, we'd generate better ellipsis-ified graphs |
|
601 | 627 | # for non-changelog revlogs. In practice, we haven't started doing |
|
602 | 628 | # that yet, so the resulting DAGs for the manifestlog and filelogs |
|
603 | 629 | # are actually full of bogus parentage on all the ellipsis |
|
604 | 630 | # nodes. This has the side effect that, while the contents are |
|
605 | 631 | # correct, the individual DAGs might be completely out of whack in |
|
606 | 632 | # a case like 882681bc3166 and its ancestors (back about 10 |
|
607 | 633 | # revisions or so) in the main hg repo. |
|
608 | 634 | # |
|
609 | 635 | # The one invariant we *know* holds is that the new (potentially |
|
610 | 636 | # bogus) DAG shape will be valid if we order the nodes in the |
|
611 | 637 | # order that they're introduced in dramatis personae by the |
|
612 | 638 | # changelog, so what we do is we sort the non-changelog histories |
|
613 | 639 | # by the order in which they are used by the changelog. |
|
614 | 640 | key = lambda n: cl.rev(lookup(n)) |
|
615 | 641 | return sorted(nodes, key=key) |
|
616 | 642 | |
|
617 | 643 | |
|
618 | 644 | def _resolvenarrowrevisioninfo( |
|
619 | 645 | cl, |
|
620 | 646 | store, |
|
621 | 647 | ischangelog, |
|
622 | 648 | rev, |
|
623 | 649 | linkrev, |
|
624 | 650 | linknode, |
|
625 | 651 | clrevtolocalrev, |
|
626 | 652 | fullclnodes, |
|
627 | 653 | precomputedellipsis, |
|
628 | 654 | ): |
|
629 | 655 | linkparents = precomputedellipsis[linkrev] |
|
630 | 656 | |
|
631 | 657 | def local(clrev): |
|
632 | 658 | """Turn a changelog revnum into a local revnum. |
|
633 | 659 | |
|
634 | 660 | The ellipsis dag is stored as revnums on the changelog, |
|
635 | 661 | but when we're producing ellipsis entries for |
|
636 | 662 | non-changelog revlogs, we need to turn those numbers into |
|
637 | 663 | something local. This does that for us, and during the |
|
638 | 664 | changelog sending phase will also expand the stored |
|
639 | 665 | mappings as needed. |
|
640 | 666 | """ |
|
641 | 667 | if clrev == nullrev: |
|
642 | 668 | return nullrev |
|
643 | 669 | |
|
644 | 670 | if ischangelog: |
|
645 | 671 | return clrev |
|
646 | 672 | |
|
647 | 673 | # Walk the ellipsis-ized changelog breadth-first looking for a |
|
648 | 674 | # change that has been linked from the current revlog. |
|
649 | 675 | # |
|
650 | 676 | # For a flat manifest revlog only a single step should be necessary |
|
651 | 677 | # as all relevant changelog entries are relevant to the flat |
|
652 | 678 | # manifest. |
|
653 | 679 | # |
|
654 | 680 | # For a filelog or tree manifest dirlog however not every changelog |
|
655 | 681 | # entry will have been relevant, so we need to skip some changelog |
|
656 | 682 | # nodes even after ellipsis-izing. |
|
657 | 683 | walk = [clrev] |
|
658 | 684 | while walk: |
|
659 | 685 | p = walk[0] |
|
660 | 686 | walk = walk[1:] |
|
661 | 687 | if p in clrevtolocalrev: |
|
662 | 688 | return clrevtolocalrev[p] |
|
663 | 689 | elif p in fullclnodes: |
|
664 | 690 | walk.extend([pp for pp in cl.parentrevs(p) if pp != nullrev]) |
|
665 | 691 | elif p in precomputedellipsis: |
|
666 | 692 | walk.extend( |
|
667 | 693 | [pp for pp in precomputedellipsis[p] if pp != nullrev] |
|
668 | 694 | ) |
|
669 | 695 | else: |
|
670 | 696 | # In this case, we've got an ellipsis with parents |
|
671 | 697 | # outside the current bundle (likely an |
|
672 | 698 | # incremental pull). We "know" that we can use the |
|
673 | 699 | # value of this same revlog at whatever revision |
|
674 | 700 | # is pointed to by linknode. "Know" is in scare |
|
675 | 701 | # quotes because I haven't done enough examination |
|
676 | 702 | # of edge cases to convince myself this is really |
|
677 | 703 | # a fact - it works for all the (admittedly |
|
678 | 704 | # thorough) cases in our testsuite, but I would be |
|
679 | 705 | # somewhat unsurprised to find a case in the wild |
|
680 | 706 | # where this breaks down a bit. That said, I don't |
|
681 | 707 | # know if it would hurt anything. |
|
682 | 708 | for i in pycompat.xrange(rev, 0, -1): |
|
683 | 709 | if store.linkrev(i) == clrev: |
|
684 | 710 | return i |
|
685 | 711 | # We failed to resolve a parent for this node, so |
|
686 | 712 | # we crash the changegroup construction. |
|
687 | 713 | raise error.Abort( |
|
688 | 714 | b"unable to resolve parent while packing '%s' %r" |
|
689 | 715 | b' for changeset %r' % (store.indexfile, rev, clrev) |
|
690 | 716 | ) |
|
691 | 717 | |
|
692 | 718 | return nullrev |
|
693 | 719 | |
|
694 | 720 | if not linkparents or (store.parentrevs(rev) == (nullrev, nullrev)): |
|
695 | 721 | p1, p2 = nullrev, nullrev |
|
696 | 722 | elif len(linkparents) == 1: |
|
697 | 723 | (p1,) = sorted(local(p) for p in linkparents) |
|
698 | 724 | p2 = nullrev |
|
699 | 725 | else: |
|
700 | 726 | p1, p2 = sorted(local(p) for p in linkparents) |
|
701 | 727 | |
|
702 | 728 | p1node, p2node = store.node(p1), store.node(p2) |
|
703 | 729 | |
|
704 | 730 | return p1node, p2node, linknode |
|
705 | 731 | |
|
706 | 732 | |
|
707 | 733 | def deltagroup( |
|
708 | 734 | repo, |
|
709 | 735 | store, |
|
710 | 736 | nodes, |
|
711 | 737 | ischangelog, |
|
712 | 738 | lookup, |
|
713 | 739 | forcedeltaparentprev, |
|
714 | 740 | topic=None, |
|
715 | 741 | ellipses=False, |
|
716 | 742 | clrevtolocalrev=None, |
|
717 | 743 | fullclnodes=None, |
|
718 | 744 | precomputedellipsis=None, |
|
719 | 745 | ): |
|
720 | 746 | """Calculate deltas for a set of revisions. |
|
721 | 747 | |
|
722 | 748 | Is a generator of ``revisiondelta`` instances. |
|
723 | 749 | |
|
724 | 750 | If topic is not None, progress detail will be generated using this |
|
725 | 751 | topic name (e.g. changesets, manifests, etc). |
|
726 | 752 | """ |
|
727 | 753 | if not nodes: |
|
728 | 754 | return |
|
729 | 755 | |
|
730 | 756 | cl = repo.changelog |
|
731 | 757 | |
|
732 | 758 | if ischangelog: |
|
733 | 759 | # `hg log` shows changesets in storage order. To preserve order |
|
734 | 760 | # across clones, send out changesets in storage order. |
|
735 | 761 | nodesorder = b'storage' |
|
736 | 762 | elif ellipses: |
|
737 | 763 | nodes = _sortnodesellipsis(store, nodes, cl, lookup) |
|
738 | 764 | nodesorder = b'nodes' |
|
739 | 765 | else: |
|
740 | 766 | nodesorder = None |
|
741 | 767 | |
|
742 | 768 | # Perform ellipses filtering and revision massaging. We do this before |
|
743 | 769 | # emitrevisions() because a) filtering out revisions creates less work |
|
744 | 770 | # for emitrevisions() b) dropping revisions would break emitrevisions()'s |
|
745 | 771 | # assumptions about delta choices and we would possibly send a delta |
|
746 | 772 | # referencing a missing base revision. |
|
747 | 773 | # |
|
748 | 774 | # Also, calling lookup() has side-effects with regards to populating |
|
749 | 775 | # data structures. If we don't call lookup() for each node or if we call |
|
750 | 776 | # lookup() after the first pass through each node, things can break - |
|
751 | 777 | # possibly intermittently depending on the python hash seed! For that |
|
752 | 778 | # reason, we store a mapping of all linknodes during the initial node |
|
753 | 779 | # pass rather than use lookup() on the output side. |
|
754 | 780 | if ellipses: |
|
755 | 781 | filtered = [] |
|
756 | 782 | adjustedparents = {} |
|
757 | 783 | linknodes = {} |
|
758 | 784 | |
|
759 | 785 | for node in nodes: |
|
760 | 786 | rev = store.rev(node) |
|
761 | 787 | linknode = lookup(node) |
|
762 | 788 | linkrev = cl.rev(linknode) |
|
763 | 789 | clrevtolocalrev[linkrev] = rev |
|
764 | 790 | |
|
765 | 791 | # If linknode is in fullclnodes, it means the corresponding |
|
766 | 792 | # changeset was a full changeset and is being sent unaltered. |
|
767 | 793 | if linknode in fullclnodes: |
|
768 | 794 | linknodes[node] = linknode |
|
769 | 795 | |
|
770 | 796 | # If the corresponding changeset wasn't in the set computed |
|
771 | 797 | # as relevant to us, it should be dropped outright. |
|
772 | 798 | elif linkrev not in precomputedellipsis: |
|
773 | 799 | continue |
|
774 | 800 | |
|
775 | 801 | else: |
|
776 | 802 | # We could probably do this later and avoid the dict |
|
777 | 803 | # holding state. But it likely doesn't matter. |
|
778 | 804 | p1node, p2node, linknode = _resolvenarrowrevisioninfo( |
|
779 | 805 | cl, |
|
780 | 806 | store, |
|
781 | 807 | ischangelog, |
|
782 | 808 | rev, |
|
783 | 809 | linkrev, |
|
784 | 810 | linknode, |
|
785 | 811 | clrevtolocalrev, |
|
786 | 812 | fullclnodes, |
|
787 | 813 | precomputedellipsis, |
|
788 | 814 | ) |
|
789 | 815 | |
|
790 | 816 | adjustedparents[node] = (p1node, p2node) |
|
791 | 817 | linknodes[node] = linknode |
|
792 | 818 | |
|
793 | 819 | filtered.append(node) |
|
794 | 820 | |
|
795 | 821 | nodes = filtered |
|
796 | 822 | |
|
797 | 823 | # We expect the first pass to be fast, so we only engage the progress |
|
798 | 824 | # meter for constructing the revision deltas. |
|
799 | 825 | progress = None |
|
800 | 826 | if topic is not None: |
|
801 | 827 | progress = repo.ui.makeprogress( |
|
802 | 828 | topic, unit=_(b'chunks'), total=len(nodes) |
|
803 | 829 | ) |
|
804 | 830 | |
|
805 | 831 | configtarget = repo.ui.config(b'devel', b'bundle.delta') |
|
806 | 832 | if configtarget not in (b'', b'p1', b'full'): |
|
807 | 833 | msg = _("""config "devel.bundle.delta" has unknown value: %s""")
|
808 | 834 | repo.ui.warn(msg % configtarget) |
|
809 | 835 | |
|
810 | 836 | deltamode = repository.CG_DELTAMODE_STD |
|
811 | 837 | if forcedeltaparentprev: |
|
812 | 838 | deltamode = repository.CG_DELTAMODE_PREV |
|
813 | 839 | elif configtarget == b'p1': |
|
814 | 840 | deltamode = repository.CG_DELTAMODE_P1 |
|
815 | 841 | elif configtarget == b'full': |
|
816 | 842 | deltamode = repository.CG_DELTAMODE_FULL |
|
817 | 843 | |
|
818 | 844 | revisions = store.emitrevisions( |
|
819 | 845 | nodes, |
|
820 | 846 | nodesorder=nodesorder, |
|
821 | 847 | revisiondata=True, |
|
822 | 848 | assumehaveparentrevisions=not ellipses, |
|
823 | 849 | deltamode=deltamode, |
|
824 | 850 | ) |
|
825 | 851 | |
|
826 | 852 | for i, revision in enumerate(revisions): |
|
827 | 853 | if progress: |
|
828 | 854 | progress.update(i + 1) |
|
829 | 855 | |
|
830 | 856 | if ellipses: |
|
831 | 857 | linknode = linknodes[revision.node] |
|
832 | 858 | |
|
833 | 859 | if revision.node in adjustedparents: |
|
834 | 860 | p1node, p2node = adjustedparents[revision.node] |
|
835 | 861 | revision.p1node = p1node |
|
836 | 862 | revision.p2node = p2node |
|
837 | 863 | revision.flags |= repository.REVISION_FLAG_ELLIPSIS |
|
838 | 864 | |
|
839 | 865 | else: |
|
840 | 866 | linknode = lookup(revision.node) |
|
841 | 867 | |
|
842 | 868 | revision.linknode = linknode |
|
843 | 869 | yield revision |
|
844 | 870 | |
|
845 | 871 | if progress: |
|
846 | 872 | progress.complete() |
|
847 | 873 | |
|
848 | 874 | |
|
849 | 875 | class cgpacker(object): |
|
850 | 876 | def __init__( |
|
851 | 877 | self, |
|
852 | 878 | repo, |
|
853 | 879 | oldmatcher, |
|
854 | 880 | matcher, |
|
855 | 881 | version, |
|
856 | 882 | builddeltaheader, |
|
857 | 883 | manifestsend, |
|
858 | 884 | forcedeltaparentprev=False, |
|
859 | 885 | bundlecaps=None, |
|
860 | 886 | ellipses=False, |
|
861 | 887 | shallow=False, |
|
862 | 888 | ellipsisroots=None, |
|
863 | 889 | fullnodes=None, |
|
890 | remote_sidedata=None, | |
|
864 | 891 | ): |
|
865 | 892 | """Given a source repo, construct a bundler. |
|
866 | 893 | |
|
867 | 894 | oldmatcher is a matcher that matches on files the client already has. |
|
868 | 895 | These will not be included in the changegroup. |
|
869 | 896 | |
|
870 | 897 | matcher is a matcher that matches on files to include in the |
|
871 | 898 | changegroup. Used to facilitate sparse changegroups. |
|
872 | 899 | |
|
873 | 900 | forcedeltaparentprev indicates whether delta parents must be against |
|
874 | 901 | the previous revision in a delta group. This should only be used for |
|
875 | 902 | compatibility with changegroup version 1. |
|
876 | 903 | |
|
877 | 904 | builddeltaheader is a callable that constructs the header for a group |
|
878 | 905 | delta. |
|
879 | 906 | |
|
880 | 907 | manifestsend is a chunk to send after manifests have been fully emitted. |
|
881 | 908 | |
|
882 | 909 | ellipses indicates whether ellipsis serving mode is enabled. |
|
883 | 910 | |
|
884 | 911 | bundlecaps is optional and can be used to specify the set of |
|
885 | 912 | capabilities which can be used to build the bundle. While bundlecaps is |
|
886 | 913 | unused in core Mercurial, extensions rely on this feature to communicate |
|
887 | 914 | capabilities to customize the changegroup packer. |
|
888 | 915 | |
|
889 | 916 | shallow indicates whether shallow data might be sent. The packer may |
|
890 | 917 | need to pack file contents not introduced by the changes being packed. |
|
891 | 918 | |
|
892 | 919 | fullnodes is the set of changelog nodes which should not be ellipsis |
|
893 | 920 | nodes. We store this rather than the set of nodes that should be |
|
894 | 921 | ellipsis because for very large histories we expect this to be |
|
895 | 922 | significantly smaller. |
|
923 | ||
|
924 | remote_sidedata is the set of sidedata categories wanted by the remote. | |
|
896 | 925 | """ |
|
897 | 926 | assert oldmatcher |
|
898 | 927 | assert matcher |
|
899 | 928 | self._oldmatcher = oldmatcher |
|
900 | 929 | self._matcher = matcher |
|
901 | 930 | |
|
902 | 931 | self.version = version |
|
903 | 932 | self._forcedeltaparentprev = forcedeltaparentprev |
|
904 | 933 | self._builddeltaheader = builddeltaheader |
|
905 | 934 | self._manifestsend = manifestsend |
|
906 | 935 | self._ellipses = ellipses |
|
907 | 936 | |
|
908 | 937 | # Set of capabilities we can use to build the bundle. |
|
909 | 938 | if bundlecaps is None: |
|
910 | 939 | bundlecaps = set() |
|
911 | 940 | self._bundlecaps = bundlecaps |
|
912 | 941 | self._isshallow = shallow |
|
913 | 942 | self._fullclnodes = fullnodes |
|
914 | 943 | |
|
915 | 944 | # Maps ellipsis revs to their roots at the changelog level. |
|
916 | 945 | self._precomputedellipsis = ellipsisroots |
|
917 | 946 | |
|
918 | 947 | self._repo = repo |
|
919 | 948 | |
|
920 | 949 | if self._repo.ui.verbose and not self._repo.ui.debugflag: |
|
921 | 950 | self._verbosenote = self._repo.ui.note |
|
922 | 951 | else: |
|
923 | 952 | self._verbosenote = lambda s: None |
|
924 | 953 | |
|
925 | 954 | def generate( |
|
926 | 955 | self, commonrevs, clnodes, fastpathlinkrev, source, changelog=True |
|
927 | 956 | ): |
|
928 | 957 | """Yield a sequence of changegroup byte chunks. |
|
929 | 958 | If changelog is False, changelog data won't be added to changegroup |
|
930 | 959 | """ |
|
931 | 960 | |
|
932 | 961 | repo = self._repo |
|
933 | 962 | cl = repo.changelog |
|
934 | 963 | |
|
935 | 964 | self._verbosenote(_(b'uncompressed size of bundle content:\n')) |
|
936 | 965 | size = 0 |
|
937 | 966 | |
|
938 | 967 | clstate, deltas = self._generatechangelog( |
|
939 | 968 | cl, clnodes, generate=changelog |
|
940 | 969 | ) |
|
941 | 970 | for delta in deltas: |
|
942 | 971 | for chunk in _revisiondeltatochunks(delta, self._builddeltaheader): |
|
943 | 972 | size += len(chunk) |
|
944 | 973 | yield chunk |
|
945 | 974 | |
|
946 | 975 | close = closechunk() |
|
947 | 976 | size += len(close) |
|
948 | 977 | yield closechunk() |
|
949 | 978 | |
|
950 | 979 | self._verbosenote(_(b'%8.i (changelog)\n') % size) |
|
951 | 980 | |
|
952 | 981 | clrevorder = clstate[b'clrevorder'] |
|
953 | 982 | manifests = clstate[b'manifests'] |
|
954 | 983 | changedfiles = clstate[b'changedfiles'] |
|
955 | 984 | |
|
956 | 985 | # We need to make sure that the linkrev in the changegroup refers to |
|
957 | 986 | # the first changeset that introduced the manifest or file revision. |
|
958 | 987 | # The fastpath is usually safer than the slowpath, because the filelogs |
|
959 | 988 | # are walked in revlog order. |
|
960 | 989 | # |
|
961 | 990 | # When taking the slowpath when the manifest revlog uses generaldelta, |
|
962 | 991 | # the manifest may be walked in the "wrong" order. Without 'clrevorder', |
|
963 | 992 | # we would get an incorrect linkrev (see fix in cc0ff93d0c0c). |
|
964 | 993 | # |
|
965 | 994 | # When taking the fastpath, we are only vulnerable to reordering |
|
966 | 995 | # of the changelog itself. The changelog never uses generaldelta and is |
|
967 | 996 | # never reordered. To handle this case, we simply take the slowpath, |
|
968 | 997 | # which already has the 'clrevorder' logic. This was also fixed in |
|
969 | 998 | # cc0ff93d0c0c. |
|
970 | 999 | |
|
971 | 1000 | # Treemanifests don't work correctly with fastpathlinkrev |
|
972 | 1001 | # either, because we don't discover which directory nodes to |
|
973 | 1002 | # send along with files. This could probably be fixed. |
|
974 | 1003 | fastpathlinkrev = fastpathlinkrev and not scmutil.istreemanifest(repo) |
|
975 | 1004 | |
|
976 | 1005 | fnodes = {} # needed file nodes |
|
977 | 1006 | |
|
978 | 1007 | size = 0 |
|
979 | 1008 | it = self.generatemanifests( |
|
980 | 1009 | commonrevs, |
|
981 | 1010 | clrevorder, |
|
982 | 1011 | fastpathlinkrev, |
|
983 | 1012 | manifests, |
|
984 | 1013 | fnodes, |
|
985 | 1014 | source, |
|
986 | 1015 | clstate[b'clrevtomanifestrev'], |
|
987 | 1016 | ) |
|
988 | 1017 | |
|
989 | 1018 | for tree, deltas in it: |
|
990 | 1019 | if tree: |
|
991 | assert self.version == b'03' | 

 1020 | assert self.version in (b'03', b'04') | 
|
992 | 1021 | chunk = _fileheader(tree) |
|
993 | 1022 | size += len(chunk) |
|
994 | 1023 | yield chunk |
|
995 | 1024 | |
|
996 | 1025 | for delta in deltas: |
|
997 | 1026 | chunks = _revisiondeltatochunks(delta, self._builddeltaheader) |
|
998 | 1027 | for chunk in chunks: |
|
999 | 1028 | size += len(chunk) |
|
1000 | 1029 | yield chunk |
|
1001 | 1030 | |
|
1002 | 1031 | close = closechunk() |
|
1003 | 1032 | size += len(close) |
|
1004 | 1033 | yield close |
|
1005 | 1034 | |
|
1006 | 1035 | self._verbosenote(_(b'%8.i (manifests)\n') % size) |
|
1007 | 1036 | yield self._manifestsend |
|
1008 | 1037 | |
|
1009 | 1038 | mfdicts = None |
|
1010 | 1039 | if self._ellipses and self._isshallow: |
|
1011 | 1040 | mfdicts = [ |
|
1012 | 1041 | (repo.manifestlog[n].read(), lr) |
|
1013 | 1042 | for (n, lr) in pycompat.iteritems(manifests) |
|
1014 | 1043 | ] |
|
1015 | 1044 | |
|
1016 | 1045 | manifests.clear() |
|
1017 | 1046 | clrevs = {cl.rev(x) for x in clnodes} |
|
1018 | 1047 | |
|
1019 | 1048 | it = self.generatefiles( |
|
1020 | 1049 | changedfiles, |
|
1021 | 1050 | commonrevs, |
|
1022 | 1051 | source, |
|
1023 | 1052 | mfdicts, |
|
1024 | 1053 | fastpathlinkrev, |
|
1025 | 1054 | fnodes, |
|
1026 | 1055 | clrevs, |
|
1027 | 1056 | ) |
|
1028 | 1057 | |
|
1029 | 1058 | for path, deltas in it: |
|
1030 | 1059 | h = _fileheader(path) |
|
1031 | 1060 | size = len(h) |
|
1032 | 1061 | yield h |
|
1033 | 1062 | |
|
1034 | 1063 | for delta in deltas: |
|
1035 | 1064 | chunks = _revisiondeltatochunks(delta, self._builddeltaheader) |
|
1036 | 1065 | for chunk in chunks: |
|
1037 | 1066 | size += len(chunk) |
|
1038 | 1067 | yield chunk |
|
1039 | 1068 | |
|
1040 | 1069 | close = closechunk() |
|
1041 | 1070 | size += len(close) |
|
1042 | 1071 | yield close |
|
1043 | 1072 | |
|
1044 | 1073 | self._verbosenote(_(b'%8.i %s\n') % (size, path)) |
|
1045 | 1074 | |
|
1046 | 1075 | yield closechunk() |
|
1047 | 1076 | |
|
1048 | 1077 | if clnodes: |
|
1049 | 1078 | repo.hook(b'outgoing', node=hex(clnodes[0]), source=source) |
|
1050 | 1079 | |
|
1051 | 1080 | def _generatechangelog(self, cl, nodes, generate=True): |
|
1052 | 1081 | """Generate data for changelog chunks. |
|
1053 | 1082 | |
|
1054 | 1083 | Returns a 2-tuple of a dict containing state and an iterable of |
|
1055 | 1084 | byte chunks. The state will not be fully populated until the |
|
1056 | 1085 | chunk stream has been fully consumed. |
|
1057 | 1086 | |
|
1058 | 1087 | if generate is False, the state will be fully populated and no chunk |
|
1059 | 1088 | stream will be yielded |
|
1060 | 1089 | """ |
|
1061 | 1090 | clrevorder = {} |
|
1062 | 1091 | manifests = {} |
|
1063 | 1092 | mfl = self._repo.manifestlog |
|
1064 | 1093 | changedfiles = set() |
|
1065 | 1094 | clrevtomanifestrev = {} |
|
1066 | 1095 | |
|
1067 | 1096 | state = { |
|
1068 | 1097 | b'clrevorder': clrevorder, |
|
1069 | 1098 | b'manifests': manifests, |
|
1070 | 1099 | b'changedfiles': changedfiles, |
|
1071 | 1100 | b'clrevtomanifestrev': clrevtomanifestrev, |
|
1072 | 1101 | } |
|
1073 | 1102 | |
|
1074 | 1103 | if not (generate or self._ellipses): |
|
1075 | 1104 | # sort the nodes in storage order |
|
1076 | 1105 | nodes = sorted(nodes, key=cl.rev) |
|
1077 | 1106 | for node in nodes: |
|
1078 | 1107 | c = cl.changelogrevision(node) |
|
1079 | 1108 | clrevorder[node] = len(clrevorder) |
|
1080 | 1109 | # record the first changeset introducing this manifest version |
|
1081 | 1110 | manifests.setdefault(c.manifest, node) |
|
1082 | 1111 | # Record a complete list of potentially-changed files in |
|
1083 | 1112 | # this manifest. |
|
1084 | 1113 | changedfiles.update(c.files) |
|
1085 | 1114 | |
|
1086 | 1115 | return state, () |
|
1087 | 1116 | |
|
1088 | 1117 | # Callback for the changelog, used to collect changed files and |
|
1089 | 1118 | # manifest nodes. |
|
1090 | 1119 | # Returns the linkrev node (identity in the changelog case). |
|
1091 | 1120 | def lookupcl(x): |
|
1092 | 1121 | c = cl.changelogrevision(x) |
|
1093 | 1122 | clrevorder[x] = len(clrevorder) |
|
1094 | 1123 | |
|
1095 | 1124 | if self._ellipses: |
|
1096 | 1125 | # Only update manifests if x is going to be sent. Otherwise we |
|
1097 | 1126 | # end up with bogus linkrevs specified for manifests and |
|
1098 | 1127 | # we skip some manifest nodes that we should otherwise |
|
1099 | 1128 | # have sent. |
|
1100 | 1129 | if ( |
|
1101 | 1130 | x in self._fullclnodes |
|
1102 | 1131 | or cl.rev(x) in self._precomputedellipsis |
|
1103 | 1132 | ): |
|
1104 | 1133 | |
|
1105 | 1134 | manifestnode = c.manifest |
|
1106 | 1135 | # Record the first changeset introducing this manifest |
|
1107 | 1136 | # version. |
|
1108 | 1137 | manifests.setdefault(manifestnode, x) |
|
1109 | 1138 | # Set this narrow-specific dict so we have the lowest |
|
1110 | 1139 | # manifest revnum to look up for this cl revnum. (Part of |
|
1111 | 1140 | # mapping changelog ellipsis parents to manifest ellipsis |
|
1112 | 1141 | # parents) |
|
1113 | 1142 | clrevtomanifestrev.setdefault( |
|
1114 | 1143 | cl.rev(x), mfl.rev(manifestnode) |
|
1115 | 1144 | ) |
|
1116 | 1145 | # We can't trust the changed files list in the changeset if the |
|
1117 | 1146 | # client requested a shallow clone. |
|
1118 | 1147 | if self._isshallow: |
|
1119 | 1148 | changedfiles.update(mfl[c.manifest].read().keys()) |
|
1120 | 1149 | else: |
|
1121 | 1150 | changedfiles.update(c.files) |
|
1122 | 1151 | else: |
|
1123 | 1152 | # record the first changeset introducing this manifest version |
|
1124 | 1153 | manifests.setdefault(c.manifest, x) |
|
1125 | 1154 | # Record a complete list of potentially-changed files in |
|
1126 | 1155 | # this manifest. |
|
1127 | 1156 | changedfiles.update(c.files) |
|
1128 | 1157 | |
|
1129 | 1158 | return x |
|
1130 | 1159 | |
|
1131 | 1160 | gen = deltagroup( |
|
1132 | 1161 | self._repo, |
|
1133 | 1162 | cl, |
|
1134 | 1163 | nodes, |
|
1135 | 1164 | True, |
|
1136 | 1165 | lookupcl, |
|
1137 | 1166 | self._forcedeltaparentprev, |
|
1138 | 1167 | ellipses=self._ellipses, |
|
1139 | 1168 | topic=_(b'changesets'), |
|
1140 | 1169 | clrevtolocalrev={}, |
|
1141 | 1170 | fullclnodes=self._fullclnodes, |
|
1142 | 1171 | precomputedellipsis=self._precomputedellipsis, |
|
1143 | 1172 | ) |
|
1144 | 1173 | |
|
1145 | 1174 | return state, gen |
|
1146 | 1175 | |
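
The `clrevorder` dict built here is what makes the linkrev discussion in `generate()` work: changelog nodes are numbered in emission order, and the manifest/file lookup callbacks below keep each linknode pointing at the earliest-sent changeset that introduced a given node. A toy illustration of that min-by-send-order rule:

```python
# Toy illustration of the clrevorder bookkeeping: number changesets in
# send order, then keep the earliest-sent introducer per file node.
clrevorder = {}
for node in (b"c1", b"c2", b"c3"):
    clrevorder[node] = len(clrevorder)

fclnodes = {}
for fnode, clnode in ((b"f", b"c3"), (b"f", b"c1")):
    current = fclnodes.setdefault(fnode, clnode)
    if clrevorder[clnode] < clrevorder[current]:
        fclnodes[fnode] = clnode

assert fclnodes[b"f"] == b"c1"
```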
|
1147 | 1176 | def generatemanifests( |
|
1148 | 1177 | self, |
|
1149 | 1178 | commonrevs, |
|
1150 | 1179 | clrevorder, |
|
1151 | 1180 | fastpathlinkrev, |
|
1152 | 1181 | manifests, |
|
1153 | 1182 | fnodes, |
|
1154 | 1183 | source, |
|
1155 | 1184 | clrevtolocalrev, |
|
1156 | 1185 | ): |
|
1157 | 1186 | """Returns an iterator of changegroup chunks containing manifests. |
|
1158 | 1187 | |
|
1159 | 1188 | `source` is unused here, but is used by extensions like remotefilelog to |
|
1160 | 1189 | change what is sent based on pulls vs pushes, etc.
|
1161 | 1190 | """ |
|
1162 | 1191 | repo = self._repo |
|
1163 | 1192 | mfl = repo.manifestlog |
|
1164 | 1193 | tmfnodes = {b'': manifests} |
|
1165 | 1194 | |
|
1166 | 1195 | # Callback for the manifest, used to collect linkrevs for filelog |
|
1167 | 1196 | # revisions. |
|
1168 | 1197 | # Returns the linkrev node (collected in lookupcl). |
|
1169 | 1198 | def makelookupmflinknode(tree, nodes): |
|
1170 | 1199 | if fastpathlinkrev: |
|
1171 | 1200 | assert not tree |
|
1172 | 1201 | return ( |
|
1173 | 1202 | manifests.__getitem__ |
|
1174 | 1203 | ) # pytype: disable=unsupported-operands |
|
1175 | 1204 | |
|
1176 | 1205 | def lookupmflinknode(x): |
|
1177 | 1206 | """Callback for looking up the linknode for manifests. |
|
1178 | 1207 | |
|
1179 | 1208 | Returns the linkrev node for the specified manifest. |
|
1180 | 1209 | |
|
1181 | 1210 | SIDE EFFECT: |
|
1182 | 1211 | |
|
1183 | 1212 | 1) fclnodes gets populated with the list of relevant |
|
1184 | 1213 | file nodes if we're not using fastpathlinkrev |
|
1185 | 1214 | 2) When treemanifests are in use, collects treemanifest nodes |
|
1186 | 1215 | to send |
|
1187 | 1216 | |
|
1188 | 1217 | Note that this means manifests must be completely sent to |
|
1189 | 1218 | the client before you can trust the list of files and |
|
1190 | 1219 | treemanifests to send. |
|
1191 | 1220 | """ |
|
1192 | 1221 | clnode = nodes[x] |
|
1193 | 1222 | mdata = mfl.get(tree, x).readfast(shallow=True) |
|
1194 | 1223 | for p, n, fl in mdata.iterentries(): |
|
1195 | 1224 | if fl == b't': # subdirectory manifest |
|
1196 | 1225 | subtree = tree + p + b'/' |
|
1197 | 1226 | tmfclnodes = tmfnodes.setdefault(subtree, {}) |
|
1198 | 1227 | tmfclnode = tmfclnodes.setdefault(n, clnode) |
|
1199 | 1228 | if clrevorder[clnode] < clrevorder[tmfclnode]: |
|
1200 | 1229 | tmfclnodes[n] = clnode |
|
1201 | 1230 | else: |
|
1202 | 1231 | f = tree + p |
|
1203 | 1232 | fclnodes = fnodes.setdefault(f, {}) |
|
1204 | 1233 | fclnode = fclnodes.setdefault(n, clnode) |
|
1205 | 1234 | if clrevorder[clnode] < clrevorder[fclnode]: |
|
1206 | 1235 | fclnodes[n] = clnode |
|
1207 | 1236 | return clnode |
|
1208 | 1237 | |
|
1209 | 1238 | return lookupmflinknode |
|
1210 | 1239 | |
|
1211 | 1240 | while tmfnodes: |
|
1212 | 1241 | tree, nodes = tmfnodes.popitem() |
|
1213 | 1242 | |
|
1214 | 1243 | should_visit = self._matcher.visitdir(tree[:-1]) |
|
1215 | 1244 | if tree and not should_visit: |
|
1216 | 1245 | continue |
|
1217 | 1246 | |
|
1218 | 1247 | store = mfl.getstorage(tree) |
|
1219 | 1248 | |
|
1220 | 1249 | if not should_visit: |
|
1221 | 1250 | # No nodes to send because this directory is out of |
|
1222 | 1251 | # the client's view of the repository (probably |
|
1223 | 1252 | # because of narrow clones). Do this even for the root |
|
1224 | 1253 | # directory (tree=='') |
|
1225 | 1254 | prunednodes = [] |
|
1226 | 1255 | else: |
|
1227 | 1256 | # Avoid sending any manifest nodes we can prove the |
|
1228 | 1257 | # client already has by checking linkrevs. See the |
|
1229 | 1258 | # related comment in generatefiles(). |
|
1230 | 1259 | prunednodes = self._prunemanifests(store, nodes, commonrevs) |
|
1231 | 1260 | |
|
1232 | 1261 | if tree and not prunednodes: |
|
1233 | 1262 | continue |
|
1234 | 1263 | |
|
1235 | 1264 | lookupfn = makelookupmflinknode(tree, nodes) |
|
1236 | 1265 | |
|
1237 | 1266 | deltas = deltagroup( |
|
1238 | 1267 | self._repo, |
|
1239 | 1268 | store, |
|
1240 | 1269 | prunednodes, |
|
1241 | 1270 | False, |
|
1242 | 1271 | lookupfn, |
|
1243 | 1272 | self._forcedeltaparentprev, |
|
1244 | 1273 | ellipses=self._ellipses, |
|
1245 | 1274 | topic=_(b'manifests'), |
|
1246 | 1275 | clrevtolocalrev=clrevtolocalrev, |
|
1247 | 1276 | fullclnodes=self._fullclnodes, |
|
1248 | 1277 | precomputedellipsis=self._precomputedellipsis, |
|
1249 | 1278 | ) |
|
1250 | 1279 | |
|
1251 | 1280 | if not self._oldmatcher.visitdir(store.tree[:-1]): |
|
1252 | 1281 | yield tree, deltas |
|
1253 | 1282 | else: |
|
1254 | 1283 | # 'deltas' is a generator and we need to consume it even if |
|
1255 | 1284 | # we are not going to send it because a side-effect is that |
|
1256 | 1285 | # it updates tmfnodes (via lookupfn)
|
1257 | 1286 | for d in deltas: |
|
1258 | 1287 | pass |
|
1259 | 1288 | if not tree: |
|
1260 | 1289 | yield tree, [] |
|
1261 | 1290 | |
|
1262 | 1291 | def _prunemanifests(self, store, nodes, commonrevs): |
|
1263 | 1292 | if not self._ellipses: |
|
1264 | 1293 | # In non-ellipses case and large repositories, it is better to |
|
1265 | 1294 | # prevent calling of store.rev and store.linkrev on a lot of |
|
1266 | 1295 | # nodes as compared to sending some extra data |
|
1267 | 1296 | return nodes.copy() |
|
1268 | 1297 | # This is split out as a separate method to allow filtering |
|
1269 | 1298 | # commonrevs in extension code. |
|
1270 | 1299 | # |
|
1271 | 1300 | # TODO(augie): this shouldn't be required, instead we should |
|
1272 | 1301 | # make filtering of revisions to send delegated to the store |
|
1273 | 1302 | # layer. |
|
1274 | 1303 | frev, flr = store.rev, store.linkrev |
|
1275 | 1304 | return [n for n in nodes if flr(frev(n)) not in commonrevs] |
|
1276 | 1305 | |
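
The same linkrev test reappears in `generatefiles()` below: a node whose introducing changeset is in `commonrevs` is provably already on the client, so skipping it is safe and cheap. A minimal sketch, with `rev` and `linkrev` supplied as plain mappings:

```python
# Minimal sketch of linkrev-based pruning: drop nodes whose introducing
# changeset revision is already common with the client.
def prune(nodes, rev, linkrev, commonrevs):
    return [n for n in nodes if linkrev(rev(n)) not in commonrevs]

assert prune([b"a", b"b"], {b"a": 0, b"b": 1}.get,
             {0: 5, 1: 9}.get, commonrevs={5}) == [b"b"]
```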
|
1277 | 1306 | # The 'source' parameter is useful for extensions |
|
1278 | 1307 | def generatefiles( |
|
1279 | 1308 | self, |
|
1280 | 1309 | changedfiles, |
|
1281 | 1310 | commonrevs, |
|
1282 | 1311 | source, |
|
1283 | 1312 | mfdicts, |
|
1284 | 1313 | fastpathlinkrev, |
|
1285 | 1314 | fnodes, |
|
1286 | 1315 | clrevs, |
|
1287 | 1316 | ): |
|
1288 | 1317 | changedfiles = [ |
|
1289 | 1318 | f |
|
1290 | 1319 | for f in changedfiles |
|
1291 | 1320 | if self._matcher(f) and not self._oldmatcher(f) |
|
1292 | 1321 | ] |
|
1293 | 1322 | |
|
1294 | 1323 | if not fastpathlinkrev: |
|
1295 | 1324 | |
|
1296 | 1325 | def normallinknodes(unused, fname): |
|
1297 | 1326 | return fnodes.get(fname, {}) |
|
1298 | 1327 | |
|
1299 | 1328 | else: |
|
1300 | 1329 | cln = self._repo.changelog.node |
|
1301 | 1330 | |
|
1302 | 1331 | def normallinknodes(store, fname): |
|
1303 | 1332 | flinkrev = store.linkrev |
|
1304 | 1333 | fnode = store.node |
|
1305 | 1334 | revs = ((r, flinkrev(r)) for r in store) |
|
1306 | 1335 | return {fnode(r): cln(lr) for r, lr in revs if lr in clrevs} |
|
1307 | 1336 | |
|
1308 | 1337 | clrevtolocalrev = {} |
|
1309 | 1338 | |
|
1310 | 1339 | if self._isshallow: |
|
1311 | 1340 | # In a shallow clone, the linknodes callback needs to also include |
|
1312 | 1341 | # those file nodes that are in the manifests we sent but weren't |
|
1313 | 1342 | # introduced by those manifests. |
|
1314 | 1343 | commonctxs = [self._repo[c] for c in commonrevs] |
|
1315 | 1344 | clrev = self._repo.changelog.rev |
|
1316 | 1345 | |
|
1317 | 1346 | def linknodes(flog, fname): |
|
1318 | 1347 | for c in commonctxs: |
|
1319 | 1348 | try: |
|
1320 | 1349 | fnode = c.filenode(fname) |
|
1321 | 1350 | clrevtolocalrev[c.rev()] = flog.rev(fnode) |
|
1322 | 1351 | except error.ManifestLookupError: |
|
1323 | 1352 | pass |
|
1324 | 1353 | links = normallinknodes(flog, fname) |
|
1325 | 1354 | if len(links) != len(mfdicts): |
|
1326 | 1355 | for mf, lr in mfdicts: |
|
1327 | 1356 | fnode = mf.get(fname, None) |
|
1328 | 1357 | if fnode in links: |
|
1329 | 1358 | links[fnode] = min(links[fnode], lr, key=clrev) |
|
1330 | 1359 | elif fnode: |
|
1331 | 1360 | links[fnode] = lr |
|
1332 | 1361 | return links |
|
1333 | 1362 | |
|
1334 | 1363 | else: |
|
1335 | 1364 | linknodes = normallinknodes |
|
1336 | 1365 | |
|
1337 | 1366 | repo = self._repo |
|
1338 | 1367 | progress = repo.ui.makeprogress( |
|
1339 | 1368 | _(b'files'), unit=_(b'files'), total=len(changedfiles) |
|
1340 | 1369 | ) |
|
1341 | 1370 | for i, fname in enumerate(sorted(changedfiles)): |
|
1342 | 1371 | filerevlog = repo.file(fname) |
|
1343 | 1372 | if not filerevlog: |
|
1344 | 1373 | raise error.Abort( |
|
1345 | 1374 | _(b"empty or missing file data for %s") % fname |
|
1346 | 1375 | ) |
|
1347 | 1376 | |
|
1348 | 1377 | clrevtolocalrev.clear() |
|
1349 | 1378 | |
|
1350 | 1379 | linkrevnodes = linknodes(filerevlog, fname) |
|
1351 | 1380 | # Look up filenodes; we collected the linkrev nodes above in the |
|
1352 | 1381 | # fastpath case and with lookupmf in the slowpath case. |
|
1353 | 1382 | def lookupfilelog(x): |
|
1354 | 1383 | return linkrevnodes[x] |
|
1355 | 1384 | |
|
1356 | 1385 | frev, flr = filerevlog.rev, filerevlog.linkrev |
|
1357 | 1386 | # Skip sending any filenode we know the client already |
|
1358 | 1387 | # has. This avoids over-sending files relatively |
|
1359 | 1388 | # inexpensively, so it's not a problem if we under-filter |
|
1360 | 1389 | # here. |
|
1361 | 1390 | filenodes = [ |
|
1362 | 1391 | n for n in linkrevnodes if flr(frev(n)) not in commonrevs |
|
1363 | 1392 | ] |
|
1364 | 1393 | |
|
1365 | 1394 | if not filenodes: |
|
1366 | 1395 | continue |
|
1367 | 1396 | |
|
1368 | 1397 | progress.update(i + 1, item=fname) |
|
1369 | 1398 | |
|
1370 | 1399 | deltas = deltagroup( |
|
1371 | 1400 | self._repo, |
|
1372 | 1401 | filerevlog, |
|
1373 | 1402 | filenodes, |
|
1374 | 1403 | False, |
|
1375 | 1404 | lookupfilelog, |
|
1376 | 1405 | self._forcedeltaparentprev, |
|
1377 | 1406 | ellipses=self._ellipses, |
|
1378 | 1407 | clrevtolocalrev=clrevtolocalrev, |
|
1379 | 1408 | fullclnodes=self._fullclnodes, |
|
1380 | 1409 | precomputedellipsis=self._precomputedellipsis, |
|
1381 | 1410 | ) |
|
1382 | 1411 | |
|
1383 | 1412 | yield fname, deltas |
|
1384 | 1413 | |
|
1385 | 1414 | progress.complete() |
|
1386 | 1415 | |
|
1387 | 1416 | |
|
1388 | 1417 | def _makecg1packer( |
|
1389 | 1418 | repo, |
|
1390 | 1419 | oldmatcher, |
|
1391 | 1420 | matcher, |
|
1392 | 1421 | bundlecaps, |
|
1393 | 1422 | ellipses=False, |
|
1394 | 1423 | shallow=False, |
|
1395 | 1424 | ellipsisroots=None, |
|
1396 | 1425 | fullnodes=None, |
|
1426 | remote_sidedata=None, | |
|
1397 | 1427 | ): |
|
1398 | 1428 | builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack( |
|
1399 | 1429 | d.node, d.p1node, d.p2node, d.linknode |
|
1400 | 1430 | ) |
|
1401 | 1431 | |
|
1402 | 1432 | return cgpacker( |
|
1403 | 1433 | repo, |
|
1404 | 1434 | oldmatcher, |
|
1405 | 1435 | matcher, |
|
1406 | 1436 | b'01', |
|
1407 | 1437 | builddeltaheader=builddeltaheader, |
|
1408 | 1438 | manifestsend=b'', |
|
1409 | 1439 | forcedeltaparentprev=True, |
|
1410 | 1440 | bundlecaps=bundlecaps, |
|
1411 | 1441 | ellipses=ellipses, |
|
1412 | 1442 | shallow=shallow, |
|
1413 | 1443 | ellipsisroots=ellipsisroots, |
|
1414 | 1444 | fullnodes=fullnodes, |
|
1415 | 1445 | ) |
|
1416 | 1446 | |
|
1417 | 1447 | |
|
1418 | 1448 | def _makecg2packer( |
|
1419 | 1449 | repo, |
|
1420 | 1450 | oldmatcher, |
|
1421 | 1451 | matcher, |
|
1422 | 1452 | bundlecaps, |
|
1423 | 1453 | ellipses=False, |
|
1424 | 1454 | shallow=False, |
|
1425 | 1455 | ellipsisroots=None, |
|
1426 | 1456 | fullnodes=None, |
|
1457 | remote_sidedata=None, | |
|
1427 | 1458 | ): |
|
1428 | 1459 | builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack( |
|
1429 | 1460 | d.node, d.p1node, d.p2node, d.basenode, d.linknode |
|
1430 | 1461 | ) |
|
1431 | 1462 | |
|
1432 | 1463 | return cgpacker( |
|
1433 | 1464 | repo, |
|
1434 | 1465 | oldmatcher, |
|
1435 | 1466 | matcher, |
|
1436 | 1467 | b'02', |
|
1437 | 1468 | builddeltaheader=builddeltaheader, |
|
1438 | 1469 | manifestsend=b'', |
|
1439 | 1470 | bundlecaps=bundlecaps, |
|
1440 | 1471 | ellipses=ellipses, |
|
1441 | 1472 | shallow=shallow, |
|
1442 | 1473 | ellipsisroots=ellipsisroots, |
|
1443 | 1474 | fullnodes=fullnodes, |
|
1444 | 1475 | ) |
|
1445 | 1476 | |
|
1446 | 1477 | |
|
1447 | 1478 | def _makecg3packer( |
|
1448 | 1479 | repo, |
|
1449 | 1480 | oldmatcher, |
|
1450 | 1481 | matcher, |
|
1451 | 1482 | bundlecaps, |
|
1452 | 1483 | ellipses=False, |
|
1453 | 1484 | shallow=False, |
|
1454 | 1485 | ellipsisroots=None, |
|
1455 | 1486 | fullnodes=None, |
|
1487 | remote_sidedata=None, | |
|
1456 | 1488 | ): |
|
1457 | 1489 | builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack( |
|
1458 | 1490 | d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags |
|
1459 | 1491 | ) |
|
1460 | 1492 | |
|
1461 | 1493 | return cgpacker( |
|
1462 | 1494 | repo, |
|
1463 | 1495 | oldmatcher, |
|
1464 | 1496 | matcher, |
|
1465 | 1497 | b'03', |
|
1466 | 1498 | builddeltaheader=builddeltaheader, |
|
1467 | 1499 | manifestsend=closechunk(), |
|
1468 | 1500 | bundlecaps=bundlecaps, |
|
1469 | 1501 | ellipses=ellipses, |
|
1470 | 1502 | shallow=shallow, |
|
1471 | 1503 | ellipsisroots=ellipsisroots, |
|
1472 | 1504 | fullnodes=fullnodes, |
|
1473 | 1505 | ) |
|
1474 | 1506 | |
|
1475 | 1507 | |
|
1508 | def _makecg4packer( | |
|
1509 | repo, | |
|
1510 | oldmatcher, | |
|
1511 | matcher, | |
|
1512 | bundlecaps, | |
|
1513 | ellipses=False, | |
|
1514 | shallow=False, | |
|
1515 | ellipsisroots=None, | |
|
1516 | fullnodes=None, | |
|
1517 | remote_sidedata=None, | |
|
1518 | ): | |
|
1519 | # Same header func as cg3. Sidedata is in a separate chunk from the delta to | |
|
1520 | # differentiate "raw delta" from sidedata. | |
|
1521 | builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack( | |
|
1522 | d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags | |
|
1523 | ) | |
|
1524 | ||
|
1525 | return cgpacker( | |
|
1526 | repo, | |
|
1527 | oldmatcher, | |
|
1528 | matcher, | |
|
1529 | b'04', | |
|
1530 | builddeltaheader=builddeltaheader, | |
|
1531 | manifestsend=closechunk(), | |
|
1532 | bundlecaps=bundlecaps, | |
|
1533 | ellipses=ellipses, | |
|
1534 | shallow=shallow, | |
|
1535 | ellipsisroots=ellipsisroots, | |
|
1536 | fullnodes=fullnodes, | |
|
1537 | remote_sidedata=remote_sidedata, | |
|
1538 | ) | |
|
1539 | ||
|
1540 | ||
|
1476 | 1541 | _packermap = { |
|
1477 | 1542 | b'01': (_makecg1packer, cg1unpacker), |
|
1478 | 1543 | # cg2 adds support for exchanging generaldelta |
|
1479 | 1544 | b'02': (_makecg2packer, cg2unpacker), |
|
1480 | 1545 | # cg3 adds support for exchanging revlog flags and treemanifests |
|
1481 | 1546 | b'03': (_makecg3packer, cg3unpacker), |
|
1547 | # cg4 adds support for exchanging sidedata | |
|
1548 | b'04': (_makecg4packer, cg4unpacker), | |
|
1482 | 1549 | } |
|
1483 | 1550 | |
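The `_packermap` table above pairs, for each wire version, a packer factory (outgoing) with an unpacker class (incoming), so both directions of the protocol resolve from a single key. A hedged sketch of that dispatch shape; the factory and class names below are made up for illustration:

    # Hypothetical factory and unpacker, standing in for _makecg*packer
    # and cg*unpacker:
    def make_v1_packer(repo):
        return ('packer', b'01', repo)

    class V1Unpacker(object):
        def __init__(self, fh):
            self.fh = fh

    packermap = {b'01': (make_v1_packer, V1Unpacker)}

    def getpacker(version, repo):
        return packermap[version][0](repo)  # index 0: outgoing packer

    def getunpacker(version, fh):
        return packermap[version][1](fh)    # index 1: incoming unpacker

    assert getpacker(b'01', None)[1] == b'01'
    assert isinstance(getunpacker(b'01', None), V1Unpacker)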
|
1484 | 1551 | |
|
1485 | 1552 | def allsupportedversions(repo): |
|
1486 | 1553 | versions = set(_packermap.keys()) |
|
1487 | 1554 | needv03 = False |
|
1488 | 1555 | if ( |
|
1489 | 1556 | repo.ui.configbool(b'experimental', b'changegroup3') |
|
1490 | 1557 | or repo.ui.configbool(b'experimental', b'treemanifest') |
|
1491 | 1558 | or scmutil.istreemanifest(repo) |
|
1492 | 1559 | ): |
|
1493 | 1560 | # we keep version 03 because we need it to exchange treemanifest data |

1494 | 1561 | # |

1495 | 1562 | # we also keep versions 01 and 02, because it is possible for a repo to |

1496 | 1563 | # contain both normal and tree manifests at the same time, so using an |

1497 | 1564 | # older version to pull data is viable |

1498 | 1565 | # |

1499 | 1566 | # (or even to push a subset of history) |
|
1500 | 1567 | needv03 = True |
|
1501 | if b'exp-sidedata-flag' in repo.requirements: | |
|
1502 | needv03 = True | |
|
1503 | # don't attempt to use 01/02 until we do sidedata cleaning | |
|
1504 | versions.discard(b'01') | |
|
1505 | versions.discard(b'02') | |
|
1568 | has_revlogv2 = requirements.REVLOGV2_REQUIREMENT in repo.requirements | |
|
1569 | if not has_revlogv2: | |
|
1570 | versions.discard(b'04') | |
|
1506 | 1571 | if not needv03: |
|
1507 | 1572 | versions.discard(b'03') |
|
1508 | 1573 | return versions |
|
1509 | 1574 | |
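To make the pruning in `allsupportedversions` concrete, here is a toy walk-through under assumed inputs (no changegroup3/treemanifest triggers, no REVLOGV2 requirement); the flag values are assumptions for illustration only:

    versions = {b'01', b'02', b'03', b'04'}
    needv03 = False        # assumed: no changegroup3/treemanifest trigger
    has_revlogv2 = False   # assumed: REVLOGV2_REQUIREMENT absent
    if not has_revlogv2:
        versions.discard(b'04')  # cg4 needs revlogv2 (sidedata storage)
    if not needv03:
        versions.discard(b'03')
    assert versions == {b'01', b'02'}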
|
1510 | 1575 | |
|
1511 | 1576 | # Changegroup versions that can be applied to the repo |
|
1512 | 1577 | def supportedincomingversions(repo): |
|
1513 | 1578 | return allsupportedversions(repo) |
|
1514 | 1579 | |
|
1515 | 1580 | |
|
1516 | 1581 | # Changegroup versions that can be created from the repo |
|
1517 | 1582 | def supportedoutgoingversions(repo): |
|
1518 | 1583 | versions = allsupportedversions(repo) |
|
1519 | 1584 | if scmutil.istreemanifest(repo): |
|
1520 | 1585 | # Versions 01 and 02 support only flat manifests and it's just too |
|
1521 | 1586 | # expensive to convert between the flat manifest and tree manifest on |
|
1522 | 1587 | # the fly. Since tree manifests are hashed differently, all of history |
|
1523 | 1588 | # would have to be converted. Instead, we simply don't even pretend to |
|
1524 | 1589 | # support versions 01 and 02. |
|
1525 | 1590 | versions.discard(b'01') |
|
1526 | 1591 | versions.discard(b'02') |
|
1527 | 1592 | if requirements.NARROW_REQUIREMENT in repo.requirements: |
|
1528 | 1593 | # Versions 01 and 02 don't support revlog flags, and we need to |
|
1529 | 1594 | # support that for stripping and unbundling to work. |
|
1530 | 1595 | versions.discard(b'01') |
|
1531 | 1596 | versions.discard(b'02') |
|
1532 | 1597 | if LFS_REQUIREMENT in repo.requirements: |
|
1533 | 1598 | # Versions 01 and 02 don't support revlog flags, and we need to |
|
1534 | 1599 | # mark LFS entries with REVIDX_EXTSTORED. |
|
1535 | 1600 | versions.discard(b'01') |
|
1536 | 1601 | versions.discard(b'02') |
|
1537 | 1602 | |
|
1538 | 1603 | return versions |
|
1539 | 1604 | |
|
1540 | 1605 | |
|
1541 | 1606 | def localversion(repo): |
|
1542 | 1607 | # Finds the best version to use for bundles that are meant to be used |
|
1543 | 1608 | # locally, such as those from strip and shelve, and temporary bundles. |
|
1544 | 1609 | return max(supportedoutgoingversions(repo)) |
|
1545 | 1610 | |
|
1546 | 1611 | |
|
1547 | 1612 | def safeversion(repo): |
|
1548 | 1613 | # Finds the smallest version that it's safe to assume clients of the repo |
|
1549 | 1614 | # will support. For example, all hg versions that support generaldelta also |
|
1550 | 1615 | # support changegroup 02. |
|
1551 | 1616 | versions = supportedoutgoingversions(repo) |
|
1552 | 1617 | if requirements.GENERALDELTA_REQUIREMENT in repo.requirements: |
|
1553 | 1618 | versions.discard(b'01') |
|
1554 | 1619 | assert versions |
|
1555 | 1620 | return min(versions) |
|
1556 | 1621 | |
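The version identifiers are bytestrings whose lexicographic order tracks protocol age, which is why `localversion` can simply take `max()` of the set and `safeversion` `min()`. A one-line sanity check:

    versions = {b'01', b'02', b'03'}
    assert max(versions) == b'03'  # newest: picked by localversion()
    assert min(versions) == b'01'  # oldest: picked by safeversion()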
|
1557 | 1622 | |
|
1558 | 1623 | def getbundler( |
|
1559 | 1624 | version, |
|
1560 | 1625 | repo, |
|
1561 | 1626 | bundlecaps=None, |
|
1562 | 1627 | oldmatcher=None, |
|
1563 | 1628 | matcher=None, |
|
1564 | 1629 | ellipses=False, |
|
1565 | 1630 | shallow=False, |
|
1566 | 1631 | ellipsisroots=None, |
|
1567 | 1632 | fullnodes=None, |
|
1633 | remote_sidedata=None, | |
|
1568 | 1634 | ): |
|
1569 | 1635 | assert version in supportedoutgoingversions(repo) |
|
1570 | 1636 | |
|
1571 | 1637 | if matcher is None: |
|
1572 | 1638 | matcher = matchmod.always() |
|
1573 | 1639 | if oldmatcher is None: |
|
1574 | 1640 | oldmatcher = matchmod.never() |
|
1575 | 1641 | |
|
1576 | 1642 | if version == b'01' and not matcher.always(): |
|
1577 | 1643 | raise error.ProgrammingError( |
|
1578 | 1644 | b'version 01 changegroups do not support sparse file matchers' |
|
1579 | 1645 | ) |
|
1580 | 1646 | |
|
1581 | 1647 | if ellipses and version in (b'01', b'02'): |
|
1582 | 1648 | raise error.Abort( |
|
1583 | 1649 | _( |
|
1584 | 1650 | b'ellipsis nodes require at least cg3 on client and server, ' |
|
1585 | 1651 | b'but negotiated version %s' |
|
1586 | 1652 | ) |
|
1587 | 1653 | % version |
|
1588 | 1654 | ) |
|
1589 | 1655 | |
|
1590 | 1656 | # Requested files could include files not in the local store. So |
|
1591 | 1657 | # filter those out. |
|
1592 | 1658 | matcher = repo.narrowmatch(matcher) |
|
1593 | 1659 | |
|
1594 | 1660 | fn = _packermap[version][0] |
|
1595 | 1661 | return fn( |
|
1596 | 1662 | repo, |
|
1597 | 1663 | oldmatcher, |
|
1598 | 1664 | matcher, |
|
1599 | 1665 | bundlecaps, |
|
1600 | 1666 | ellipses=ellipses, |
|
1601 | 1667 | shallow=shallow, |
|
1602 | 1668 | ellipsisroots=ellipsisroots, |
|
1603 | 1669 | fullnodes=fullnodes, |
|
1670 | remote_sidedata=remote_sidedata, | |
|
1604 | 1671 | ) |
|
1605 | 1672 | |
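The two guards in `getbundler` encode compatibility constraints: cg1 cannot represent a sparse file matcher, and ellipsis nodes need at least cg3 on both ends. A standalone sketch of the same precondition logic, using a plain `ValueError` instead of Mercurial's error classes:

    def checkversion(version, matcher_always, ellipses):
        if version == b'01' and not matcher_always:
            raise ValueError('version 01 changegroups do not support '
                             'sparse file matchers')
        if ellipses and version in (b'01', b'02'):
            raise ValueError('ellipsis nodes require at least cg3')

    checkversion(b'02', False, False)  # a narrowed matcher is fine on cg2
    checkversion(b'03', True, True)    # ellipses are fine from cg3 on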
|
1606 | 1673 | |
|
1607 | 1674 | def getunbundler(version, fh, alg, extras=None): |
|
1608 | 1675 | return _packermap[version][1](fh, alg, extras=extras) |
|
1609 | 1676 | |
|
1610 | 1677 | |
|
1611 | 1678 | def _changegroupinfo(repo, nodes, source): |
|
1612 | 1679 | if repo.ui.verbose or source == b'bundle': |
|
1613 | 1680 | repo.ui.status(_(b"%d changesets found\n") % len(nodes)) |
|
1614 | 1681 | if repo.ui.debugflag: |
|
1615 | 1682 | repo.ui.debug(b"list of changesets:\n") |
|
1616 | 1683 | for node in nodes: |
|
1617 | 1684 | repo.ui.debug(b"%s\n" % hex(node)) |
|
1618 | 1685 | |
|
1619 | 1686 | |
|
1620 | 1687 | def makechangegroup( |
|
1621 | 1688 | repo, outgoing, version, source, fastpath=False, bundlecaps=None |
|
1622 | 1689 | ): |
|
1623 | 1690 | cgstream = makestream( |
|
1624 | 1691 | repo, |
|
1625 | 1692 | outgoing, |
|
1626 | 1693 | version, |
|
1627 | 1694 | source, |
|
1628 | 1695 | fastpath=fastpath, |
|
1629 | 1696 | bundlecaps=bundlecaps, |
|
1630 | 1697 | ) |
|
1631 | 1698 | return getunbundler( |
|
1632 | 1699 | version, |
|
1633 | 1700 | util.chunkbuffer(cgstream), |
|
1634 | 1701 | None, |
|
1635 | 1702 | {b'clcount': len(outgoing.missing)}, |
|
1636 | 1703 | ) |
|
1637 | 1704 | |
|
1638 | 1705 | |
|
1639 | 1706 | def makestream( |
|
1640 | 1707 | repo, |
|
1641 | 1708 | outgoing, |
|
1642 | 1709 | version, |
|
1643 | 1710 | source, |
|
1644 | 1711 | fastpath=False, |
|
1645 | 1712 | bundlecaps=None, |
|
1646 | 1713 | matcher=None, |
|
1714 | remote_sidedata=None, | |
|
1647 | 1715 | ): |
|
1648 | bundler = getbundler(version, repo, bundlecaps=bundlecaps, matcher=matcher) | |
|
1716 | bundler = getbundler( | |
|
1717 | version, | |
|
1718 | repo, | |
|
1719 | bundlecaps=bundlecaps, | |
|
1720 | matcher=matcher, | |
|
1721 | remote_sidedata=remote_sidedata, | |
|
1722 | ) | |
|
1649 | 1723 | |
|
1650 | 1724 | repo = repo.unfiltered() |
|
1651 | 1725 | commonrevs = outgoing.common |
|
1652 | 1726 | csets = outgoing.missing |
|
1653 | 1727 | heads = outgoing.ancestorsof |
|
1654 | 1728 | # We go through the fast path if we get told to, or if all (unfiltered) |

1655 | 1729 | # heads have been requested (since we then know all linkrevs will be |

1656 | 1730 | # pulled by the client). |
|
1657 | 1731 | heads.sort() |
|
1658 | 1732 | fastpathlinkrev = fastpath or ( |
|
1659 | 1733 | repo.filtername is None and heads == sorted(repo.heads()) |
|
1660 | 1734 | ) |
|
1661 | 1735 | |
|
1662 | 1736 | repo.hook(b'preoutgoing', throw=True, source=source) |
|
1663 | 1737 | _changegroupinfo(repo, csets, source) |
|
1664 | 1738 | return bundler.generate(commonrevs, csets, fastpathlinkrev, source) |
|
1665 | 1739 | |
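A hedged sketch of the fast-path test in `makestream`; `use_fastpath` and the toy head lists are illustrative. The path is taken only when explicitly requested, or when the repo is unfiltered and the requested heads are exactly the repository heads, so every linkrev is guaranteed to reach the client:

    def use_fastpath(fastpath, filtername, heads, repoheads):
        # mirrors: fastpath or (repo.filtername is None
        #                       and heads == sorted(repo.heads()))
        return fastpath or (
            filtername is None and sorted(heads) == sorted(repoheads)
        )

    assert use_fastpath(False, None, [b'h2', b'h1'], [b'h1', b'h2'])
    assert not use_fastpath(False, b'visible', [b'h1'], [b'h1'])
    assert use_fastpath(True, b'visible', [b'h1'], [b'h2'])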
|
1666 | 1740 | |
|
1667 | 1741 | def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles): |
|
1668 | 1742 | revisions = 0 |
|
1669 | 1743 | files = 0 |
|
1670 | 1744 | progress = repo.ui.makeprogress( |
|
1671 | 1745 | _(b'files'), unit=_(b'files'), total=expectedfiles |
|
1672 | 1746 | ) |
|
1673 | 1747 | for chunkdata in iter(source.filelogheader, {}): |
|
1674 | 1748 | files += 1 |
|
1675 | 1749 | f = chunkdata[b"filename"] |
|
1676 | 1750 | repo.ui.debug(b"adding %s revisions\n" % f) |
|
1677 | 1751 | progress.increment() |
|
1678 | 1752 | fl = repo.file(f) |
|
1679 | 1753 | o = len(fl) |
|
1680 | 1754 | try: |
|
1681 | 1755 | deltas = source.deltaiter() |
|
1682 | 1756 | if not fl.addgroup(deltas, revmap, trp): |
|
1683 | 1757 | raise error.Abort(_(b"received file revlog group is empty")) |
|
1684 | 1758 | except error.CensoredBaseError as e: |
|
1685 | 1759 | raise error.Abort(_(b"received delta base is censored: %s") % e) |
|
1686 | 1760 | revisions += len(fl) - o |
|
1687 | 1761 | if f in needfiles: |
|
1688 | 1762 | needs = needfiles[f] |
|
1689 | 1763 | for new in pycompat.xrange(o, len(fl)): |
|
1690 | 1764 | n = fl.node(new) |
|
1691 | 1765 | if n in needs: |
|
1692 | 1766 | needs.remove(n) |
|
1693 | 1767 | else: |
|
1694 | 1768 | raise error.Abort(_(b"received spurious file revlog entry")) |
|
1695 | 1769 | if not needs: |
|
1696 | 1770 | del needfiles[f] |
|
1697 | 1771 | progress.complete() |
|
1698 | 1772 | |
|
1699 | 1773 | for f, needs in pycompat.iteritems(needfiles): |
|
1700 | 1774 | fl = repo.file(f) |
|
1701 | 1775 | for n in needs: |
|
1702 | 1776 | try: |
|
1703 | 1777 | fl.rev(n) |
|
1704 | 1778 | except error.LookupError: |
|
1705 | 1779 | raise error.Abort( |
|
1706 | 1780 | _(b'missing file data for %s:%s - run hg verify') |
|
1707 | 1781 | % (f, hex(n)) |
|
1708 | 1782 | ) |
|
1709 | 1783 | |
|
1710 | 1784 | return revisions, files |
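A minimal sketch of the `needfiles` bookkeeping in `_addchangegroupfiles`, with toy data; note the real code calls `needs.remove(n)` and aborts on spurious entries, where this sketch just discards:

    needfiles = {b'a.txt': {b'n1', b'n2'}, b'b.txt': {b'n3'}}
    received = {b'a.txt': [b'n1', b'n2'], b'b.txt': [b'n3']}
    for f, nodes in received.items():
        needs = needfiles.get(f)
        if needs is None:
            continue
        for n in nodes:
            needs.discard(n)  # real code: needs.remove(n), abort if absent
        if not needs:
            del needfiles[f]
    assert not needfiles      # every needed filenode was delivered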
@@ -1,4773 +1,4773 b'' | |||
|
1 | 1 | # debugcommands.py - command processing for debug* commands |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2016 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import codecs |
|
11 | 11 | import collections |
|
12 | 12 | import difflib |
|
13 | 13 | import errno |
|
14 | 14 | import glob |
|
15 | 15 | import operator |
|
16 | 16 | import os |
|
17 | 17 | import platform |
|
18 | 18 | import random |
|
19 | 19 | import re |
|
20 | 20 | import socket |
|
21 | 21 | import ssl |
|
22 | 22 | import stat |
|
23 | 23 | import string |
|
24 | 24 | import subprocess |
|
25 | 25 | import sys |
|
26 | 26 | import time |
|
27 | 27 | |
|
28 | 28 | from .i18n import _ |
|
29 | 29 | from .node import ( |
|
30 | 30 | bin, |
|
31 | 31 | hex, |
|
32 | 32 | nullid, |
|
33 | 33 | nullrev, |
|
34 | 34 | short, |
|
35 | 35 | ) |
|
36 | 36 | from .pycompat import ( |
|
37 | 37 | getattr, |
|
38 | 38 | open, |
|
39 | 39 | ) |
|
40 | 40 | from . import ( |
|
41 | 41 | bundle2, |
|
42 | 42 | bundlerepo, |
|
43 | 43 | changegroup, |
|
44 | 44 | cmdutil, |
|
45 | 45 | color, |
|
46 | 46 | context, |
|
47 | 47 | copies, |
|
48 | 48 | dagparser, |
|
49 | 49 | encoding, |
|
50 | 50 | error, |
|
51 | 51 | exchange, |
|
52 | 52 | extensions, |
|
53 | 53 | filemerge, |
|
54 | 54 | filesetlang, |
|
55 | 55 | formatter, |
|
56 | 56 | hg, |
|
57 | 57 | httppeer, |
|
58 | 58 | localrepo, |
|
59 | 59 | lock as lockmod, |
|
60 | 60 | logcmdutil, |
|
61 | 61 | mergestate as mergestatemod, |
|
62 | 62 | metadata, |
|
63 | 63 | obsolete, |
|
64 | 64 | obsutil, |
|
65 | 65 | pathutil, |
|
66 | 66 | phases, |
|
67 | 67 | policy, |
|
68 | 68 | pvec, |
|
69 | 69 | pycompat, |
|
70 | 70 | registrar, |
|
71 | 71 | repair, |
|
72 | 72 | repoview, |
|
73 | 73 | revlog, |
|
74 | 74 | revset, |
|
75 | 75 | revsetlang, |
|
76 | 76 | scmutil, |
|
77 | 77 | setdiscovery, |
|
78 | 78 | simplemerge, |
|
79 | 79 | sshpeer, |
|
80 | 80 | sslutil, |
|
81 | 81 | streamclone, |
|
82 | 82 | strip, |
|
83 | 83 | tags as tagsmod, |
|
84 | 84 | templater, |
|
85 | 85 | treediscovery, |
|
86 | 86 | upgrade, |
|
87 | 87 | url as urlmod, |
|
88 | 88 | util, |
|
89 | 89 | vfs as vfsmod, |
|
90 | 90 | wireprotoframing, |
|
91 | 91 | wireprotoserver, |
|
92 | 92 | wireprotov2peer, |
|
93 | 93 | ) |
|
94 | 94 | from .utils import ( |
|
95 | 95 | cborutil, |
|
96 | 96 | compression, |
|
97 | 97 | dateutil, |
|
98 | 98 | procutil, |
|
99 | 99 | stringutil, |
|
100 | 100 | ) |
|
101 | 101 | |
|
102 | 102 | from .revlogutils import ( |
|
103 | 103 | deltas as deltautil, |
|
104 | 104 | nodemap, |
|
105 | 105 | sidedata, |
|
106 | 106 | ) |
|
107 | 107 | |
|
108 | 108 | release = lockmod.release |
|
109 | 109 | |
|
110 | 110 | table = {} |
|
111 | 111 | table.update(strip.command._table) |
|
112 | 112 | command = registrar.command(table) |
|
113 | 113 | |
|
114 | 114 | |
|
115 | 115 | @command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True) |
|
116 | 116 | def debugancestor(ui, repo, *args): |
|
117 | 117 | """find the ancestor revision of two revisions in a given index""" |
|
118 | 118 | if len(args) == 3: |
|
119 | 119 | index, rev1, rev2 = args |
|
120 | 120 | r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index) |
|
121 | 121 | lookup = r.lookup |
|
122 | 122 | elif len(args) == 2: |
|
123 | 123 | if not repo: |
|
124 | 124 | raise error.Abort( |
|
125 | 125 | _(b'there is no Mercurial repository here (.hg not found)') |
|
126 | 126 | ) |
|
127 | 127 | rev1, rev2 = args |
|
128 | 128 | r = repo.changelog |
|
129 | 129 | lookup = repo.lookup |
|
130 | 130 | else: |
|
131 | 131 | raise error.Abort(_(b'either two or three arguments required')) |
|
132 | 132 | a = r.ancestor(lookup(rev1), lookup(rev2)) |
|
133 | 133 | ui.write(b'%d:%s\n' % (r.rev(a), hex(a))) |
|
134 | 134 | |
|
135 | 135 | |
|
136 | 136 | @command(b'debugantivirusrunning', []) |
|
137 | 137 | def debugantivirusrunning(ui, repo): |
|
138 | 138 | """attempt to trigger an antivirus scanner to see if one is active""" |
|
139 | 139 | with repo.cachevfs.open('eicar-test-file.com', b'wb') as f: |
|
140 | 140 | f.write( |
|
141 | 141 | util.b85decode( |
|
142 | 142 | # This is a base85-armored version of the EICAR test file. See |
|
143 | 143 | # https://en.wikipedia.org/wiki/EICAR_test_file for details. |
|
144 | 144 | b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P' |
|
145 | 145 | b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx' |
|
146 | 146 | ) |
|
147 | 147 | ) |
|
148 | 148 | # Give an AV engine time to scan the file. |
|
149 | 149 | time.sleep(2) |
|
150 | 150 | util.unlink(repo.cachevfs.join('eicar-test-file.com')) |
|
151 | 151 | |
|
152 | 152 | |
|
153 | 153 | @command(b'debugapplystreamclonebundle', [], b'FILE') |
|
154 | 154 | def debugapplystreamclonebundle(ui, repo, fname): |
|
155 | 155 | """apply a stream clone bundle file""" |
|
156 | 156 | f = hg.openpath(ui, fname) |
|
157 | 157 | gen = exchange.readbundle(ui, f, fname) |
|
158 | 158 | gen.apply(repo) |
|
159 | 159 | |
|
160 | 160 | |
|
161 | 161 | @command( |
|
162 | 162 | b'debugbuilddag', |
|
163 | 163 | [ |
|
164 | 164 | ( |
|
165 | 165 | b'm', |
|
166 | 166 | b'mergeable-file', |
|
167 | 167 | None, |
|
168 | 168 | _(b'add single file mergeable changes'), |
|
169 | 169 | ), |
|
170 | 170 | ( |
|
171 | 171 | b'o', |
|
172 | 172 | b'overwritten-file', |
|
173 | 173 | None, |
|
174 | 174 | _(b'add single file all revs overwrite'), |
|
175 | 175 | ), |
|
176 | 176 | (b'n', b'new-file', None, _(b'add new file at each rev')), |
|
177 | 177 | ], |
|
178 | 178 | _(b'[OPTION]... [TEXT]'), |
|
179 | 179 | ) |
|
180 | 180 | def debugbuilddag( |
|
181 | 181 | ui, |
|
182 | 182 | repo, |
|
183 | 183 | text=None, |
|
184 | 184 | mergeable_file=False, |
|
185 | 185 | overwritten_file=False, |
|
186 | 186 | new_file=False, |
|
187 | 187 | ): |
|
188 | 188 | """builds a repo with a given DAG from scratch in the current empty repo |
|
189 | 189 | |
|
190 | 190 | The description of the DAG is read from stdin if not given on the |
|
191 | 191 | command line. |
|
192 | 192 | |
|
193 | 193 | Elements: |
|
194 | 194 | |
|
195 | 195 | - "+n" is a linear run of n nodes based on the current default parent |
|
196 | 196 | - "." is a single node based on the current default parent |
|
197 | 197 | - "$" resets the default parent to null (implied at the start); |
|
198 | 198 | otherwise the default parent is always the last node created |
|
199 | 199 | - "<p" sets the default parent to the backref p |
|
200 | 200 | - "*p" is a fork at parent p, which is a backref |
|
201 | 201 | - "*p1/p2" is a merge of parents p1 and p2, which are backrefs |
|
202 | 202 | - "/p2" is a merge of the preceding node and p2 |
|
203 | 203 | - ":tag" defines a local tag for the preceding node |
|
204 | 204 | - "@branch" sets the named branch for subsequent nodes |
|
205 | 205 | - "#...\\n" is a comment up to the end of the line |
|
206 | 206 | |
|
207 | 207 | Whitespace between the above elements is ignored. |
|
208 | 208 | |
|
209 | 209 | A backref is either |
|
210 | 210 | |
|
211 | 211 | - a number n, which references the node curr-n, where curr is the current |
|
212 | 212 | node, or |
|
213 | 213 | - the name of a local tag you placed earlier using ":tag", or |
|
214 | 214 | - empty to denote the default parent. |
|
215 | 215 | |
|
216 | 216 | All string valued-elements are either strictly alphanumeric, or must |
|
217 | 217 | be enclosed in double quotes ("..."), with "\\" as escape character. |
|
218 | 218 | """ |
|
219 | 219 | |
|
220 | 220 | if text is None: |
|
221 | 221 | ui.status(_(b"reading DAG from stdin\n")) |
|
222 | 222 | text = ui.fin.read() |
|
223 | 223 | |
|
224 | 224 | cl = repo.changelog |
|
225 | 225 | if len(cl) > 0: |
|
226 | 226 | raise error.Abort(_(b'repository is not empty')) |
|
227 | 227 | |
|
228 | 228 | # determine number of revs in DAG |
|
229 | 229 | total = 0 |
|
230 | 230 | for type, data in dagparser.parsedag(text): |
|
231 | 231 | if type == b'n': |
|
232 | 232 | total += 1 |
|
233 | 233 | |
|
234 | 234 | if mergeable_file: |
|
235 | 235 | linesperrev = 2 |
|
236 | 236 | # make a file with k lines per rev |
|
237 | 237 | initialmergedlines = [ |
|
238 | 238 | b'%d' % i for i in pycompat.xrange(0, total * linesperrev) |
|
239 | 239 | ] |
|
240 | 240 | initialmergedlines.append(b"") |
|
241 | 241 | |
|
242 | 242 | tags = [] |
|
243 | 243 | progress = ui.makeprogress( |
|
244 | 244 | _(b'building'), unit=_(b'revisions'), total=total |
|
245 | 245 | ) |
|
246 | 246 | with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"): |
|
247 | 247 | at = -1 |
|
248 | 248 | atbranch = b'default' |
|
249 | 249 | nodeids = [] |
|
250 | 250 | id = 0 |
|
251 | 251 | progress.update(id) |
|
252 | 252 | for type, data in dagparser.parsedag(text): |
|
253 | 253 | if type == b'n': |
|
254 | 254 | ui.note((b'node %s\n' % pycompat.bytestr(data))) |
|
255 | 255 | id, ps = data |
|
256 | 256 | |
|
257 | 257 | files = [] |
|
258 | 258 | filecontent = {} |
|
259 | 259 | |
|
260 | 260 | p2 = None |
|
261 | 261 | if mergeable_file: |
|
262 | 262 | fn = b"mf" |
|
263 | 263 | p1 = repo[ps[0]] |
|
264 | 264 | if len(ps) > 1: |
|
265 | 265 | p2 = repo[ps[1]] |
|
266 | 266 | pa = p1.ancestor(p2) |
|
267 | 267 | base, local, other = [ |
|
268 | 268 | x[fn].data() for x in (pa, p1, p2) |
|
269 | 269 | ] |
|
270 | 270 | m3 = simplemerge.Merge3Text(base, local, other) |
|
271 | 271 | ml = [l.strip() for l in m3.merge_lines()] |
|
272 | 272 | ml.append(b"") |
|
273 | 273 | elif at > 0: |
|
274 | 274 | ml = p1[fn].data().split(b"\n") |
|
275 | 275 | else: |
|
276 | 276 | ml = initialmergedlines |
|
277 | 277 | ml[id * linesperrev] += b" r%i" % id |
|
278 | 278 | mergedtext = b"\n".join(ml) |
|
279 | 279 | files.append(fn) |
|
280 | 280 | filecontent[fn] = mergedtext |
|
281 | 281 | |
|
282 | 282 | if overwritten_file: |
|
283 | 283 | fn = b"of" |
|
284 | 284 | files.append(fn) |
|
285 | 285 | filecontent[fn] = b"r%i\n" % id |
|
286 | 286 | |
|
287 | 287 | if new_file: |
|
288 | 288 | fn = b"nf%i" % id |
|
289 | 289 | files.append(fn) |
|
290 | 290 | filecontent[fn] = b"r%i\n" % id |
|
291 | 291 | if len(ps) > 1: |
|
292 | 292 | if not p2: |
|
293 | 293 | p2 = repo[ps[1]] |
|
294 | 294 | for fn in p2: |
|
295 | 295 | if fn.startswith(b"nf"): |
|
296 | 296 | files.append(fn) |
|
297 | 297 | filecontent[fn] = p2[fn].data() |
|
298 | 298 | |
|
299 | 299 | def fctxfn(repo, cx, path): |
|
300 | 300 | if path in filecontent: |
|
301 | 301 | return context.memfilectx( |
|
302 | 302 | repo, cx, path, filecontent[path] |
|
303 | 303 | ) |
|
304 | 304 | return None |
|
305 | 305 | |
|
306 | 306 | if len(ps) == 0 or ps[0] < 0: |
|
307 | 307 | pars = [None, None] |
|
308 | 308 | elif len(ps) == 1: |
|
309 | 309 | pars = [nodeids[ps[0]], None] |
|
310 | 310 | else: |
|
311 | 311 | pars = [nodeids[p] for p in ps] |
|
312 | 312 | cx = context.memctx( |
|
313 | 313 | repo, |
|
314 | 314 | pars, |
|
315 | 315 | b"r%i" % id, |
|
316 | 316 | files, |
|
317 | 317 | fctxfn, |
|
318 | 318 | date=(id, 0), |
|
319 | 319 | user=b"debugbuilddag", |
|
320 | 320 | extra={b'branch': atbranch}, |
|
321 | 321 | ) |
|
322 | 322 | nodeid = repo.commitctx(cx) |
|
323 | 323 | nodeids.append(nodeid) |
|
324 | 324 | at = id |
|
325 | 325 | elif type == b'l': |
|
326 | 326 | id, name = data |
|
327 | 327 | ui.note((b'tag %s\n' % name)) |
|
328 | 328 | tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name)) |
|
329 | 329 | elif type == b'a': |
|
330 | 330 | ui.note((b'branch %s\n' % data)) |
|
331 | 331 | atbranch = data |
|
332 | 332 | progress.update(id) |
|
333 | 333 | |
|
334 | 334 | if tags: |
|
335 | 335 | repo.vfs.write(b"localtags", b"".join(tags)) |
|
336 | 336 | |
|
337 | 337 | |
|
338 | 338 | def _debugchangegroup(ui, gen, all=None, indent=0, **opts): |
|
339 | 339 | indent_string = b' ' * indent |
|
340 | 340 | if all: |
|
341 | 341 | ui.writenoi18n( |
|
342 | 342 | b"%sformat: id, p1, p2, cset, delta base, len(delta)\n" |
|
343 | 343 | % indent_string |
|
344 | 344 | ) |
|
345 | 345 | |
|
346 | 346 | def showchunks(named): |
|
347 | 347 | ui.write(b"\n%s%s\n" % (indent_string, named)) |
|
348 | 348 | for deltadata in gen.deltaiter(): |
|
349 | node, p1, p2, cs, deltabase, delta, flags = deltadata | |
|
349 | node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata | |
|
350 | 350 | ui.write( |
|
351 | 351 | b"%s%s %s %s %s %s %d\n" |
|
352 | 352 | % ( |
|
353 | 353 | indent_string, |
|
354 | 354 | hex(node), |
|
355 | 355 | hex(p1), |
|
356 | 356 | hex(p2), |
|
357 | 357 | hex(cs), |
|
358 | 358 | hex(deltabase), |
|
359 | 359 | len(delta), |
|
360 | 360 | ) |
|
361 | 361 | ) |
|
362 | 362 | |
|
363 | 363 | gen.changelogheader() |
|
364 | 364 | showchunks(b"changelog") |
|
365 | 365 | gen.manifestheader() |
|
366 | 366 | showchunks(b"manifest") |
|
367 | 367 | for chunkdata in iter(gen.filelogheader, {}): |
|
368 | 368 | fname = chunkdata[b'filename'] |
|
369 | 369 | showchunks(fname) |
|
370 | 370 | else: |
|
371 | 371 | if isinstance(gen, bundle2.unbundle20): |
|
372 | 372 | raise error.Abort(_(b'use debugbundle2 for this file')) |
|
373 | 373 | gen.changelogheader() |
|
374 | 374 | for deltadata in gen.deltaiter(): |
|
375 | node, p1, p2, cs, deltabase, delta, flags = deltadata | |
|
375 | node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata | |
|
376 | 376 | ui.write(b"%s%s\n" % (indent_string, hex(node))) |
|
377 | 377 | |
|
378 | 378 | |
|
379 | 379 | def _debugobsmarkers(ui, part, indent=0, **opts): |
|
380 | 380 | """display version and markers contained in 'data'""" |
|
381 | 381 | opts = pycompat.byteskwargs(opts) |
|
382 | 382 | data = part.read() |
|
383 | 383 | indent_string = b' ' * indent |
|
384 | 384 | try: |
|
385 | 385 | version, markers = obsolete._readmarkers(data) |
|
386 | 386 | except error.UnknownVersion as exc: |
|
387 | 387 | msg = b"%sunsupported version: %s (%d bytes)\n" |
|
388 | 388 | msg %= indent_string, exc.version, len(data) |
|
389 | 389 | ui.write(msg) |
|
390 | 390 | else: |
|
391 | 391 | msg = b"%sversion: %d (%d bytes)\n" |
|
392 | 392 | msg %= indent_string, version, len(data) |
|
393 | 393 | ui.write(msg) |
|
394 | 394 | fm = ui.formatter(b'debugobsolete', opts) |
|
395 | 395 | for rawmarker in sorted(markers): |
|
396 | 396 | m = obsutil.marker(None, rawmarker) |
|
397 | 397 | fm.startitem() |
|
398 | 398 | fm.plain(indent_string) |
|
399 | 399 | cmdutil.showmarker(fm, m) |
|
400 | 400 | fm.end() |
|
401 | 401 | |
|
402 | 402 | |
|
403 | 403 | def _debugphaseheads(ui, data, indent=0): |
|
404 | 404 | """display version and markers contained in 'data'""" |
|
405 | 405 | indent_string = b' ' * indent |
|
406 | 406 | headsbyphase = phases.binarydecode(data) |
|
407 | 407 | for phase in phases.allphases: |
|
408 | 408 | for head in headsbyphase[phase]: |
|
409 | 409 | ui.write(indent_string) |
|
410 | 410 | ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase])) |
|
411 | 411 | |
|
412 | 412 | |
|
413 | 413 | def _quasirepr(thing): |
|
414 | 414 | if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)): |
|
415 | 415 | return b'{%s}' % ( |
|
416 | 416 | b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing)) |
|
417 | 417 | ) |
|
418 | 418 | return pycompat.bytestr(repr(thing)) |
|
419 | 419 | |
|
420 | 420 | |
|
421 | 421 | def _debugbundle2(ui, gen, all=None, **opts): |
|
422 | 422 | """lists the contents of a bundle2""" |
|
423 | 423 | if not isinstance(gen, bundle2.unbundle20): |
|
424 | 424 | raise error.Abort(_(b'not a bundle2 file')) |
|
425 | 425 | ui.write((b'Stream params: %s\n' % _quasirepr(gen.params))) |
|
426 | 426 | parttypes = opts.get('part_type', []) |
|
427 | 427 | for part in gen.iterparts(): |
|
428 | 428 | if parttypes and part.type not in parttypes: |
|
429 | 429 | continue |
|
430 | 430 | msg = b'%s -- %s (mandatory: %r)\n' |
|
431 | 431 | ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory))) |
|
432 | 432 | if part.type == b'changegroup': |
|
433 | 433 | version = part.params.get(b'version', b'01') |
|
434 | 434 | cg = changegroup.getunbundler(version, part, b'UN') |
|
435 | 435 | if not ui.quiet: |
|
436 | 436 | _debugchangegroup(ui, cg, all=all, indent=4, **opts) |
|
437 | 437 | if part.type == b'obsmarkers': |
|
438 | 438 | if not ui.quiet: |
|
439 | 439 | _debugobsmarkers(ui, part, indent=4, **opts) |
|
440 | 440 | if part.type == b'phase-heads': |
|
441 | 441 | if not ui.quiet: |
|
442 | 442 | _debugphaseheads(ui, part, indent=4) |
|
443 | 443 | |
|
444 | 444 | |
|
445 | 445 | @command( |
|
446 | 446 | b'debugbundle', |
|
447 | 447 | [ |
|
448 | 448 | (b'a', b'all', None, _(b'show all details')), |
|
449 | 449 | (b'', b'part-type', [], _(b'show only the named part type')), |
|
450 | 450 | (b'', b'spec', None, _(b'print the bundlespec of the bundle')), |
|
451 | 451 | ], |
|
452 | 452 | _(b'FILE'), |
|
453 | 453 | norepo=True, |
|
454 | 454 | ) |
|
455 | 455 | def debugbundle(ui, bundlepath, all=None, spec=None, **opts): |
|
456 | 456 | """lists the contents of a bundle""" |
|
457 | 457 | with hg.openpath(ui, bundlepath) as f: |
|
458 | 458 | if spec: |
|
459 | 459 | spec = exchange.getbundlespec(ui, f) |
|
460 | 460 | ui.write(b'%s\n' % spec) |
|
461 | 461 | return |
|
462 | 462 | |
|
463 | 463 | gen = exchange.readbundle(ui, f, bundlepath) |
|
464 | 464 | if isinstance(gen, bundle2.unbundle20): |
|
465 | 465 | return _debugbundle2(ui, gen, all=all, **opts) |
|
466 | 466 | _debugchangegroup(ui, gen, all=all, **opts) |
|
467 | 467 | |
|
468 | 468 | |
|
469 | 469 | @command(b'debugcapabilities', [], _(b'PATH'), norepo=True) |
|
470 | 470 | def debugcapabilities(ui, path, **opts): |
|
471 | 471 | """lists the capabilities of a remote peer""" |
|
472 | 472 | opts = pycompat.byteskwargs(opts) |
|
473 | 473 | peer = hg.peer(ui, opts, path) |
|
474 | 474 | try: |
|
475 | 475 | caps = peer.capabilities() |
|
476 | 476 | ui.writenoi18n(b'Main capabilities:\n') |
|
477 | 477 | for c in sorted(caps): |
|
478 | 478 | ui.write(b' %s\n' % c) |
|
479 | 479 | b2caps = bundle2.bundle2caps(peer) |
|
480 | 480 | if b2caps: |
|
481 | 481 | ui.writenoi18n(b'Bundle2 capabilities:\n') |
|
482 | 482 | for key, values in sorted(pycompat.iteritems(b2caps)): |
|
483 | 483 | ui.write(b' %s\n' % key) |
|
484 | 484 | for v in values: |
|
485 | 485 | ui.write(b' %s\n' % v) |
|
486 | 486 | finally: |
|
487 | 487 | peer.close() |
|
488 | 488 | |
|
489 | 489 | |
|
490 | 490 | @command( |
|
491 | 491 | b'debugchangedfiles', |
|
492 | 492 | [ |
|
493 | 493 | ( |
|
494 | 494 | b'', |
|
495 | 495 | b'compute', |
|
496 | 496 | False, |
|
497 | 497 | b"compute information instead of reading it from storage", |
|
498 | 498 | ), |
|
499 | 499 | ], |
|
500 | 500 | b'REV', |
|
501 | 501 | ) |
|
502 | 502 | def debugchangedfiles(ui, repo, rev, **opts): |
|
503 | 503 | """list the stored files changes for a revision""" |
|
504 | 504 | ctx = scmutil.revsingle(repo, rev, None) |
|
505 | 505 | files = None |
|
506 | 506 | |
|
507 | 507 | if opts['compute']: |
|
508 | 508 | files = metadata.compute_all_files_changes(ctx) |
|
509 | 509 | else: |
|
510 | 510 | sd = repo.changelog.sidedata(ctx.rev()) |
|
511 | 511 | files_block = sd.get(sidedata.SD_FILES) |
|
512 | 512 | if files_block is not None: |
|
513 | 513 | files = metadata.decode_files_sidedata(sd) |
|
514 | 514 | if files is not None: |
|
515 | 515 | for f in sorted(files.touched): |
|
516 | 516 | if f in files.added: |
|
517 | 517 | action = b"added" |
|
518 | 518 | elif f in files.removed: |
|
519 | 519 | action = b"removed" |
|
520 | 520 | elif f in files.merged: |
|
521 | 521 | action = b"merged" |
|
522 | 522 | elif f in files.salvaged: |
|
523 | 523 | action = b"salvaged" |
|
524 | 524 | else: |
|
525 | 525 | action = b"touched" |
|
526 | 526 | |
|
527 | 527 | copy_parent = b"" |
|
528 | 528 | copy_source = b"" |
|
529 | 529 | if f in files.copied_from_p1: |
|
530 | 530 | copy_parent = b"p1" |
|
531 | 531 | copy_source = files.copied_from_p1[f] |
|
532 | 532 | elif f in files.copied_from_p2: |
|
533 | 533 | copy_parent = b"p2" |
|
534 | 534 | copy_source = files.copied_from_p2[f] |
|
535 | 535 | |
|
536 | 536 | data = (action, copy_parent, f, copy_source) |
|
537 | 537 | template = b"%-8s %2s: %s, %s;\n" |
|
538 | 538 | ui.write(template % data) |
|
539 | 539 | |
|
540 | 540 | |
|
541 | 541 | @command(b'debugcheckstate', [], b'') |
|
542 | 542 | def debugcheckstate(ui, repo): |
|
543 | 543 | """validate the correctness of the current dirstate""" |
|
544 | 544 | parent1, parent2 = repo.dirstate.parents() |
|
545 | 545 | m1 = repo[parent1].manifest() |
|
546 | 546 | m2 = repo[parent2].manifest() |
|
547 | 547 | errors = 0 |
|
548 | 548 | for f in repo.dirstate: |
|
549 | 549 | state = repo.dirstate[f] |
|
550 | 550 | if state in b"nr" and f not in m1: |
|
551 | 551 | ui.warn(_(b"%s in state %s, but not in manifest1\n") % (f, state)) |
|
552 | 552 | errors += 1 |
|
553 | 553 | if state in b"a" and f in m1: |
|
554 | 554 | ui.warn(_(b"%s in state %s, but also in manifest1\n") % (f, state)) |
|
555 | 555 | errors += 1 |
|
556 | 556 | if state in b"m" and f not in m1 and f not in m2: |
|
557 | 557 | ui.warn( |
|
558 | 558 | _(b"%s in state %s, but not in either manifest\n") % (f, state) |
|
559 | 559 | ) |
|
560 | 560 | errors += 1 |
|
561 | 561 | for f in m1: |
|
562 | 562 | state = repo.dirstate[f] |
|
563 | 563 | if state not in b"nrm": |
|
564 | 564 | ui.warn(_(b"%s in manifest1, but listed as state %s") % (f, state)) |
|
565 | 565 | errors += 1 |
|
566 | 566 | if errors: |
|
567 | 567 | errstr = _(b".hg/dirstate inconsistent with current parent's manifest") |
|
568 | 568 | raise error.Abort(errstr) |
|
569 | 569 | |
|
570 | 570 | |
|
571 | 571 | @command( |
|
572 | 572 | b'debugcolor', |
|
573 | 573 | [(b'', b'style', None, _(b'show all configured styles'))], |
|
574 | 574 | b'hg debugcolor', |
|
575 | 575 | ) |
|
576 | 576 | def debugcolor(ui, repo, **opts): |
|
577 | 577 | """show available color, effects or style""" |
|
578 | 578 | ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode)) |
|
579 | 579 | if opts.get('style'): |
|
580 | 580 | return _debugdisplaystyle(ui) |
|
581 | 581 | else: |
|
582 | 582 | return _debugdisplaycolor(ui) |
|
583 | 583 | |
|
584 | 584 | |
|
585 | 585 | def _debugdisplaycolor(ui): |
|
586 | 586 | ui = ui.copy() |
|
587 | 587 | ui._styles.clear() |
|
588 | 588 | for effect in color._activeeffects(ui).keys(): |
|
589 | 589 | ui._styles[effect] = effect |
|
590 | 590 | if ui._terminfoparams: |
|
591 | 591 | for k, v in ui.configitems(b'color'): |
|
592 | 592 | if k.startswith(b'color.'): |
|
593 | 593 | ui._styles[k] = k[6:] |
|
594 | 594 | elif k.startswith(b'terminfo.'): |
|
595 | 595 | ui._styles[k] = k[9:] |
|
596 | 596 | ui.write(_(b'available colors:\n')) |
|
597 | 597 | # sort label with a '_' after the other to group '_background' entry. |
|
598 | 598 | items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1])) |
|
599 | 599 | for colorname, label in items: |
|
600 | 600 | ui.write(b'%s\n' % colorname, label=label) |
|
601 | 601 | |
|
602 | 602 | |
|
603 | 603 | def _debugdisplaystyle(ui): |
|
604 | 604 | ui.write(_(b'available style:\n')) |
|
605 | 605 | if not ui._styles: |
|
606 | 606 | return |
|
607 | 607 | width = max(len(s) for s in ui._styles) |
|
608 | 608 | for label, effects in sorted(ui._styles.items()): |
|
609 | 609 | ui.write(b'%s' % label, label=label) |
|
610 | 610 | if effects: |
|
611 | 611 | # 50 |
|
612 | 612 | ui.write(b': ') |
|
613 | 613 | ui.write(b' ' * (max(0, width - len(label)))) |
|
614 | 614 | ui.write(b', '.join(ui.label(e, e) for e in effects.split())) |
|
615 | 615 | ui.write(b'\n') |
|
616 | 616 | |
|
617 | 617 | |
|
618 | 618 | @command(b'debugcreatestreamclonebundle', [], b'FILE') |
|
619 | 619 | def debugcreatestreamclonebundle(ui, repo, fname): |
|
620 | 620 | """create a stream clone bundle file |
|
621 | 621 | |
|
622 | 622 | Stream bundles are special bundles that are essentially archives of |
|
623 | 623 | revlog files. They are commonly used for cloning very quickly. |
|
624 | 624 | """ |
|
625 | 625 | # TODO we may want to turn this into an abort when this functionality |
|
626 | 626 | # is moved into `hg bundle`. |
|
627 | 627 | if phases.hassecret(repo): |
|
628 | 628 | ui.warn( |
|
629 | 629 | _( |
|
630 | 630 | b'(warning: stream clone bundle will contain secret ' |
|
631 | 631 | b'revisions)\n' |
|
632 | 632 | ) |
|
633 | 633 | ) |
|
634 | 634 | |
|
635 | 635 | requirements, gen = streamclone.generatebundlev1(repo) |
|
636 | 636 | changegroup.writechunks(ui, gen, fname) |
|
637 | 637 | |
|
638 | 638 | ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements))) |
|
639 | 639 | |
|
640 | 640 | |
|
641 | 641 | @command( |
|
642 | 642 | b'debugdag', |
|
643 | 643 | [ |
|
644 | 644 | (b't', b'tags', None, _(b'use tags as labels')), |
|
645 | 645 | (b'b', b'branches', None, _(b'annotate with branch names')), |
|
646 | 646 | (b'', b'dots', None, _(b'use dots for runs')), |
|
647 | 647 | (b's', b'spaces', None, _(b'separate elements by spaces')), |
|
648 | 648 | ], |
|
649 | 649 | _(b'[OPTION]... [FILE [REV]...]'), |
|
650 | 650 | optionalrepo=True, |
|
651 | 651 | ) |
|
652 | 652 | def debugdag(ui, repo, file_=None, *revs, **opts): |
|
653 | 653 | """format the changelog or an index DAG as a concise textual description |
|
654 | 654 | |
|
655 | 655 | If you pass a revlog index, the revlog's DAG is emitted. If you list |
|
656 | 656 | revision numbers, they get labeled in the output as rN. |
|
657 | 657 | |
|
658 | 658 | Otherwise, the changelog DAG of the current repo is emitted. |
|
659 | 659 | """ |
|
660 | 660 | spaces = opts.get('spaces') |
|
661 | 661 | dots = opts.get('dots') |
|
662 | 662 | if file_: |
|
663 | 663 | rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_) |
|
664 | 664 | revs = {int(r) for r in revs} |
|
665 | 665 | |
|
666 | 666 | def events(): |
|
667 | 667 | for r in rlog: |
|
668 | 668 | yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1)) |
|
669 | 669 | if r in revs: |
|
670 | 670 | yield b'l', (r, b"r%i" % r) |
|
671 | 671 | |
|
672 | 672 | elif repo: |
|
673 | 673 | cl = repo.changelog |
|
674 | 674 | tags = opts.get('tags') |
|
675 | 675 | branches = opts.get('branches') |
|
676 | 676 | if tags: |
|
677 | 677 | labels = {} |
|
678 | 678 | for l, n in repo.tags().items(): |
|
679 | 679 | labels.setdefault(cl.rev(n), []).append(l) |
|
680 | 680 | |
|
681 | 681 | def events(): |
|
682 | 682 | b = b"default" |
|
683 | 683 | for r in cl: |
|
684 | 684 | if branches: |
|
685 | 685 | newb = cl.read(cl.node(r))[5][b'branch'] |
|
686 | 686 | if newb != b: |
|
687 | 687 | yield b'a', newb |
|
688 | 688 | b = newb |
|
689 | 689 | yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1)) |
|
690 | 690 | if tags: |
|
691 | 691 | ls = labels.get(r) |
|
692 | 692 | if ls: |
|
693 | 693 | for l in ls: |
|
694 | 694 | yield b'l', (r, l) |
|
695 | 695 | |
|
696 | 696 | else: |
|
697 | 697 | raise error.Abort(_(b'need repo for changelog dag')) |
|
698 | 698 | |
|
699 | 699 | for line in dagparser.dagtextlines( |
|
700 | 700 | events(), |
|
701 | 701 | addspaces=spaces, |
|
702 | 702 | wraplabels=True, |
|
703 | 703 | wrapannotations=True, |
|
704 | 704 | wrapnonlinear=dots, |
|
705 | 705 | usedots=dots, |
|
706 | 706 | maxlinewidth=70, |
|
707 | 707 | ): |
|
708 | 708 | ui.write(line) |
|
709 | 709 | ui.write(b"\n") |
|
710 | 710 | |
|
711 | 711 | |
|
712 | 712 | @command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV')) |
|
713 | 713 | def debugdata(ui, repo, file_, rev=None, **opts): |
|
714 | 714 | """dump the contents of a data file revision""" |
|
715 | 715 | opts = pycompat.byteskwargs(opts) |
|
716 | 716 | if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'): |
|
717 | 717 | if rev is not None: |
|
718 | 718 | raise error.CommandError(b'debugdata', _(b'invalid arguments')) |
|
719 | 719 | file_, rev = None, file_ |
|
720 | 720 | elif rev is None: |
|
721 | 721 | raise error.CommandError(b'debugdata', _(b'invalid arguments')) |
|
722 | 722 | r = cmdutil.openstorage(repo, b'debugdata', file_, opts) |
|
723 | 723 | try: |
|
724 | 724 | ui.write(r.rawdata(r.lookup(rev))) |
|
725 | 725 | except KeyError: |
|
726 | 726 | raise error.Abort(_(b'invalid revision identifier %s') % rev) |
|
727 | 727 | |
|
728 | 728 | |
|
729 | 729 | @command( |
|
730 | 730 | b'debugdate', |
|
731 | 731 | [(b'e', b'extended', None, _(b'try extended date formats'))], |
|
732 | 732 | _(b'[-e] DATE [RANGE]'), |
|
733 | 733 | norepo=True, |
|
734 | 734 | optionalrepo=True, |
|
735 | 735 | ) |
|
736 | 736 | def debugdate(ui, date, range=None, **opts): |
|
737 | 737 | """parse and display a date""" |
|
738 | 738 | if opts["extended"]: |
|
739 | 739 | d = dateutil.parsedate(date, dateutil.extendeddateformats) |
|
740 | 740 | else: |
|
741 | 741 | d = dateutil.parsedate(date) |
|
742 | 742 | ui.writenoi18n(b"internal: %d %d\n" % d) |
|
743 | 743 | ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(d)) |
|
744 | 744 | if range: |
|
745 | 745 | m = dateutil.matchdate(range) |
|
746 | 746 | ui.writenoi18n(b"match: %s\n" % m(d[0])) |
|
747 | 747 | |
|
748 | 748 | |
|
749 | 749 | @command( |
|
750 | 750 | b'debugdeltachain', |
|
751 | 751 | cmdutil.debugrevlogopts + cmdutil.formatteropts, |
|
752 | 752 | _(b'-c|-m|FILE'), |
|
753 | 753 | optionalrepo=True, |
|
754 | 754 | ) |
|
755 | 755 | def debugdeltachain(ui, repo, file_=None, **opts): |
|
756 | 756 | """dump information about delta chains in a revlog |
|
757 | 757 | |
|
758 | 758 | Output can be templatized. Available template keywords are: |
|
759 | 759 | |
|
760 | 760 | :``rev``: revision number |
|
761 | 761 | :``chainid``: delta chain identifier (numbered by unique base) |
|
762 | 762 | :``chainlen``: delta chain length to this revision |
|
763 | 763 | :``prevrev``: previous revision in delta chain |
|
764 | 764 | :``deltatype``: role of delta / how it was computed |
|
765 | 765 | :``compsize``: compressed size of revision |
|
766 | 766 | :``uncompsize``: uncompressed size of revision |
|
767 | 767 | :``chainsize``: total size of compressed revisions in chain |
|
768 | 768 | :``chainratio``: total chain size divided by uncompressed revision size |
|
769 | 769 | (new delta chains typically start at ratio 2.00) |
|
770 | 770 | :``lindist``: linear distance from base revision in delta chain to end |
|
771 | 771 | of this revision |
|
772 | 772 | :``extradist``: total size of revisions not part of this delta chain from |
|
773 | 773 | base of delta chain to end of this revision; a measurement |
|
774 | 774 | of how much extra data we need to read/seek across to read |
|
775 | 775 | the delta chain for this revision |
|
776 | 776 | :``extraratio``: extradist divided by chainsize; another representation of |
|
777 | 777 | how much unrelated data is needed to load this delta chain |
|
778 | 778 | |
|
779 | 779 | If the repository is configured to use the sparse read, additional keywords |
|
780 | 780 | are available: |
|
781 | 781 | |
|
782 | 782 | :``readsize``: total size of data read from the disk for a revision |
|
783 | 783 | (sum of the sizes of all the blocks) |
|
784 | 784 | :``largestblock``: size of the largest block of data read from the disk |
|
785 | 785 | :``readdensity``: density of useful bytes in the data read from the disk |
|
786 | 786 | :``srchunks``: in how many data hunks the whole revision would be read |
|
787 | 787 | |
|
788 | 788 | The sparse read can be enabled with experimental.sparse-read = True |
|
789 | 789 | """ |
|
790 | 790 | opts = pycompat.byteskwargs(opts) |
|
791 | 791 | r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts) |
|
792 | 792 | index = r.index |
|
793 | 793 | start = r.start |
|
794 | 794 | length = r.length |
|
795 | 795 | generaldelta = r.version & revlog.FLAG_GENERALDELTA |
|
796 | 796 | withsparseread = getattr(r, '_withsparseread', False) |
|
797 | 797 | |
|
798 | 798 | def revinfo(rev): |
|
799 | 799 | e = index[rev] |
|
800 | 800 | compsize = e[1] |
|
801 | 801 | uncompsize = e[2] |
|
802 | 802 | chainsize = 0 |
|
803 | 803 | |
|
804 | 804 | if generaldelta: |
|
805 | 805 | if e[3] == e[5]: |
|
806 | 806 | deltatype = b'p1' |
|
807 | 807 | elif e[3] == e[6]: |
|
808 | 808 | deltatype = b'p2' |
|
809 | 809 | elif e[3] == rev - 1: |
|
810 | 810 | deltatype = b'prev' |
|
811 | 811 | elif e[3] == rev: |
|
812 | 812 | deltatype = b'base' |
|
813 | 813 | else: |
|
814 | 814 | deltatype = b'other' |
|
815 | 815 | else: |
|
816 | 816 | if e[3] == rev: |
|
817 | 817 | deltatype = b'base' |
|
818 | 818 | else: |
|
819 | 819 | deltatype = b'prev' |
|
820 | 820 | |
|
821 | 821 | chain = r._deltachain(rev)[0] |
|
822 | 822 | for iterrev in chain: |
|
823 | 823 | e = index[iterrev] |
|
824 | 824 | chainsize += e[1] |
|
825 | 825 | |
|
826 | 826 | return compsize, uncompsize, deltatype, chain, chainsize |
|
827 | 827 | |
|
828 | 828 | fm = ui.formatter(b'debugdeltachain', opts) |
|
829 | 829 | |
|
830 | 830 | fm.plain( |
|
831 | 831 | b' rev chain# chainlen prev delta ' |
|
832 | 832 | b'size rawsize chainsize ratio lindist extradist ' |
|
833 | 833 | b'extraratio' |
|
834 | 834 | ) |
|
835 | 835 | if withsparseread: |
|
836 | 836 | fm.plain(b' readsize largestblk rddensity srchunks') |
|
837 | 837 | fm.plain(b'\n') |
|
838 | 838 | |
|
839 | 839 | chainbases = {} |
|
840 | 840 | for rev in r: |
|
841 | 841 | comp, uncomp, deltatype, chain, chainsize = revinfo(rev) |
|
842 | 842 | chainbase = chain[0] |
|
843 | 843 | chainid = chainbases.setdefault(chainbase, len(chainbases) + 1) |
|
844 | 844 | basestart = start(chainbase) |
|
845 | 845 | revstart = start(rev) |
|
846 | 846 | lineardist = revstart + comp - basestart |
|
847 | 847 | extradist = lineardist - chainsize |
|
848 | 848 | try: |
|
849 | 849 | prevrev = chain[-2] |
|
850 | 850 | except IndexError: |
|
851 | 851 | prevrev = -1 |
|
852 | 852 | |
|
853 | 853 | if uncomp != 0: |
|
854 | 854 | chainratio = float(chainsize) / float(uncomp) |
|
855 | 855 | else: |
|
856 | 856 | chainratio = chainsize |
|
857 | 857 | |
|
858 | 858 | if chainsize != 0: |
|
859 | 859 | extraratio = float(extradist) / float(chainsize) |
|
860 | 860 | else: |
|
861 | 861 | extraratio = extradist |
|
862 | 862 | |
|
863 | 863 | fm.startitem() |
|
864 | 864 | fm.write( |
|
865 | 865 | b'rev chainid chainlen prevrev deltatype compsize ' |
|
866 | 866 | b'uncompsize chainsize chainratio lindist extradist ' |
|
867 | 867 | b'extraratio', |
|
868 | 868 | b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f', |
|
869 | 869 | rev, |
|
870 | 870 | chainid, |
|
871 | 871 | len(chain), |
|
872 | 872 | prevrev, |
|
873 | 873 | deltatype, |
|
874 | 874 | comp, |
|
875 | 875 | uncomp, |
|
876 | 876 | chainsize, |
|
877 | 877 | chainratio, |
|
878 | 878 | lineardist, |
|
879 | 879 | extradist, |
|
880 | 880 | extraratio, |
|
881 | 881 | rev=rev, |
|
882 | 882 | chainid=chainid, |
|
883 | 883 | chainlen=len(chain), |
|
884 | 884 | prevrev=prevrev, |
|
885 | 885 | deltatype=deltatype, |
|
886 | 886 | compsize=comp, |
|
887 | 887 | uncompsize=uncomp, |
|
888 | 888 | chainsize=chainsize, |
|
889 | 889 | chainratio=chainratio, |
|
890 | 890 | lindist=lineardist, |
|
891 | 891 | extradist=extradist, |
|
892 | 892 | extraratio=extraratio, |
|
893 | 893 | ) |
|
894 | 894 | if withsparseread: |
|
895 | 895 | readsize = 0 |
|
896 | 896 | largestblock = 0 |
|
897 | 897 | srchunks = 0 |
|
898 | 898 | |
|
899 | 899 | for revschunk in deltautil.slicechunk(r, chain): |
|
900 | 900 | srchunks += 1 |
|
901 | 901 | blkend = start(revschunk[-1]) + length(revschunk[-1]) |
|
902 | 902 | blksize = blkend - start(revschunk[0]) |
|
903 | 903 | |
|
904 | 904 | readsize += blksize |
|
905 | 905 | if largestblock < blksize: |
|
906 | 906 | largestblock = blksize |
|
907 | 907 | |
|
908 | 908 | if readsize: |
|
909 | 909 | readdensity = float(chainsize) / float(readsize) |
|
910 | 910 | else: |
|
911 | 911 | readdensity = 1 |
|
912 | 912 | |
|
913 | 913 | fm.write( |
|
914 | 914 | b'readsize largestblock readdensity srchunks', |
|
915 | 915 | b' %10d %10d %9.5f %8d', |
|
916 | 916 | readsize, |
|
917 | 917 | largestblock, |
|
918 | 918 | readdensity, |
|
919 | 919 | srchunks, |
|
920 | 920 | readsize=readsize, |
|
921 | 921 | largestblock=largestblock, |
|
922 | 922 | readdensity=readdensity, |
|
923 | 923 | srchunks=srchunks, |
|
924 | 924 | ) |
|
925 | 925 | |
|
926 | 926 | fm.plain(b'\n') |
|
927 | 927 | |
|
928 | 928 | fm.end() |
|
929 | 929 | |
|
930 | 930 | |
|
931 | 931 | @command( |
|
932 | 932 | b'debugdirstate|debugstate', |
|
933 | 933 | [ |
|
934 | 934 | ( |
|
935 | 935 | b'', |
|
936 | 936 | b'nodates', |
|
937 | 937 | None, |
|
938 | 938 | _(b'do not display the saved mtime (DEPRECATED)'), |
|
939 | 939 | ), |
|
940 | 940 | (b'', b'dates', True, _(b'display the saved mtime')), |
|
941 | 941 | (b'', b'datesort', None, _(b'sort by saved mtime')), |
|
942 | 942 | ], |
|
943 | 943 | _(b'[OPTION]...'), |
|
944 | 944 | ) |
|
945 | 945 | def debugstate(ui, repo, **opts): |
|
946 | 946 | """show the contents of the current dirstate""" |
|
947 | 947 | |
|
948 | 948 | nodates = not opts['dates'] |
|
949 | 949 | if opts.get('nodates') is not None: |
|
950 | 950 | nodates = True |
|
951 | 951 | datesort = opts.get('datesort') |
|
952 | 952 | |
|
953 | 953 | if datesort: |
|
954 | 954 | keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename |
|
955 | 955 | else: |
|
956 | 956 | keyfunc = None # sort by filename |
|
957 | 957 | for file_, ent in sorted(pycompat.iteritems(repo.dirstate), key=keyfunc): |
|
958 | 958 | if ent[3] == -1: |
|
959 | 959 | timestr = b'unset ' |
|
960 | 960 | elif nodates: |
|
961 | 961 | timestr = b'set ' |
|
962 | 962 | else: |
|
963 | 963 | timestr = time.strftime( |
|
964 | 964 | "%Y-%m-%d %H:%M:%S ", time.localtime(ent[3]) |
|
965 | 965 | ) |
|
966 | 966 | timestr = encoding.strtolocal(timestr) |
|
967 | 967 | if ent[1] & 0o20000: |
|
968 | 968 | mode = b'lnk' |
|
969 | 969 | else: |
|
970 | 970 | mode = b'%3o' % (ent[1] & 0o777 & ~util.umask) |
|
971 | 971 | ui.write(b"%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_)) |
|
972 | 972 | for f in repo.dirstate.copies(): |
|
973 | 973 | ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f)) |
|
974 | 974 | |
|
975 | 975 | |
|
976 | 976 | @command( |
|
977 | 977 | b'debugdiscovery', |
|
978 | 978 | [ |
|
979 | 979 | (b'', b'old', None, _(b'use old-style discovery')), |
|
980 | 980 | ( |
|
981 | 981 | b'', |
|
982 | 982 | b'nonheads', |
|
983 | 983 | None, |
|
984 | 984 | _(b'use old-style discovery with non-heads included'), |
|
985 | 985 | ), |
|
986 | 986 | (b'', b'rev', [], b'restrict discovery to this set of revs'), |
|
987 | 987 | (b'', b'seed', b'12323', b'specify the random seed use for discovery'), |
|
988 | 988 | ( |
|
989 | 989 | b'', |
|
990 | 990 | b'local-as-revs', |
|
991 | 991 | "", |
|
992 | 992 | 'treat local has having these revisions only', |
|
993 | 993 | ), |
|
994 | 994 | ( |
|
995 | 995 | b'', |
|
996 | 996 | b'remote-as-revs', |
|
997 | 997 | "", |
|
998 | 998 | 'use local as remote, with only these these revisions', |
|
999 | 999 | ), |
|
1000 | 1000 | ] |
|
1001 | 1001 | + cmdutil.remoteopts, |
|
1002 | 1002 | _(b'[--rev REV] [OTHER]'), |
|
1003 | 1003 | ) |
|
1004 | 1004 | def debugdiscovery(ui, repo, remoteurl=b"default", **opts): |
|
1005 | 1005 | """runs the changeset discovery protocol in isolation |
|
1006 | 1006 | |
|
1007 | 1007 | The local peer can be "replaced" by a subset of the local repository by

1008 | 1008 | using the `--local-as-revs` flag. In the same way, the usual `remote` peer

1009 | 1009 | can be "replaced" by a subset of the local repository using the

1010 | 1010 | `--remote-as-revs` flag. This is useful to efficiently debug pathological

1011 | 1011 | discovery situations.
|
1012 | 1012 | """ |
|
1013 | 1013 | opts = pycompat.byteskwargs(opts) |
|
1014 | 1014 | unfi = repo.unfiltered() |
|
1015 | 1015 | |
|
1016 | 1016 | # setup potential extra filtering |
|
1017 | 1017 | local_revs = opts[b"local_as_revs"] |
|
1018 | 1018 | remote_revs = opts[b"remote_as_revs"] |
|
1019 | 1019 | |
|
1020 | 1020 | # make sure tests are repeatable |
|
1021 | 1021 | random.seed(int(opts[b'seed'])) |
|
1022 | 1022 | |
|
1023 | 1023 | if not remote_revs: |
|
1024 | 1024 | |
|
1025 | 1025 | remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl)) |
|
1026 | 1026 | remote = hg.peer(repo, opts, remoteurl) |
|
1027 | 1027 | ui.status(_(b'comparing with %s\n') % util.hidepassword(remoteurl)) |
|
1028 | 1028 | else: |
|
1029 | 1029 | branches = (None, []) |
|
1030 | 1030 | remote_filtered_revs = scmutil.revrange( |
|
1031 | 1031 | unfi, [b"not (::(%s))" % remote_revs] |
|
1032 | 1032 | ) |
|
1033 | 1033 | remote_filtered_revs = frozenset(remote_filtered_revs) |
|
1034 | 1034 | |
|
1035 | 1035 | def remote_func(x): |
|
1036 | 1036 | return remote_filtered_revs |
|
1037 | 1037 | |
|
1038 | 1038 | repoview.filtertable[b'debug-discovery-remote-filter'] = remote_func |
|
1039 | 1039 | |
|
1040 | 1040 | remote = repo.peer() |
|
1041 | 1041 | remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter') |
|
1042 | 1042 | |
|
1043 | 1043 | if local_revs: |
|
1044 | 1044 | local_filtered_revs = scmutil.revrange( |
|
1045 | 1045 | unfi, [b"not (::(%s))" % local_revs] |
|
1046 | 1046 | ) |
|
1047 | 1047 | local_filtered_revs = frozenset(local_filtered_revs) |
|
1048 | 1048 | |
|
1049 | 1049 | def local_func(x): |
|
1050 | 1050 | return local_filtered_revs |
|
1051 | 1051 | |
|
1052 | 1052 | repoview.filtertable[b'debug-discovery-local-filter'] = local_func |
|
1053 | 1053 | repo = repo.filtered(b'debug-discovery-local-filter') |
|
1054 | 1054 | |
|
1055 | 1055 | data = {} |
|
1056 | 1056 | if opts.get(b'old'): |
|
1057 | 1057 | |
|
1058 | 1058 | def doit(pushedrevs, remoteheads, remote=remote): |
|
1059 | 1059 | if not util.safehasattr(remote, b'branches'): |
|
1060 | 1060 | # enable in-client legacy support |
|
1061 | 1061 | remote = localrepo.locallegacypeer(remote.local()) |
|
1062 | 1062 | common, _in, hds = treediscovery.findcommonincoming( |
|
1063 | 1063 | repo, remote, force=True, audit=data |
|
1064 | 1064 | ) |
|
1065 | 1065 | common = set(common) |
|
1066 | 1066 | if not opts.get(b'nonheads'): |
|
1067 | 1067 | ui.writenoi18n( |
|
1068 | 1068 | b"unpruned common: %s\n" |
|
1069 | 1069 | % b" ".join(sorted(short(n) for n in common)) |
|
1070 | 1070 | ) |
|
1071 | 1071 | |
|
1072 | 1072 | clnode = repo.changelog.node |
|
1073 | 1073 | common = repo.revs(b'heads(::%ln)', common) |
|
1074 | 1074 | common = {clnode(r) for r in common} |
|
1075 | 1075 | return common, hds |
|
1076 | 1076 | |
|
1077 | 1077 | else: |
|
1078 | 1078 | |
|
1079 | 1079 | def doit(pushedrevs, remoteheads, remote=remote): |
|
1080 | 1080 | nodes = None |
|
1081 | 1081 | if pushedrevs: |
|
1082 | 1082 | revs = scmutil.revrange(repo, pushedrevs) |
|
1083 | 1083 | nodes = [repo[r].node() for r in revs] |
|
1084 | 1084 | common, any, hds = setdiscovery.findcommonheads( |
|
1085 | 1085 | ui, repo, remote, ancestorsof=nodes, audit=data |
|
1086 | 1086 | ) |
|
1087 | 1087 | return common, hds |
|
1088 | 1088 | |
|
1089 | 1089 | remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None) |
|
1090 | 1090 | localrevs = opts[b'rev'] |
|
1091 | 1091 | with util.timedcm('debug-discovery') as t: |
|
1092 | 1092 | common, hds = doit(localrevs, remoterevs) |
|
1093 | 1093 | |
|
1094 | 1094 | # compute all statistics |
|
1095 | 1095 | heads_common = set(common) |
|
1096 | 1096 | heads_remote = set(hds) |
|
1097 | 1097 | heads_local = set(repo.heads()) |
|
1098 | 1098 | # note: there cannot be a local or remote head that is in common and not
|
1099 | 1099 | # itself a head of common. |
|
1100 | 1100 | heads_common_local = heads_common & heads_local |
|
1101 | 1101 | heads_common_remote = heads_common & heads_remote |
|
1102 | 1102 | heads_common_both = heads_common & heads_remote & heads_local |
|
1103 | 1103 | |
|
1104 | 1104 | all = repo.revs(b'all()') |
|
1105 | 1105 | common = repo.revs(b'::%ln', common) |
|
1106 | 1106 | roots_common = repo.revs(b'roots(::%ld)', common) |
|
1107 | 1107 | missing = repo.revs(b'not ::%ld', common) |
|
1108 | 1108 | heads_missing = repo.revs(b'heads(%ld)', missing) |
|
1109 | 1109 | roots_missing = repo.revs(b'roots(%ld)', missing) |
|
1110 | 1110 | assert len(common) + len(missing) == len(all) |
|
1111 | 1111 | |
|
1112 | 1112 | initial_undecided = repo.revs( |
|
1113 | 1113 | b'not (::%ln or %ln::)', heads_common_remote, heads_common_local |
|
1114 | 1114 | ) |
|
1115 | 1115 | heads_initial_undecided = repo.revs(b'heads(%ld)', initial_undecided) |
|
1116 | 1116 | roots_initial_undecided = repo.revs(b'roots(%ld)', initial_undecided) |
|
1117 | 1117 | common_initial_undecided = initial_undecided & common |
|
1118 | 1118 | missing_initial_undecided = initial_undecided & missing |
|
1119 | 1119 | |
|
1120 | 1120 | data[b'elapsed'] = t.elapsed |
|
1121 | 1121 | data[b'nb-common-heads'] = len(heads_common) |
|
1122 | 1122 | data[b'nb-common-heads-local'] = len(heads_common_local) |
|
1123 | 1123 | data[b'nb-common-heads-remote'] = len(heads_common_remote) |
|
1124 | 1124 | data[b'nb-common-heads-both'] = len(heads_common_both) |
|
1125 | 1125 | data[b'nb-common-roots'] = len(roots_common) |
|
1126 | 1126 | data[b'nb-head-local'] = len(heads_local) |
|
1127 | 1127 | data[b'nb-head-local-missing'] = len(heads_local) - len(heads_common_local) |
|
1128 | 1128 | data[b'nb-head-remote'] = len(heads_remote) |
|
1129 | 1129 | data[b'nb-head-remote-unknown'] = len(heads_remote) - len( |
|
1130 | 1130 | heads_common_remote |
|
1131 | 1131 | ) |
|
1132 | 1132 | data[b'nb-revs'] = len(all) |
|
1133 | 1133 | data[b'nb-revs-common'] = len(common) |
|
1134 | 1134 | data[b'nb-revs-missing'] = len(missing) |
|
1135 | 1135 | data[b'nb-missing-heads'] = len(heads_missing) |
|
1136 | 1136 | data[b'nb-missing-roots'] = len(roots_missing) |
|
1137 | 1137 | data[b'nb-ini_und'] = len(initial_undecided) |
|
1138 | 1138 | data[b'nb-ini_und-heads'] = len(heads_initial_undecided) |
|
1139 | 1139 | data[b'nb-ini_und-roots'] = len(roots_initial_undecided) |
|
1140 | 1140 | data[b'nb-ini_und-common'] = len(common_initial_undecided) |
|
1141 | 1141 | data[b'nb-ini_und-missing'] = len(missing_initial_undecided) |
|
1142 | 1142 | |
|
1143 | 1143 | # display discovery summary |
|
1144 | 1144 | ui.writenoi18n(b"elapsed time: %(elapsed)f seconds\n" % data) |
|
1145 | 1145 | ui.writenoi18n(b"round-trips: %(total-roundtrips)9d\n" % data) |
|
1146 | 1146 | ui.writenoi18n(b"heads summary:\n") |
|
1147 | 1147 | ui.writenoi18n(b" total common heads: %(nb-common-heads)9d\n" % data) |
|
1148 | 1148 | ui.writenoi18n( |
|
1149 | 1149 | b" also local heads: %(nb-common-heads-local)9d\n" % data |
|
1150 | 1150 | ) |
|
1151 | 1151 | ui.writenoi18n( |
|
1152 | 1152 | b" also remote heads: %(nb-common-heads-remote)9d\n" % data |
|
1153 | 1153 | ) |
|
1154 | 1154 | ui.writenoi18n(b" both: %(nb-common-heads-both)9d\n" % data) |
|
1155 | 1155 | ui.writenoi18n(b" local heads: %(nb-head-local)9d\n" % data) |
|
1156 | 1156 | ui.writenoi18n( |
|
1157 | 1157 | b" common: %(nb-common-heads-local)9d\n" % data |
|
1158 | 1158 | ) |
|
1159 | 1159 | ui.writenoi18n( |
|
1160 | 1160 | b" missing: %(nb-head-local-missing)9d\n" % data |
|
1161 | 1161 | ) |
|
1162 | 1162 | ui.writenoi18n(b" remote heads: %(nb-head-remote)9d\n" % data) |
|
1163 | 1163 | ui.writenoi18n( |
|
1164 | 1164 | b" common: %(nb-common-heads-remote)9d\n" % data |
|
1165 | 1165 | ) |
|
1166 | 1166 | ui.writenoi18n( |
|
1167 | 1167 | b" unknown: %(nb-head-remote-unknown)9d\n" % data |
|
1168 | 1168 | ) |
|
1169 | 1169 | ui.writenoi18n(b"local changesets: %(nb-revs)9d\n" % data) |
|
1170 | 1170 | ui.writenoi18n(b" common: %(nb-revs-common)9d\n" % data) |
|
1171 | 1171 | ui.writenoi18n(b" heads: %(nb-common-heads)9d\n" % data) |
|
1172 | 1172 | ui.writenoi18n(b" roots: %(nb-common-roots)9d\n" % data) |
|
1173 | 1173 | ui.writenoi18n(b" missing: %(nb-revs-missing)9d\n" % data) |
|
1174 | 1174 | ui.writenoi18n(b" heads: %(nb-missing-heads)9d\n" % data) |
|
1175 | 1175 | ui.writenoi18n(b" roots: %(nb-missing-roots)9d\n" % data) |
|
1176 | 1176 | ui.writenoi18n(b" first undecided set: %(nb-ini_und)9d\n" % data) |
|
1177 | 1177 | ui.writenoi18n(b" heads: %(nb-ini_und-heads)9d\n" % data) |
|
1178 | 1178 | ui.writenoi18n(b" roots: %(nb-ini_und-roots)9d\n" % data) |
|
1179 | 1179 | ui.writenoi18n(b" common: %(nb-ini_und-common)9d\n" % data) |
|
1180 | 1180 | ui.writenoi18n(b" missing: %(nb-ini_und-missing)9d\n" % data) |
|
1181 | 1181 | |
|
1182 | 1182 | if ui.verbose: |
|
1183 | 1183 | ui.writenoi18n( |
|
1184 | 1184 | b"common heads: %s\n" |
|
1185 | 1185 | % b" ".join(sorted(short(n) for n in heads_common)) |
|
1186 | 1186 | ) |
|
1187 | 1187 | |
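# Illustrative invocations (hypothetical URL and revsets), using only the
# flags declared above:
#
#   hg debugdiscovery http://example.com/repo --rev 'heads(default)'
#   hg debugdiscovery --local-as-revs '::v1.0' --remote-as-revs '::v2.0'
#
# The second form runs both sides of the exchange against subsets of the
# local repository, making pathological discovery cases reproducible
# offline.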
|
1188 | 1188 | |
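# 4 KiB: the read/buffer size used by debugdownload below.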
|
1189 | 1189 | _chunksize = 4 << 10 |
|
1190 | 1190 | |
|
1191 | 1191 | |
|
1192 | 1192 | @command( |
|
1193 | 1193 | b'debugdownload', |
|
1194 | 1194 | [ |
|
1195 | 1195 | (b'o', b'output', b'', _(b'path')), |
|
1196 | 1196 | ], |
|
1197 | 1197 | optionalrepo=True, |
|
1198 | 1198 | ) |
|
1199 | 1199 | def debugdownload(ui, repo, url, output=None, **opts): |
|
1200 | 1200 | """download a resource using Mercurial logic and config""" |
|
1201 | 1201 | fh = urlmod.open(ui, url, output) |
|
1202 | 1202 | |
|
1203 | 1203 | dest = ui |
|
1204 | 1204 | if output: |
|
1205 | 1205 | dest = open(output, b"wb", _chunksize) |
|
1206 | 1206 | try: |
|
1207 | 1207 | data = fh.read(_chunksize) |
|
1208 | 1208 | while data: |
|
1209 | 1209 | dest.write(data) |
|
1210 | 1210 | data = fh.read(_chunksize) |
|
1211 | 1211 | finally: |
|
1212 | 1212 | if output: |
|
1213 | 1213 | dest.close() |
|
1214 | 1214 | |
|
1215 | 1215 | |
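# The loop above is the standard read-until-empty copy idiom.  A
# self-contained sketch of the same pattern over in-memory streams
# (illustration only):
#
#   >>> import io
#   >>> src, dst = io.BytesIO(b'x' * 10000), io.BytesIO()
#   >>> data = src.read(4096)
#   >>> while data:
#   ...     _ = dst.write(data)
#   ...     data = src.read(4096)
#   >>> len(dst.getvalue())
#   10000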
|
1216 | 1216 | @command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True) |
|
1217 | 1217 | def debugextensions(ui, repo, **opts): |
|
1218 | 1218 | '''show information about active extensions''' |
|
1219 | 1219 | opts = pycompat.byteskwargs(opts) |
|
1220 | 1220 | exts = extensions.extensions(ui) |
|
1221 | 1221 | hgver = util.version() |
|
1222 | 1222 | fm = ui.formatter(b'debugextensions', opts) |
|
1223 | 1223 | for extname, extmod in sorted(exts, key=operator.itemgetter(0)): |
|
1224 | 1224 | isinternal = extensions.ismoduleinternal(extmod) |
|
1225 | 1225 | extsource = None |
|
1226 | 1226 | |
|
1227 | 1227 | if util.safehasattr(extmod, '__file__'): |
|
1228 | 1228 | extsource = pycompat.fsencode(extmod.__file__) |
|
1229 | 1229 | elif getattr(sys, 'oxidized', False): |
|
1230 | 1230 | extsource = pycompat.sysexecutable |
|
1231 | 1231 | if isinternal: |
|
1232 | 1232 | exttestedwith = [] # never expose magic string to users |
|
1233 | 1233 | else: |
|
1234 | 1234 | exttestedwith = getattr(extmod, 'testedwith', b'').split() |
|
1235 | 1235 | extbuglink = getattr(extmod, 'buglink', None) |
|
1236 | 1236 | |
|
1237 | 1237 | fm.startitem() |
|
1238 | 1238 | |
|
1239 | 1239 | if ui.quiet or ui.verbose: |
|
1240 | 1240 | fm.write(b'name', b'%s\n', extname) |
|
1241 | 1241 | else: |
|
1242 | 1242 | fm.write(b'name', b'%s', extname) |
|
1243 | 1243 | if isinternal or hgver in exttestedwith: |
|
1244 | 1244 | fm.plain(b'\n') |
|
1245 | 1245 | elif not exttestedwith: |
|
1246 | 1246 | fm.plain(_(b' (untested!)\n')) |
|
1247 | 1247 | else: |
|
1248 | 1248 | lasttestedversion = exttestedwith[-1] |
|
1249 | 1249 | fm.plain(b' (%s!)\n' % lasttestedversion) |
|
1250 | 1250 | |
|
1251 | 1251 | fm.condwrite( |
|
1252 | 1252 | ui.verbose and extsource, |
|
1253 | 1253 | b'source', |
|
1254 | 1254 | _(b' location: %s\n'), |
|
1255 | 1255 | extsource or b"", |
|
1256 | 1256 | ) |
|
1257 | 1257 | |
|
1258 | 1258 | if ui.verbose: |
|
1259 | 1259 | fm.plain(_(b' bundled: %s\n') % [b'no', b'yes'][isinternal]) |
|
1260 | 1260 | fm.data(bundled=isinternal) |
|
1261 | 1261 | |
|
1262 | 1262 | fm.condwrite( |
|
1263 | 1263 | ui.verbose and exttestedwith, |
|
1264 | 1264 | b'testedwith', |
|
1265 | 1265 | _(b' tested with: %s\n'), |
|
1266 | 1266 | fm.formatlist(exttestedwith, name=b'ver'), |
|
1267 | 1267 | ) |
|
1268 | 1268 | |
|
1269 | 1269 | fm.condwrite( |
|
1270 | 1270 | ui.verbose and extbuglink, |
|
1271 | 1271 | b'buglink', |
|
1272 | 1272 | _(b' bug reporting: %s\n'), |
|
1273 | 1273 | extbuglink or b"", |
|
1274 | 1274 | ) |
|
1275 | 1275 | |
|
1276 | 1276 | fm.end() |
|
1277 | 1277 | |
|
1278 | 1278 | |
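# Example usage (--verbose is a global flag; -T comes from the shared
# formatter options registered above):
#
#   hg debugextensions --verbose
#   hg debugextensions -T json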
|
1279 | 1279 | @command( |
|
1280 | 1280 | b'debugfileset', |
|
1281 | 1281 | [ |
|
1282 | 1282 | ( |
|
1283 | 1283 | b'r', |
|
1284 | 1284 | b'rev', |
|
1285 | 1285 | b'', |
|
1286 | 1286 | _(b'apply the filespec on this revision'), |
|
1287 | 1287 | _(b'REV'), |
|
1288 | 1288 | ), |
|
1289 | 1289 | ( |
|
1290 | 1290 | b'', |
|
1291 | 1291 | b'all-files', |
|
1292 | 1292 | False, |
|
1293 | 1293 | _(b'test files from all revisions and working directory'), |
|
1294 | 1294 | ), |
|
1295 | 1295 | ( |
|
1296 | 1296 | b's', |
|
1297 | 1297 | b'show-matcher', |
|
1298 | 1298 | None, |
|
1299 | 1299 | _(b'print internal representation of matcher'), |
|
1300 | 1300 | ), |
|
1301 | 1301 | ( |
|
1302 | 1302 | b'p', |
|
1303 | 1303 | b'show-stage', |
|
1304 | 1304 | [], |
|
1305 | 1305 | _(b'print parsed tree at the given stage'), |
|
1306 | 1306 | _(b'NAME'), |
|
1307 | 1307 | ), |
|
1308 | 1308 | ], |
|
1309 | 1309 | _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'), |
|
1310 | 1310 | ) |
|
1311 | 1311 | def debugfileset(ui, repo, expr, **opts): |
|
1312 | 1312 | '''parse and apply a fileset specification''' |
|
1313 | 1313 | from . import fileset |
|
1314 | 1314 | |
|
1315 | 1315 | fileset.symbols # force import of fileset so we have predicates to optimize |
|
1316 | 1316 | opts = pycompat.byteskwargs(opts) |
|
1317 | 1317 | ctx = scmutil.revsingle(repo, opts.get(b'rev'), None) |
|
1318 | 1318 | |
|
1319 | 1319 | stages = [ |
|
1320 | 1320 | (b'parsed', pycompat.identity), |
|
1321 | 1321 | (b'analyzed', filesetlang.analyze), |
|
1322 | 1322 | (b'optimized', filesetlang.optimize), |
|
1323 | 1323 | ] |
|
1324 | 1324 | stagenames = {n for n, f in stages} |
|
1325 | 1325 | |
|
1326 | 1326 | showalways = set() |
|
1327 | 1327 | if ui.verbose and not opts[b'show_stage']: |
|
1328 | 1328 | # show parsed tree by --verbose (deprecated) |
|
1329 | 1329 | showalways.add(b'parsed') |
|
1330 | 1330 | if opts[b'show_stage'] == [b'all']: |
|
1331 | 1331 | showalways.update(stagenames) |
|
1332 | 1332 | else: |
|
1333 | 1333 | for n in opts[b'show_stage']: |
|
1334 | 1334 | if n not in stagenames: |
|
1335 | 1335 | raise error.Abort(_(b'invalid stage name: %s') % n) |
|
1336 | 1336 | showalways.update(opts[b'show_stage']) |
|
1337 | 1337 | |
|
1338 | 1338 | tree = filesetlang.parse(expr) |
|
1339 | 1339 | for n, f in stages: |
|
1340 | 1340 | tree = f(tree) |
|
1341 | 1341 | if n in showalways: |
|
1342 | 1342 | if opts[b'show_stage'] or n != b'parsed': |
|
1343 | 1343 | ui.write(b"* %s:\n" % n) |
|
1344 | 1344 | ui.write(filesetlang.prettyformat(tree), b"\n") |
|
1345 | 1345 | |
|
1346 | 1346 | files = set() |
|
1347 | 1347 | if opts[b'all_files']: |
|
1348 | 1348 | for r in repo: |
|
1349 | 1349 | c = repo[r] |
|
1350 | 1350 | files.update(c.files()) |
|
1351 | 1351 | files.update(c.substate) |
|
1352 | 1352 | if opts[b'all_files'] or ctx.rev() is None: |
|
1353 | 1353 | wctx = repo[None] |
|
1354 | 1354 | files.update( |
|
1355 | 1355 | repo.dirstate.walk( |
|
1356 | 1356 | scmutil.matchall(repo), |
|
1357 | 1357 | subrepos=list(wctx.substate), |
|
1358 | 1358 | unknown=True, |
|
1359 | 1359 | ignored=True, |
|
1360 | 1360 | ) |
|
1361 | 1361 | ) |
|
1362 | 1362 | files.update(wctx.substate) |
|
1363 | 1363 | else: |
|
1364 | 1364 | files.update(ctx.files()) |
|
1365 | 1365 | files.update(ctx.substate) |
|
1366 | 1366 | |
|
1367 | 1367 | m = ctx.matchfileset(repo.getcwd(), expr) |
|
1368 | 1368 | if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose): |
|
1369 | 1369 | ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n') |
|
1370 | 1370 | for f in sorted(files): |
|
1371 | 1371 | if not m(f): |
|
1372 | 1372 | continue |
|
1373 | 1373 | ui.write(b"%s\n" % f) |
|
1374 | 1374 | |
|
1375 | 1375 | |
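# The parsed/analyzed/optimized stages registered above can be printed
# individually or all at once, e.g. (hypothetical fileset expressions):
#
#   hg debugfileset -p all 'clean() and size(">1k")'
#   hg debugfileset -s 'binary() or grep("TODO")'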
|
1376 | 1376 | @command(b'debugformat', [] + cmdutil.formatteropts) |
|
1377 | 1377 | def debugformat(ui, repo, **opts): |
|
1378 | 1378 | """display format information about the current repository |
|
1379 | 1379 | |
|
1380 | 1380 | Use --verbose to get extra information about current config value and |
|
1381 | 1381 | Mercurial default.""" |
|
1382 | 1382 | opts = pycompat.byteskwargs(opts) |
|
1383 | 1383 | maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant) |
|
1384 | 1384 | maxvariantlength = max(len(b'format-variant'), maxvariantlength) |
|
1385 | 1385 | |
|
1386 | 1386 | def makeformatname(name): |
|
1387 | 1387 | return b'%s:' + (b' ' * (maxvariantlength - len(name))) |
|
1388 | 1388 | |
|
1389 | 1389 | fm = ui.formatter(b'debugformat', opts) |
|
1390 | 1390 | if fm.isplain(): |
|
1391 | 1391 | |
|
1392 | 1392 | def formatvalue(value): |
|
1393 | 1393 | if util.safehasattr(value, b'startswith'): |
|
1394 | 1394 | return value |
|
1395 | 1395 | if value: |
|
1396 | 1396 | return b'yes' |
|
1397 | 1397 | else: |
|
1398 | 1398 | return b'no' |
|
1399 | 1399 | |
|
1400 | 1400 | else: |
|
1401 | 1401 | formatvalue = pycompat.identity |
|
1402 | 1402 | |
|
1403 | 1403 | fm.plain(b'format-variant') |
|
1404 | 1404 | fm.plain(b' ' * (maxvariantlength - len(b'format-variant'))) |
|
1405 | 1405 | fm.plain(b' repo') |
|
1406 | 1406 | if ui.verbose: |
|
1407 | 1407 | fm.plain(b' config default') |
|
1408 | 1408 | fm.plain(b'\n') |
|
1409 | 1409 | for fv in upgrade.allformatvariant: |
|
1410 | 1410 | fm.startitem() |
|
1411 | 1411 | repovalue = fv.fromrepo(repo) |
|
1412 | 1412 | configvalue = fv.fromconfig(repo) |
|
1413 | 1413 | |
|
1414 | 1414 | if repovalue != configvalue: |
|
1415 | 1415 | namelabel = b'formatvariant.name.mismatchconfig' |
|
1416 | 1416 | repolabel = b'formatvariant.repo.mismatchconfig' |
|
1417 | 1417 | elif repovalue != fv.default: |
|
1418 | 1418 | namelabel = b'formatvariant.name.mismatchdefault' |
|
1419 | 1419 | repolabel = b'formatvariant.repo.mismatchdefault' |
|
1420 | 1420 | else: |
|
1421 | 1421 | namelabel = b'formatvariant.name.uptodate' |
|
1422 | 1422 | repolabel = b'formatvariant.repo.uptodate' |
|
1423 | 1423 | |
|
1424 | 1424 | fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel) |
|
1425 | 1425 | fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel) |
|
1426 | 1426 | if fv.default != configvalue: |
|
1427 | 1427 | configlabel = b'formatvariant.config.special' |
|
1428 | 1428 | else: |
|
1429 | 1429 | configlabel = b'formatvariant.config.default' |
|
1430 | 1430 | fm.condwrite( |
|
1431 | 1431 | ui.verbose, |
|
1432 | 1432 | b'config', |
|
1433 | 1433 | b' %6s', |
|
1434 | 1434 | formatvalue(configvalue), |
|
1435 | 1435 | label=configlabel, |
|
1436 | 1436 | ) |
|
1437 | 1437 | fm.condwrite( |
|
1438 | 1438 | ui.verbose, |
|
1439 | 1439 | b'default', |
|
1440 | 1440 | b' %7s', |
|
1441 | 1441 | formatvalue(fv.default), |
|
1442 | 1442 | label=b'formatvariant.default', |
|
1443 | 1443 | ) |
|
1444 | 1444 | fm.plain(b'\n') |
|
1445 | 1445 | fm.end() |
|
1446 | 1446 | |
|
1447 | 1447 | |
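# With --verbose, the table gains the "config" and "default" columns
# written above, making repo/config/default mismatches easy to spot:
#
#   hg debugformat --verbose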
|
1448 | 1448 | @command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True) |
|
1449 | 1449 | def debugfsinfo(ui, path=b"."): |
|
1450 | 1450 | """show information detected about the current filesystem"""
|
1451 | 1451 | ui.writenoi18n(b'path: %s\n' % path) |
|
1452 | 1452 | ui.writenoi18n( |
|
1453 | 1453 | b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)') |
|
1454 | 1454 | ) |
|
1455 | 1455 | ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no')) |
|
1456 | 1456 | ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)')) |
|
1457 | 1457 | ui.writenoi18n( |
|
1458 | 1458 | b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no') |
|
1459 | 1459 | ) |
|
1460 | 1460 | ui.writenoi18n( |
|
1461 | 1461 | b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no') |
|
1462 | 1462 | ) |
|
1463 | 1463 | casesensitive = b'(unknown)' |
|
1464 | 1464 | try: |
|
1465 | 1465 | with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f: |
|
1466 | 1466 | casesensitive = util.fscasesensitive(f.name) and b'yes' or b'no' |
|
1467 | 1467 | except OSError: |
|
1468 | 1468 | pass |
|
1469 | 1469 | ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive) |
|
1470 | 1470 | |
|
1471 | 1471 | |
|
1472 | 1472 | @command( |
|
1473 | 1473 | b'debuggetbundle', |
|
1474 | 1474 | [ |
|
1475 | 1475 | (b'H', b'head', [], _(b'id of head node'), _(b'ID')), |
|
1476 | 1476 | (b'C', b'common', [], _(b'id of common node'), _(b'ID')), |
|
1477 | 1477 | ( |
|
1478 | 1478 | b't', |
|
1479 | 1479 | b'type', |
|
1480 | 1480 | b'bzip2', |
|
1481 | 1481 | _(b'bundle compression type to use'), |
|
1482 | 1482 | _(b'TYPE'), |
|
1483 | 1483 | ), |
|
1484 | 1484 | ], |
|
1485 | 1485 | _(b'REPO FILE [-H|-C ID]...'), |
|
1486 | 1486 | norepo=True, |
|
1487 | 1487 | ) |
|
1488 | 1488 | def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts): |
|
1489 | 1489 | """retrieves a bundle from a repo |
|
1490 | 1490 | |
|
1491 | 1491 | Every ID must be a full-length hex node id string. Saves the bundle to the |
|
1492 | 1492 | given file. |
|
1493 | 1493 | """ |
|
1494 | 1494 | opts = pycompat.byteskwargs(opts) |
|
1495 | 1495 | repo = hg.peer(ui, opts, repopath) |
|
1496 | 1496 | if not repo.capable(b'getbundle'): |
|
1497 | 1497 | raise error.Abort(b"getbundle() not supported by target repository") |
|
1498 | 1498 | args = {} |
|
1499 | 1499 | if common: |
|
1500 | 1500 | args['common'] = [bin(s) for s in common] |
|
1501 | 1501 | if head: |
|
1502 | 1502 | args['heads'] = [bin(s) for s in head] |
|
1503 | 1503 | # TODO: get desired bundlecaps from command line. |
|
1504 | 1504 | args['bundlecaps'] = None |
|
1505 | 1505 | bundle = repo.getbundle(b'debug', **args) |
|
1506 | 1506 | |
|
1507 | 1507 | bundletype = opts.get(b'type', b'bzip2').lower() |
|
1508 | 1508 | btypes = { |
|
1509 | 1509 | b'none': b'HG10UN', |
|
1510 | 1510 | b'bzip2': b'HG10BZ', |
|
1511 | 1511 | b'gzip': b'HG10GZ', |
|
1512 | 1512 | b'bundle2': b'HG20', |
|
1513 | 1513 | } |
|
1514 | 1514 | bundletype = btypes.get(bundletype) |
|
1515 | 1515 | if bundletype not in bundle2.bundletypes: |
|
1516 | 1516 | raise error.Abort(_(b'unknown bundle type specified with --type')) |
|
1517 | 1517 | bundle2.writebundle(ui, bundle, bundlepath, bundletype) |
|
1518 | 1518 | |
|
1519 | 1519 | |
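# Example (hypothetical URL; every -H/-C ID must be a full 40-hex-digit
# node).  The -t values map to internal bundle headers via `btypes` above,
# e.g. gzip -> HG10GZ:
#
#   hg debuggetbundle http://example.com/repo out.hg -t bundle2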
|
1520 | 1520 | @command(b'debugignore', [], b'[FILE]') |
|
1521 | 1521 | def debugignore(ui, repo, *files, **opts): |
|
1522 | 1522 | """display the combined ignore pattern and information about ignored files |
|
1523 | 1523 | |
|
1524 | 1524 | With no arguments, display the combined ignore pattern.

1525 | 1525 |

1526 | 1526 | Given space separated file names, show whether each given file is ignored

1527 | 1527 | and, if so, show the ignore rule (file and line number) that matched it.
|
1528 | 1528 | """ |
|
1529 | 1529 | ignore = repo.dirstate._ignore |
|
1530 | 1530 | if not files: |
|
1531 | 1531 | # Show all the patterns |
|
1532 | 1532 | ui.write(b"%s\n" % pycompat.byterepr(ignore)) |
|
1533 | 1533 | else: |
|
1534 | 1534 | m = scmutil.match(repo[None], pats=files) |
|
1535 | 1535 | uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True) |
|
1536 | 1536 | for f in m.files(): |
|
1537 | 1537 | nf = util.normpath(f) |
|
1538 | 1538 | ignored = None |
|
1539 | 1539 | ignoredata = None |
|
1540 | 1540 | if nf != b'.': |
|
1541 | 1541 | if ignore(nf): |
|
1542 | 1542 | ignored = nf |
|
1543 | 1543 | ignoredata = repo.dirstate._ignorefileandline(nf) |
|
1544 | 1544 | else: |
|
1545 | 1545 | for p in pathutil.finddirs(nf): |
|
1546 | 1546 | if ignore(p): |
|
1547 | 1547 | ignored = p |
|
1548 | 1548 | ignoredata = repo.dirstate._ignorefileandline(p) |
|
1549 | 1549 | break |
|
1550 | 1550 | if ignored: |
|
1551 | 1551 | if ignored == nf: |
|
1552 | 1552 | ui.write(_(b"%s is ignored\n") % uipathfn(f)) |
|
1553 | 1553 | else: |
|
1554 | 1554 | ui.write( |
|
1555 | 1555 | _( |
|
1556 | 1556 | b"%s is ignored because of " |
|
1557 | 1557 | b"containing directory %s\n" |
|
1558 | 1558 | ) |
|
1559 | 1559 | % (uipathfn(f), ignored) |
|
1560 | 1560 | ) |
|
1561 | 1561 | ignorefile, lineno, line = ignoredata |
|
1562 | 1562 | ui.write( |
|
1563 | 1563 | _(b"(ignore rule in %s, line %d: '%s')\n") |
|
1564 | 1564 | % (ignorefile, lineno, line) |
|
1565 | 1565 | ) |
|
1566 | 1566 | else: |
|
1567 | 1567 | ui.write(_(b"%s is not ignored\n") % uipathfn(f)) |
|
1568 | 1568 | |
|
1569 | 1569 | |
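# Examples (hypothetical file names):
#
#   hg debugignore              # dump the combined ignore matcher
#   hg debugignore foo.pyc bar  # report the matching rule, if any, per file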
|
1570 | 1570 | @command( |
|
1571 | 1571 | b'debugindex', |
|
1572 | 1572 | cmdutil.debugrevlogopts + cmdutil.formatteropts, |
|
1573 | 1573 | _(b'-c|-m|FILE'), |
|
1574 | 1574 | ) |
|
1575 | 1575 | def debugindex(ui, repo, file_=None, **opts): |
|
1576 | 1576 | """dump index data for a storage primitive""" |
|
1577 | 1577 | opts = pycompat.byteskwargs(opts) |
|
1578 | 1578 | store = cmdutil.openstorage(repo, b'debugindex', file_, opts) |
|
1579 | 1579 | |
|
1580 | 1580 | if ui.debugflag: |
|
1581 | 1581 | shortfn = hex |
|
1582 | 1582 | else: |
|
1583 | 1583 | shortfn = short |
|
1584 | 1584 | |
|
1585 | 1585 | idlen = 12 |
|
1586 | 1586 | for i in store: |
|
1587 | 1587 | idlen = len(shortfn(store.node(i))) |
|
1588 | 1588 | break |
|
1589 | 1589 | |
|
1590 | 1590 | fm = ui.formatter(b'debugindex', opts) |
|
1591 | 1591 | fm.plain( |
|
1592 | 1592 | b' rev linkrev %s %s p2\n' |
|
1593 | 1593 | % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen)) |
|
1594 | 1594 | ) |
|
1595 | 1595 | |
|
1596 | 1596 | for rev in store: |
|
1597 | 1597 | node = store.node(rev) |
|
1598 | 1598 | parents = store.parents(node) |
|
1599 | 1599 | |
|
1600 | 1600 | fm.startitem() |
|
1601 | 1601 | fm.write(b'rev', b'%6d ', rev) |
|
1602 | 1602 | fm.write(b'linkrev', b'%7d ', store.linkrev(rev)) |
|
1603 | 1603 | fm.write(b'node', b'%s ', shortfn(node)) |
|
1604 | 1604 | fm.write(b'p1', b'%s ', shortfn(parents[0])) |
|
1605 | 1605 | fm.write(b'p2', b'%s', shortfn(parents[1])) |
|
1606 | 1606 | fm.plain(b'\n') |
|
1607 | 1607 | |
|
1608 | 1608 | fm.end() |
|
1609 | 1609 | |
|
1610 | 1610 | |
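# Sample of the table emitted above (node width follows shortfn; all
# values below are illustrative):
#
#      rev linkrev nodeid       p1           p2
#        0       0 1160648e36ce 000000000000 000000000000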
|
1611 | 1611 | @command( |
|
1612 | 1612 | b'debugindexdot', |
|
1613 | 1613 | cmdutil.debugrevlogopts, |
|
1614 | 1614 | _(b'-c|-m|FILE'), |
|
1615 | 1615 | optionalrepo=True, |
|
1616 | 1616 | ) |
|
1617 | 1617 | def debugindexdot(ui, repo, file_=None, **opts): |
|
1618 | 1618 | """dump an index DAG as a graphviz dot file""" |
|
1619 | 1619 | opts = pycompat.byteskwargs(opts) |
|
1620 | 1620 | r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts) |
|
1621 | 1621 | ui.writenoi18n(b"digraph G {\n") |
|
1622 | 1622 | for i in r: |
|
1623 | 1623 | node = r.node(i) |
|
1624 | 1624 | pp = r.parents(node) |
|
1625 | 1625 | ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i)) |
|
1626 | 1626 | if pp[1] != nullid: |
|
1627 | 1627 | ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i)) |
|
1628 | 1628 | ui.write(b"}\n") |
|
1629 | 1629 | |
|
1630 | 1630 | |
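# The emitted graph is plain graphviz, so it can be rendered directly
# (hypothetical output path):
#
#   hg debugindexdot -c | dot -Tpng > changelog-dag.png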
|
1631 | 1631 | @command(b'debugindexstats', []) |
|
1632 | 1632 | def debugindexstats(ui, repo): |
|
1633 | 1633 | """show stats related to the changelog index""" |
|
1634 | 1634 | repo.changelog.shortest(nullid, 1) |
|
1635 | 1635 | index = repo.changelog.index |
|
1636 | 1636 | if not util.safehasattr(index, b'stats'): |
|
1637 | 1637 | raise error.Abort(_(b'debugindexstats only works with native code')) |
|
1638 | 1638 | for k, v in sorted(index.stats().items()): |
|
1639 | 1639 | ui.write(b'%s: %d\n' % (k, v)) |
|
1640 | 1640 | |
|
1641 | 1641 | |
|
1642 | 1642 | @command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True) |
|
1643 | 1643 | def debuginstall(ui, **opts): |
|
1644 | 1644 | """test Mercurial installation |
|
1645 | 1645 | |
|
1646 | 1646 | Returns 0 on success. |
|
1647 | 1647 | """ |
|
1648 | 1648 | opts = pycompat.byteskwargs(opts) |
|
1649 | 1649 | |
|
1650 | 1650 | problems = 0 |
|
1651 | 1651 | |
|
1652 | 1652 | fm = ui.formatter(b'debuginstall', opts) |
|
1653 | 1653 | fm.startitem() |
|
1654 | 1654 | |
|
1655 | 1655 | # encoding might be unknown or wrong. don't translate these messages. |
|
1656 | 1656 | fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding) |
|
1657 | 1657 | err = None |
|
1658 | 1658 | try: |
|
1659 | 1659 | codecs.lookup(pycompat.sysstr(encoding.encoding)) |
|
1660 | 1660 | except LookupError as inst: |
|
1661 | 1661 | err = stringutil.forcebytestr(inst) |
|
1662 | 1662 | problems += 1 |
|
1663 | 1663 | fm.condwrite( |
|
1664 | 1664 | err, |
|
1665 | 1665 | b'encodingerror', |
|
1666 | 1666 | b" %s\n (check that your locale is properly set)\n", |
|
1667 | 1667 | err, |
|
1668 | 1668 | ) |
|
1669 | 1669 | |
|
1670 | 1670 | # Python |
|
1671 | 1671 | pythonlib = None |
|
1672 | 1672 | if util.safehasattr(os, '__file__'): |
|
1673 | 1673 | pythonlib = os.path.dirname(pycompat.fsencode(os.__file__)) |
|
1674 | 1674 | elif getattr(sys, 'oxidized', False): |
|
1675 | 1675 | pythonlib = pycompat.sysexecutable |
|
1676 | 1676 | |
|
1677 | 1677 | fm.write( |
|
1678 | 1678 | b'pythonexe', |
|
1679 | 1679 | _(b"checking Python executable (%s)\n"), |
|
1680 | 1680 | pycompat.sysexecutable or _(b"unknown"), |
|
1681 | 1681 | ) |
|
1682 | 1682 | fm.write( |
|
1683 | 1683 | b'pythonimplementation', |
|
1684 | 1684 | _(b"checking Python implementation (%s)\n"), |
|
1685 | 1685 | pycompat.sysbytes(platform.python_implementation()), |
|
1686 | 1686 | ) |
|
1687 | 1687 | fm.write( |
|
1688 | 1688 | b'pythonver', |
|
1689 | 1689 | _(b"checking Python version (%s)\n"), |
|
1690 | 1690 | (b"%d.%d.%d" % sys.version_info[:3]), |
|
1691 | 1691 | ) |
|
1692 | 1692 | fm.write( |
|
1693 | 1693 | b'pythonlib', |
|
1694 | 1694 | _(b"checking Python lib (%s)...\n"), |
|
1695 | 1695 | pythonlib or _(b"unknown"), |
|
1696 | 1696 | ) |
|
1697 | 1697 | |
|
1698 | 1698 | try: |
|
1699 | 1699 | from . import rustext |
|
1700 | 1700 | |
|
1701 | 1701 | rustext.__doc__ # trigger lazy import |
|
1702 | 1702 | except ImportError: |
|
1703 | 1703 | rustext = None |
|
1704 | 1704 | |
|
1705 | 1705 | security = set(sslutil.supportedprotocols) |
|
1706 | 1706 | if sslutil.hassni: |
|
1707 | 1707 | security.add(b'sni') |
|
1708 | 1708 | |
|
1709 | 1709 | fm.write( |
|
1710 | 1710 | b'pythonsecurity', |
|
1711 | 1711 | _(b"checking Python security support (%s)\n"), |
|
1712 | 1712 | fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','), |
|
1713 | 1713 | ) |
|
1714 | 1714 | |
|
1715 | 1715 | # These are warnings, not errors. So don't increment problem count. This |
|
1716 | 1716 | # may change in the future. |
|
1717 | 1717 | if b'tls1.2' not in security: |
|
1718 | 1718 | fm.plain( |
|
1719 | 1719 | _( |
|
1720 | 1720 | b' TLS 1.2 not supported by Python install; ' |
|
1721 | 1721 | b'network connections lack modern security\n' |
|
1722 | 1722 | ) |
|
1723 | 1723 | ) |
|
1724 | 1724 | if b'sni' not in security: |
|
1725 | 1725 | fm.plain( |
|
1726 | 1726 | _( |
|
1727 | 1727 | b' SNI not supported by Python install; may have ' |
|
1728 | 1728 | b'connectivity issues with some servers\n' |
|
1729 | 1729 | ) |
|
1730 | 1730 | ) |
|
1731 | 1731 | |
|
1732 | 1732 | fm.plain( |
|
1733 | 1733 | _( |
|
1734 | 1734 | b"checking Rust extensions (%s)\n" |
|
1735 | 1735 | % (b'missing' if rustext is None else b'installed') |
|
1736 | 1736 | ), |
|
1737 | 1737 | ) |
|
1738 | 1738 | |
|
1739 | 1739 | # TODO print CA cert info |
|
1740 | 1740 | |
|
1741 | 1741 | # hg version |
|
1742 | 1742 | hgver = util.version() |
|
1743 | 1743 | fm.write( |
|
1744 | 1744 | b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0] |
|
1745 | 1745 | ) |
|
1746 | 1746 | fm.write( |
|
1747 | 1747 | b'hgverextra', |
|
1748 | 1748 | _(b"checking Mercurial custom build (%s)\n"), |
|
1749 | 1749 | b'+'.join(hgver.split(b'+')[1:]), |
|
1750 | 1750 | ) |
|
1751 | 1751 | |
|
1752 | 1752 | # compiled modules |
|
1753 | 1753 | hgmodules = None |
|
1754 | 1754 | if util.safehasattr(sys.modules[__name__], '__file__'): |
|
1755 | 1755 | hgmodules = os.path.dirname(pycompat.fsencode(__file__)) |
|
1756 | 1756 | elif getattr(sys, 'oxidized', False): |
|
1757 | 1757 | hgmodules = pycompat.sysexecutable |
|
1758 | 1758 | |
|
1759 | 1759 | fm.write( |
|
1760 | 1760 | b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy |
|
1761 | 1761 | ) |
|
1762 | 1762 | fm.write( |
|
1763 | 1763 | b'hgmodules', |
|
1764 | 1764 | _(b"checking installed modules (%s)...\n"), |
|
1765 | 1765 | hgmodules or _(b"unknown"), |
|
1766 | 1766 | ) |
|
1767 | 1767 | |
|
1768 | 1768 | rustandc = policy.policy in (b'rust+c', b'rust+c-allow') |
|
1769 | 1769 | rustext = rustandc # for now, that's the only case |
|
1770 | 1770 | cext = policy.policy in (b'c', b'allow') or rustandc |
|
1771 | 1771 | nopure = cext or rustext |
|
1772 | 1772 | if nopure: |
|
1773 | 1773 | err = None |
|
1774 | 1774 | try: |
|
1775 | 1775 | if cext: |
|
1776 | 1776 | from .cext import ( # pytype: disable=import-error |
|
1777 | 1777 | base85, |
|
1778 | 1778 | bdiff, |
|
1779 | 1779 | mpatch, |
|
1780 | 1780 | osutil, |
|
1781 | 1781 | ) |
|
1782 | 1782 | |
|
1783 | 1783 | # quiet pyflakes |
|
1784 | 1784 | dir(bdiff), dir(mpatch), dir(base85), dir(osutil) |
|
1785 | 1785 | if rustext: |
|
1786 | 1786 | from .rustext import ( # pytype: disable=import-error |
|
1787 | 1787 | ancestor, |
|
1788 | 1788 | dirstate, |
|
1789 | 1789 | ) |
|
1790 | 1790 | |
|
1791 | 1791 | dir(ancestor), dir(dirstate) # quiet pyflakes |
|
1792 | 1792 | except Exception as inst: |
|
1793 | 1793 | err = stringutil.forcebytestr(inst) |
|
1794 | 1794 | problems += 1 |
|
1795 | 1795 | fm.condwrite(err, b'extensionserror', b" %s\n", err) |
|
1796 | 1796 | |
|
1797 | 1797 | compengines = util.compengines._engines.values() |
|
1798 | 1798 | fm.write( |
|
1799 | 1799 | b'compengines', |
|
1800 | 1800 | _(b'checking registered compression engines (%s)\n'), |
|
1801 | 1801 | fm.formatlist( |
|
1802 | 1802 | sorted(e.name() for e in compengines), |
|
1803 | 1803 | name=b'compengine', |
|
1804 | 1804 | fmt=b'%s', |
|
1805 | 1805 | sep=b', ', |
|
1806 | 1806 | ), |
|
1807 | 1807 | ) |
|
1808 | 1808 | fm.write( |
|
1809 | 1809 | b'compenginesavail', |
|
1810 | 1810 | _(b'checking available compression engines (%s)\n'), |
|
1811 | 1811 | fm.formatlist( |
|
1812 | 1812 | sorted(e.name() for e in compengines if e.available()), |
|
1813 | 1813 | name=b'compengine', |
|
1814 | 1814 | fmt=b'%s', |
|
1815 | 1815 | sep=b', ', |
|
1816 | 1816 | ), |
|
1817 | 1817 | ) |
|
1818 | 1818 | wirecompengines = compression.compengines.supportedwireengines( |
|
1819 | 1819 | compression.SERVERROLE |
|
1820 | 1820 | ) |
|
1821 | 1821 | fm.write( |
|
1822 | 1822 | b'compenginesserver', |
|
1823 | 1823 | _( |
|
1824 | 1824 | b'checking available compression engines ' |
|
1825 | 1825 | b'for wire protocol (%s)\n' |
|
1826 | 1826 | ), |
|
1827 | 1827 | fm.formatlist( |
|
1828 | 1828 | [e.name() for e in wirecompengines if e.wireprotosupport()], |
|
1829 | 1829 | name=b'compengine', |
|
1830 | 1830 | fmt=b'%s', |
|
1831 | 1831 | sep=b', ', |
|
1832 | 1832 | ), |
|
1833 | 1833 | ) |
|
1834 | 1834 | re2 = b'missing' |
|
1835 | 1835 | if util._re2: |
|
1836 | 1836 | re2 = b'available' |
|
1837 | 1837 | fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2) |
|
1838 | 1838 | fm.data(re2=bool(util._re2)) |
|
1839 | 1839 | |
|
1840 | 1840 | # templates |
|
1841 | 1841 | p = templater.templatedir() |
|
1842 | 1842 | fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'') |
|
1843 | 1843 | fm.condwrite(not p, b'', _(b" no template directories found\n")) |
|
1844 | 1844 | if p: |
|
1845 | 1845 | (m, fp) = templater.try_open_template(b"map-cmdline.default") |
|
1846 | 1846 | if m: |
|
1847 | 1847 | # template found, check if it is working |
|
1848 | 1848 | err = None |
|
1849 | 1849 | try: |
|
1850 | 1850 | templater.templater.frommapfile(m) |
|
1851 | 1851 | except Exception as inst: |
|
1852 | 1852 | err = stringutil.forcebytestr(inst) |
|
1853 | 1853 | p = None |
|
1854 | 1854 | fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err) |
|
1855 | 1855 | else: |
|
1856 | 1856 | p = None |
|
1857 | 1857 | fm.condwrite( |
|
1858 | 1858 | p, b'defaulttemplate', _(b"checking default template (%s)\n"), m |
|
1859 | 1859 | ) |
|
1860 | 1860 | fm.condwrite( |
|
1861 | 1861 | not m, |
|
1862 | 1862 | b'defaulttemplatenotfound', |
|
1863 | 1863 | _(b" template '%s' not found\n"), |
|
1864 | 1864 | b"default", |
|
1865 | 1865 | ) |
|
1866 | 1866 | if not p: |
|
1867 | 1867 | problems += 1 |
|
1868 | 1868 | fm.condwrite( |
|
1869 | 1869 | not p, b'', _(b" (templates seem to have been installed incorrectly)\n") |
|
1870 | 1870 | ) |
|
1871 | 1871 | |
|
1872 | 1872 | # editor |
|
1873 | 1873 | editor = ui.geteditor() |
|
1874 | 1874 | editor = util.expandpath(editor) |
|
1875 | 1875 | editorbin = procutil.shellsplit(editor)[0] |
|
1876 | 1876 | fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin) |
|
1877 | 1877 | cmdpath = procutil.findexe(editorbin) |
|
1878 | 1878 | fm.condwrite( |
|
1879 | 1879 | not cmdpath and editor == b'vi', |
|
1880 | 1880 | b'vinotfound', |
|
1881 | 1881 | _( |
|
1882 | 1882 | b" No commit editor set and can't find %s in PATH\n" |
|
1883 | 1883 | b" (specify a commit editor in your configuration" |
|
1884 | 1884 | b" file)\n" |
|
1885 | 1885 | ), |
|
1886 | 1886 | not cmdpath and editor == b'vi' and editorbin, |
|
1887 | 1887 | ) |
|
1888 | 1888 | fm.condwrite( |
|
1889 | 1889 | not cmdpath and editor != b'vi', |
|
1890 | 1890 | b'editornotfound', |
|
1891 | 1891 | _( |
|
1892 | 1892 | b" Can't find editor '%s' in PATH\n" |
|
1893 | 1893 | b" (specify a commit editor in your configuration" |
|
1894 | 1894 | b" file)\n" |
|
1895 | 1895 | ), |
|
1896 | 1896 | not cmdpath and editorbin, |
|
1897 | 1897 | ) |
|
1898 | 1898 | if not cmdpath and editor != b'vi': |
|
1899 | 1899 | problems += 1 |
|
1900 | 1900 | |
|
1901 | 1901 | # check username |
|
1902 | 1902 | username = None |
|
1903 | 1903 | err = None |
|
1904 | 1904 | try: |
|
1905 | 1905 | username = ui.username() |
|
1906 | 1906 | except error.Abort as e: |
|
1907 | 1907 | err = e.message |
|
1908 | 1908 | problems += 1 |
|
1909 | 1909 | |
|
1910 | 1910 | fm.condwrite( |
|
1911 | 1911 | username, b'username', _(b"checking username (%s)\n"), username |
|
1912 | 1912 | ) |
|
1913 | 1913 | fm.condwrite( |
|
1914 | 1914 | err, |
|
1915 | 1915 | b'usernameerror', |
|
1916 | 1916 | _( |
|
1917 | 1917 | b"checking username...\n %s\n" |
|
1918 | 1918 | b" (specify a username in your configuration file)\n" |
|
1919 | 1919 | ), |
|
1920 | 1920 | err, |
|
1921 | 1921 | ) |
|
1922 | 1922 | |
|
1923 | 1923 | for name, mod in extensions.extensions(): |
|
1924 | 1924 | handler = getattr(mod, 'debuginstall', None) |
|
1925 | 1925 | if handler is not None: |
|
1926 | 1926 | problems += handler(ui, fm) |
|
1927 | 1927 | |
|
1928 | 1928 | fm.condwrite(not problems, b'', _(b"no problems detected\n")) |
|
1929 | 1929 | if not problems: |
|
1930 | 1930 | fm.data(problems=problems) |
|
1931 | 1931 | fm.condwrite( |
|
1932 | 1932 | problems, |
|
1933 | 1933 | b'problems', |
|
1934 | 1934 | _(b"%d problems detected, please check your install!\n"), |
|
1935 | 1935 | problems, |
|
1936 | 1936 | ) |
|
1937 | 1937 | fm.end() |
|
1938 | 1938 | |
|
1939 | 1939 | return problems |
|
1940 | 1940 | |
|
1941 | 1941 | |
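# The return value above doubles as the command's exit status, so setup
# scripts can gate on it; machine-readable output is available through the
# shared formatter options, e.g.:
#
#   hg debuginstall -T json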
|
1942 | 1942 | @command(b'debugknown', [], _(b'REPO ID...'), norepo=True) |
|
1943 | 1943 | def debugknown(ui, repopath, *ids, **opts): |
|
1944 | 1944 | """test whether node ids are known to a repo |
|
1945 | 1945 | |
|
1946 | 1946 | Every ID must be a full-length hex node id string. Returns a list of 0s |
|
1947 | 1947 | and 1s indicating unknown/known. |
|
1948 | 1948 | """ |
|
1949 | 1949 | opts = pycompat.byteskwargs(opts) |
|
1950 | 1950 | repo = hg.peer(ui, opts, repopath) |
|
1951 | 1951 | if not repo.capable(b'known'): |
|
1952 | 1952 | raise error.Abort(b"known() not supported by target repository") |
|
1953 | 1953 | flags = repo.known([bin(s) for s in ids]) |
|
1954 | 1954 | ui.write(b"%s\n" % (b"".join([f and b"1" or b"0" for f in flags]))) |
|
1955 | 1955 | |
|
1956 | 1956 | |
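# The output is one digit per queried ID, in order: b"10\n" means the first
# node is known to the peer and the second is not.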
|
1957 | 1957 | @command(b'debuglabelcomplete', [], _(b'LABEL...')) |
|
1958 | 1958 | def debuglabelcomplete(ui, repo, *args): |
|
1959 | 1959 | '''backwards compatibility with old bash completion scripts (DEPRECATED)''' |
|
1960 | 1960 | debugnamecomplete(ui, repo, *args) |
|
1961 | 1961 | |
|
1962 | 1962 | |
|
1963 | 1963 | @command( |
|
1964 | 1964 | b'debuglocks', |
|
1965 | 1965 | [ |
|
1966 | 1966 | (b'L', b'force-free-lock', None, _(b'free the store lock (DANGEROUS)')), |
|
1967 | 1967 | ( |
|
1968 | 1968 | b'W', |
|
1969 | 1969 | b'force-free-wlock', |
|
1970 | 1970 | None, |
|
1971 | 1971 | _(b'free the working state lock (DANGEROUS)'), |
|
1972 | 1972 | ), |
|
1973 | 1973 | (b's', b'set-lock', None, _(b'set the store lock until stopped')), |
|
1974 | 1974 | ( |
|
1975 | 1975 | b'S', |
|
1976 | 1976 | b'set-wlock', |
|
1977 | 1977 | None, |
|
1978 | 1978 | _(b'set the working state lock until stopped'), |
|
1979 | 1979 | ), |
|
1980 | 1980 | ], |
|
1981 | 1981 | _(b'[OPTION]...'), |
|
1982 | 1982 | ) |
|
1983 | 1983 | def debuglocks(ui, repo, **opts): |
|
1984 | 1984 | """show or modify state of locks |
|
1985 | 1985 | |
|
1986 | 1986 | By default, this command will show which locks are held. This |
|
1987 | 1987 | includes the user and process holding the lock, the amount of time |
|
1988 | 1988 | the lock has been held, and the machine name where the process is |
|
1989 | 1989 | running if it's not local. |
|
1990 | 1990 | |
|
1991 | 1991 | Locks protect the integrity of Mercurial's data, so they should be
|
1992 | 1992 | treated with care. System crashes or other interruptions may cause |
|
1993 | 1993 | locks to not be properly released, though Mercurial will usually |
|
1994 | 1994 | detect and remove such stale locks automatically. |
|
1995 | 1995 | |
|
1996 | 1996 | However, detecting stale locks may not always be possible (for |
|
1997 | 1997 | instance, on a shared filesystem). Removing locks may also be |
|
1998 | 1998 | blocked by filesystem permissions. |
|
1999 | 1999 | |
|
2000 | 2000 | Setting a lock will prevent other commands from changing the data. |
|
2001 | 2001 | The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs. |
|
2002 | 2002 | The set locks are removed when the command exits. |
|
2003 | 2003 | |
|
2004 | 2004 | Returns 0 if no locks are held. |
|
2005 | 2005 | |
|
2006 | 2006 | """ |
|
2007 | 2007 | |
|
2008 | 2008 | if opts.get('force_free_lock'): |
|
2009 | 2009 | repo.svfs.unlink(b'lock') |
|
2010 | 2010 | if opts.get('force_free_wlock'): |
|
2011 | 2011 | repo.vfs.unlink(b'wlock') |
|
2012 | 2012 | if opts.get('force_free_lock') or opts.get('force_free_wlock'): |
|
2013 | 2013 | return 0 |
|
2014 | 2014 | |
|
2015 | 2015 | locks = [] |
|
2016 | 2016 | try: |
|
2017 | 2017 | if opts.get('set_wlock'): |
|
2018 | 2018 | try: |
|
2019 | 2019 | locks.append(repo.wlock(False)) |
|
2020 | 2020 | except error.LockHeld: |
|
2021 | 2021 | raise error.Abort(_(b'wlock is already held')) |
|
2022 | 2022 | if opts.get('set_lock'): |
|
2023 | 2023 | try: |
|
2024 | 2024 | locks.append(repo.lock(False)) |
|
2025 | 2025 | except error.LockHeld: |
|
2026 | 2026 | raise error.Abort(_(b'lock is already held')) |
|
2027 | 2027 | if locks:
|
2028 | 2028 | ui.promptchoice(_(b"ready to release the lock (y)? $$ &Yes")) |
|
2029 | 2029 | return 0 |
|
2030 | 2030 | finally: |
|
2031 | 2031 | release(*locks) |
|
2032 | 2032 | |
|
2033 | 2033 | now = time.time() |
|
2034 | 2034 | held = 0 |
|
2035 | 2035 | |
|
2036 | 2036 | def report(vfs, name, method): |
|
2037 | 2037 | # this causes stale locks to get reaped for more accurate reporting |
|
2038 | 2038 | try: |
|
2039 | 2039 | l = method(False) |
|
2040 | 2040 | except error.LockHeld: |
|
2041 | 2041 | l = None |
|
2042 | 2042 | |
|
2043 | 2043 | if l: |
|
2044 | 2044 | l.release() |
|
2045 | 2045 | else: |
|
2046 | 2046 | try: |
|
2047 | 2047 | st = vfs.lstat(name) |
|
2048 | 2048 | age = now - st[stat.ST_MTIME] |
|
2049 | 2049 | user = util.username(st.st_uid) |
|
2050 | 2050 | locker = vfs.readlock(name) |
|
2051 | 2051 | if b":" in locker: |
|
2052 | 2052 | host, pid = locker.split(b':') |
|
2053 | 2053 | if host == socket.gethostname(): |
|
2054 | 2054 | locker = b'user %s, process %s' % (user or b'None', pid) |
|
2055 | 2055 | else: |
|
2056 | 2056 | locker = b'user %s, process %s, host %s' % ( |
|
2057 | 2057 | user or b'None', |
|
2058 | 2058 | pid, |
|
2059 | 2059 | host, |
|
2060 | 2060 | ) |
|
2061 | 2061 | ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age)) |
|
2062 | 2062 | return 1 |
|
2063 | 2063 | except OSError as e: |
|
2064 | 2064 | if e.errno != errno.ENOENT: |
|
2065 | 2065 | raise |
|
2066 | 2066 | |
|
2067 | 2067 | ui.writenoi18n(b"%-6s free\n" % (name + b":")) |
|
2068 | 2068 | return 0 |
|
2069 | 2069 | |
|
2070 | 2070 | held += report(repo.svfs, b"lock", repo.lock) |
|
2071 | 2071 | held += report(repo.vfs, b"wlock", repo.wlock) |
|
2072 | 2072 | |
|
2073 | 2073 | return held |
|
2074 | 2074 | |
|
2075 | 2075 | |
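# Typical uses, matching the options declared above (the return value, and
# hence the exit status, is the number of held locks):
#
#   hg debuglocks              # report current lock/wlock holders
#   hg debuglocks --set-wlock  # hold the working-state lock until interrupted
#   hg debuglocks -L           # force-free a stale store lock (DANGEROUS)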
|
2076 | 2076 | @command( |
|
2077 | 2077 | b'debugmanifestfulltextcache', |
|
2078 | 2078 | [ |
|
2079 | 2079 | (b'', b'clear', False, _(b'clear the cache')), |
|
2080 | 2080 | ( |
|
2081 | 2081 | b'a', |
|
2082 | 2082 | b'add', |
|
2083 | 2083 | [], |
|
2084 | 2084 | _(b'add the given manifest nodes to the cache'), |
|
2085 | 2085 | _(b'NODE'), |
|
2086 | 2086 | ), |
|
2087 | 2087 | ], |
|
2088 | 2088 | b'', |
|
2089 | 2089 | ) |
|
2090 | 2090 | def debugmanifestfulltextcache(ui, repo, add=(), **opts): |
|
2091 | 2091 | """show, clear or amend the contents of the manifest fulltext cache""" |
|
2092 | 2092 | |
|
2093 | 2093 | def getcache(): |
|
2094 | 2094 | r = repo.manifestlog.getstorage(b'') |
|
2095 | 2095 | try: |
|
2096 | 2096 | return r._fulltextcache |
|
2097 | 2097 | except AttributeError: |
|
2098 | 2098 | msg = _( |
|
2099 | 2099 | b"Current revlog implementation doesn't appear to have a " |
|
2100 | 2100 | b"manifest fulltext cache\n" |
|
2101 | 2101 | ) |
|
2102 | 2102 | raise error.Abort(msg) |
|
2103 | 2103 | |
|
2104 | 2104 | if opts.get('clear'): |
|
2105 | 2105 | with repo.wlock(): |
|
2106 | 2106 | cache = getcache() |
|
2107 | 2107 | cache.clear(clear_persisted_data=True) |
|
2108 | 2108 | return |
|
2109 | 2109 | |
|
2110 | 2110 | if add: |
|
2111 | 2111 | with repo.wlock(): |
|
2112 | 2112 | m = repo.manifestlog |
|
2113 | 2113 | store = m.getstorage(b'') |
|
2114 | 2114 | for n in add: |
|
2115 | 2115 | try: |
|
2116 | 2116 | manifest = m[store.lookup(n)] |
|
2117 | 2117 | except error.LookupError as e: |
|
2118 | 2118 | raise error.Abort(e, hint=b"Check your manifest node id") |
|
2119 | 2119 | manifest.read() # stores revision in cache too
|
2120 | 2120 | return |
|
2121 | 2121 | |
|
2122 | 2122 | cache = getcache() |
|
2123 | 2123 | if not len(cache): |
|
2124 | 2124 | ui.write(_(b'cache empty\n')) |
|
2125 | 2125 | else: |
|
2126 | 2126 | ui.write( |
|
2127 | 2127 | _( |
|
2128 | 2128 | b'cache contains %d manifest entries, in order of most to ' |
|
2129 | 2129 | b'least recent:\n' |
|
2130 | 2130 | ) |
|
2131 | 2131 | % (len(cache),) |
|
2132 | 2132 | ) |
|
2133 | 2133 | totalsize = 0 |
|
2134 | 2134 | for nodeid in cache: |
|
2135 | 2135 | # Use cache.peek to not update the LRU order
|
2136 | 2136 | data = cache.peek(nodeid) |
|
2137 | 2137 | size = len(data) |
|
2138 | 2138 | totalsize += size + 24 # 20 bytes nodeid, 4 bytes size |
|
2139 | 2139 | ui.write( |
|
2140 | 2140 | _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size)) |
|
2141 | 2141 | ) |
|
2142 | 2142 | ondisk = cache._opener.stat(b'manifestfulltextcache').st_size |
|
2143 | 2143 | ui.write( |
|
2144 | 2144 | _(b'total cache data size %s, on-disk %s\n') |
|
2145 | 2145 | % (util.bytecount(totalsize), util.bytecount(ondisk)) |
|
2146 | 2146 | ) |
|
2147 | 2147 | |
|
2148 | 2148 | |
|
2149 | 2149 | @command(b'debugmergestate', [] + cmdutil.templateopts, b'') |
|
2150 | 2150 | def debugmergestate(ui, repo, *args, **opts): |
|
2151 | 2151 | """print merge state |
|
2152 | 2152 | |
|
2153 | 2153 | Use --verbose to print out information about whether v1 or v2 merge state |
|
2154 | 2154 | was chosen.""" |
|
2155 | 2155 | |
|
2156 | 2156 | if ui.verbose: |
|
2157 | 2157 | ms = mergestatemod.mergestate(repo) |
|
2158 | 2158 | |
|
2159 | 2159 | # sort so that reasonable information is on top |
|
2160 | 2160 | v1records = ms._readrecordsv1() |
|
2161 | 2161 | v2records = ms._readrecordsv2() |
|
2162 | 2162 | |
|
2163 | 2163 | if not v1records and not v2records: |
|
2164 | 2164 | pass |
|
2165 | 2165 | elif not v2records: |
|
2166 | 2166 | ui.writenoi18n(b'no version 2 merge state\n') |
|
2167 | 2167 | elif ms._v1v2match(v1records, v2records): |
|
2168 | 2168 | ui.writenoi18n(b'v1 and v2 states match: using v2\n') |
|
2169 | 2169 | else: |
|
2170 | 2170 | ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n') |
|
2171 | 2171 | |
|
2172 | 2172 | opts = pycompat.byteskwargs(opts) |
|
2173 | 2173 | if not opts[b'template']: |
|
2174 | 2174 | opts[b'template'] = ( |
|
2175 | 2175 | b'{if(commits, "", "no merge state found\n")}' |
|
2176 | 2176 | b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}' |
|
2177 | 2177 | b'{files % "file: {path} (state \\"{state}\\")\n' |
|
2178 | 2178 | b'{if(local_path, "' |
|
2179 | 2179 | b' local path: {local_path} (hash {local_key}, flags \\"{local_flags}\\")\n' |
|
2180 | 2180 | b' ancestor path: {ancestor_path} (node {ancestor_node})\n' |
|
2181 | 2181 | b' other path: {other_path} (node {other_node})\n' |
|
2182 | 2182 | b'")}' |
|
2183 | 2183 | b'{if(rename_side, "' |
|
2184 | 2184 | b' rename side: {rename_side}\n' |
|
2185 | 2185 | b' renamed path: {renamed_path}\n' |
|
2186 | 2186 | b'")}' |
|
2187 | 2187 | b'{extras % " extra: {key} = {value}\n"}' |
|
2188 | 2188 | b'"}' |
|
2189 | 2189 | b'{extras % "extra: {file} ({key} = {value})\n"}' |
|
2190 | 2190 | ) |
|
2191 | 2191 | |
|
2192 | 2192 | ms = mergestatemod.mergestate.read(repo) |
|
2193 | 2193 | |
|
2194 | 2194 | fm = ui.formatter(b'debugmergestate', opts) |
|
2195 | 2195 | fm.startitem() |
|
2196 | 2196 | |
|
2197 | 2197 | fm_commits = fm.nested(b'commits') |
|
2198 | 2198 | if ms.active(): |
|
2199 | 2199 | for name, node, label_index in ( |
|
2200 | 2200 | (b'local', ms.local, 0), |
|
2201 | 2201 | (b'other', ms.other, 1), |
|
2202 | 2202 | ): |
|
2203 | 2203 | fm_commits.startitem() |
|
2204 | 2204 | fm_commits.data(name=name) |
|
2205 | 2205 | fm_commits.data(node=hex(node)) |
|
2206 | 2206 | if ms._labels and len(ms._labels) > label_index: |
|
2207 | 2207 | fm_commits.data(label=ms._labels[label_index]) |
|
2208 | 2208 | fm_commits.end() |
|
2209 | 2209 | |
|
2210 | 2210 | fm_files = fm.nested(b'files') |
|
2211 | 2211 | if ms.active(): |
|
2212 | 2212 | for f in ms: |
|
2213 | 2213 | fm_files.startitem() |
|
2214 | 2214 | fm_files.data(path=f) |
|
2215 | 2215 | state = ms._state[f] |
|
2216 | 2216 | fm_files.data(state=state[0]) |
|
2217 | 2217 | if state[0] in ( |
|
2218 | 2218 | mergestatemod.MERGE_RECORD_UNRESOLVED, |
|
2219 | 2219 | mergestatemod.MERGE_RECORD_RESOLVED, |
|
2220 | 2220 | ): |
|
2221 | 2221 | fm_files.data(local_key=state[1]) |
|
2222 | 2222 | fm_files.data(local_path=state[2]) |
|
2223 | 2223 | fm_files.data(ancestor_path=state[3]) |
|
2224 | 2224 | fm_files.data(ancestor_node=state[4]) |
|
2225 | 2225 | fm_files.data(other_path=state[5]) |
|
2226 | 2226 | fm_files.data(other_node=state[6]) |
|
2227 | 2227 | fm_files.data(local_flags=state[7]) |
|
2228 | 2228 | elif state[0] in ( |
|
2229 | 2229 | mergestatemod.MERGE_RECORD_UNRESOLVED_PATH, |
|
2230 | 2230 | mergestatemod.MERGE_RECORD_RESOLVED_PATH, |
|
2231 | 2231 | ): |
|
2232 | 2232 | fm_files.data(renamed_path=state[1]) |
|
2233 | 2233 | fm_files.data(rename_side=state[2]) |
|
2234 | 2234 | fm_extras = fm_files.nested(b'extras') |
|
2235 | 2235 | for k, v in sorted(ms.extras(f).items()): |
|
2236 | 2236 | fm_extras.startitem() |
|
2237 | 2237 | fm_extras.data(key=k) |
|
2238 | 2238 | fm_extras.data(value=v) |
|
2239 | 2239 | fm_extras.end() |
|
2240 | 2240 | |
|
2241 | 2241 | fm_files.end() |
|
2242 | 2242 | |
|
2243 | 2243 | fm_extras = fm.nested(b'extras') |
|
2244 | 2244 | for f, d in sorted(pycompat.iteritems(ms.allextras())): |
|
2245 | 2245 | if f in ms: |
|
2246 | 2246 | # If the file is in the mergestate, we have already processed its extras
|
2247 | 2247 | continue |
|
2248 | 2248 | for k, v in pycompat.iteritems(d): |
|
2249 | 2249 | fm_extras.startitem() |
|
2250 | 2250 | fm_extras.data(file=f) |
|
2251 | 2251 | fm_extras.data(key=k) |
|
2252 | 2252 | fm_extras.data(value=v) |
|
2253 | 2253 | fm_extras.end() |
|
2254 | 2254 | |
|
2255 | 2255 | fm.end() |
|
2256 | 2256 | |
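# The default template above is only used when no -T/--template is given;
# structured output remains available, e.g.:
#
#   hg debugmergestate -T json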
|
2257 | 2257 | |
|
2258 | 2258 | @command(b'debugnamecomplete', [], _(b'NAME...')) |
|
2259 | 2259 | def debugnamecomplete(ui, repo, *args): |
|
2260 | 2260 | '''complete "names" - tags, open branch names, bookmark names''' |
|
2261 | 2261 | |
|
2262 | 2262 | names = set() |
|
2263 | 2263 | # since we previously only listed open branches, we will handle that |
|
2264 | 2264 | # specially (after this for loop) |
|
2265 | 2265 | for name, ns in pycompat.iteritems(repo.names): |
|
2266 | 2266 | if name != b'branches': |
|
2267 | 2267 | names.update(ns.listnames(repo)) |
|
2268 | 2268 | names.update( |
|
2269 | 2269 | tag |
|
2270 | 2270 | for (tag, heads, tip, closed) in repo.branchmap().iterbranches() |
|
2271 | 2271 | if not closed |
|
2272 | 2272 | ) |
|
2273 | 2273 | completions = set() |
|
2274 | 2274 | if not args: |
|
2275 | 2275 | args = [b''] |
|
2276 | 2276 | for a in args: |
|
2277 | 2277 | completions.update(n for n in names if n.startswith(a)) |
|
2278 | 2278 | ui.write(b'\n'.join(sorted(completions))) |
|
2279 | 2279 | ui.write(b'\n') |
|
2280 | 2280 | |
|
2281 | 2281 | |
|
2282 | 2282 | @command( |
|
2283 | 2283 | b'debugnodemap', |
|
2284 | 2284 | [ |
|
2285 | 2285 | ( |
|
2286 | 2286 | b'', |
|
2287 | 2287 | b'dump-new', |
|
2288 | 2288 | False, |
|
2289 | 2289 | _(b'write a (new) persistent binary nodemap on stdout'), |
|
2290 | 2290 | ), |
|
2291 | 2291 | (b'', b'dump-disk', False, _(b'dump on-disk data on stdout')), |
|
2292 | 2292 | ( |
|
2293 | 2293 | b'', |
|
2294 | 2294 | b'check', |
|
2295 | 2295 | False, |
|
2296 | 2296 | _(b'check that the data on disk are correct'),
|
2297 | 2297 | ), |
|
2298 | 2298 | ( |
|
2299 | 2299 | b'', |
|
2300 | 2300 | b'metadata', |
|
2301 | 2301 | False, |
|
2302 | 2302 | _(b'display the on-disk metadata for the nodemap'),
|
2303 | 2303 | ), |
|
2304 | 2304 | ], |
|
2305 | 2305 | ) |
|
2306 | 2306 | def debugnodemap(ui, repo, **opts): |
|
2307 | 2307 | """write and inspect the on-disk nodemap"""
|
2308 | 2308 | if opts['dump_new']: |
|
2309 | 2309 | unfi = repo.unfiltered() |
|
2310 | 2310 | cl = unfi.changelog |
|
2311 | 2311 | if util.safehasattr(cl.index, "nodemap_data_all"): |
|
2312 | 2312 | data = cl.index.nodemap_data_all() |
|
2313 | 2313 | else: |
|
2314 | 2314 | data = nodemap.persistent_data(cl.index) |
|
2315 | 2315 | ui.write(data) |
|
2316 | 2316 | elif opts['dump_disk']: |
|
2317 | 2317 | unfi = repo.unfiltered() |
|
2318 | 2318 | cl = unfi.changelog |
|
2319 | 2319 | nm_data = nodemap.persisted_data(cl) |
|
2320 | 2320 | if nm_data is not None: |
|
2321 | 2321 | docket, data = nm_data |
|
2322 | 2322 | ui.write(data[:]) |
|
2323 | 2323 | elif opts['check']: |
|
2324 | 2324 | unfi = repo.unfiltered() |
|
2325 | 2325 | cl = unfi.changelog |
|
2326 | 2326 | nm_data = nodemap.persisted_data(cl) |
|
2327 | 2327 | if nm_data is not None: |
|
2328 | 2328 | docket, data = nm_data |
|
2329 | 2329 | return nodemap.check_data(ui, cl.index, data) |
|
2330 | 2330 | elif opts['metadata']: |
|
2331 | 2331 | unfi = repo.unfiltered() |
|
2332 | 2332 | cl = unfi.changelog |
|
2333 | 2333 | nm_data = nodemap.persisted_data(cl) |
|
2334 | 2334 | if nm_data is not None: |
|
2335 | 2335 | docket, data = nm_data |
|
2336 | 2336 | ui.write((b"uid: %s\n") % docket.uid) |
|
2337 | 2337 | ui.write((b"tip-rev: %d\n") % docket.tip_rev) |
|
2338 | 2338 | ui.write((b"tip-node: %s\n") % hex(docket.tip_node)) |
|
2339 | 2339 | ui.write((b"data-length: %d\n") % docket.data_length) |
|
2340 | 2340 | ui.write((b"data-unused: %d\n") % docket.data_unused) |
|
2341 | 2341 | unused_perc = docket.data_unused * 100.0 / docket.data_length |
|
2342 | 2342 | ui.write((b"data-unused: %2.3f%%\n") % unused_perc) |
|
2343 | 2343 | |
|
2344 | 2344 | |
|
2345 | 2345 | @command( |
|
2346 | 2346 | b'debugobsolete', |
|
2347 | 2347 | [ |
|
2348 | 2348 | (b'', b'flags', 0, _(b'markers flag')), |
|
2349 | 2349 | ( |
|
2350 | 2350 | b'', |
|
2351 | 2351 | b'record-parents', |
|
2352 | 2352 | False, |
|
2353 | 2353 | _(b'record parent information for the precursor'), |
|
2354 | 2354 | ), |
|
2355 | 2355 | (b'r', b'rev', [], _(b'display markers relevant to REV')), |
|
2356 | 2356 | ( |
|
2357 | 2357 | b'', |
|
2358 | 2358 | b'exclusive', |
|
2359 | 2359 | False, |
|
2360 | 2360 | _(b'restrict display to markers only relevant to REV'), |
|
2361 | 2361 | ), |
|
2362 | 2362 | (b'', b'index', False, _(b'display index of the marker')), |
|
2363 | 2363 | (b'', b'delete', [], _(b'delete markers specified by indices')), |
|
2364 | 2364 | ] |
|
2365 | 2365 | + cmdutil.commitopts2 |
|
2366 | 2366 | + cmdutil.formatteropts, |
|
2367 | 2367 | _(b'[OBSOLETED [REPLACEMENT ...]]'), |
|
2368 | 2368 | ) |
|
2369 | 2369 | def debugobsolete(ui, repo, precursor=None, *successors, **opts): |
|
2370 | 2370 | """create an arbitrary obsolescence marker
|
2371 | 2371 | |
|
2372 | 2372 | With no arguments, displays the list of obsolescence markers.""" |
|
2373 | 2373 | |
|
2374 | 2374 | opts = pycompat.byteskwargs(opts) |
|
2375 | 2375 | |
|
2376 | 2376 | def parsenodeid(s): |
|
2377 | 2377 | try: |
|
2378 | 2378 | # We do not use revsingle/revrange functions here to accept |
|
2379 | 2379 | # arbitrary node identifiers, possibly not present in the |
|
2380 | 2380 | # local repository. |
|
2381 | 2381 | n = bin(s) |
|
2382 | 2382 | if len(n) != len(nullid): |
|
2383 | 2383 | raise TypeError() |
|
2384 | 2384 | return n |
|
2385 | 2385 | except TypeError: |
|
2386 | 2386 | raise error.InputError( |
|
2387 | 2387 | b'changeset references must be full hexadecimal ' |
|
2388 | 2388 | b'node identifiers' |
|
2389 | 2389 | ) |
|
2390 | 2390 | |
|
2391 | 2391 | if opts.get(b'delete'): |
|
2392 | 2392 | indices = [] |
|
2393 | 2393 | for v in opts.get(b'delete'): |
|
2394 | 2394 | try: |
|
2395 | 2395 | indices.append(int(v)) |
|
2396 | 2396 | except ValueError: |
|
2397 | 2397 | raise error.InputError( |
|
2398 | 2398 | _(b'invalid index value: %r') % v, |
|
2399 | 2399 | hint=_(b'use integers for indices'), |
|
2400 | 2400 | ) |
|
2401 | 2401 | |
|
2402 | 2402 | if repo.currenttransaction(): |
|
2403 | 2403 | raise error.Abort( |
|
2404 | 2404 | _(b'cannot delete obsmarkers in the middle of a transaction.')
|
2405 | 2405 | ) |
|
2406 | 2406 | |
|
2407 | 2407 | with repo.lock(): |
|
2408 | 2408 | n = repair.deleteobsmarkers(repo.obsstore, indices) |
|
2409 | 2409 | ui.write(_(b'deleted %i obsolescence markers\n') % n) |
|
2410 | 2410 | |
|
2411 | 2411 | return |
|
2412 | 2412 | |
|
2413 | 2413 | if precursor is not None: |
|
2414 | 2414 | if opts[b'rev']: |
|
2415 | 2415 | raise error.InputError( |
|
2416 | 2416 | b'cannot select revision when creating marker' |
|
2417 | 2417 | ) |
|
2418 | 2418 | metadata = {} |
|
2419 | 2419 | metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username()) |
|
2420 | 2420 | succs = tuple(parsenodeid(succ) for succ in successors) |
|
2421 | 2421 | l = repo.lock() |
|
2422 | 2422 | try: |
|
2423 | 2423 | tr = repo.transaction(b'debugobsolete') |
|
2424 | 2424 | try: |
|
2425 | 2425 | date = opts.get(b'date') |
|
2426 | 2426 | if date: |
|
2427 | 2427 | date = dateutil.parsedate(date) |
|
2428 | 2428 | else: |
|
2429 | 2429 | date = None |
|
2430 | 2430 | prec = parsenodeid(precursor) |
|
2431 | 2431 | parents = None |
|
2432 | 2432 | if opts[b'record_parents']: |
|
2433 | 2433 | if prec not in repo.unfiltered(): |
|
2434 | 2434 | raise error.Abort( |
|
2435 | 2435 | b'cannot use --record-parents on '
|
2436 | 2436 | b'unknown changesets' |
|
2437 | 2437 | ) |
|
2438 | 2438 | parents = repo.unfiltered()[prec].parents() |
|
2439 | 2439 | parents = tuple(p.node() for p in parents) |
|
2440 | 2440 | repo.obsstore.create( |
|
2441 | 2441 | tr, |
|
2442 | 2442 | prec, |
|
2443 | 2443 | succs, |
|
2444 | 2444 | opts[b'flags'], |
|
2445 | 2445 | parents=parents, |
|
2446 | 2446 | date=date, |
|
2447 | 2447 | metadata=metadata, |
|
2448 | 2448 | ui=ui, |
|
2449 | 2449 | ) |
|
2450 | 2450 | tr.close() |
|
2451 | 2451 | except ValueError as exc: |
|
2452 | 2452 | raise error.Abort( |
|
2453 | 2453 | _(b'bad obsmarker input: %s') % pycompat.bytestr(exc) |
|
2454 | 2454 | ) |
|
2455 | 2455 | finally: |
|
2456 | 2456 | tr.release() |
|
2457 | 2457 | finally: |
|
2458 | 2458 | l.release() |
|
2459 | 2459 | else: |
|
2460 | 2460 | if opts[b'rev']: |
|
2461 | 2461 | revs = scmutil.revrange(repo, opts[b'rev']) |
|
2462 | 2462 | nodes = [repo[r].node() for r in revs] |
|
2463 | 2463 | markers = list( |
|
2464 | 2464 | obsutil.getmarkers( |
|
2465 | 2465 | repo, nodes=nodes, exclusive=opts[b'exclusive'] |
|
2466 | 2466 | ) |
|
2467 | 2467 | ) |
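
# sort on the raw marker tuple so the output order is deterministic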
|
2468 | 2468 | markers.sort(key=lambda x: x._data) |
|
2469 | 2469 | else: |
|
2470 | 2470 | markers = obsutil.getmarkers(repo) |
|
2471 | 2471 | |
|
2472 | 2472 | markerstoiter = markers |
|
2473 | 2473 | isrelevant = lambda m: True |
|
2474 | 2474 | if opts.get(b'rev') and opts.get(b'index'): |
|
2475 | 2475 | markerstoiter = obsutil.getmarkers(repo) |
|
2476 | 2476 | markerset = set(markers) |
|
2477 | 2477 | isrelevant = lambda m: m in markerset |
|
2478 | 2478 | |
|
2479 | 2479 | fm = ui.formatter(b'debugobsolete', opts) |
|
2480 | 2480 | for i, m in enumerate(markerstoiter): |
|
2481 | 2481 | if not isrelevant(m): |
|
2482 | 2482 | # marker can be irrelevant when we're iterating over a set

2483 | 2483 | # of markers (markerstoiter) which is bigger than the set

2484 | 2484 | # of markers we want to display (markers).

2485 | 2485 | # This can happen if both --index and --rev options are

2486 | 2486 | # provided, and thus we need to iterate over all of the markers

2487 | 2487 | # to get the correct indices, but only display the ones that

2488 | 2488 | # are relevant to the --rev value.
|
2489 | 2489 | continue |
|
2490 | 2490 | fm.startitem() |
|
2491 | 2491 | ind = i if opts.get(b'index') else None |
|
2492 | 2492 | cmdutil.showmarker(fm, m, index=ind) |
|
2493 | 2493 | fm.end() |
|
2494 | 2494 | |
|
2495 | 2495 | |
|
2496 | 2496 | @command( |
|
2497 | 2497 | b'debugp1copies', |
|
2498 | 2498 | [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))], |
|
2499 | 2499 | _(b'[-r REV]'), |
|
2500 | 2500 | ) |
|
2501 | 2501 | def debugp1copies(ui, repo, **opts): |
|
2502 | 2502 | """dump copy information compared to p1""" |
|
2503 | 2503 | |
|
2504 | 2504 | opts = pycompat.byteskwargs(opts) |
|
2505 | 2505 | ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None) |
|
2506 | 2506 | for dst, src in ctx.p1copies().items(): |
|
2507 | 2507 | ui.write(b'%s -> %s\n' % (src, dst)) |
|
2508 | 2508 | |
|
2509 | 2509 | |
|
2510 | 2510 | @command( |
|
2511 | 2511 | b'debugp2copies', |
|
2512 | 2512 | [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))], |
|
2513 | 2513 | _(b'[-r REV]'), |
|
2514 | 2514 | ) |
|
2515 | 2515 | def debugp2copies(ui, repo, **opts):
|
2516 | 2516 | """dump copy information compared to p2""" |
|
2517 | 2517 | |
|
2518 | 2518 | opts = pycompat.byteskwargs(opts) |
|
2519 | 2519 | ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None) |
|
2520 | 2520 | for dst, src in ctx.p2copies().items(): |
|
2521 | 2521 | ui.write(b'%s -> %s\n' % (src, dst)) |
|
2522 | 2522 | |
|
2523 | 2523 | |
|
2524 | 2524 | @command( |
|
2525 | 2525 | b'debugpathcomplete', |
|
2526 | 2526 | [ |
|
2527 | 2527 | (b'f', b'full', None, _(b'complete an entire path')), |
|
2528 | 2528 | (b'n', b'normal', None, _(b'show only normal files')), |
|
2529 | 2529 | (b'a', b'added', None, _(b'show only added files')), |
|
2530 | 2530 | (b'r', b'removed', None, _(b'show only removed files')), |
|
2531 | 2531 | ], |
|
2532 | 2532 | _(b'FILESPEC...'), |
|
2533 | 2533 | ) |
|
2534 | 2534 | def debugpathcomplete(ui, repo, *specs, **opts): |
|
2535 | 2535 | """complete part or all of a tracked path |
|
2536 | 2536 | |
|
2537 | 2537 | This command supports shells that offer path name completion. It |
|
2538 | 2538 | currently completes only files already known to the dirstate. |
|
2539 | 2539 | |
|
2540 | 2540 | Completion extends only to the next path segment unless |
|
2541 | 2541 | --full is specified, in which case entire paths are used.""" |
|
2542 | 2542 | |
|
2543 | 2543 | def complete(path, acceptable): |
|
2544 | 2544 | dirstate = repo.dirstate |
|
2545 | 2545 | spec = os.path.normpath(os.path.join(encoding.getcwd(), path)) |
|
2546 | 2546 | rootdir = repo.root + pycompat.ossep |
|
2547 | 2547 | if spec != repo.root and not spec.startswith(rootdir): |
|
2548 | 2548 | return [], [] |
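
# an existing directory should complete to its contents, so include

# the trailing separator in the prefix being matched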
|
2549 | 2549 | if os.path.isdir(spec): |
|
2550 | 2550 | spec += b'/' |
|
2551 | 2551 | spec = spec[len(rootdir) :] |
|
2552 | 2552 | fixpaths = pycompat.ossep != b'/' |
|
2553 | 2553 | if fixpaths: |
|
2554 | 2554 | spec = spec.replace(pycompat.ossep, b'/') |
|
2555 | 2555 | speclen = len(spec) |
|
2556 | 2556 | fullpaths = opts['full'] |
|
2557 | 2557 | files, dirs = set(), set() |
|
2558 | 2558 | adddir, addfile = dirs.add, files.add |
|
2559 | 2559 | for f, st in pycompat.iteritems(dirstate): |
|
2560 | 2560 | if f.startswith(spec) and st[0] in acceptable: |
|
2561 | 2561 | if fixpaths: |
|
2562 | 2562 | f = f.replace(b'/', pycompat.ossep) |
|
2563 | 2563 | if fullpaths: |
|
2564 | 2564 | addfile(f) |
|
2565 | 2565 | continue |
|
2566 | 2566 | s = f.find(pycompat.ossep, speclen) |
|
2567 | 2567 | if s >= 0: |
|
2568 | 2568 | adddir(f[:s]) |
|
2569 | 2569 | else: |
|
2570 | 2570 | addfile(f) |
|
2571 | 2571 | return files, dirs |
|
2572 | 2572 | |
|
2573 | 2573 | acceptable = b'' |
|
2574 | 2574 | if opts['normal']: |
|
2575 | 2575 | acceptable += b'nm' |
|
2576 | 2576 | if opts['added']: |
|
2577 | 2577 | acceptable += b'a' |
|
2578 | 2578 | if opts['removed']: |
|
2579 | 2579 | acceptable += b'r' |
|
2580 | 2580 | cwd = repo.getcwd() |
|
2581 | 2581 | if not specs: |
|
2582 | 2582 | specs = [b'.'] |
|
2583 | 2583 | |
|
2584 | 2584 | files, dirs = set(), set() |
|
2585 | 2585 | for spec in specs: |
|
2586 | 2586 | f, d = complete(spec, acceptable or b'nmar') |
|
2587 | 2587 | files.update(f) |
|
2588 | 2588 | dirs.update(d) |
|
2589 | 2589 | files.update(dirs) |
|
2590 | 2590 | ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files))) |
|
2591 | 2591 | ui.write(b'\n') |
|
2592 | 2592 | |
|
2593 | 2593 | |
|
2594 | 2594 | @command( |
|
2595 | 2595 | b'debugpathcopies', |
|
2596 | 2596 | cmdutil.walkopts, |
|
2597 | 2597 | b'hg debugpathcopies REV1 REV2 [FILE]', |
|
2598 | 2598 | inferrepo=True, |
|
2599 | 2599 | ) |
|
2600 | 2600 | def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts): |
|
2601 | 2601 | """show copies between two revisions""" |
|
2602 | 2602 | ctx1 = scmutil.revsingle(repo, rev1) |
|
2603 | 2603 | ctx2 = scmutil.revsingle(repo, rev2) |
|
2604 | 2604 | m = scmutil.match(ctx1, pats, opts) |
|
2605 | 2605 | for dst, src in sorted(copies.pathcopies(ctx1, ctx2, m).items()): |
|
2606 | 2606 | ui.write(b'%s -> %s\n' % (src, dst)) |
|
2607 | 2607 | |
|
2608 | 2608 | |
|
2609 | 2609 | @command(b'debugpeer', [], _(b'PATH'), norepo=True) |
|
2610 | 2610 | def debugpeer(ui, path): |
|
2611 | 2611 | """establish a connection to a peer repository""" |
|
2612 | 2612 | # Always enable peer request logging. Requires --debug to display |
|
2613 | 2613 | # though. |
|
2614 | 2614 | overrides = { |
|
2615 | 2615 | (b'devel', b'debug.peer-request'): True, |
|
2616 | 2616 | } |
|
2617 | 2617 | |
|
2618 | 2618 | with ui.configoverride(overrides): |
|
2619 | 2619 | peer = hg.peer(ui, {}, path) |
|
2620 | 2620 | |
|
2621 | 2621 | try: |
|
2622 | 2622 | local = peer.local() is not None |
|
2623 | 2623 | canpush = peer.canpush() |
|
2624 | 2624 | |
|
2625 | 2625 | ui.write(_(b'url: %s\n') % peer.url()) |
|
2626 | 2626 | ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no'))) |
|
2627 | 2627 | ui.write( |
|
2628 | 2628 | _(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no')) |
|
2629 | 2629 | ) |
|
2630 | 2630 | finally: |
|
2631 | 2631 | peer.close() |
|
2632 | 2632 | |
|
2633 | 2633 | |
|
2634 | 2634 | @command( |
|
2635 | 2635 | b'debugpickmergetool', |
|
2636 | 2636 | [ |
|
2637 | 2637 | (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')), |
|
2638 | 2638 | (b'', b'changedelete', None, _(b'emulate merging change and delete')), |
|
2639 | 2639 | ] |
|
2640 | 2640 | + cmdutil.walkopts |
|
2641 | 2641 | + cmdutil.mergetoolopts, |
|
2642 | 2642 | _(b'[PATTERN]...'), |
|
2643 | 2643 | inferrepo=True, |
|
2644 | 2644 | ) |
|
2645 | 2645 | def debugpickmergetool(ui, repo, *pats, **opts): |
|
2646 | 2646 | """examine which merge tool is chosen for the specified file
|
2647 | 2647 | |
|
2648 | 2648 | As described in :hg:`help merge-tools`, Mercurial examines the

2649 | 2649 | configurations below in this order to decide which merge tool is

2650 | 2650 | chosen for the specified file.
|
2651 | 2651 | |
|
2652 | 2652 | 1. ``--tool`` option |
|
2653 | 2653 | 2. ``HGMERGE`` environment variable |
|
2654 | 2654 | 3. configurations in ``merge-patterns`` section |
|
2655 | 2655 | 4. configuration of ``ui.merge`` |
|
2656 | 2656 | 5. configurations in ``merge-tools`` section |
|
2657 | 2657 | 6. ``hgmerge`` tool (for historical reason only) |
|
2658 | 2658 | 7. default tool for fallback (``:merge`` or ``:prompt``) |
|
2659 | 2659 | |
|
2660 | 2660 | This command writes out the examination result in the style below::
|
2661 | 2661 | |
|
2662 | 2662 | FILE = MERGETOOL |
|
2663 | 2663 | |
|
2664 | 2664 | By default, all files known in the first parent context of the |
|
2665 | 2665 | working directory are examined. Use file patterns and/or -I/-X |
|
2666 | 2666 | options to limit target files. -r/--rev is also useful to examine |
|
2667 | 2667 | files in another context without actually updating to it.
|
2668 | 2668 | |
|
2669 | 2669 | With --debug, this command also shows warning messages while

2670 | 2670 | matching against ``merge-patterns`` and so on. It is recommended to

2671 | 2671 | use this option with explicit file patterns and/or -I/-X options,

2672 | 2672 | because this option increases the amount of output per file according
|
2673 | 2673 | to configurations in hgrc. |
|
2674 | 2674 | |
|
2675 | 2675 | With -v/--verbose, this command first shows the configurations

2676 | 2676 | below (only if they are specified).
|
2677 | 2677 | |
|
2678 | 2678 | - ``--tool`` option |
|
2679 | 2679 | - ``HGMERGE`` environment variable |
|
2680 | 2680 | - configuration of ``ui.merge`` |
|
2681 | 2681 | |
|
2682 | 2682 | If a merge tool is chosen before matching against

2683 | 2683 | ``merge-patterns``, this command can't show any helpful

2684 | 2684 | information, even with --debug. In such a case, the information

2685 | 2685 | above is useful for understanding why that merge tool was chosen.
|
2686 | 2686 | """ |
|
2687 | 2687 | opts = pycompat.byteskwargs(opts) |
|
2688 | 2688 | overrides = {} |
|
2689 | 2689 | if opts[b'tool']: |
|
2690 | 2690 | overrides[(b'ui', b'forcemerge')] = opts[b'tool'] |
|
2691 | 2691 | ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool']))) |
|
2692 | 2692 | |
|
2693 | 2693 | with ui.configoverride(overrides, b'debugmergepatterns'): |
|
2694 | 2694 | hgmerge = encoding.environ.get(b"HGMERGE") |
|
2695 | 2695 | if hgmerge is not None: |
|
2696 | 2696 | ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge))) |
|
2697 | 2697 | uimerge = ui.config(b"ui", b"merge") |
|
2698 | 2698 | if uimerge: |
|
2699 | 2699 | ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge))) |
|
2700 | 2700 | |
|
2701 | 2701 | ctx = scmutil.revsingle(repo, opts.get(b'rev')) |
|
2702 | 2702 | m = scmutil.match(ctx, pats, opts) |
|
2703 | 2703 | changedelete = opts[b'changedelete'] |
|
2704 | 2704 | for path in ctx.walk(m): |
|
2705 | 2705 | fctx = ctx[path] |
|
2706 | 2706 | try: |
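
# buffer the tool-picking chatter so it is discarded unless --debug

# asked for it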
|
2707 | 2707 | if not ui.debugflag: |
|
2708 | 2708 | ui.pushbuffer(error=True) |
|
2709 | 2709 | tool, toolpath = filemerge._picktool( |
|
2710 | 2710 | repo, |
|
2711 | 2711 | ui, |
|
2712 | 2712 | path, |
|
2713 | 2713 | fctx.isbinary(), |
|
2714 | 2714 | b'l' in fctx.flags(), |
|
2715 | 2715 | changedelete, |
|
2716 | 2716 | ) |
|
2717 | 2717 | finally: |
|
2718 | 2718 | if not ui.debugflag: |
|
2719 | 2719 | ui.popbuffer() |
|
2720 | 2720 | ui.write(b'%s = %s\n' % (path, tool)) |
|
2721 | 2721 | |
|
2722 | 2722 | |
|
2723 | 2723 | @command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True) |
|
2724 | 2724 | def debugpushkey(ui, repopath, namespace, *keyinfo, **opts): |
|
2725 | 2725 | """access the pushkey key/value protocol |
|
2726 | 2726 | |
|
2727 | 2727 | With two args, list the keys in the given namespace. |
|
2728 | 2728 | |
|
2729 | 2729 | With five args, set a key to new if it currently is set to old. |
|
2730 | 2730 | Reports success or failure. |
|
2731 | 2731 | """ |
|
2732 | 2732 | |
|
2733 | 2733 | target = hg.peer(ui, {}, repopath) |
|
2734 | 2734 | try: |
|
2735 | 2735 | if keyinfo: |
|
2736 | 2736 | key, old, new = keyinfo |
|
2737 | 2737 | with target.commandexecutor() as e: |
|
2738 | 2738 | r = e.callcommand( |
|
2739 | 2739 | b'pushkey', |
|
2740 | 2740 | { |
|
2741 | 2741 | b'namespace': namespace, |
|
2742 | 2742 | b'key': key, |
|
2743 | 2743 | b'old': old, |
|
2744 | 2744 | b'new': new, |
|
2745 | 2745 | }, |
|
2746 | 2746 | ).result() |
|
2747 | 2747 | |
|
2748 | 2748 | ui.status(pycompat.bytestr(r) + b'\n') |
|
2749 | 2749 | return not r |
|
2750 | 2750 | else: |
|
2751 | 2751 | for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))): |
|
2752 | 2752 | ui.write( |
|
2753 | 2753 | b"%s\t%s\n" |
|
2754 | 2754 | % (stringutil.escapestr(k), stringutil.escapestr(v)) |
|
2755 | 2755 | ) |
|
2756 | 2756 | finally: |
|
2757 | 2757 | target.close() |
|
2758 | 2758 | |
|
2759 | 2759 | |
|
2760 | 2760 | @command(b'debugpvec', [], _(b'A B')) |
|
2761 | 2761 | def debugpvec(ui, repo, a, b=None): |
|
2762 | 2762 | ca = scmutil.revsingle(repo, a) |
|
2763 | 2763 | cb = scmutil.revsingle(repo, b) |
|
2764 | 2764 | pa = pvec.ctxpvec(ca) |
|
2765 | 2765 | pb = pvec.ctxpvec(cb) |
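
# relation between the two vectors, mirroring the comparison

# operators below: '=' equal, '>'/'<' ordered, '|' incomparable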
|
2766 | 2766 | if pa == pb: |
|
2767 | 2767 | rel = b"=" |
|
2768 | 2768 | elif pa > pb: |
|
2769 | 2769 | rel = b">" |
|
2770 | 2770 | elif pa < pb: |
|
2771 | 2771 | rel = b"<" |
|
2772 | 2772 | elif pa | pb: |
|
2773 | 2773 | rel = b"|" |
|
2774 | 2774 | ui.write(_(b"a: %s\n") % pa) |
|
2775 | 2775 | ui.write(_(b"b: %s\n") % pb) |
|
2776 | 2776 | ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth)) |
|
2777 | 2777 | ui.write( |
|
2778 | 2778 | _(b"delta: %d hdist: %d distance: %d relation: %s\n") |
|
2779 | 2779 | % ( |
|
2780 | 2780 | abs(pa._depth - pb._depth), |
|
2781 | 2781 | pvec._hamming(pa._vec, pb._vec), |
|
2782 | 2782 | pa.distance(pb), |
|
2783 | 2783 | rel, |
|
2784 | 2784 | ) |
|
2785 | 2785 | ) |
|
2786 | 2786 | |
|
2787 | 2787 | |
|
2788 | 2788 | @command( |
|
2789 | 2789 | b'debugrebuilddirstate|debugrebuildstate', |
|
2790 | 2790 | [ |
|
2791 | 2791 | (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')), |
|
2792 | 2792 | ( |
|
2793 | 2793 | b'', |
|
2794 | 2794 | b'minimal', |
|
2795 | 2795 | None, |
|
2796 | 2796 | _( |
|
2797 | 2797 | b'only rebuild files that are inconsistent with ' |
|
2798 | 2798 | b'the working copy parent' |
|
2799 | 2799 | ), |
|
2800 | 2800 | ), |
|
2801 | 2801 | ], |
|
2802 | 2802 | _(b'[-r REV]'), |
|
2803 | 2803 | ) |
|
2804 | 2804 | def debugrebuilddirstate(ui, repo, rev, **opts): |
|
2805 | 2805 | """rebuild the dirstate as it would look like for the given revision |
|
2806 | 2806 | |
|
2807 | 2807 | If no revision is specified, the current first parent will be used.
|
2808 | 2808 | |
|
2809 | 2809 | The dirstate will be set to the files of the given revision. |
|
2810 | 2810 | The actual working directory content or existing dirstate |
|
2811 | 2811 | information such as adds or removes is not considered. |
|
2812 | 2812 | |
|
2813 | 2813 | ``minimal`` will only rebuild the dirstate status for files that claim to be |
|
2814 | 2814 | tracked but are not in the parent manifest, or that exist in the parent |
|
2815 | 2815 | manifest but are not in the dirstate. It will not change adds, removes, or |
|
2816 | 2816 | modified files that are in the working copy parent. |
|
2817 | 2817 | |
|
2818 | 2818 | One use of this command is to make the next :hg:`status` invocation |
|
2819 | 2819 | check the actual file content. |
|
2820 | 2820 | """ |
|
2821 | 2821 | ctx = scmutil.revsingle(repo, rev) |
|
2822 | 2822 | with repo.wlock(): |
|
2823 | 2823 | dirstate = repo.dirstate |
|
2824 | 2824 | changedfiles = None |
|
2825 | 2825 | # See command doc for what minimal does. |
|
2826 | 2826 | if opts.get('minimal'): |
|
2827 | 2827 | manifestfiles = set(ctx.manifest().keys()) |
|
2828 | 2828 | dirstatefiles = set(dirstate) |
|
2829 | 2829 | manifestonly = manifestfiles - dirstatefiles |
|
2830 | 2830 | dsonly = dirstatefiles - manifestfiles |
|
2831 | 2831 | dsnotadded = {f for f in dsonly if dirstate[f] != b'a'} |
|
2832 | 2832 | changedfiles = manifestonly | dsnotadded |
|
2833 | 2833 | |
|
2834 | 2834 | dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles) |
|
2835 | 2835 | |
|
2836 | 2836 | |
|
2837 | 2837 | @command(b'debugrebuildfncache', [], b'') |
|
2838 | 2838 | def debugrebuildfncache(ui, repo): |
|
2839 | 2839 | """rebuild the fncache file""" |
|
2840 | 2840 | repair.rebuildfncache(ui, repo) |
|
2841 | 2841 | |
|
2842 | 2842 | |
|
2843 | 2843 | @command( |
|
2844 | 2844 | b'debugrename', |
|
2845 | 2845 | [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))], |
|
2846 | 2846 | _(b'[-r REV] [FILE]...'), |
|
2847 | 2847 | ) |
|
2848 | 2848 | def debugrename(ui, repo, *pats, **opts): |
|
2849 | 2849 | """dump rename information""" |
|
2850 | 2850 | |
|
2851 | 2851 | opts = pycompat.byteskwargs(opts) |
|
2852 | 2852 | ctx = scmutil.revsingle(repo, opts.get(b'rev')) |
|
2853 | 2853 | m = scmutil.match(ctx, pats, opts) |
|
2854 | 2854 | for abs in ctx.walk(m): |
|
2855 | 2855 | fctx = ctx[abs] |
|
2856 | 2856 | o = fctx.filelog().renamed(fctx.filenode()) |
|
2857 | 2857 | rel = repo.pathto(abs) |
|
2858 | 2858 | if o: |
|
2859 | 2859 | ui.write(_(b"%s renamed from %s:%s\n") % (rel, o[0], hex(o[1]))) |
|
2860 | 2860 | else: |
|
2861 | 2861 | ui.write(_(b"%s not renamed\n") % rel) |
|
2862 | 2862 | |
|
2863 | 2863 | |
|
2864 | 2864 | @command(b'debugrequires|debugrequirements', [], b'') |
|
2865 | 2865 | def debugrequirements(ui, repo): |
|
2866 | 2866 | """print the current repo requirements"""
|
2867 | 2867 | for r in sorted(repo.requirements): |
|
2868 | 2868 | ui.write(b"%s\n" % r) |
|
2869 | 2869 | |
|
2870 | 2870 | |
|
2871 | 2871 | @command( |
|
2872 | 2872 | b'debugrevlog', |
|
2873 | 2873 | cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))], |
|
2874 | 2874 | _(b'-c|-m|FILE'), |
|
2875 | 2875 | optionalrepo=True, |
|
2876 | 2876 | ) |
|
2877 | 2877 | def debugrevlog(ui, repo, file_=None, **opts): |
|
2878 | 2878 | """show data and statistics about a revlog""" |
|
2879 | 2879 | opts = pycompat.byteskwargs(opts) |
|
2880 | 2880 | r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts) |
|
2881 | 2881 | |
|
2882 | 2882 | if opts.get(b"dump"): |
|
2883 | 2883 | numrevs = len(r) |
|
2884 | 2884 | ui.write( |
|
2885 | 2885 | ( |
|
2886 | 2886 | b"# rev p1rev p2rev start end deltastart base p1 p2" |
|
2887 | 2887 | b" rawsize totalsize compression heads chainlen\n" |
|
2888 | 2888 | ) |
|
2889 | 2889 | ) |
|
2890 | 2890 | ts = 0 |
|
2891 | 2891 | heads = set() |
|
2892 | 2892 | |
|
2893 | 2893 | for rev in pycompat.xrange(numrevs): |
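
# deltaparent() returns -1 for revisions stored as full snapshots;

# report such a revision as its own delta base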
|
2894 | 2894 | dbase = r.deltaparent(rev) |
|
2895 | 2895 | if dbase == -1: |
|
2896 | 2896 | dbase = rev |
|
2897 | 2897 | cbase = r.chainbase(rev) |
|
2898 | 2898 | clen = r.chainlen(rev) |
|
2899 | 2899 | p1, p2 = r.parentrevs(rev) |
|
2900 | 2900 | rs = r.rawsize(rev) |
|
2901 | 2901 | ts = ts + rs |
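
# track the current head set incrementally: the new revision becomes

# a head and its parents stop being heads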
|
2902 | 2902 | heads -= set(r.parentrevs(rev)) |
|
2903 | 2903 | heads.add(rev) |
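
# cumulative ratio of raw size over bytes stored up to this revision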
|
2904 | 2904 | try: |
|
2905 | 2905 | compression = ts / r.end(rev) |
|
2906 | 2906 | except ZeroDivisionError: |
|
2907 | 2907 | compression = 0 |
|
2908 | 2908 | ui.write( |
|
2909 | 2909 | b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d " |
|
2910 | 2910 | b"%11d %5d %8d\n" |
|
2911 | 2911 | % ( |
|
2912 | 2912 | rev, |
|
2913 | 2913 | p1, |
|
2914 | 2914 | p2, |
|
2915 | 2915 | r.start(rev), |
|
2916 | 2916 | r.end(rev), |
|
2917 | 2917 | r.start(dbase), |
|
2918 | 2918 | r.start(cbase), |
|
2919 | 2919 | r.start(p1), |
|
2920 | 2920 | r.start(p2), |
|
2921 | 2921 | rs, |
|
2922 | 2922 | ts, |
|
2923 | 2923 | compression, |
|
2924 | 2924 | len(heads), |
|
2925 | 2925 | clen, |
|
2926 | 2926 | ) |
|
2927 | 2927 | ) |
|
2928 | 2928 | return 0 |
|
2929 | 2929 | |
|
2930 | 2930 | v = r.version |
|
2931 | 2931 | format = v & 0xFFFF |
|
2932 | 2932 | flags = [] |
|
2933 | 2933 | gdelta = False |
|
2934 | 2934 | if v & revlog.FLAG_INLINE_DATA: |
|
2935 | 2935 | flags.append(b'inline') |
|
2936 | 2936 | if v & revlog.FLAG_GENERALDELTA: |
|
2937 | 2937 | gdelta = True |
|
2938 | 2938 | flags.append(b'generaldelta') |
|
2939 | 2939 | if not flags: |
|
2940 | 2940 | flags = [b'(none)'] |
|
2941 | 2941 | |
|
2942 | 2942 | ### tracks merge vs single parent |
|
2943 | 2943 | nummerges = 0 |
|
2944 | 2944 | |
|
2945 | 2945 | ### tracks the ways the deltas are built
|
2946 | 2946 | # nodelta |
|
2947 | 2947 | numempty = 0 |
|
2948 | 2948 | numemptytext = 0 |
|
2949 | 2949 | numemptydelta = 0 |
|
2950 | 2950 | # full file content |
|
2951 | 2951 | numfull = 0 |
|
2952 | 2952 | # intermediate snapshot against a prior snapshot |
|
2953 | 2953 | numsemi = 0 |
|
2954 | 2954 | # snapshot count per depth |
|
2955 | 2955 | numsnapdepth = collections.defaultdict(lambda: 0) |
|
2956 | 2956 | # delta against previous revision |
|
2957 | 2957 | numprev = 0 |
|
2958 | 2958 | # delta against first or second parent (not prev) |
|
2959 | 2959 | nump1 = 0 |
|
2960 | 2960 | nump2 = 0 |
|
2961 | 2961 | # delta against neither prev nor parents |
|
2962 | 2962 | numother = 0 |
|
2963 | 2963 | # delta against prev that are also first or second parent |
|
2964 | 2964 | # (details of `numprev`) |
|
2965 | 2965 | nump1prev = 0 |
|
2966 | 2966 | nump2prev = 0 |
|
2967 | 2967 | |
|
2968 | 2968 | # data about delta chain of each revs |
|
2969 | 2969 | chainlengths = [] |
|
2970 | 2970 | chainbases = [] |
|
2971 | 2971 | chainspans = [] |
|
2972 | 2972 | |
|
2973 | 2973 | # data about each revision |
|
2974 | 2974 | datasize = [None, 0, 0] |
|
2975 | 2975 | fullsize = [None, 0, 0] |
|
2976 | 2976 | semisize = [None, 0, 0] |
|
2977 | 2977 | # snapshot count per depth |
|
2978 | 2978 | snapsizedepth = collections.defaultdict(lambda: [None, 0, 0]) |
|
2979 | 2979 | deltasize = [None, 0, 0] |
|
2980 | 2980 | chunktypecounts = {} |
|
2981 | 2981 | chunktypesizes = {} |
|
2982 | 2982 | |
|
2983 | 2983 | def addsize(size, l): |
|
2984 | 2984 | if l[0] is None or size < l[0]: |
|
2985 | 2985 | l[0] = size |
|
2986 | 2986 | if size > l[1]: |
|
2987 | 2987 | l[1] = size |
|
2988 | 2988 | l[2] += size |
|
2989 | 2989 | |
|
2990 | 2990 | numrevs = len(r) |
|
2991 | 2991 | for rev in pycompat.xrange(numrevs): |
|
2992 | 2992 | p1, p2 = r.parentrevs(rev) |
|
2993 | 2993 | delta = r.deltaparent(rev) |
|
2994 | 2994 | if format > 0: |
|
2995 | 2995 | addsize(r.rawsize(rev), datasize) |
|
2996 | 2996 | if p2 != nullrev: |
|
2997 | 2997 | nummerges += 1 |
|
2998 | 2998 | size = r.length(rev) |
|
2999 | 2999 | if delta == nullrev: |
|
3000 | 3000 | chainlengths.append(0) |
|
3001 | 3001 | chainbases.append(r.start(rev)) |
|
3002 | 3002 | chainspans.append(size) |
|
3003 | 3003 | if size == 0: |
|
3004 | 3004 | numempty += 1 |
|
3005 | 3005 | numemptytext += 1 |
|
3006 | 3006 | else: |
|
3007 | 3007 | numfull += 1 |
|
3008 | 3008 | numsnapdepth[0] += 1 |
|
3009 | 3009 | addsize(size, fullsize) |
|
3010 | 3010 | addsize(size, snapsizedepth[0]) |
|
3011 | 3011 | else: |
|
3012 | 3012 | chainlengths.append(chainlengths[delta] + 1) |
|
3013 | 3013 | baseaddr = chainbases[delta] |
|
3014 | 3014 | revaddr = r.start(rev) |
|
3015 | 3015 | chainbases.append(baseaddr) |
|
3016 | 3016 | chainspans.append((revaddr - baseaddr) + size) |
|
3017 | 3017 | if size == 0: |
|
3018 | 3018 | numempty += 1 |
|
3019 | 3019 | numemptydelta += 1 |
|
3020 | 3020 | elif r.issnapshot(rev): |
|
3021 | 3021 | addsize(size, semisize) |
|
3022 | 3022 | numsemi += 1 |
|
3023 | 3023 | depth = r.snapshotdepth(rev) |
|
3024 | 3024 | numsnapdepth[depth] += 1 |
|
3025 | 3025 | addsize(size, snapsizedepth[depth]) |
|
3026 | 3026 | else: |
|
3027 | 3027 | addsize(size, deltasize) |
|
3028 | 3028 | if delta == rev - 1: |
|
3029 | 3029 | numprev += 1 |
|
3030 | 3030 | if delta == p1: |
|
3031 | 3031 | nump1prev += 1 |
|
3032 | 3032 | elif delta == p2: |
|
3033 | 3033 | nump2prev += 1 |
|
3034 | 3034 | elif delta == p1: |
|
3035 | 3035 | nump1 += 1 |
|
3036 | 3036 | elif delta == p2: |
|
3037 | 3037 | nump2 += 1 |
|
3038 | 3038 | elif delta != nullrev: |
|
3039 | 3039 | numother += 1 |
|
3040 | 3040 | |
|
3041 | 3041 | # Obtain data on the raw chunks in the revlog. |
|
3042 | 3042 | if util.safehasattr(r, b'_getsegmentforrevs'): |
|
3043 | 3043 | segment = r._getsegmentforrevs(rev, rev)[1] |
|
3044 | 3044 | else: |
|
3045 | 3045 | segment = r._revlog._getsegmentforrevs(rev, rev)[1] |
|
3046 | 3046 | if segment: |
|
3047 | 3047 | chunktype = bytes(segment[0:1]) |
|
3048 | 3048 | else: |
|
3049 | 3049 | chunktype = b'empty' |
|
3050 | 3050 | |
|
3051 | 3051 | if chunktype not in chunktypecounts: |
|
3052 | 3052 | chunktypecounts[chunktype] = 0 |
|
3053 | 3053 | chunktypesizes[chunktype] = 0 |
|
3054 | 3054 | |
|
3055 | 3055 | chunktypecounts[chunktype] += 1 |
|
3056 | 3056 | chunktypesizes[chunktype] += size |
|
3057 | 3057 | |
|
3058 | 3058 | # Adjust size min value for empty cases |
|
3059 | 3059 | for size in (datasize, fullsize, semisize, deltasize): |
|
3060 | 3060 | if size[0] is None: |
|
3061 | 3061 | size[0] = 0 |
|
3062 | 3062 | |
|
3063 | 3063 | numdeltas = numrevs - numfull - numempty - numsemi |
|
3064 | 3064 | numoprev = numprev - nump1prev - nump2prev |
|
3065 | 3065 | totalrawsize = datasize[2] |
|
3066 | 3066 | datasize[2] /= numrevs |
|
3067 | 3067 | fulltotal = fullsize[2] |
|
3068 | 3068 | if numfull == 0: |
|
3069 | 3069 | fullsize[2] = 0 |
|
3070 | 3070 | else: |
|
3071 | 3071 | fullsize[2] /= numfull |
|
3072 | 3072 | semitotal = semisize[2] |
|
3073 | 3073 | snaptotal = {} |
|
3074 | 3074 | if numsemi > 0: |
|
3075 | 3075 | semisize[2] /= numsemi |
|
3076 | 3076 | for depth in snapsizedepth: |
|
3077 | 3077 | snaptotal[depth] = snapsizedepth[depth][2] |
|
3078 | 3078 | snapsizedepth[depth][2] /= numsnapdepth[depth] |
|
3079 | 3079 | |
|
3080 | 3080 | deltatotal = deltasize[2] |
|
3081 | 3081 | if numdeltas > 0: |
|
3082 | 3082 | deltasize[2] /= numdeltas |
|
3083 | 3083 | totalsize = fulltotal + semitotal + deltatotal |
|
3084 | 3084 | avgchainlen = sum(chainlengths) / numrevs |
|
3085 | 3085 | maxchainlen = max(chainlengths) |
|
3086 | 3086 | maxchainspan = max(chainspans) |
|
3087 | 3087 | compratio = 1 |
|
3088 | 3088 | if totalsize: |
|
3089 | 3089 | compratio = totalrawsize / totalsize |
|
3090 | 3090 | |
|
3091 | 3091 | basedfmtstr = b'%%%dd\n' |
|
3092 | 3092 | basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n' |
|
3093 | 3093 | |
|
3094 | 3094 | def dfmtstr(max): |
|
3095 | 3095 | return basedfmtstr % len(str(max)) |
|
3096 | 3096 | |
|
3097 | 3097 | def pcfmtstr(max, padding=0): |
|
3098 | 3098 | return basepcfmtstr % (len(str(max)), b' ' * padding) |
|
3099 | 3099 | |
|
3100 | 3100 | def pcfmt(value, total): |
|
3101 | 3101 | if total: |
|
3102 | 3102 | return (value, 100 * float(value) / total) |
|
3103 | 3103 | else: |
|
3104 | 3104 | return value, 100.0 |
|
3105 | 3105 | |
|
3106 | 3106 | ui.writenoi18n(b'format : %d\n' % format) |
|
3107 | 3107 | ui.writenoi18n(b'flags : %s\n' % b', '.join(flags)) |
|
3108 | 3108 | |
|
3109 | 3109 | ui.write(b'\n') |
|
3110 | 3110 | fmt = pcfmtstr(totalsize) |
|
3111 | 3111 | fmt2 = dfmtstr(totalsize) |
|
3112 | 3112 | ui.writenoi18n(b'revisions : ' + fmt2 % numrevs) |
|
3113 | 3113 | ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs)) |
|
3114 | 3114 | ui.writenoi18n( |
|
3115 | 3115 | b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs) |
|
3116 | 3116 | ) |
|
3117 | 3117 | ui.writenoi18n(b'revisions : ' + fmt2 % numrevs) |
|
3118 | 3118 | ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs)) |
|
3119 | 3119 | ui.writenoi18n( |
|
3120 | 3120 | b' text : ' |
|
3121 | 3121 | + fmt % pcfmt(numemptytext, numemptytext + numemptydelta) |
|
3122 | 3122 | ) |
|
3123 | 3123 | ui.writenoi18n( |
|
3124 | 3124 | b' delta : ' |
|
3125 | 3125 | + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta) |
|
3126 | 3126 | ) |
|
3127 | 3127 | ui.writenoi18n( |
|
3128 | 3128 | b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs) |
|
3129 | 3129 | ) |
|
3130 | 3130 | for depth in sorted(numsnapdepth): |
|
3131 | 3131 | ui.write( |
|
3132 | 3132 | (b' lvl-%-3d : ' % depth) |
|
3133 | 3133 | + fmt % pcfmt(numsnapdepth[depth], numrevs) |
|
3134 | 3134 | ) |
|
3135 | 3135 | ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs)) |
|
3136 | 3136 | ui.writenoi18n(b'revision size : ' + fmt2 % totalsize) |
|
3137 | 3137 | ui.writenoi18n( |
|
3138 | 3138 | b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize) |
|
3139 | 3139 | ) |
|
3140 | 3140 | for depth in sorted(numsnapdepth): |
|
3141 | 3141 | ui.write( |
|
3142 | 3142 | (b' lvl-%-3d : ' % depth) |
|
3143 | 3143 | + fmt % pcfmt(snaptotal[depth], totalsize) |
|
3144 | 3144 | ) |
|
3145 | 3145 | ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize)) |
|
3146 | 3146 | |
|
3147 | 3147 | def fmtchunktype(chunktype): |
|
3148 | 3148 | if chunktype == b'empty': |
|
3149 | 3149 | return b' %s : ' % chunktype |
|
3150 | 3150 | elif chunktype in pycompat.bytestr(string.ascii_letters): |
|
3151 | 3151 | return b' 0x%s (%s) : ' % (hex(chunktype), chunktype) |
|
3152 | 3152 | else: |
|
3153 | 3153 | return b' 0x%s : ' % hex(chunktype) |
|
3154 | 3154 | |
|
3155 | 3155 | ui.write(b'\n') |
|
3156 | 3156 | ui.writenoi18n(b'chunks : ' + fmt2 % numrevs) |
|
3157 | 3157 | for chunktype in sorted(chunktypecounts): |
|
3158 | 3158 | ui.write(fmtchunktype(chunktype)) |
|
3159 | 3159 | ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs)) |
|
3160 | 3160 | ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize) |
|
3161 | 3161 | for chunktype in sorted(chunktypecounts): |
|
3162 | 3162 | ui.write(fmtchunktype(chunktype)) |
|
3163 | 3163 | ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize)) |
|
3164 | 3164 | |
|
3165 | 3165 | ui.write(b'\n') |
|
3166 | 3166 | fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio)) |
|
3167 | 3167 | ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen) |
|
3168 | 3168 | ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen) |
|
3169 | 3169 | ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan) |
|
3170 | 3170 | ui.writenoi18n(b'compression ratio : ' + fmt % compratio) |
|
3171 | 3171 | |
|
3172 | 3172 | if format > 0: |
|
3173 | 3173 | ui.write(b'\n') |
|
3174 | 3174 | ui.writenoi18n( |
|
3175 | 3175 | b'uncompressed data size (min/max/avg) : %d / %d / %d\n' |
|
3176 | 3176 | % tuple(datasize) |
|
3177 | 3177 | ) |
|
3178 | 3178 | ui.writenoi18n( |
|
3179 | 3179 | b'full revision size (min/max/avg) : %d / %d / %d\n' |
|
3180 | 3180 | % tuple(fullsize) |
|
3181 | 3181 | ) |
|
3182 | 3182 | ui.writenoi18n( |
|
3183 | 3183 | b'inter-snapshot size (min/max/avg) : %d / %d / %d\n' |
|
3184 | 3184 | % tuple(semisize) |
|
3185 | 3185 | ) |
|
3186 | 3186 | for depth in sorted(snapsizedepth): |
|
3187 | 3187 | if depth == 0: |
|
3188 | 3188 | continue |
|
3189 | 3189 | ui.writenoi18n( |
|
3190 | 3190 | b' level-%-3d (min/max/avg) : %d / %d / %d\n' |
|
3191 | 3191 | % ((depth,) + tuple(snapsizedepth[depth])) |
|
3192 | 3192 | ) |
|
3193 | 3193 | ui.writenoi18n( |
|
3194 | 3194 | b'delta size (min/max/avg) : %d / %d / %d\n' |
|
3195 | 3195 | % tuple(deltasize) |
|
3196 | 3196 | ) |
|
3197 | 3197 | |
|
3198 | 3198 | if numdeltas > 0: |
|
3199 | 3199 | ui.write(b'\n') |
|
3200 | 3200 | fmt = pcfmtstr(numdeltas) |
|
3201 | 3201 | fmt2 = pcfmtstr(numdeltas, 4) |
|
3202 | 3202 | ui.writenoi18n( |
|
3203 | 3203 | b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas) |
|
3204 | 3204 | ) |
|
3205 | 3205 | if numprev > 0: |
|
3206 | 3206 | ui.writenoi18n( |
|
3207 | 3207 | b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev) |
|
3208 | 3208 | ) |
|
3209 | 3209 | ui.writenoi18n( |
|
3210 | 3210 | b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev) |
|
3211 | 3211 | ) |
|
3212 | 3212 | ui.writenoi18n( |
|
3213 | 3213 | b' other : ' + fmt2 % pcfmt(numoprev, numprev) |
|
3214 | 3214 | ) |
|
3215 | 3215 | if gdelta: |
|
3216 | 3216 | ui.writenoi18n( |
|
3217 | 3217 | b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas) |
|
3218 | 3218 | ) |
|
3219 | 3219 | ui.writenoi18n( |
|
3220 | 3220 | b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas) |
|
3221 | 3221 | ) |
|
3222 | 3222 | ui.writenoi18n( |
|
3223 | 3223 | b'deltas against other : ' + fmt % pcfmt(numother, numdeltas) |
|
3224 | 3224 | ) |
|
3225 | 3225 | |
|
3226 | 3226 | |
|
3227 | 3227 | @command( |
|
3228 | 3228 | b'debugrevlogindex', |
|
3229 | 3229 | cmdutil.debugrevlogopts |
|
3230 | 3230 | + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))], |
|
3231 | 3231 | _(b'[-f FORMAT] -c|-m|FILE'), |
|
3232 | 3232 | optionalrepo=True, |
|
3233 | 3233 | ) |
|
3234 | 3234 | def debugrevlogindex(ui, repo, file_=None, **opts): |
|
3235 | 3235 | """dump the contents of a revlog index""" |
|
3236 | 3236 | opts = pycompat.byteskwargs(opts) |
|
3237 | 3237 | r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts) |
|
3238 | 3238 | format = opts.get(b'format', 0) |
|
3239 | 3239 | if format not in (0, 1): |
|
3240 | 3240 | raise error.Abort(_(b"unknown format %d") % format) |
|
3241 | 3241 | |
|
3242 | 3242 | if ui.debugflag: |
|
3243 | 3243 | shortfn = hex |
|
3244 | 3244 | else: |
|
3245 | 3245 | shortfn = short |
|
3246 | 3246 | |
|
3247 | 3247 | # There might not be anything in r, so have a sane default |
|
3248 | 3248 | idlen = 12 |
|
3249 | 3249 | for i in r: |
|
3250 | 3250 | idlen = len(shortfn(r.node(i))) |
|
3251 | 3251 | break |
|
3252 | 3252 | |
|
3253 | 3253 | if format == 0: |
|
3254 | 3254 | if ui.verbose: |
|
3255 | 3255 | ui.writenoi18n( |
|
3256 | 3256 | b" rev offset length linkrev %s %s p2\n" |
|
3257 | 3257 | % (b"nodeid".ljust(idlen), b"p1".ljust(idlen)) |
|
3258 | 3258 | ) |
|
3259 | 3259 | else: |
|
3260 | 3260 | ui.writenoi18n( |
|
3261 | 3261 | b" rev linkrev %s %s p2\n" |
|
3262 | 3262 | % (b"nodeid".ljust(idlen), b"p1".ljust(idlen)) |
|
3263 | 3263 | ) |
|
3264 | 3264 | elif format == 1: |
|
3265 | 3265 | if ui.verbose: |
|
3266 | 3266 | ui.writenoi18n( |
|
3267 | 3267 | ( |
|
3268 | 3268 | b" rev flag offset length size link p1" |
|
3269 | 3269 | b" p2 %s\n" |
|
3270 | 3270 | ) |
|
3271 | 3271 | % b"nodeid".rjust(idlen) |
|
3272 | 3272 | ) |
|
3273 | 3273 | else: |
|
3274 | 3274 | ui.writenoi18n( |
|
3275 | 3275 | b" rev flag size link p1 p2 %s\n" |
|
3276 | 3276 | % b"nodeid".rjust(idlen) |
|
3277 | 3277 | ) |
|
3278 | 3278 | |
|
3279 | 3279 | for i in r: |
|
3280 | 3280 | node = r.node(i) |
|
3281 | 3281 | if format == 0: |
|
3282 | 3282 | try: |
|
3283 | 3283 | pp = r.parents(node) |
|
3284 | 3284 | except Exception: |
|
3285 | 3285 | pp = [nullid, nullid] |
|
3286 | 3286 | if ui.verbose: |
|
3287 | 3287 | ui.write( |
|
3288 | 3288 | b"% 6d % 9d % 7d % 7d %s %s %s\n" |
|
3289 | 3289 | % ( |
|
3290 | 3290 | i, |
|
3291 | 3291 | r.start(i), |
|
3292 | 3292 | r.length(i), |
|
3293 | 3293 | r.linkrev(i), |
|
3294 | 3294 | shortfn(node), |
|
3295 | 3295 | shortfn(pp[0]), |
|
3296 | 3296 | shortfn(pp[1]), |
|
3297 | 3297 | ) |
|
3298 | 3298 | ) |
|
3299 | 3299 | else: |
|
3300 | 3300 | ui.write( |
|
3301 | 3301 | b"% 6d % 7d %s %s %s\n" |
|
3302 | 3302 | % ( |
|
3303 | 3303 | i, |
|
3304 | 3304 | r.linkrev(i), |
|
3305 | 3305 | shortfn(node), |
|
3306 | 3306 | shortfn(pp[0]), |
|
3307 | 3307 | shortfn(pp[1]), |
|
3308 | 3308 | ) |
|
3309 | 3309 | ) |
|
3310 | 3310 | elif format == 1: |
|
3311 | 3311 | pr = r.parentrevs(i) |
|
3312 | 3312 | if ui.verbose: |
|
3313 | 3313 | ui.write( |
|
3314 | 3314 | b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n" |
|
3315 | 3315 | % ( |
|
3316 | 3316 | i, |
|
3317 | 3317 | r.flags(i), |
|
3318 | 3318 | r.start(i), |
|
3319 | 3319 | r.length(i), |
|
3320 | 3320 | r.rawsize(i), |
|
3321 | 3321 | r.linkrev(i), |
|
3322 | 3322 | pr[0], |
|
3323 | 3323 | pr[1], |
|
3324 | 3324 | shortfn(node), |
|
3325 | 3325 | ) |
|
3326 | 3326 | ) |
|
3327 | 3327 | else: |
|
3328 | 3328 | ui.write( |
|
3329 | 3329 | b"% 6d %04x % 8d % 6d % 6d % 6d %s\n" |
|
3330 | 3330 | % ( |
|
3331 | 3331 | i, |
|
3332 | 3332 | r.flags(i), |
|
3333 | 3333 | r.rawsize(i), |
|
3334 | 3334 | r.linkrev(i), |
|
3335 | 3335 | pr[0], |
|
3336 | 3336 | pr[1], |
|
3337 | 3337 | shortfn(node), |
|
3338 | 3338 | ) |
|
3339 | 3339 | ) |
|
3340 | 3340 | |
|
3341 | 3341 | |
|
3342 | 3342 | @command( |
|
3343 | 3343 | b'debugrevspec', |
|
3344 | 3344 | [ |
|
3345 | 3345 | ( |
|
3346 | 3346 | b'', |
|
3347 | 3347 | b'optimize', |
|
3348 | 3348 | None, |
|
3349 | 3349 | _(b'print parsed tree after optimizing (DEPRECATED)'), |
|
3350 | 3350 | ), |
|
3351 | 3351 | ( |
|
3352 | 3352 | b'', |
|
3353 | 3353 | b'show-revs', |
|
3354 | 3354 | True, |
|
3355 | 3355 | _(b'print list of result revisions (default)'), |
|
3356 | 3356 | ), |
|
3357 | 3357 | ( |
|
3358 | 3358 | b's', |
|
3359 | 3359 | b'show-set', |
|
3360 | 3360 | None, |
|
3361 | 3361 | _(b'print internal representation of result set'), |
|
3362 | 3362 | ), |
|
3363 | 3363 | ( |
|
3364 | 3364 | b'p', |
|
3365 | 3365 | b'show-stage', |
|
3366 | 3366 | [], |
|
3367 | 3367 | _(b'print parsed tree at the given stage'), |
|
3368 | 3368 | _(b'NAME'), |
|
3369 | 3369 | ), |
|
3370 | 3370 | (b'', b'no-optimized', False, _(b'evaluate tree without optimization')), |
|
3371 | 3371 | (b'', b'verify-optimized', False, _(b'verify optimized result')), |
|
3372 | 3372 | ], |
|
3373 | 3373 | b'REVSPEC', |
|
3374 | 3374 | ) |
|
3375 | 3375 | def debugrevspec(ui, repo, expr, **opts): |
|
3376 | 3376 | """parse and apply a revision specification |
|
3377 | 3377 | |
|
3378 | 3378 | Use the -p/--show-stage option to print the parsed tree at the given stages.

3379 | 3379 | Use -p all to print the tree at every stage.
|
3380 | 3380 | |
|
3381 | 3381 | Use the --no-show-revs option with -s or -p to print only the set
|
3382 | 3382 | representation or the parsed tree respectively. |
|
3383 | 3383 | |
|
3384 | 3384 | Use --verify-optimized to compare the optimized result with the unoptimized |
|
3385 | 3385 | one. Returns 1 if the optimized result differs. |
|
3386 | 3386 | """ |
|
3387 | 3387 | opts = pycompat.byteskwargs(opts) |
|
3388 | 3388 | aliases = ui.configitems(b'revsetalias') |
|
3389 | 3389 | stages = [ |
|
3390 | 3390 | (b'parsed', lambda tree: tree), |
|
3391 | 3391 | ( |
|
3392 | 3392 | b'expanded', |
|
3393 | 3393 | lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn), |
|
3394 | 3394 | ), |
|
3395 | 3395 | (b'concatenated', revsetlang.foldconcat), |
|
3396 | 3396 | (b'analyzed', revsetlang.analyze), |
|
3397 | 3397 | (b'optimized', revsetlang.optimize), |
|
3398 | 3398 | ] |
|
3399 | 3399 | if opts[b'no_optimized']: |
|
3400 | 3400 | stages = stages[:-1] |
|
3401 | 3401 | if opts[b'verify_optimized'] and opts[b'no_optimized']: |
|
3402 | 3402 | raise error.Abort( |
|
3403 | 3403 | _(b'cannot use --verify-optimized with --no-optimized') |
|
3404 | 3404 | ) |
|
3405 | 3405 | stagenames = {n for n, f in stages} |
|
3406 | 3406 | |
|
3407 | 3407 | showalways = set() |
|
3408 | 3408 | showchanged = set() |
|
3409 | 3409 | if ui.verbose and not opts[b'show_stage']: |
|
3410 | 3410 | # show parsed tree by --verbose (deprecated) |
|
3411 | 3411 | showalways.add(b'parsed') |
|
3412 | 3412 | showchanged.update([b'expanded', b'concatenated']) |
|
3413 | 3413 | if opts[b'optimize']: |
|
3414 | 3414 | showalways.add(b'optimized') |
|
3415 | 3415 | if opts[b'show_stage'] and opts[b'optimize']: |
|
3416 | 3416 | raise error.Abort(_(b'cannot use --optimize with --show-stage')) |
|
3417 | 3417 | if opts[b'show_stage'] == [b'all']: |
|
3418 | 3418 | showalways.update(stagenames) |
|
3419 | 3419 | else: |
|
3420 | 3420 | for n in opts[b'show_stage']: |
|
3421 | 3421 | if n not in stagenames: |
|
3422 | 3422 | raise error.Abort(_(b'invalid stage name: %s') % n) |
|
3423 | 3423 | showalways.update(opts[b'show_stage']) |
|
3424 | 3424 | |
|
3425 | 3425 | treebystage = {} |
|
3426 | 3426 | printedtree = None |
|
3427 | 3427 | tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo)) |
|
3428 | 3428 | for n, f in stages: |
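
# feed each stage the previous stage's tree, keeping every

# intermediate result so --verify-optimized can compare them later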
|
3429 | 3429 | treebystage[n] = tree = f(tree) |
|
3430 | 3430 | if n in showalways or (n in showchanged and tree != printedtree): |
|
3431 | 3431 | if opts[b'show_stage'] or n != b'parsed': |
|
3432 | 3432 | ui.write(b"* %s:\n" % n) |
|
3433 | 3433 | ui.write(revsetlang.prettyformat(tree), b"\n") |
|
3434 | 3434 | printedtree = tree |
|
3435 | 3435 | |
|
3436 | 3436 | if opts[b'verify_optimized']: |
|
3437 | 3437 | arevs = revset.makematcher(treebystage[b'analyzed'])(repo) |
|
3438 | 3438 | brevs = revset.makematcher(treebystage[b'optimized'])(repo) |
|
3439 | 3439 | if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose): |
|
3440 | 3440 | ui.writenoi18n( |
|
3441 | 3441 | b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n" |
|
3442 | 3442 | ) |
|
3443 | 3443 | ui.writenoi18n( |
|
3444 | 3444 | b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n" |
|
3445 | 3445 | ) |
|
3446 | 3446 | arevs = list(arevs) |
|
3447 | 3447 | brevs = list(brevs) |
|
3448 | 3448 | if arevs == brevs: |
|
3449 | 3449 | return 0 |
|
3450 | 3450 | ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a') |
|
3451 | 3451 | ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b') |
|
3452 | 3452 | sm = difflib.SequenceMatcher(None, arevs, brevs) |
|
3453 | 3453 | for tag, alo, ahi, blo, bhi in sm.get_opcodes(): |
|
3454 | 3454 | if tag in ('delete', 'replace'): |
|
3455 | 3455 | for c in arevs[alo:ahi]: |
|
3456 | 3456 | ui.write(b'-%d\n' % c, label=b'diff.deleted') |
|
3457 | 3457 | if tag in ('insert', 'replace'): |
|
3458 | 3458 | for c in brevs[blo:bhi]: |
|
3459 | 3459 | ui.write(b'+%d\n' % c, label=b'diff.inserted') |
|
3460 | 3460 | if tag == 'equal': |
|
3461 | 3461 | for c in arevs[alo:ahi]: |
|
3462 | 3462 | ui.write(b' %d\n' % c) |
|
3463 | 3463 | return 1 |
|
3464 | 3464 | |
|
3465 | 3465 | func = revset.makematcher(tree) |
|
3466 | 3466 | revs = func(repo) |
|
3467 | 3467 | if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose): |
|
3468 | 3468 | ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n") |
|
3469 | 3469 | if not opts[b'show_revs']: |
|
3470 | 3470 | return |
|
3471 | 3471 | for c in revs: |
|
3472 | 3472 | ui.write(b"%d\n" % c) |
|
3473 | 3473 | |
|
3474 | 3474 | |
|
3475 | 3475 | @command( |
|
3476 | 3476 | b'debugserve', |
|
3477 | 3477 | [ |
|
3478 | 3478 | ( |
|
3479 | 3479 | b'', |
|
3480 | 3480 | b'sshstdio', |
|
3481 | 3481 | False, |
|
3482 | 3482 | _(b'run an SSH server bound to process handles'), |
|
3483 | 3483 | ), |
|
3484 | 3484 | (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')), |
|
3485 | 3485 | (b'', b'logiofile', b'', _(b'file to log server I/O to')), |
|
3486 | 3486 | ], |
|
3487 | 3487 | b'', |
|
3488 | 3488 | ) |
|
3489 | 3489 | def debugserve(ui, repo, **opts): |
|
3490 | 3490 | """run a server with advanced settings |
|
3491 | 3491 | |
|
3492 | 3492 | This command is similar to :hg:`serve`. It exists partially as a |
|
3493 | 3493 | workaround to the fact that ``hg serve --stdio`` must have specific |
|
3494 | 3494 | arguments for security reasons. |
|
3495 | 3495 | """ |
|
3496 | 3496 | opts = pycompat.byteskwargs(opts) |
|
3497 | 3497 | |
|
3498 | 3498 | if not opts[b'sshstdio']: |
|
3499 | 3499 | raise error.Abort(_(b'only --sshstdio is currently supported')) |
|
3500 | 3500 | |
|
3501 | 3501 | logfh = None |
|
3502 | 3502 | |
|
3503 | 3503 | if opts[b'logiofd'] and opts[b'logiofile']: |
|
3504 | 3504 | raise error.Abort(_(b'cannot use both --logiofd and --logiofile')) |
|
3505 | 3505 | |
|
3506 | 3506 | if opts[b'logiofd']: |
|
3507 | 3507 | # Ideally we would be line buffered. But line buffering in binary |
|
3508 | 3508 | # mode isn't supported and emits a warning in Python 3.8+. Disabling |
|
3509 | 3509 | # buffering could have performance impacts. But since this isn't |
|
3510 | 3510 | # performance critical code, it should be fine. |
|
3511 | 3511 | try: |
|
3512 | 3512 | logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 0) |
|
3513 | 3513 | except OSError as e: |
|
3514 | 3514 | if e.errno != errno.ESPIPE: |
|
3515 | 3515 | raise |
|
3516 | 3516 | # can't seek a pipe, so `ab` mode fails on py3 |
|
3517 | 3517 | logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 0) |
|
3518 | 3518 | elif opts[b'logiofile']: |
|
3519 | 3519 | logfh = open(opts[b'logiofile'], b'ab', 0) |
|
3520 | 3520 | |
|
3521 | 3521 | s = wireprotoserver.sshserver(ui, repo, logfh=logfh) |
|
3522 | 3522 | s.serve_forever() |
|
3523 | 3523 | |
|
3524 | 3524 | |
|
3525 | 3525 | @command(b'debugsetparents', [], _(b'REV1 [REV2]')) |
|
3526 | 3526 | def debugsetparents(ui, repo, rev1, rev2=None): |
|
3527 | 3527 | """manually set the parents of the current working directory (DANGEROUS) |
|
3528 | 3528 | |
|
3529 | 3529 | This command is not what you are looking for and should not be used. Using |
|
3530 | 3530 | this command will most certainly result in slight corruption of the file

3531 | 3531 | level histories within your repository. DO NOT USE THIS COMMAND.
|
3532 | 3532 | |
|
3533 | 3533 | The command updates the p1 and p2 fields in the dirstate, and touches

3534 | 3534 | nothing else. This is useful for writing repository conversion tools, but

3535 | 3535 | should be used with extreme care. For example, neither the working

3536 | 3536 | directory nor the dirstate is updated, so file status may be incorrect

3537 | 3537 | after running this command. Only use it if you are one of the few people who

3538 | 3538 | deeply understand both conversion tools and file level histories. If you are

3539 | 3539 | reading this help, you are not one of these people (most of them sailed west

3540 | 3540 | from Mithlond anyway).
|
3541 | 3541 | |
|
3542 | 3542 | So one last time DO NOT USE THIS COMMAND. |
|
3543 | 3543 | |
|
3544 | 3544 | Returns 0 on success. |
|
3545 | 3545 | """ |
|
3546 | 3546 | |
|
3547 | 3547 | node1 = scmutil.revsingle(repo, rev1).node() |
|
3548 | 3548 | node2 = scmutil.revsingle(repo, rev2, b'null').node() |
|
3549 | 3549 | |
|
3550 | 3550 | with repo.wlock(): |
|
3551 | 3551 | repo.setparents(node1, node2) |
|
3552 | 3552 | |
|
3553 | 3553 | |
|
3554 | 3554 | @command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV')) |
|
3555 | 3555 | def debugsidedata(ui, repo, file_, rev=None, **opts): |
|
3556 | 3556 | """dump the side data for a cl/manifest/file revision |
|
3557 | 3557 | |
|
3558 | 3558 | Use --verbose to dump the sidedata content.""" |
|
3559 | 3559 | opts = pycompat.byteskwargs(opts) |
|
3560 | 3560 | if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'): |
|
3561 | 3561 | if rev is not None: |
|
3562 | 3562 | raise error.CommandError(b'debugsidedata', _(b'invalid arguments'))

3563 | 3563 | file_, rev = None, file_

3564 | 3564 | elif rev is None:

3565 | 3565 | raise error.CommandError(b'debugsidedata', _(b'invalid arguments'))

3566 | 3566 | r = cmdutil.openstorage(repo, b'debugsidedata', file_, opts)
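
# unwrap storage objects (such as filelogs) that keep the actual

# revlog in a _revlog attribute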
|
3567 | 3567 | r = getattr(r, '_revlog', r) |
|
3568 | 3568 | try: |
|
3569 | 3569 | sidedata = r.sidedata(r.lookup(rev)) |
|
3570 | 3570 | except KeyError: |
|
3571 | 3571 | raise error.Abort(_(b'invalid revision identifier %s') % rev) |
|
3572 | 3572 | if sidedata: |
|
3573 | 3573 | sidedata = list(sidedata.items()) |
|
3574 | 3574 | sidedata.sort() |
|
3575 | 3575 | ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata)) |
|
3576 | 3576 | for key, value in sidedata: |
|
3577 | 3577 | ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value))) |
|
3578 | 3578 | if ui.verbose: |
|
3579 | 3579 | ui.writenoi18n(b' %s\n' % stringutil.pprint(value)) |
|
3580 | 3580 | |
|
3581 | 3581 | |
|
3582 | 3582 | @command(b'debugssl', [], b'[SOURCE]', optionalrepo=True) |
|
3583 | 3583 | def debugssl(ui, repo, source=None, **opts): |
|
3584 | 3584 | """test a secure connection to a server |
|
3585 | 3585 | |
|
3586 | 3586 | This builds the certificate chain for the server on Windows, installing the |
|
3587 | 3587 | missing intermediates and trusted root via Windows Update if necessary. It |
|
3588 | 3588 | does nothing on other platforms. |
|
3589 | 3589 | |
|
3590 | 3590 | If SOURCE is omitted, the 'default' path will be used. If a URL is given, |
|
3591 | 3591 | that server is used. See :hg:`help urls` for more information. |
|
3592 | 3592 | |
|
3593 | 3593 | If the update succeeds, retry the original operation. Otherwise, the cause |
|
3594 | 3594 | of the SSL error is likely another issue. |
|
3595 | 3595 | """ |
|
3596 | 3596 | if not pycompat.iswindows: |
|
3597 | 3597 | raise error.Abort( |
|
3598 | 3598 | _(b'certificate chain building is only possible on Windows') |
|
3599 | 3599 | ) |
|
3600 | 3600 | |
|
3601 | 3601 | if not source: |
|
3602 | 3602 | if not repo: |
|
3603 | 3603 | raise error.Abort( |
|
3604 | 3604 | _( |
|
3605 | 3605 | b"there is no Mercurial repository here, and no " |
|
3606 | 3606 | b"server specified" |
|
3607 | 3607 | ) |
|
3608 | 3608 | ) |
|
3609 | 3609 | source = b"default" |
|
3610 | 3610 | |
|
3611 | 3611 | source, branches = hg.parseurl(ui.expandpath(source)) |
|
3612 | 3612 | url = util.url(source) |
|
3613 | 3613 | |
|
3614 | 3614 | defaultport = {b'https': 443, b'ssh': 22} |
|
3615 | 3615 | if url.scheme in defaultport: |
|
3616 | 3616 | try: |
|
3617 | 3617 | addr = (url.host, int(url.port or defaultport[url.scheme])) |
|
3618 | 3618 | except ValueError: |
|
3619 | 3619 | raise error.Abort(_(b"malformed port number in URL")) |
|
3620 | 3620 | else: |
|
3621 | 3621 | raise error.Abort(_(b"only https and ssh connections are supported")) |
|
3622 | 3622 | |
|
3623 | 3623 | from . import win32 |
|
3624 | 3624 | |
|
3625 | 3625 | s = ssl.wrap_socket( |
|
3626 | 3626 | socket.socket(), |
|
3627 | 3627 | ssl_version=ssl.PROTOCOL_TLS, |
|
3628 | 3628 | cert_reqs=ssl.CERT_NONE, |
|
3629 | 3629 | ca_certs=None, |
|
3630 | 3630 | ) |
|
3631 | 3631 | |
|
3632 | 3632 | try: |
|
3633 | 3633 | s.connect(addr) |
|
3634 | 3634 | cert = s.getpeercert(True) |
|
3635 | 3635 | |
|
3636 | 3636 | ui.status(_(b'checking the certificate chain for %s\n') % url.host) |
|
3637 | 3637 | |
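
# probe the chain without touching the certificate store first

# (build=False); only attempt the full build when it is incomplete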
|
3638 | 3638 | complete = win32.checkcertificatechain(cert, build=False) |
|
3639 | 3639 | |
|
3640 | 3640 | if not complete: |
|
3641 | 3641 | ui.status(_(b'certificate chain is incomplete, updating... ')) |
|
3642 | 3642 | |
|
3643 | 3643 | if not win32.checkcertificatechain(cert): |
|
3644 | 3644 | ui.status(_(b'failed.\n')) |
|
3645 | 3645 | else: |
|
3646 | 3646 | ui.status(_(b'done.\n')) |
|
3647 | 3647 | else: |
|
3648 | 3648 | ui.status(_(b'full certificate chain is available\n')) |
|
3649 | 3649 | finally: |
|
3650 | 3650 | s.close() |
|
3651 | 3651 | |
|
3652 | 3652 | |
|
3653 | 3653 | @command( |
|
3654 | 3654 | b"debugbackupbundle", |
|
3655 | 3655 | [ |
|
3656 | 3656 | ( |
|
3657 | 3657 | b"", |
|
3658 | 3658 | b"recover", |
|
3659 | 3659 | b"", |
|
3660 | 3660 | b"brings the specified changeset back into the repository", |
|
3661 | 3661 | ) |
|
3662 | 3662 | ] |
|
3663 | 3663 | + cmdutil.logopts, |
|
3664 | 3664 | _(b"hg debugbackupbundle [--recover HASH]"), |
|
3665 | 3665 | ) |
|
3666 | 3666 | def debugbackupbundle(ui, repo, *pats, **opts): |
|
3667 | 3667 | """lists the changesets available in backup bundles |
|
3668 | 3668 | |
|
3669 | 3669 | Without any arguments, this command prints a list of the changesets in each |
|
3670 | 3670 | backup bundle. |
|
3671 | 3671 | |
|
3672 | 3672 | --recover takes a changeset hash and unbundles the first bundle that |
|
3673 | 3673 | contains that hash, which puts that changeset back in your repository. |
|
3674 | 3674 | |
|
3675 | 3675 | --verbose will print the entire commit message and the bundle path for that |
|
3676 | 3676 | backup. |
|
3677 | 3677 | """ |
|
3678 | 3678 | backups = list( |
|
3679 | 3679 | filter( |
|
3680 | 3680 | os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg") |
|
3681 | 3681 | ) |
|
3682 | 3682 | ) |
|
3683 | 3683 | backups.sort(key=lambda x: os.path.getmtime(x), reverse=True) |
|
3684 | 3684 | |
|
3685 | 3685 | opts = pycompat.byteskwargs(opts) |
|
3686 | 3686 | opts[b"bundle"] = b"" |
|
3687 | 3687 | opts[b"force"] = None |
|
3688 | 3688 | limit = logcmdutil.getlimit(opts) |
|
3689 | 3689 | |
|
3690 | 3690 | def display(other, chlist, displayer): |
|
3691 | 3691 | if opts.get(b"newest_first"): |
|
3692 | 3692 | chlist.reverse() |
|
3693 | 3693 | count = 0 |
|
3694 | 3694 | for n in chlist: |
|
3695 | 3695 | if limit is not None and count >= limit: |
|
3696 | 3696 | break |
|
3697 | 3697 | parents = [True for p in other.changelog.parents(n) if p != nullid] |
|
3698 | 3698 | if opts.get(b"no_merges") and len(parents) == 2: |
|
3699 | 3699 | continue |
|
3700 | 3700 | count += 1 |
|
3701 | 3701 | displayer.show(other[n]) |
|
3702 | 3702 | |
|
3703 | 3703 | recovernode = opts.get(b"recover") |
|
3704 | 3704 | if recovernode: |
|
3705 | 3705 | if scmutil.isrevsymbol(repo, recovernode): |
|
3706 | 3706 | ui.warn(_(b"%s already exists in the repo\n") % recovernode) |
|
3707 | 3707 | return |
|
3708 | 3708 | elif backups: |
|
3709 | 3709 | msg = _( |
|
3710 | 3710 | b"Recover changesets using: hg debugbackupbundle --recover " |
|
3711 | 3711 | b"<changeset hash>\n\nAvailable backup changesets:" |
|
3712 | 3712 | ) |
|
3713 | 3713 | ui.status(msg, label=b"status.removed") |
|
3714 | 3714 | else: |
|
3715 | 3715 | ui.status(_(b"no backup changesets found\n")) |
|
3716 | 3716 | return |
|
3717 | 3717 | |
|
3718 | 3718 | for backup in backups: |
|
3719 | 3719 | # Much of this is copied from the hg incoming logic |
|
3720 | 3720 | source = ui.expandpath(os.path.relpath(backup, encoding.getcwd())) |
|
3721 | 3721 | source, branches = hg.parseurl(source, opts.get(b"branch")) |
|
3722 | 3722 | try: |
|
3723 | 3723 | other = hg.peer(repo, opts, source) |
|
3724 | 3724 | except error.LookupError as ex: |
|
3725 | 3725 | msg = _(b"\nwarning: unable to open bundle %s") % source |
|
3726 | 3726 | hint = _(b"\n(missing parent rev %s)\n") % short(ex.name) |
|
3727 | 3727 | ui.warn(msg, hint=hint) |
|
3728 | 3728 | continue |
|
3729 | 3729 | revs, checkout = hg.addbranchrevs( |
|
3730 | 3730 | repo, other, branches, opts.get(b"rev") |
|
3731 | 3731 | ) |
|
3732 | 3732 | |
|
3733 | 3733 | if revs: |
|
3734 | 3734 | revs = [other.lookup(rev) for rev in revs] |
|
3735 | 3735 | |
|
3736 | 3736 | quiet = ui.quiet |
|
3737 | 3737 | try: |
|
3738 | 3738 | ui.quiet = True |
|
3739 | 3739 | other, chlist, cleanupfn = bundlerepo.getremotechanges( |
|
3740 | 3740 | ui, repo, other, revs, opts[b"bundle"], opts[b"force"] |
|
3741 | 3741 | ) |
|
3742 | 3742 | except error.LookupError: |
|
3743 | 3743 | continue |
|
3744 | 3744 | finally: |
|
3745 | 3745 | ui.quiet = quiet |
|
3746 | 3746 | |
|
3747 | 3747 | try: |
|
3748 | 3748 | if not chlist: |
|
3749 | 3749 | continue |
|
3750 | 3750 | if recovernode: |
|
3751 | 3751 | with repo.lock(), repo.transaction(b"unbundle") as tr: |
|
3752 | 3752 | if scmutil.isrevsymbol(other, recovernode): |
|
3753 | 3753 | ui.status(_(b"Unbundling %s\n") % (recovernode)) |
|
3754 | 3754 | f = hg.openpath(ui, source) |
|
3755 | 3755 | gen = exchange.readbundle(ui, f, source) |
|
3756 | 3756 | if isinstance(gen, bundle2.unbundle20): |
|
3757 | 3757 | bundle2.applybundle( |
|
3758 | 3758 | repo, |
|
3759 | 3759 | gen, |
|
3760 | 3760 | tr, |
|
3761 | 3761 | source=b"unbundle", |
|
3762 | 3762 | url=b"bundle:" + source, |
|
3763 | 3763 | ) |
|
3764 | 3764 | else: |
|
3765 | 3765 | gen.apply(repo, b"unbundle", b"bundle:" + source) |
|
3766 | 3766 | break |
|
3767 | 3767 | else: |
|
3768 | 3768 | backupdate = encoding.strtolocal( |
|
3769 | 3769 | time.strftime( |
|
3770 | 3770 | "%a %H:%M, %Y-%m-%d", |
|
3771 | 3771 | time.localtime(os.path.getmtime(source)), |
|
3772 | 3772 | ) |
|
3773 | 3773 | ) |
|
3774 | 3774 | ui.status(b"\n%s\n" % (backupdate.ljust(50))) |
|
3775 | 3775 | if ui.verbose: |
|
3776 | 3776 | ui.status(b"%s%s\n" % (b"bundle:".ljust(13), source)) |
|
3777 | 3777 | else: |
|
3778 | 3778 | opts[ |
|
3779 | 3779 | b"template" |
|
3780 | 3780 | ] = b"{label('status.modified', node|short)} {desc|firstline}\n" |
|
3781 | 3781 | displayer = logcmdutil.changesetdisplayer( |
|
3782 | 3782 | ui, other, opts, False |
|
3783 | 3783 | ) |
|
3784 | 3784 | display(other, chlist, displayer) |
|
3785 | 3785 | displayer.close() |
|
3786 | 3786 | finally: |
|
3787 | 3787 | cleanupfn() |
|
3788 | 3788 | |
|
3789 | 3789 | |
|
3790 | 3790 | @command( |
|
3791 | 3791 | b'debugsub', |
|
3792 | 3792 | [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))], |
|
3793 | 3793 | _(b'[-r REV] [REV]'), |
|
3794 | 3794 | ) |
|
3795 | 3795 | def debugsub(ui, repo, rev=None): |
|
3796 | 3796 | ctx = scmutil.revsingle(repo, rev, None) |
|
3797 | 3797 | for k, v in sorted(ctx.substate.items()): |
|
3798 | 3798 | ui.writenoi18n(b'path %s\n' % k) |
|
3799 | 3799 | ui.writenoi18n(b' source %s\n' % v[0]) |
|
3800 | 3800 | ui.writenoi18n(b' revision %s\n' % v[1]) |
|
3801 | 3801 | |
|
3802 | 3802 | |
|
3803 | 3803 | @command(b'debugshell', optionalrepo=True) |
|
3804 | 3804 | def debugshell(ui, repo): |
|
3805 | 3805 | """run an interactive Python interpreter |
|
3806 | 3806 | |
|
3807 | 3807 | The local namespace is provided with a reference to the ui and |
|
3808 | 3808 | the repo instance (if available). |
|
3809 | 3809 | """ |
|
3810 | 3810 | import code |
|
3811 | 3811 | |
|
3812 | 3812 | imported_objects = { |
|
3813 | 3813 | 'ui': ui, |
|
3814 | 3814 | 'repo': repo, |
|
3815 | 3815 | } |
|
3816 | 3816 | |
|
3817 | 3817 | code.interact(local=imported_objects) |
|
3818 | 3818 | |
|
3819 | 3819 | |
|
3820 | 3820 | @command( |
|
3821 | 3821 | b'debugsuccessorssets', |
|
3822 | 3822 | [(b'', b'closest', False, _(b'return closest successors sets only'))], |
|
3823 | 3823 | _(b'[REV]'), |
|
3824 | 3824 | ) |
|
3825 | 3825 | def debugsuccessorssets(ui, repo, *revs, **opts): |
|
3826 | 3826 | """show set of successors for revision |
|
3827 | 3827 | |
|
3828 | 3828 | A successors set of changeset A is a consistent group of revisions that |
|
3829 | 3829 | succeed A. It contains non-obsolete changesets only, unless the

3830 | 3830 | closest successors sets are requested (``--closest``).
|
3831 | 3831 | |
|
3832 | 3832 | In most cases a changeset A has a single successors set containing a single |
|
3833 | 3833 | successor (changeset A replaced by A'). |
|
3834 | 3834 | |
|
3835 | 3835 | A changeset that is made obsolete with no successors is called "pruned".
|
3836 | 3836 | Such changesets have no successors sets at all. |
|
3837 | 3837 | |
|
3838 | 3838 | A changeset that has been "split" will have a successors set containing |
|
3839 | 3839 | more than one successor. |
|
3840 | 3840 | |
|
3841 | 3841 | A changeset that has been rewritten in multiple different ways is called |
|
3842 | 3842 | "divergent". Such changesets have multiple successor sets (each of which |
|
3843 | 3843 | may also be split, i.e. have multiple successors). |
|
3844 | 3844 | |
|
3845 | 3845 | Results are displayed as follows:: |
|
3846 | 3846 | |
|
3847 | 3847 | <rev1> |
|
3848 | 3848 | <successors-1A> |
|
3849 | 3849 | <rev2> |
|
3850 | 3850 | <successors-2A> |
|
3851 | 3851 | <successors-2B1> <successors-2B2> <successors-2B3> |
|
3852 | 3852 | |
|
3853 | 3853 | Here rev2 has two possible (i.e. divergent) successors sets. The first |
|
3854 | 3854 | holds one element, whereas the second holds three (i.e. the changeset has |
|
3855 | 3855 | been split). |
|
3856 | 3856 | """ |
|
3857 | 3857 | # cache passed to successorssets so computation is reused from one call to the next
|
3858 | 3858 | cache = {} |
|
3859 | 3859 | ctx2str = bytes |
|
3860 | 3860 | node2str = short |
|
3861 | 3861 | for rev in scmutil.revrange(repo, revs): |
|
3862 | 3862 | ctx = repo[rev] |
|
3863 | 3863 | ui.write(b'%s\n' % ctx2str(ctx)) |
|
3864 | 3864 | for succsset in obsutil.successorssets( |
|
3865 | 3865 | repo, ctx.node(), closest=opts['closest'], cache=cache |
|
3866 | 3866 | ): |
|
3867 | 3867 | if succsset: |
|
3868 | 3868 | ui.write(b' ') |
|
3869 | 3869 | ui.write(node2str(succsset[0])) |
|
3870 | 3870 | for node in succsset[1:]: |
|
3871 | 3871 | ui.write(b' ') |
|
3872 | 3872 | ui.write(node2str(node)) |
|
3873 | 3873 | ui.write(b'\n') |
|
3874 | 3874 | |
|
3875 | 3875 | |
|
3876 | 3876 | @command(b'debugtagscache', []) |
|
3877 | 3877 | def debugtagscache(ui, repo): |
|
3878 | 3878 | """display the contents of .hg/cache/hgtagsfnodes1""" |
|
3879 | 3879 | cache = tagsmod.hgtagsfnodescache(repo.unfiltered()) |
|
3880 | 3880 | flog = repo.file(b'.hgtags') |
|
3881 | 3881 | for r in repo: |
|
3882 | 3882 | node = repo[r].node() |
|
3883 | 3883 | tagsnode = cache.getfnode(node, computemissing=False) |
|
3884 | 3884 | if tagsnode: |
|
3885 | 3885 | tagsnodedisplay = hex(tagsnode) |
|
3886 | 3886 | if not flog.hasnode(tagsnode): |
|
3887 | 3887 | tagsnodedisplay += b' (unknown node)' |
|
3888 | 3888 | elif tagsnode is None: |
|
3889 | 3889 | tagsnodedisplay = b'missing' |
|
3890 | 3890 | else: |
|
3891 | 3891 | tagsnodedisplay = b'invalid' |
|
3892 | 3892 | |
|
3893 | 3893 | ui.write(b'%d %s %s\n' % (r, hex(node), tagsnodedisplay)) |
|
3894 | 3894 | |
|
3895 | 3895 | |
|
3896 | 3896 | @command( |
|
3897 | 3897 | b'debugtemplate', |
|
3898 | 3898 | [ |
|
3899 | 3899 | (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')), |
|
3900 | 3900 | (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')), |
|
3901 | 3901 | ], |
|
3902 | 3902 | _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'), |
|
3903 | 3903 | optionalrepo=True, |
|
3904 | 3904 | ) |
|
3905 | 3905 | def debugtemplate(ui, repo, tmpl, **opts): |
|
3906 | 3906 | """parse and apply a template |
|
3907 | 3907 | |
|
3908 | 3908 | If -r/--rev is given, the template is processed as a log template and |
|
3909 | 3909 | applied to the given changesets. Otherwise, it is processed as a generic |
|
3910 | 3910 | template. |
|
3911 | 3911 | |
|
3912 | 3912 | Use --verbose to print the parsed tree. |
|
3913 | 3913 | """ |
|
3914 | 3914 | revs = None |
|
3915 | 3915 | if opts['rev']: |
|
3916 | 3916 | if repo is None: |
|
3917 | 3917 | raise error.RepoError( |
|
3918 | 3918 | _(b'there is no Mercurial repository here (.hg not found)') |
|
3919 | 3919 | ) |
|
3920 | 3920 | revs = scmutil.revrange(repo, opts['rev']) |
|
3921 | 3921 | |
|
3922 | 3922 | props = {} |
|
3923 | 3923 | for d in opts['define']: |
|
3924 | 3924 | try: |
|
3925 | 3925 | k, v = (e.strip() for e in d.split(b'=', 1)) |
|
3926 | 3926 | if not k or k == b'ui': |
|
3927 | 3927 | raise ValueError |
|
3928 | 3928 | props[k] = v |
|
3929 | 3929 | except ValueError: |
|
3930 | 3930 | raise error.Abort(_(b'malformed keyword definition: %s') % d) |
|
3931 | 3931 | |
|
3932 | 3932 | if ui.verbose: |
|
3933 | 3933 | aliases = ui.configitems(b'templatealias') |
|
3934 | 3934 | tree = templater.parse(tmpl) |
|
3935 | 3935 | ui.note(templater.prettyformat(tree), b'\n') |
|
3936 | 3936 | newtree = templater.expandaliases(tree, aliases) |
|
3937 | 3937 | if newtree != tree: |
|
3938 | 3938 | ui.notenoi18n( |
|
3939 | 3939 | b"* expanded:\n", templater.prettyformat(newtree), b'\n' |
|
3940 | 3940 | ) |
|
3941 | 3941 | |
|
3942 | 3942 | if revs is None: |
|
3943 | 3943 | tres = formatter.templateresources(ui, repo) |
|
3944 | 3944 | t = formatter.maketemplater(ui, tmpl, resources=tres) |
|
3945 | 3945 | if ui.verbose: |
|
3946 | 3946 | kwds, funcs = t.symbolsuseddefault() |
|
3947 | 3947 | ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds))) |
|
3948 | 3948 | ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs))) |
|
3949 | 3949 | ui.write(t.renderdefault(props)) |
|
3950 | 3950 | else: |
|
3951 | 3951 | displayer = logcmdutil.maketemplater(ui, repo, tmpl) |
|
3952 | 3952 | if ui.verbose: |
|
3953 | 3953 | kwds, funcs = displayer.t.symbolsuseddefault() |
|
3954 | 3954 | ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds))) |
|
3955 | 3955 | ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs))) |
|
3956 | 3956 | for r in revs: |
|
3957 | 3957 | displayer.show(repo[r], **pycompat.strkwargs(props)) |
|
3958 | 3958 | displayer.close() |
|
3959 | 3959 | |
|
3960 | 3960 | |
|
3961 | 3961 | @command( |
|
3962 | 3962 | b'debuguigetpass', |
|
3963 | 3963 | [ |
|
3964 | 3964 | (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')), |
|
3965 | 3965 | ], |
|
3966 | 3966 | _(b'[-p TEXT]'), |
|
3967 | 3967 | norepo=True, |
|
3968 | 3968 | ) |
|
3969 | 3969 | def debuguigetpass(ui, prompt=b''): |
|
3970 | 3970 | """show prompt to type password""" |
|
3971 | 3971 | r = ui.getpass(prompt) |
|
3972 | 3972 | if r is None: |
|
3973 | 3973 | r = b"<default response>" |
|
3974 | 3974 | ui.writenoi18n(b'response: %s\n' % r) |
|
3975 | 3975 | |
|
3976 | 3976 | |
|
3977 | 3977 | @command( |
|
3978 | 3978 | b'debuguiprompt', |
|
3979 | 3979 | [ |
|
3980 | 3980 | (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')), |
|
3981 | 3981 | ], |
|
3982 | 3982 | _(b'[-p TEXT]'), |
|
3983 | 3983 | norepo=True, |
|
3984 | 3984 | ) |
|
3985 | 3985 | def debuguiprompt(ui, prompt=b''): |
|
3986 | 3986 | """show plain prompt""" |
|
3987 | 3987 | r = ui.prompt(prompt) |
|
3988 | 3988 | ui.writenoi18n(b'response: %s\n' % r) |
|
3989 | 3989 | |
|
3990 | 3990 | |
|
3991 | 3991 | @command(b'debugupdatecaches', []) |
|
3992 | 3992 | def debugupdatecaches(ui, repo, *pats, **opts): |
|
3993 | 3993 | """warm all known caches in the repository""" |
|
3994 | 3994 | with repo.wlock(), repo.lock(): |
|
3995 | 3995 | repo.updatecaches(full=True) |
|
3996 | 3996 | |
|
3997 | 3997 | |
|
3998 | 3998 | @command( |
|
3999 | 3999 | b'debugupgraderepo', |
|
4000 | 4000 | [ |
|
4001 | 4001 | ( |
|
4002 | 4002 | b'o', |
|
4003 | 4003 | b'optimize', |
|
4004 | 4004 | [], |
|
4005 | 4005 | _(b'extra optimization to perform'), |
|
4006 | 4006 | _(b'NAME'), |
|
4007 | 4007 | ), |
|
4008 | 4008 | (b'', b'run', False, _(b'performs an upgrade')), |
|
4009 | 4009 | (b'', b'backup', True, _(b'keep the old repository content around')), |
|
4010 | 4010 | (b'', b'changelog', None, _(b'select the changelog for upgrade')), |
|
4011 | 4011 | (b'', b'manifest', None, _(b'select the manifest for upgrade')), |
|
4012 | 4012 | (b'', b'filelogs', None, _(b'select all filelogs for upgrade')), |
|
4013 | 4013 | ], |
|
4014 | 4014 | ) |
|
4015 | 4015 | def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts): |
|
4016 | 4016 | """upgrade a repository to use different features |
|
4017 | 4017 | |
|
4018 | 4018 | If no arguments are specified, the repository is evaluated for upgrade |
|
4019 | 4019 | and a list of problems and potential optimizations is printed. |
|
4020 | 4020 | |
|
4021 | 4021 | With ``--run``, a repository upgrade is performed. Behavior of the upgrade |
|
4022 | 4022 | can be influenced via additional arguments. More details will be provided |
|
4023 | 4023 | by the command output when run without ``--run``. |
|
4024 | 4024 | |
|
4025 | 4025 | During the upgrade, the repository will be locked and no writes will be |
|
4026 | 4026 | allowed. |
|
4027 | 4027 | |
|
4028 | 4028 | At the end of the upgrade, the repository may not be readable while new |
|
4029 | 4029 | repository data is swapped in. This window will be as long as it takes to |
|
4030 | 4030 | rename some directories inside the ``.hg`` directory. On most machines, this |
|
4031 | 4031 | should complete almost instantaneously and the chances of a consumer being |
|
4032 | 4032 | unable to access the repository should be low. |
|
4033 | 4033 | |
|
4034 | 4034 | By default, all revlogs will be upgraded. You can restrict this using

4035 | 4035 | flags such as `--manifest`:
|
4036 | 4036 | |
|
4037 | 4037 | * `--manifest`: only optimize the manifest |
|
4038 | 4038 | * `--no-manifest`: optimize all revlog but the manifest |
|
4039 | 4039 | * `--changelog`: optimize the changelog only |
|
4040 | 4040 | * `--no-changelog --no-manifest`: optimize filelogs only |
|
4041 | 4041 | * `--filelogs`: optimize the filelogs only |
|
4042 | 4042 | * `--no-changelog --no-manifest --no-filelogs`: skip all revlog optimizations |
|
4043 | 4043 | """ |
|
4044 | 4044 | return upgrade.upgraderepo( |
|
4045 | 4045 | ui, repo, run=run, optimize=set(optimize), backup=backup, **opts |
|
4046 | 4046 | ) |
|
4047 | 4047 | |
|
4048 | 4048 | |
|
4049 | 4049 | @command( |
|
4050 | 4050 | b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True |
|
4051 | 4051 | ) |
|
4052 | 4052 | def debugwalk(ui, repo, *pats, **opts): |
|
4053 | 4053 | """show how files match on given patterns""" |
|
4054 | 4054 | opts = pycompat.byteskwargs(opts) |
|
4055 | 4055 | m = scmutil.match(repo[None], pats, opts) |
|
4056 | 4056 | if ui.verbose: |
|
4057 | 4057 | ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n') |
|
4058 | 4058 | items = list(repo[None].walk(m)) |
|
4059 | 4059 | if not items: |
|
4060 | 4060 | return |
|
4061 | 4061 | f = lambda fn: fn |
|
4062 | 4062 | if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/': |
|
4063 | 4063 | f = lambda fn: util.normpath(fn) |
|
4064 | 4064 | fmt = b'f %%-%ds %%-%ds %%s' % ( |
|
4065 | 4065 | max([len(abs) for abs in items]), |
|
4066 | 4066 | max([len(repo.pathto(abs)) for abs in items]), |
|
4067 | 4067 | ) |
|
4068 | 4068 | for abs in items: |
|
4069 | 4069 | line = fmt % ( |
|
4070 | 4070 | abs, |
|
4071 | 4071 | f(repo.pathto(abs)), |
|
4072 | 4072 | m.exact(abs) and b'exact' or b'', |
|
4073 | 4073 | ) |
|
4074 | 4074 | ui.write(b"%s\n" % line.rstrip()) |
|
4075 | 4075 | |
|
4076 | 4076 | |
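The column-width computation above can be exercised in isolation; the paths below are hypothetical stand-ins for the matcher results and for repo.pathto() output:

    items = [b'dir/a.txt', b'b.txt']  # repo-relative paths from the walk
    rel = [b'a.txt', b'../b.txt']     # hypothetical cwd-relative paths
    fmt = b'f %%-%ds %%-%ds %%s' % (
        max(len(p) for p in items),
        max(len(p) for p in rel),
    )
    # fmt is b'f %-9s %-8s %s': each column padded to its widest entry
    print((fmt % (items[0], rel[0], b'exact')).decode())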
|
4077 | 4077 | @command(b'debugwhyunstable', [], _(b'REV')) |
|
4078 | 4078 | def debugwhyunstable(ui, repo, rev): |
|
4079 | 4079 | """explain instabilities of a changeset""" |
|
4080 | 4080 | for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)): |
|
4081 | 4081 | dnodes = b'' |
|
4082 | 4082 | if entry.get(b'divergentnodes'): |
|
4083 | 4083 | dnodes = ( |
|
4084 | 4084 | b' '.join( |
|
4085 | 4085 | b'%s (%s)' % (ctx.hex(), ctx.phasestr()) |
|
4086 | 4086 | for ctx in entry[b'divergentnodes'] |
|
4087 | 4087 | ) |
|
4088 | 4088 | + b' ' |
|
4089 | 4089 | ) |
|
4090 | 4090 | ui.write( |
|
4091 | 4091 | b'%s: %s%s %s\n' |
|
4092 | 4092 | % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node']) |
|
4093 | 4093 | ) |
|
4094 | 4094 | |
|
4095 | 4095 | |
|
4096 | 4096 | @command( |
|
4097 | 4097 | b'debugwireargs', |
|
4098 | 4098 | [ |
|
4099 | 4099 | (b'', b'three', b'', b'three'), |
|
4100 | 4100 | (b'', b'four', b'', b'four'), |
|
4101 | 4101 | (b'', b'five', b'', b'five'), |
|
4102 | 4102 | ] |
|
4103 | 4103 | + cmdutil.remoteopts, |
|
4104 | 4104 | _(b'REPO [OPTIONS]... [ONE [TWO]]'), |
|
4105 | 4105 | norepo=True, |
|
4106 | 4106 | ) |
|
4107 | 4107 | def debugwireargs(ui, repopath, *vals, **opts): |
|
4108 | 4108 | opts = pycompat.byteskwargs(opts) |
|
4109 | 4109 | repo = hg.peer(ui, opts, repopath) |
|
4110 | 4110 | try: |
|
4111 | 4111 | for opt in cmdutil.remoteopts: |
|
4112 | 4112 | del opts[opt[1]] |
|
4113 | 4113 | args = {} |
|
4114 | 4114 | for k, v in pycompat.iteritems(opts): |
|
4115 | 4115 | if v: |
|
4116 | 4116 | args[k] = v |
|
4117 | 4117 | args = pycompat.strkwargs(args) |
|
4118 | 4118 | # run twice to check that we don't mess up the stream for the next command |
|
4119 | 4119 | res1 = repo.debugwireargs(*vals, **args) |
|
4120 | 4120 | res2 = repo.debugwireargs(*vals, **args) |
|
4121 | 4121 | ui.write(b"%s\n" % res1) |
|
4122 | 4122 | if res1 != res2: |
|
4123 | 4123 | ui.warn(b"%s\n" % res2) |
|
4124 | 4124 | finally: |
|
4125 | 4125 | repo.close() |
|
4126 | 4126 | |
|
4127 | 4127 | |
|
4128 | 4128 | def _parsewirelangblocks(fh): |
|
4129 | 4129 | activeaction = None |
|
4130 | 4130 | blocklines = [] |
|
4131 | 4131 | lastindent = 0 |
|
4132 | 4132 | |
|
4133 | 4133 | for line in fh: |
|
4134 | 4134 | line = line.rstrip() |
|
4135 | 4135 | if not line: |
|
4136 | 4136 | continue |
|
4137 | 4137 | |
|
4138 | 4138 | if line.startswith(b'#'): |
|
4139 | 4139 | continue |
|
4140 | 4140 | |
|
4141 | 4141 | if not line.startswith(b' '): |
|
4142 | 4142 | # New block. Flush previous one. |
|
4143 | 4143 | if activeaction: |
|
4144 | 4144 | yield activeaction, blocklines |
|
4145 | 4145 | |
|
4146 | 4146 | activeaction = line |
|
4147 | 4147 | blocklines = [] |
|
4148 | 4148 | lastindent = 0 |
|
4149 | 4149 | continue |
|
4150 | 4150 | |
|
4151 | 4151 | # Else we start with an indent. |
|
4152 | 4152 | |
|
4153 | 4153 | if not activeaction: |
|
4154 | 4154 | raise error.Abort(_(b'indented line outside of block')) |
|
4155 | 4155 | |
|
4156 | 4156 | indent = len(line) - len(line.lstrip()) |
|
4157 | 4157 | |
|
4158 | 4158 | # If this line is indented more than the last line, concatenate it. |
|
4159 | 4159 | if indent > lastindent and blocklines: |
|
4160 | 4160 | blocklines[-1] += line.lstrip() |
|
4161 | 4161 | else: |
|
4162 | 4162 | blocklines.append(line) |
|
4163 | 4163 | lastindent = indent |
|
4164 | 4164 | |
|
4165 | 4165 | # Flush last block. |
|
4166 | 4166 | if activeaction: |
|
4167 | 4167 | yield activeaction, blocklines |
|
4168 | 4168 | |
|
4169 | 4169 | |
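A minimal sketch of what _parsewirelangblocks() yields, assuming it is fed a small in-memory script rather than ui.fin (the script content is hypothetical):

    import io

    script = io.BytesIO(
        b'# comment lines and blank lines are skipped\n'
        b'command listkeys\n'
        b'    namespace bookmarks\n'
    )
    blocks = list(_parsewirelangblocks(script))
    # one action line plus its indented argument lines (indentation kept)
    assert blocks == [(b'command listkeys', [b'    namespace bookmarks'])]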
|
4170 | 4170 | @command( |
|
4171 | 4171 | b'debugwireproto', |
|
4172 | 4172 | [ |
|
4173 | 4173 | (b'', b'localssh', False, _(b'start an SSH server for this repo')), |
|
4174 | 4174 | (b'', b'peer', b'', _(b'construct a specific version of the peer')), |
|
4175 | 4175 | ( |
|
4176 | 4176 | b'', |
|
4177 | 4177 | b'noreadstderr', |
|
4178 | 4178 | False, |
|
4179 | 4179 | _(b'do not read from stderr of the remote'), |
|
4180 | 4180 | ), |
|
4181 | 4181 | ( |
|
4182 | 4182 | b'', |
|
4183 | 4183 | b'nologhandshake', |
|
4184 | 4184 | False, |
|
4185 | 4185 | _(b'do not log I/O related to the peer handshake'), |
|
4186 | 4186 | ), |
|
4187 | 4187 | ] |
|
4188 | 4188 | + cmdutil.remoteopts, |
|
4189 | 4189 | _(b'[PATH]'), |
|
4190 | 4190 | optionalrepo=True, |
|
4191 | 4191 | ) |
|
4192 | 4192 | def debugwireproto(ui, repo, path=None, **opts): |
|
4193 | 4193 | """send wire protocol commands to a server |
|
4194 | 4194 | |
|
4195 | 4195 | This command can be used to issue wire protocol commands to remote |
|
4196 | 4196 | peers and to debug the raw data being exchanged. |
|
4197 | 4197 | |
|
4198 | 4198 | ``--localssh`` will start an SSH server against the current repository |
|
4199 | 4199 | and connect to that. By default, the connection will perform a handshake |
|
4200 | 4200 | and establish an appropriate peer instance. |
|
4201 | 4201 | |
|
4202 | 4202 | ``--peer`` can be used to bypass the handshake protocol and construct a |
|
4203 | 4203 | peer instance using the specified class type. Valid values are ``raw``, |
|
4204 | 4204 | ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending |
|
4205 | 4205 | raw data payloads and don't support higher-level command actions. |
|
4206 | 4206 | |
|
4207 | 4207 | ``--noreadstderr`` can be used to disable automatic reading from stderr |
|
4208 | 4208 | of the peer (for SSH connections only). Disabling automatic reading of |
|
4209 | 4209 | stderr is useful for making output more deterministic. |
|
4210 | 4210 | |
|
4211 | 4211 | Commands are issued via a mini language which is specified via stdin. |
|
4212 | 4212 | The language consists of individual actions to perform. An action is |
|
4213 | 4213 | defined by a block. A block is defined as a line with no leading |
|
4214 | 4214 | space followed by 0 or more lines with leading space. Blocks are |
|
4215 | 4215 | effectively a high-level command with additional metadata. |
|
4216 | 4216 | |
|
4217 | 4217 | Lines beginning with ``#`` are ignored. |
|
4218 | 4218 | |
|
4219 | 4219 | The following sections denote available actions. |
|
4220 | 4220 | |
|
4221 | 4221 | raw |
|
4222 | 4222 | --- |
|
4223 | 4223 | |
|
4224 | 4224 | Send raw data to the server. |
|
4225 | 4225 | |
|
4226 | 4226 | The block payload contains the raw data to send as one atomic send |
|
4227 | 4227 | operation. The data may not actually be delivered in a single system |
|
4228 | 4228 | call: it depends on the abilities of the transport being used. |
|
4229 | 4229 | |
|
4230 | 4230 | Each line in the block is de-indented and concatenated. Then, that |
|
4231 | 4231 | value is evaluated as a Python b'' literal. This allows the use of |
|
4232 | 4232 | backslash escaping, etc. |
|
4233 | 4233 | |
|
4234 | 4234 | raw+ |
|
4235 | 4235 | ---- |
|
4236 | 4236 | |
|
4237 | 4237 | Behaves like ``raw`` except flushes output afterwards. |
|
4238 | 4238 | |
|
4239 | 4239 | command <X> |
|
4240 | 4240 | ----------- |
|
4241 | 4241 | |
|
4242 | 4242 | Send a request to run a named command, whose name follows the ``command`` |
|
4243 | 4243 | string. |
|
4244 | 4244 | |
|
4245 | 4245 | Arguments to the command are defined as lines in this block. The format of |
|
4246 | 4246 | each line is ``<key> <value>``. e.g.:: |
|
4247 | 4247 | |
|
4248 | 4248 | command listkeys |
|
4249 | 4249 | namespace bookmarks |
|
4250 | 4250 | |
|
4251 | 4251 | If the value begins with ``eval:``, it will be interpreted as a Python |
|
4252 | 4252 | literal expression. Otherwise values are interpreted as Python b'' literals. |
|
4253 | 4253 | This allows sending complex types and encoding special byte sequences via |
|
4254 | 4254 | backslash escaping. |
|
4255 | 4255 | |
|
4256 | 4256 | The following arguments have special meaning: |
|
4257 | 4257 | |
|
4258 | 4258 | ``PUSHFILE`` |
|
4259 | 4259 | When defined, the *push* mechanism of the peer will be used instead |
|
4260 | 4260 | of the static request-response mechanism and the content of the |
|
4261 | 4261 | file specified in the value of this argument will be sent as the |
|
4262 | 4262 | command payload. |
|
4263 | 4263 | |
|
4264 | 4264 | This can be used to submit a local bundle file to the remote. |
|
4265 | 4265 | |
|
4266 | 4266 | batchbegin |
|
4267 | 4267 | ---------- |
|
4268 | 4268 | |
|
4269 | 4269 | Instruct the peer to begin a batched send. |
|
4270 | 4270 | |
|
4271 | 4271 | All ``command`` blocks are queued for execution until the next |
|
4272 | 4272 | ``batchsubmit`` block. |
|
4273 | 4273 | |
|
4274 | 4274 | batchsubmit |
|
4275 | 4275 | ----------- |
|
4276 | 4276 | |
|
4277 | 4277 | Submit previously queued ``command`` blocks as a batch request. |
|
4278 | 4278 | |
|
4279 | 4279 | This action MUST be paired with a ``batchbegin`` action. |
|
4280 | 4280 | |
|
4281 | 4281 | httprequest <method> <path> |
|
4282 | 4282 | --------------------------- |
|
4283 | 4283 | |
|
4284 | 4284 | (HTTP peer only) |
|
4285 | 4285 | |
|
4286 | 4286 | Send an HTTP request to the peer. |
|
4287 | 4287 | |
|
4288 | 4288 | The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``. |
|
4289 | 4289 | |
|
4290 | 4290 | Arguments of the form ``<key>: <value>`` are interpreted as HTTP request |
|
4291 | 4291 | headers to add to the request. e.g. ``Accept: foo``. |
|
4292 | 4292 | |
|
4293 | 4293 | The following arguments are special: |
|
4294 | 4294 | |
|
4295 | 4295 | ``BODYFILE`` |
|
4296 | 4296 | The content of the file defined as the value to this argument will be |
|
4297 | 4297 | transferred verbatim as the HTTP request body. |
|
4298 | 4298 | |
|
4299 | 4299 | ``frame <type> <flags> <payload>`` |
|
4300 | 4300 | Send a unified protocol frame as part of the request body. |
|
4301 | 4301 | |
|
4302 | 4302 | All frames will be collected and sent as the body to the HTTP |
|
4303 | 4303 | request. |
|
4304 | 4304 | |
|
4305 | 4305 | close |
|
4306 | 4306 | ----- |
|
4307 | 4307 | |
|
4308 | 4308 | Close the connection to the server. |
|
4309 | 4309 | |
|
4310 | 4310 | flush |
|
4311 | 4311 | ----- |
|
4312 | 4312 | |
|
4313 | 4313 | Flush data written to the server. |
|
4314 | 4314 | |
|
4315 | 4315 | readavailable |
|
4316 | 4316 | ------------- |
|
4317 | 4317 | |
|
4318 | 4318 | Close the write end of the connection and read all available data from |
|
4319 | 4319 | the server. |
|
4320 | 4320 | |
|
4321 | 4321 | If the connection to the server encompasses multiple pipes, we poll both |
|
4322 | 4322 | pipes and read available data. |
|
4323 | 4323 | |
|
4324 | 4324 | readline |
|
4325 | 4325 | -------- |
|
4326 | 4326 | |
|
4327 | 4327 | Read a line of output from the server. If there are multiple output |
|
4328 | 4328 | pipes, reads only the main pipe. |
|
4329 | 4329 | |
|
4330 | 4330 | ereadline |
|
4331 | 4331 | --------- |
|
4332 | 4332 | |
|
4333 | 4333 | Like ``readline``, but read from the stderr pipe, if available. |
|
4334 | 4334 | |
|
4335 | 4335 | read <X> |
|
4336 | 4336 | -------- |
|
4337 | 4337 | |
|
4338 | 4338 | ``read()`` N bytes from the server's main output pipe. |
|
4339 | 4339 | |
|
4340 | 4340 | eread <X> |
|
4341 | 4341 | --------- |
|
4342 | 4342 | |
|
4343 | 4343 | ``read()`` N bytes from the server's stderr pipe, if available. |
|
4344 | 4344 | |
|
4345 | 4345 | Specifying Unified Frame-Based Protocol Frames |
|
4346 | 4346 | ---------------------------------------------- |
|
4347 | 4347 | |
|
4348 | 4348 | It is possible to emit *Unified Frame-Based Protocol* frames by using

4349 | 4349 | special syntax.
|
4350 | 4350 | |
|
4351 | 4351 | A frame is composed of a type, flags, and payload. These can be parsed
|
4352 | 4352 | from a string of the form::
|
4353 | 4353 | |
|
4354 | 4354 | <request-id> <stream-id> <stream-flags> <type> <flags> <payload> |
|
4355 | 4355 | |
|
4356 | 4356 | ``request-id`` and ``stream-id`` are integers defining the request and |
|
4357 | 4357 | stream identifiers. |
|
4358 | 4358 | |
|
4359 | 4359 | ``type`` can be an integer value for the frame type or the string name |
|
4360 | 4360 | of the type. The strings are defined in ``wireprotoframing.py``. e.g. |
|
4361 | 4361 | ``command-name``. |
|
4362 | 4362 | |
|
4363 | 4363 | ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag |
|
4364 | 4364 | components. Each component (and there can be just one) can be an integer |
|
4365 | 4365 | or a flag name for stream flags or frame flags, respectively. Values are |
|
4366 | 4366 | resolved to integers and then bitwise OR'd together. |
|
4367 | 4367 | |
|
4368 | 4368 | ``payload`` represents the raw frame payload. If it begins with |
|
4369 | 4369 | ``cbor:``, the following string is evaluated as Python code and the |
|
4370 | 4370 | resulting object is fed into a CBOR encoder. Otherwise it is interpreted |
|
4371 | 4371 | as a Python byte string literal. |
|
4372 | 4372 | """ |
|
4373 | 4373 | opts = pycompat.byteskwargs(opts) |
|
4374 | 4374 | |
|
4375 | 4375 | if opts[b'localssh'] and not repo: |
|
4376 | 4376 | raise error.Abort(_(b'--localssh requires a repository')) |
|
4377 | 4377 | |
|
4378 | 4378 | if opts[b'peer'] and opts[b'peer'] not in ( |
|
4379 | 4379 | b'raw', |
|
4380 | 4380 | b'http2', |
|
4381 | 4381 | b'ssh1', |
|
4382 | 4382 | b'ssh2', |
|
4383 | 4383 | ): |
|
4384 | 4384 | raise error.Abort( |
|
4385 | 4385 | _(b'invalid value for --peer'), |
|
4386 | 4386 | hint=_(b'valid values are "raw", "http2", "ssh1", and "ssh2"'),
|
4387 | 4387 | ) |
|
4388 | 4388 | |
|
4389 | 4389 | if path and opts[b'localssh']: |
|
4390 | 4390 | raise error.Abort(_(b'cannot specify --localssh with an explicit path')) |
|
4391 | 4391 | |
|
4392 | 4392 | if ui.interactive(): |
|
4393 | 4393 | ui.write(_(b'(waiting for commands on stdin)\n')) |
|
4394 | 4394 | |
|
4395 | 4395 | blocks = list(_parsewirelangblocks(ui.fin)) |
|
4396 | 4396 | |
|
4397 | 4397 | proc = None |
|
4398 | 4398 | stdin = None |
|
4399 | 4399 | stdout = None |
|
4400 | 4400 | stderr = None |
|
4401 | 4401 | opener = None |
|
4402 | 4402 | |
|
4403 | 4403 | if opts[b'localssh']: |
|
4404 | 4404 | # We start the SSH server in its own process so there is process |
|
4405 | 4405 | # separation. This prevents a whole class of potential bugs around |
|
4406 | 4406 | # shared state from interfering with server operation. |
|
4407 | 4407 | args = procutil.hgcmd() + [ |
|
4408 | 4408 | b'-R', |
|
4409 | 4409 | repo.root, |
|
4410 | 4410 | b'debugserve', |
|
4411 | 4411 | b'--sshstdio', |
|
4412 | 4412 | ] |
|
4413 | 4413 | proc = subprocess.Popen( |
|
4414 | 4414 | pycompat.rapply(procutil.tonativestr, args), |
|
4415 | 4415 | stdin=subprocess.PIPE, |
|
4416 | 4416 | stdout=subprocess.PIPE, |
|
4417 | 4417 | stderr=subprocess.PIPE, |
|
4418 | 4418 | bufsize=0, |
|
4419 | 4419 | ) |
|
4420 | 4420 | |
|
4421 | 4421 | stdin = proc.stdin |
|
4422 | 4422 | stdout = proc.stdout |
|
4423 | 4423 | stderr = proc.stderr |
|
4424 | 4424 | |
|
4425 | 4425 | # We turn the pipes into observers so we can log I/O. |
|
4426 | 4426 | if ui.verbose or opts[b'peer'] == b'raw': |
|
4427 | 4427 | stdin = util.makeloggingfileobject( |
|
4428 | 4428 | ui, proc.stdin, b'i', logdata=True |
|
4429 | 4429 | ) |
|
4430 | 4430 | stdout = util.makeloggingfileobject( |
|
4431 | 4431 | ui, proc.stdout, b'o', logdata=True |
|
4432 | 4432 | ) |
|
4433 | 4433 | stderr = util.makeloggingfileobject( |
|
4434 | 4434 | ui, proc.stderr, b'e', logdata=True |
|
4435 | 4435 | ) |
|
4436 | 4436 | |
|
4437 | 4437 | # --localssh also implies the peer connection settings. |
|
4438 | 4438 | |
|
4439 | 4439 | url = b'ssh://localserver' |
|
4440 | 4440 | autoreadstderr = not opts[b'noreadstderr'] |
|
4441 | 4441 | |
|
4442 | 4442 | if opts[b'peer'] == b'ssh1': |
|
4443 | 4443 | ui.write(_(b'creating ssh peer for wire protocol version 1\n')) |
|
4444 | 4444 | peer = sshpeer.sshv1peer( |
|
4445 | 4445 | ui, |
|
4446 | 4446 | url, |
|
4447 | 4447 | proc, |
|
4448 | 4448 | stdin, |
|
4449 | 4449 | stdout, |
|
4450 | 4450 | stderr, |
|
4451 | 4451 | None, |
|
4452 | 4452 | autoreadstderr=autoreadstderr, |
|
4453 | 4453 | ) |
|
4454 | 4454 | elif opts[b'peer'] == b'ssh2': |
|
4455 | 4455 | ui.write(_(b'creating ssh peer for wire protocol version 2\n')) |
|
4456 | 4456 | peer = sshpeer.sshv2peer( |
|
4457 | 4457 | ui, |
|
4458 | 4458 | url, |
|
4459 | 4459 | proc, |
|
4460 | 4460 | stdin, |
|
4461 | 4461 | stdout, |
|
4462 | 4462 | stderr, |
|
4463 | 4463 | None, |
|
4464 | 4464 | autoreadstderr=autoreadstderr, |
|
4465 | 4465 | ) |
|
4466 | 4466 | elif opts[b'peer'] == b'raw': |
|
4467 | 4467 | ui.write(_(b'using raw connection to peer\n')) |
|
4468 | 4468 | peer = None |
|
4469 | 4469 | else: |
|
4470 | 4470 | ui.write(_(b'creating ssh peer from handshake results\n')) |
|
4471 | 4471 | peer = sshpeer.makepeer( |
|
4472 | 4472 | ui, |
|
4473 | 4473 | url, |
|
4474 | 4474 | proc, |
|
4475 | 4475 | stdin, |
|
4476 | 4476 | stdout, |
|
4477 | 4477 | stderr, |
|
4478 | 4478 | autoreadstderr=autoreadstderr, |
|
4479 | 4479 | ) |
|
4480 | 4480 | |
|
4481 | 4481 | elif path: |
|
4482 | 4482 | # We bypass hg.peer() so we can proxy the sockets. |
|
4483 | 4483 | # TODO consider not doing this because we skip |
|
4484 | 4484 | # ``hg.wirepeersetupfuncs`` and potentially other useful functionality. |
|
4485 | 4485 | u = util.url(path) |
|
4486 | 4486 | if u.scheme != b'http': |
|
4487 | 4487 | raise error.Abort(_(b'only http:// paths are currently supported')) |
|
4488 | 4488 | |
|
4489 | 4489 | url, authinfo = u.authinfo() |
|
4490 | 4490 | openerargs = { |
|
4491 | 4491 | 'useragent': b'Mercurial debugwireproto', |
|
4492 | 4492 | } |
|
4493 | 4493 | |
|
4494 | 4494 | # Turn pipes/sockets into observers so we can log I/O. |
|
4495 | 4495 | if ui.verbose: |
|
4496 | 4496 | openerargs.update( |
|
4497 | 4497 | { |
|
4498 | 4498 | 'loggingfh': ui, |
|
4499 | 4499 | 'loggingname': b's', |
|
4500 | 4500 | 'loggingopts': { |
|
4501 | 4501 | 'logdata': True, |
|
4502 | 4502 | 'logdataapis': False, |
|
4503 | 4503 | }, |
|
4504 | 4504 | } |
|
4505 | 4505 | ) |
|
4506 | 4506 | |
|
4507 | 4507 | if ui.debugflag: |
|
4508 | 4508 | openerargs['loggingopts']['logdataapis'] = True |
|
4509 | 4509 | |
|
4510 | 4510 | # Don't send default headers when in raw mode. This allows us to |
|
4511 | 4511 | # bypass most of the behavior of our URL handling code so we can |
|
4512 | 4512 | # have near complete control over what's sent on the wire. |
|
4513 | 4513 | if opts[b'peer'] == b'raw': |
|
4514 | 4514 | openerargs['sendaccept'] = False |
|
4515 | 4515 | |
|
4516 | 4516 | opener = urlmod.opener(ui, authinfo, **openerargs) |
|
4517 | 4517 | |
|
4518 | 4518 | if opts[b'peer'] == b'http2': |
|
4519 | 4519 | ui.write(_(b'creating http peer for wire protocol version 2\n')) |
|
4520 | 4520 | # We go through makepeer() because we need an API descriptor for |
|
4521 | 4521 | # the peer instance to be useful. |
|
4522 | 4522 | with ui.configoverride( |
|
4523 | 4523 | {(b'experimental', b'httppeer.advertise-v2'): True} |
|
4524 | 4524 | ): |
|
4525 | 4525 | if opts[b'nologhandshake']: |
|
4526 | 4526 | ui.pushbuffer() |
|
4527 | 4527 | |
|
4528 | 4528 | peer = httppeer.makepeer(ui, path, opener=opener) |
|
4529 | 4529 | |
|
4530 | 4530 | if opts[b'nologhandshake']: |
|
4531 | 4531 | ui.popbuffer() |
|
4532 | 4532 | |
|
4533 | 4533 | if not isinstance(peer, httppeer.httpv2peer): |
|
4534 | 4534 | raise error.Abort( |
|
4535 | 4535 | _( |
|
4536 | 4536 | b'could not instantiate HTTP peer for ' |
|
4537 | 4537 | b'wire protocol version 2' |
|
4538 | 4538 | ), |
|
4539 | 4539 | hint=_( |
|
4540 | 4540 | b'the server may not have the feature ' |
|
4541 | 4541 | b'enabled or is not allowing this ' |
|
4542 | 4542 | b'client version' |
|
4543 | 4543 | ), |
|
4544 | 4544 | ) |
|
4545 | 4545 | |
|
4546 | 4546 | elif opts[b'peer'] == b'raw': |
|
4547 | 4547 | ui.write(_(b'using raw connection to peer\n')) |
|
4548 | 4548 | peer = None |
|
4549 | 4549 | elif opts[b'peer']: |
|
4550 | 4550 | raise error.Abort( |
|
4551 | 4551 | _(b'--peer %s not supported with HTTP peers') % opts[b'peer'] |
|
4552 | 4552 | ) |
|
4553 | 4553 | else: |
|
4554 | 4554 | peer = httppeer.makepeer(ui, path, opener=opener) |
|
4555 | 4555 | |
|
4556 | 4556 | # We /could/ populate stdin/stdout with sock.makefile()... |
|
4557 | 4557 | else: |
|
4558 | 4558 | raise error.Abort(_(b'unsupported connection configuration')) |
|
4559 | 4559 | |
|
4560 | 4560 | batchedcommands = None |
|
4561 | 4561 | |
|
4562 | 4562 | # Now perform actions based on the parsed wire language instructions. |
|
4563 | 4563 | for action, lines in blocks: |
|
4564 | 4564 | if action in (b'raw', b'raw+'): |
|
4565 | 4565 | if not stdin: |
|
4566 | 4566 | raise error.Abort(_(b'cannot call raw/raw+ on this peer')) |
|
4567 | 4567 | |
|
4568 | 4568 | # Concatenate the data together. |
|
4569 | 4569 | data = b''.join(l.lstrip() for l in lines) |
|
4570 | 4570 | data = stringutil.unescapestr(data) |
|
4571 | 4571 | stdin.write(data) |
|
4572 | 4572 | |
|
4573 | 4573 | if action == b'raw+': |
|
4574 | 4574 | stdin.flush() |
|
4575 | 4575 | elif action == b'flush': |
|
4576 | 4576 | if not stdin: |
|
4577 | 4577 | raise error.Abort(_(b'cannot call flush on this peer')) |
|
4578 | 4578 | stdin.flush() |
|
4579 | 4579 | elif action.startswith(b'command'): |
|
4580 | 4580 | if not peer: |
|
4581 | 4581 | raise error.Abort( |
|
4582 | 4582 | _( |
|
4583 | 4583 | b'cannot send commands unless peer instance ' |
|
4584 | 4584 | b'is available' |
|
4585 | 4585 | ) |
|
4586 | 4586 | ) |
|
4587 | 4587 | |
|
4588 | 4588 | command = action.split(b' ', 1)[1] |
|
4589 | 4589 | |
|
4590 | 4590 | args = {} |
|
4591 | 4591 | for line in lines: |
|
4592 | 4592 | # We need to allow empty values. |
|
4593 | 4593 | fields = line.lstrip().split(b' ', 1) |
|
4594 | 4594 | if len(fields) == 1: |
|
4595 | 4595 | key = fields[0] |
|
4596 | 4596 | value = b'' |
|
4597 | 4597 | else: |
|
4598 | 4598 | key, value = fields |
|
4599 | 4599 | |
|
4600 | 4600 | if value.startswith(b'eval:'): |
|
4601 | 4601 | value = stringutil.evalpythonliteral(value[5:]) |
|
4602 | 4602 | else: |
|
4603 | 4603 | value = stringutil.unescapestr(value) |
|
4604 | 4604 | |
|
4605 | 4605 | args[key] = value |
|
4606 | 4606 | |
|
4607 | 4607 | if batchedcommands is not None: |
|
4608 | 4608 | batchedcommands.append((command, args)) |
|
4609 | 4609 | continue |
|
4610 | 4610 | |
|
4611 | 4611 | ui.status(_(b'sending %s command\n') % command) |
|
4612 | 4612 | |
|
4613 | 4613 | if b'PUSHFILE' in args: |
|
4614 | 4614 | with open(args[b'PUSHFILE'], 'rb') as fh: |
|
4615 | 4615 | del args[b'PUSHFILE'] |
|
4616 | 4616 | res, output = peer._callpush( |
|
4617 | 4617 | command, fh, **pycompat.strkwargs(args) |
|
4618 | 4618 | ) |
|
4619 | 4619 | ui.status(_(b'result: %s\n') % stringutil.escapestr(res)) |
|
4620 | 4620 | ui.status( |
|
4621 | 4621 | _(b'remote output: %s\n') % stringutil.escapestr(output) |
|
4622 | 4622 | ) |
|
4623 | 4623 | else: |
|
4624 | 4624 | with peer.commandexecutor() as e: |
|
4625 | 4625 | res = e.callcommand(command, args).result() |
|
4626 | 4626 | |
|
4627 | 4627 | if isinstance(res, wireprotov2peer.commandresponse): |
|
4628 | 4628 | val = res.objects() |
|
4629 | 4629 | ui.status( |
|
4630 | 4630 | _(b'response: %s\n') |
|
4631 | 4631 | % stringutil.pprint(val, bprefix=True, indent=2) |
|
4632 | 4632 | ) |
|
4633 | 4633 | else: |
|
4634 | 4634 | ui.status( |
|
4635 | 4635 | _(b'response: %s\n') |
|
4636 | 4636 | % stringutil.pprint(res, bprefix=True, indent=2) |
|
4637 | 4637 | ) |
|
4638 | 4638 | |
|
4639 | 4639 | elif action == b'batchbegin': |
|
4640 | 4640 | if batchedcommands is not None: |
|
4641 | 4641 | raise error.Abort(_(b'nested batchbegin not allowed')) |
|
4642 | 4642 | |
|
4643 | 4643 | batchedcommands = [] |
|
4644 | 4644 | elif action == b'batchsubmit': |
|
4645 | 4645 | # There is a batching API we could go through. But it would be |
|
4646 | 4646 | # difficult to normalize requests into function calls. It is easier |
|
4647 | 4647 | # to bypass this layer and normalize to commands + args. |
|
4648 | 4648 | ui.status( |
|
4649 | 4649 | _(b'sending batch with %d sub-commands\n') |
|
4650 | 4650 | % len(batchedcommands) |
|
4651 | 4651 | ) |
|
4652 | 4652 | assert peer is not None |
|
4653 | 4653 | for i, chunk in enumerate(peer._submitbatch(batchedcommands)): |
|
4654 | 4654 | ui.status( |
|
4655 | 4655 | _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk)) |
|
4656 | 4656 | ) |
|
4657 | 4657 | |
|
4658 | 4658 | batchedcommands = None |
|
4659 | 4659 | |
|
4660 | 4660 | elif action.startswith(b'httprequest '): |
|
4661 | 4661 | if not opener: |
|
4662 | 4662 | raise error.Abort( |
|
4663 | 4663 | _(b'cannot use httprequest without an HTTP peer') |
|
4664 | 4664 | ) |
|
4665 | 4665 | |
|
4666 | 4666 | request = action.split(b' ', 2) |
|
4667 | 4667 | if len(request) != 3: |
|
4668 | 4668 | raise error.Abort( |
|
4669 | 4669 | _( |
|
4670 | 4670 | b'invalid httprequest: expected format is ' |
|
4671 | 4671 | b'"httprequest <method> <path>' |
|
4672 | 4672 | ) |
|
4673 | 4673 | ) |
|
4674 | 4674 | |
|
4675 | 4675 | method, httppath = request[1:] |
|
4676 | 4676 | headers = {} |
|
4677 | 4677 | body = None |
|
4678 | 4678 | frames = [] |
|
4679 | 4679 | for line in lines: |
|
4680 | 4680 | line = line.lstrip() |
|
4681 | 4681 | m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line) |
|
4682 | 4682 | if m: |
|
4683 | 4683 | # Headers need to use native strings. |
|
4684 | 4684 | key = pycompat.strurl(m.group(1)) |
|
4685 | 4685 | value = pycompat.strurl(m.group(2)) |
|
4686 | 4686 | headers[key] = value |
|
4687 | 4687 | continue |
|
4688 | 4688 | |
|
4689 | 4689 | if line.startswith(b'BODYFILE '): |
|
4690 | 4690 | with open(line.split(b' ', 1)[1], 'rb') as fh:
|
4691 | 4691 | body = fh.read() |
|
4692 | 4692 | elif line.startswith(b'frame '): |
|
4693 | 4693 | frame = wireprotoframing.makeframefromhumanstring( |
|
4694 | 4694 | line[len(b'frame ') :] |
|
4695 | 4695 | ) |
|
4696 | 4696 | |
|
4697 | 4697 | frames.append(frame) |
|
4698 | 4698 | else: |
|
4699 | 4699 | raise error.Abort( |
|
4700 | 4700 | _(b'unknown argument to httprequest: %s') % line |
|
4701 | 4701 | ) |
|
4702 | 4702 | |
|
4703 | 4703 | url = path + httppath |
|
4704 | 4704 | |
|
4705 | 4705 | if frames: |
|
4706 | 4706 | body = b''.join(bytes(f) for f in frames) |
|
4707 | 4707 | |
|
4708 | 4708 | req = urlmod.urlreq.request(pycompat.strurl(url), body, headers) |
|
4709 | 4709 | |
|
4710 | 4710 | # urllib.Request insists on using has_data() as a proxy for |
|
4711 | 4711 | # determining the request method. Override that to use our |
|
4712 | 4712 | # explicitly requested method. |
|
4713 | 4713 | req.get_method = lambda: pycompat.sysstr(method) |
|
4714 | 4714 | |
|
4715 | 4715 | try: |
|
4716 | 4716 | res = opener.open(req) |
|
4717 | 4717 | body = res.read() |
|
4718 | 4718 | except util.urlerr.urlerror as e: |
|
4719 | 4719 | # read() method must be called, but only exists in Python 2 |
|
4720 | 4720 | getattr(e, 'read', lambda: None)() |
|
4721 | 4721 | continue |
|
4722 | 4722 | |
|
4723 | 4723 | ct = res.headers.get('Content-Type') |
|
4724 | 4724 | if ct == 'application/mercurial-cbor': |
|
4725 | 4725 | ui.write( |
|
4726 | 4726 | _(b'cbor> %s\n') |
|
4727 | 4727 | % stringutil.pprint( |
|
4728 | 4728 | cborutil.decodeall(body), bprefix=True, indent=2 |
|
4729 | 4729 | ) |
|
4730 | 4730 | ) |
|
4731 | 4731 | |
|
4732 | 4732 | elif action == b'close': |
|
4733 | 4733 | assert peer is not None |
|
4734 | 4734 | peer.close() |
|
4735 | 4735 | elif action == b'readavailable': |
|
4736 | 4736 | if not stdout or not stderr: |
|
4737 | 4737 | raise error.Abort( |
|
4738 | 4738 | _(b'readavailable not available on this peer') |
|
4739 | 4739 | ) |
|
4740 | 4740 | |
|
4741 | 4741 | stdin.close() |
|
4742 | 4742 | stdout.read() |
|
4743 | 4743 | stderr.read() |
|
4744 | 4744 | |
|
4745 | 4745 | elif action == b'readline': |
|
4746 | 4746 | if not stdout: |
|
4747 | 4747 | raise error.Abort(_(b'readline not available on this peer')) |
|
4748 | 4748 | stdout.readline() |
|
4749 | 4749 | elif action == b'ereadline': |
|
4750 | 4750 | if not stderr: |
|
4751 | 4751 | raise error.Abort(_(b'ereadline not available on this peer')) |
|
4752 | 4752 | stderr.readline() |
|
4753 | 4753 | elif action.startswith(b'read '): |
|
4754 | 4754 | count = int(action.split(b' ', 1)[1]) |
|
4755 | 4755 | if not stdout: |
|
4756 | 4756 | raise error.Abort(_(b'read not available on this peer')) |
|
4757 | 4757 | stdout.read(count) |
|
4758 | 4758 | elif action.startswith(b'eread '): |
|
4759 | 4759 | count = int(action.split(b' ', 1)[1]) |
|
4760 | 4760 | if not stderr: |
|
4761 | 4761 | raise error.Abort(_(b'eread not available on this peer')) |
|
4762 | 4762 | stderr.read(count) |
|
4763 | 4763 | else: |
|
4764 | 4764 | raise error.Abort(_(b'unknown action: %s') % action) |
|
4765 | 4765 | |
|
4766 | 4766 | if batchedcommands is not None: |
|
4767 | 4767 | raise error.Abort(_(b'unclosed "batchbegin" request')) |
|
4768 | 4768 | |
|
4769 | 4769 | if peer: |
|
4770 | 4770 | peer.close() |
|
4771 | 4771 | |
|
4772 | 4772 | if proc: |
|
4773 | 4773 | proc.kill() |
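Tying the docstring's frame syntax together, a complete wire-language script for this command might look like the following. The endpoint path, header value, and CBOR payload are illustrative; the frame field order follows the <request-id> <stream-id> <stream-flags> <type> <flags> <payload> form described above, with type and flag names assumed to resolve via wireprotoframing.py:

    httprequest POST /api/hypothetical-endpoint
        user-agent: test
        frame 1 1 stream-begin command-request new cbor:{b'name': b'heads'}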
@@ -1,2756 +1,2775 b'' | |||
|
1 | 1 | # exchange.py - utility to exchange data between repos. |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import collections |
|
11 | 11 | import weakref |
|
12 | 12 | |
|
13 | 13 | from .i18n import _ |
|
14 | 14 | from .node import ( |
|
15 | 15 | hex, |
|
16 | 16 | nullid, |
|
17 | 17 | nullrev, |
|
18 | 18 | ) |
|
19 | 19 | from . import ( |
|
20 | 20 | bookmarks as bookmod, |
|
21 | 21 | bundle2, |
|
22 | 22 | bundlecaches, |
|
23 | 23 | changegroup, |
|
24 | 24 | discovery, |
|
25 | 25 | error, |
|
26 | 26 | exchangev2, |
|
27 | 27 | lock as lockmod, |
|
28 | 28 | logexchange, |
|
29 | 29 | narrowspec, |
|
30 | 30 | obsolete, |
|
31 | 31 | obsutil, |
|
32 | 32 | phases, |
|
33 | 33 | pushkey, |
|
34 | 34 | pycompat, |
|
35 | 35 | requirements, |
|
36 | 36 | scmutil, |
|
37 | 37 | streamclone, |
|
38 | 38 | url as urlmod, |
|
39 | 39 | util, |
|
40 | 40 | wireprototypes, |
|
41 | 41 | ) |
|
42 | 42 | from .utils import ( |
|
43 | 43 | hashutil, |
|
44 | 44 | stringutil, |
|
45 | 45 | ) |
|
46 | 46 | |
|
47 | 47 | urlerr = util.urlerr |
|
48 | 48 | urlreq = util.urlreq |
|
49 | 49 | |
|
50 | 50 | _NARROWACL_SECTION = b'narrowacl' |
|
51 | 51 | |
|
52 | 52 | |
|
53 | 53 | def readbundle(ui, fh, fname, vfs=None): |
|
54 | 54 | header = changegroup.readexactly(fh, 4) |
|
55 | 55 | |
|
56 | 56 | alg = None |
|
57 | 57 | if not fname: |
|
58 | 58 | fname = b"stream" |
|
59 | 59 | if not header.startswith(b'HG') and header.startswith(b'\0'): |
|
60 | 60 | fh = changegroup.headerlessfixup(fh, header) |
|
61 | 61 | header = b"HG10" |
|
62 | 62 | alg = b'UN' |
|
63 | 63 | elif vfs: |
|
64 | 64 | fname = vfs.join(fname) |
|
65 | 65 | |
|
66 | 66 | magic, version = header[0:2], header[2:4] |
|
67 | 67 | |
|
68 | 68 | if magic != b'HG': |
|
69 | 69 | raise error.Abort(_(b'%s: not a Mercurial bundle') % fname) |
|
70 | 70 | if version == b'10': |
|
71 | 71 | if alg is None: |
|
72 | 72 | alg = changegroup.readexactly(fh, 2) |
|
73 | 73 | return changegroup.cg1unpacker(fh, alg) |
|
74 | 74 | elif version.startswith(b'2'): |
|
75 | 75 | return bundle2.getunbundler(ui, fh, magicstring=magic + version) |
|
76 | 76 | elif version == b'S1': |
|
77 | 77 | return streamclone.streamcloneapplier(fh) |
|
78 | 78 | else: |
|
79 | 79 | raise error.Abort( |
|
80 | 80 | _(b'%s: unknown bundle version %s') % (fname, version) |
|
81 | 81 | ) |
|
82 | 82 | |
|
83 | 83 | |
|
84 | 84 | def getbundlespec(ui, fh): |
|
85 | 85 | """Infer the bundlespec from a bundle file handle. |
|
86 | 86 | |
|
87 | 87 | The input file handle is seeked and the original seek position is not |
|
88 | 88 | restored. |
|
89 | 89 | """ |
|
90 | 90 | |
|
91 | 91 | def speccompression(alg): |
|
92 | 92 | try: |
|
93 | 93 | return util.compengines.forbundletype(alg).bundletype()[0] |
|
94 | 94 | except KeyError: |
|
95 | 95 | return None |
|
96 | 96 | |
|
97 | 97 | b = readbundle(ui, fh, None) |
|
98 | 98 | if isinstance(b, changegroup.cg1unpacker): |
|
99 | 99 | alg = b._type |
|
100 | 100 | if alg == b'_truncatedBZ': |
|
101 | 101 | alg = b'BZ' |
|
102 | 102 | comp = speccompression(alg) |
|
103 | 103 | if not comp: |
|
104 | 104 | raise error.Abort(_(b'unknown compression algorithm: %s') % alg) |
|
105 | 105 | return b'%s-v1' % comp |
|
106 | 106 | elif isinstance(b, bundle2.unbundle20): |
|
107 | 107 | if b'Compression' in b.params: |
|
108 | 108 | comp = speccompression(b.params[b'Compression']) |
|
109 | 109 | if not comp: |
|
110 | 110 | raise error.Abort( |
|
111 | 111 | _(b'unknown compression algorithm: %s') % comp |
|
112 | 112 | ) |
|
113 | 113 | else: |
|
114 | 114 | comp = b'none' |
|
115 | 115 | |
|
116 | 116 | version = None |
|
117 | 117 | for part in b.iterparts(): |
|
118 | 118 | if part.type == b'changegroup': |
|
119 | 119 | version = part.params[b'version'] |
|
120 | 120 | if version in (b'01', b'02'): |
|
121 | 121 | version = b'v2' |
|
122 | 122 | else: |
|
123 | 123 | raise error.Abort( |
|
124 | 124 | _( |
|
125 | 125 | b'changegroup version %s does not have ' |
|
126 | 126 | b'a known bundlespec' |
|
127 | 127 | ) |
|
128 | 128 | % version, |
|
129 | 129 | hint=_(b'try upgrading your Mercurial client'), |
|
130 | 130 | ) |
|
131 | 131 | elif part.type == b'stream2' and version is None: |
|
132 | 132 | # A stream2 part requires to be part of a v2 bundle |
|
133 | 133 | requirements = urlreq.unquote(part.params[b'requirements']) |
|
134 | 134 | splitted = requirements.split() |
|
135 | 135 | params = bundle2._formatrequirementsparams(splitted) |
|
136 | 136 | return b'none-v2;stream=v2;%s' % params |
|
137 | 137 | |
|
138 | 138 | if not version: |
|
139 | 139 | raise error.Abort( |
|
140 | 140 | _(b'could not identify changegroup version in bundle') |
|
141 | 141 | ) |
|
142 | 142 | |
|
143 | 143 | return b'%s-%s' % (comp, version) |
|
144 | 144 | elif isinstance(b, streamclone.streamcloneapplier): |
|
145 | 145 | requirements = streamclone.readbundle1header(fh)[2] |
|
146 | 146 | formatted = bundle2._formatrequirementsparams(requirements) |
|
147 | 147 | return b'none-packed1;%s' % formatted |
|
148 | 148 | else: |
|
149 | 149 | raise error.Abort(_(b'unknown bundle type: %s') % b) |
|
150 | 150 | |
|
151 | 151 | |
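For reference, hypothetical return values of getbundlespec() for the three branches above (compression names depend on util.compengines; these are illustrative only):

    # changegroup v1 compressed with bzip2    -> b'bzip2-v1'
    # bundle2 with zstd and changegroup '02'  -> b'zstd-v2'
    # stream clone bundle                     -> b'none-packed1;<params>'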
|
152 | 152 | def _computeoutgoing(repo, heads, common): |
|
153 | 153 | """Computes which revs are outgoing given a set of common |
|
154 | 154 | and a set of heads. |
|
155 | 155 | |
|
156 | 156 | This is a separate function so extensions can have access to |
|
157 | 157 | the logic. |
|
158 | 158 | |
|
159 | 159 | Returns a discovery.outgoing object. |
|
160 | 160 | """ |
|
161 | 161 | cl = repo.changelog |
|
162 | 162 | if common: |
|
163 | 163 | hasnode = cl.hasnode |
|
164 | 164 | common = [n for n in common if hasnode(n)] |
|
165 | 165 | else: |
|
166 | 166 | common = [nullid] |
|
167 | 167 | if not heads: |
|
168 | 168 | heads = cl.heads() |
|
169 | 169 | return discovery.outgoing(repo, common, heads) |
|
170 | 170 | |
|
171 | 171 | |
|
172 | 172 | def _checkpublish(pushop): |
|
173 | 173 | repo = pushop.repo |
|
174 | 174 | ui = repo.ui |
|
175 | 175 | behavior = ui.config(b'experimental', b'auto-publish') |
|
176 | 176 | if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'): |
|
177 | 177 | return |
|
178 | 178 | remotephases = listkeys(pushop.remote, b'phases') |
|
179 | 179 | if not remotephases.get(b'publishing', False): |
|
180 | 180 | return |
|
181 | 181 | |
|
182 | 182 | if pushop.revs is None: |
|
183 | 183 | published = repo.filtered(b'served').revs(b'not public()') |
|
184 | 184 | else: |
|
185 | 185 | published = repo.revs(b'::%ln - public()', pushop.revs) |
|
186 | 186 | if published: |
|
187 | 187 | if behavior == b'warn': |
|
188 | 188 | ui.warn( |
|
189 | 189 | _(b'%i changesets about to be published\n') % len(published) |
|
190 | 190 | ) |
|
191 | 191 | elif behavior == b'confirm': |
|
192 | 192 | if ui.promptchoice( |
|
193 | 193 | _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No') |
|
194 | 194 | % len(published) |
|
195 | 195 | ): |
|
196 | 196 | raise error.CanceledError(_(b'user quit')) |
|
197 | 197 | elif behavior == b'abort': |
|
198 | 198 | msg = _(b'push would publish %i changesets') % len(published) |
|
199 | 199 | hint = _( |
|
200 | 200 | b"use --publish or adjust 'experimental.auto-publish'" |
|
201 | 201 | b" config" |
|
202 | 202 | ) |
|
203 | 203 | raise error.Abort(msg, hint=hint) |
|
204 | 204 | |
|
205 | 205 | |
|
206 | 206 | def _forcebundle1(op): |
|
207 | 207 | """return true if a pull/push must use bundle1 |
|
208 | 208 | |
|
209 | 209 | This function is used to allow testing of the older bundle version""" |
|
210 | 210 | ui = op.repo.ui |
|
211 | 211 | # The goal is this config is to allow developer to choose the bundle |
|
212 | 212 | # version used during exchanged. This is especially handy during test. |
|
213 | 213 | # Value is a list of bundle version to be picked from, highest version |
|
214 | 214 | # should be used. |
|
215 | 215 | # |
|
216 | 216 | # developer config: devel.legacy.exchange |
|
217 | 217 | exchange = ui.configlist(b'devel', b'legacy.exchange') |
|
218 | 218 | forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange |
|
219 | 219 | return forcebundle1 or not op.remote.capable(b'bundle2') |
|
220 | 220 | |
|
221 | 221 | |
|
222 | 222 | class pushoperation(object): |
|
223 | 223 | """A object that represent a single push operation |
|
224 | 224 | |
|
225 | 225 | Its purpose is to carry push related state and very common operations. |
|
226 | 226 | |
|
227 | 227 | A new pushoperation should be created at the beginning of each push and |
|
228 | 228 | discarded afterward. |
|
229 | 229 | """ |
|
230 | 230 | |
|
231 | 231 | def __init__( |
|
232 | 232 | self, |
|
233 | 233 | repo, |
|
234 | 234 | remote, |
|
235 | 235 | force=False, |
|
236 | 236 | revs=None, |
|
237 | 237 | newbranch=False, |
|
238 | 238 | bookmarks=(), |
|
239 | 239 | publish=False, |
|
240 | 240 | pushvars=None, |
|
241 | 241 | ): |
|
242 | 242 | # repo we push from |
|
243 | 243 | self.repo = repo |
|
244 | 244 | self.ui = repo.ui |
|
245 | 245 | # repo we push to |
|
246 | 246 | self.remote = remote |
|
247 | 247 | # force option provided |
|
248 | 248 | self.force = force |
|
249 | 249 | # revs to be pushed (None is "all") |
|
250 | 250 | self.revs = revs |
|
251 | 251 |         # bookmarks explicitly pushed
|
252 | 252 | self.bookmarks = bookmarks |
|
253 | 253 | # allow push of new branch |
|
254 | 254 | self.newbranch = newbranch |
|
255 | 255 |         # steps already performed

256 | 256 |         # (used to check what steps have already been performed through bundle2)
|
257 | 257 | self.stepsdone = set() |
|
258 | 258 | # Integer version of the changegroup push result |
|
259 | 259 | # - None means nothing to push |
|
260 | 260 | # - 0 means HTTP error |
|
261 | 261 | # - 1 means we pushed and remote head count is unchanged *or* |
|
262 | 262 | # we have outgoing changesets but refused to push |
|
263 | 263 | # - other values as described by addchangegroup() |
|
264 | 264 | self.cgresult = None |
|
265 | 265 | # Boolean value for the bookmark push |
|
266 | 266 | self.bkresult = None |
|
267 | 267 |         # discovery.outgoing object (contains common and outgoing data)
|
268 | 268 | self.outgoing = None |
|
269 | 269 | # all remote topological heads before the push |
|
270 | 270 | self.remoteheads = None |
|
271 | 271 | # Details of the remote branch pre and post push |
|
272 | 272 | # |
|
273 | 273 | # mapping: {'branch': ([remoteheads], |
|
274 | 274 | # [newheads], |
|
275 | 275 | # [unsyncedheads], |
|
276 | 276 | # [discardedheads])} |
|
277 | 277 | # - branch: the branch name |
|
278 | 278 | # - remoteheads: the list of remote heads known locally |
|
279 | 279 | # None if the branch is new |
|
280 | 280 | # - newheads: the new remote heads (known locally) with outgoing pushed |
|
281 | 281 | # - unsyncedheads: the list of remote heads unknown locally. |
|
282 | 282 | # - discardedheads: the list of remote heads made obsolete by the push |
|
283 | 283 | self.pushbranchmap = None |
|
284 | 284 | # testable as a boolean indicating if any nodes are missing locally. |
|
285 | 285 | self.incoming = None |
|
286 | 286 | # summary of the remote phase situation |
|
287 | 287 | self.remotephases = None |
|
288 | 288 |         # phase changes that must be pushed alongside the changesets

289 | 289 |         self.outdatedphases = None

290 | 290 |         # phase changes that must be pushed if the changeset push fails
|
291 | 291 | self.fallbackoutdatedphases = None |
|
292 | 292 | # outgoing obsmarkers |
|
293 | 293 | self.outobsmarkers = set() |
|
294 | 294 | # outgoing bookmarks, list of (bm, oldnode | '', newnode | '') |
|
295 | 295 | self.outbookmarks = [] |
|
296 | 296 | # transaction manager |
|
297 | 297 | self.trmanager = None |
|
298 | 298 | # map { pushkey partid -> callback handling failure} |
|
299 | 299 | # used to handle exception from mandatory pushkey part failure |
|
300 | 300 | self.pkfailcb = {} |
|
301 | 301 | # an iterable of pushvars or None |
|
302 | 302 | self.pushvars = pushvars |
|
303 | 303 | # publish pushed changesets |
|
304 | 304 | self.publish = publish |
|
305 | 305 | |
|
306 | 306 | @util.propertycache |
|
307 | 307 | def futureheads(self): |
|
308 | 308 | """future remote heads if the changeset push succeeds""" |
|
309 | 309 | return self.outgoing.ancestorsof |
|
310 | 310 | |
|
311 | 311 | @util.propertycache |
|
312 | 312 | def fallbackheads(self): |
|
313 | 313 | """future remote heads if the changeset push fails""" |
|
314 | 314 | if self.revs is None: |
|
315 | 315 |             # no target to push, all common heads are relevant
|
316 | 316 | return self.outgoing.commonheads |
|
317 | 317 | unfi = self.repo.unfiltered() |
|
318 | 318 | # I want cheads = heads(::ancestorsof and ::commonheads) |
|
319 | 319 | # (ancestorsof is revs with secret changeset filtered out) |
|
320 | 320 | # |
|
321 | 321 | # This can be expressed as: |
|
322 | 322 | # cheads = ( (ancestorsof and ::commonheads) |
|
323 | 323 |         #              + (commonheads and ::ancestorsof)

324 | 324 |         #              )
|
325 | 325 | # |
|
326 | 326 | # while trying to push we already computed the following: |
|
327 | 327 | # common = (::commonheads) |
|
328 | 328 | # missing = ((commonheads::ancestorsof) - commonheads) |
|
329 | 329 | # |
|
330 | 330 | # We can pick: |
|
331 | 331 | # * ancestorsof part of common (::commonheads) |
|
332 | 332 | common = self.outgoing.common |
|
333 | 333 | rev = self.repo.changelog.index.rev |
|
334 | 334 | cheads = [node for node in self.revs if rev(node) in common] |
|
335 | 335 | # and |
|
336 | 336 | # * commonheads parents on missing |
|
337 | 337 | revset = unfi.set( |
|
338 | 338 | b'%ln and parents(roots(%ln))', |
|
339 | 339 | self.outgoing.commonheads, |
|
340 | 340 | self.outgoing.missing, |
|
341 | 341 | ) |
|
342 | 342 | cheads.extend(c.node() for c in revset) |
|
343 | 343 | return cheads |
|
344 | 344 | |
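    # Restating the pick above (sketch): fallbackheads collects (a) the
    # pushed nodes the remote already has and (b) the common heads that are
    # parents of the roots of the missing set -- together approximating
    # heads(::ancestorsof and ::commonheads).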
|
345 | 345 | @property |
|
346 | 346 | def commonheads(self): |
|
347 | 347 | """set of all common heads after changeset bundle push""" |
|
348 | 348 | if self.cgresult: |
|
349 | 349 | return self.futureheads |
|
350 | 350 | else: |
|
351 | 351 | return self.fallbackheads |
|
352 | 352 | |
|
353 | 353 | |
|
354 | 354 | # mapping of message used when pushing bookmark |
|
355 | 355 | bookmsgmap = { |
|
356 | 356 | b'update': ( |
|
357 | 357 | _(b"updating bookmark %s\n"), |
|
358 | 358 | _(b'updating bookmark %s failed\n'), |
|
359 | 359 | ), |
|
360 | 360 | b'export': ( |
|
361 | 361 | _(b"exporting bookmark %s\n"), |
|
362 | 362 | _(b'exporting bookmark %s failed\n'), |
|
363 | 363 | ), |
|
364 | 364 | b'delete': ( |
|
365 | 365 | _(b"deleting remote bookmark %s\n"), |
|
366 | 366 | _(b'deleting remote bookmark %s failed\n'), |
|
367 | 367 | ), |
|
368 | 368 | } |
|
369 | 369 | |
|
370 | 370 | |
|
371 | 371 | def push( |
|
372 | 372 | repo, |
|
373 | 373 | remote, |
|
374 | 374 | force=False, |
|
375 | 375 | revs=None, |
|
376 | 376 | newbranch=False, |
|
377 | 377 | bookmarks=(), |
|
378 | 378 | publish=False, |
|
379 | 379 | opargs=None, |
|
380 | 380 | ): |
|
381 | 381 | """Push outgoing changesets (limited by revs) from a local |
|
382 | 382 | repository to remote. Return an integer: |
|
383 | 383 | - None means nothing to push |
|
384 | 384 | - 0 means HTTP error |
|
385 | 385 | - 1 means we pushed and remote head count is unchanged *or* |
|
386 | 386 | we have outgoing changesets but refused to push |
|
387 | 387 | - other values as described by addchangegroup() |
|
388 | 388 | """ |
|
389 | 389 | if opargs is None: |
|
390 | 390 | opargs = {} |
|
391 | 391 | pushop = pushoperation( |
|
392 | 392 | repo, |
|
393 | 393 | remote, |
|
394 | 394 | force, |
|
395 | 395 | revs, |
|
396 | 396 | newbranch, |
|
397 | 397 | bookmarks, |
|
398 | 398 | publish, |
|
399 | 399 | **pycompat.strkwargs(opargs) |
|
400 | 400 | ) |
|
401 | 401 | if pushop.remote.local(): |
|
402 | 402 | missing = ( |
|
403 | 403 | set(pushop.repo.requirements) - pushop.remote.local().supported |
|
404 | 404 | ) |
|
405 | 405 | if missing: |
|
406 | 406 | msg = _( |
|
407 | 407 | b"required features are not" |
|
408 | 408 | b" supported in the destination:" |
|
409 | 409 | b" %s" |
|
410 | 410 | ) % (b', '.join(sorted(missing))) |
|
411 | 411 | raise error.Abort(msg) |
|
412 | 412 | |
|
413 | 413 | if not pushop.remote.canpush(): |
|
414 | 414 | raise error.Abort(_(b"destination does not support push")) |
|
415 | 415 | |
|
416 | 416 | if not pushop.remote.capable(b'unbundle'): |
|
417 | 417 | raise error.Abort( |
|
418 | 418 | _( |
|
419 | 419 | b'cannot push: destination does not support the ' |
|
420 | 420 | b'unbundle wire protocol command' |
|
421 | 421 | ) |
|
422 | 422 | ) |
|
423 | 423 | |
|
424 | 424 | # get lock as we might write phase data |
|
425 | 425 | wlock = lock = None |
|
426 | 426 | try: |
|
427 | 427 | # bundle2 push may receive a reply bundle touching bookmarks |
|
428 | 428 | # requiring the wlock. Take it now to ensure proper ordering. |
|
429 | 429 | maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback') |
|
430 | 430 | if ( |
|
431 | 431 | (not _forcebundle1(pushop)) |
|
432 | 432 | and maypushback |
|
433 | 433 | and not bookmod.bookmarksinstore(repo) |
|
434 | 434 | ): |
|
435 | 435 | wlock = pushop.repo.wlock() |
|
436 | 436 | lock = pushop.repo.lock() |
|
437 | 437 | pushop.trmanager = transactionmanager( |
|
438 | 438 | pushop.repo, b'push-response', pushop.remote.url() |
|
439 | 439 | ) |
|
440 | 440 | except error.LockUnavailable as err: |
|
441 | 441 | # source repo cannot be locked. |
|
442 | 442 | # We do not abort the push, but just disable the local phase |
|
443 | 443 | # synchronisation. |
|
444 | 444 | msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr( |
|
445 | 445 | err |
|
446 | 446 | ) |
|
447 | 447 | pushop.ui.debug(msg) |
|
448 | 448 | |
|
449 | 449 | with wlock or util.nullcontextmanager(): |
|
450 | 450 | with lock or util.nullcontextmanager(): |
|
451 | 451 | with pushop.trmanager or util.nullcontextmanager(): |
|
452 | 452 | pushop.repo.checkpush(pushop) |
|
453 | 453 | _checkpublish(pushop) |
|
454 | 454 | _pushdiscovery(pushop) |
|
455 | 455 | if not pushop.force: |
|
456 | 456 | _checksubrepostate(pushop) |
|
457 | 457 | if not _forcebundle1(pushop): |
|
458 | 458 | _pushbundle2(pushop) |
|
459 | 459 | _pushchangeset(pushop) |
|
460 | 460 | _pushsyncphase(pushop) |
|
461 | 461 | _pushobsolete(pushop) |
|
462 | 462 | _pushbookmark(pushop) |
|
463 | 463 | |
|
464 | 464 | if repo.ui.configbool(b'experimental', b'remotenames'): |
|
465 | 465 | logexchange.pullremotenames(repo, remote) |
|
466 | 466 | |
|
467 | 467 | return pushop |
|
468 | 468 | |
|
469 | 469 | |
|
470 | 470 | # list of steps to perform discovery before push |
|
471 | 471 | pushdiscoveryorder = [] |
|
472 | 472 | |
|
473 | 473 | # Mapping between step name and function |
|
474 | 474 | # |
|
475 | 475 | # This exists to help extensions wrap steps if necessary |
|
476 | 476 | pushdiscoverymapping = {} |
|
477 | 477 | |
|
478 | 478 | |
|
479 | 479 | def pushdiscovery(stepname): |
|
480 | 480 | """decorator for function performing discovery before push |
|
481 | 481 | |
|
482 | 482 | The function is added to the step -> function mapping and appended to the |
|
483 | 483 |     list of steps. Beware that decorated functions will be added in order (this

484 | 484 |     may matter).

485 | 485 | 

486 | 486 |     You can only use this decorator for a new step; if you want to wrap a step

487 | 487 |     from an extension, change the pushdiscoverymapping dictionary directly."""
|
488 | 488 | |
|
489 | 489 | def dec(func): |
|
490 | 490 | assert stepname not in pushdiscoverymapping |
|
491 | 491 | pushdiscoverymapping[stepname] = func |
|
492 | 492 | pushdiscoveryorder.append(stepname) |
|
493 | 493 | return func |
|
494 | 494 | |
|
495 | 495 | return dec |
|
496 | 496 | |
|
497 | 497 | |
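# Illustrative sketch (not part of the original code): registering a new
# discovery step from an extension via the decorator above. The step name
# b'example' and the body are hypothetical.
@pushdiscovery(b'example')
def _pushdiscoveryexample(pushop):
    # steps run in registration order, so this executes among the others
    pushop.ui.debug(b'example discovery step ran\n')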
|
498 | 498 | def _pushdiscovery(pushop): |
|
499 | 499 | """Run all discovery steps""" |
|
500 | 500 | for stepname in pushdiscoveryorder: |
|
501 | 501 | step = pushdiscoverymapping[stepname] |
|
502 | 502 | step(pushop) |
|
503 | 503 | |
|
504 | 504 | |
|
505 | 505 | def _checksubrepostate(pushop): |
|
506 | 506 | """Ensure all outgoing referenced subrepo revisions are present locally""" |
|
507 | 507 | for n in pushop.outgoing.missing: |
|
508 | 508 | ctx = pushop.repo[n] |
|
509 | 509 | |
|
510 | 510 | if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files(): |
|
511 | 511 | for subpath in sorted(ctx.substate): |
|
512 | 512 | sub = ctx.sub(subpath) |
|
513 | 513 | sub.verify(onpush=True) |
|
514 | 514 | |
|
515 | 515 | |
|
516 | 516 | @pushdiscovery(b'changeset') |
|
517 | 517 | def _pushdiscoverychangeset(pushop): |
|
518 | 518 | """discover the changeset that need to be pushed""" |
|
519 | 519 | fci = discovery.findcommonincoming |
|
520 | 520 | if pushop.revs: |
|
521 | 521 | commoninc = fci( |
|
522 | 522 | pushop.repo, |
|
523 | 523 | pushop.remote, |
|
524 | 524 | force=pushop.force, |
|
525 | 525 | ancestorsof=pushop.revs, |
|
526 | 526 | ) |
|
527 | 527 | else: |
|
528 | 528 | commoninc = fci(pushop.repo, pushop.remote, force=pushop.force) |
|
529 | 529 | common, inc, remoteheads = commoninc |
|
530 | 530 | fco = discovery.findcommonoutgoing |
|
531 | 531 | outgoing = fco( |
|
532 | 532 | pushop.repo, |
|
533 | 533 | pushop.remote, |
|
534 | 534 | onlyheads=pushop.revs, |
|
535 | 535 | commoninc=commoninc, |
|
536 | 536 | force=pushop.force, |
|
537 | 537 | ) |
|
538 | 538 | pushop.outgoing = outgoing |
|
539 | 539 | pushop.remoteheads = remoteheads |
|
540 | 540 | pushop.incoming = inc |
|
541 | 541 | |
|
542 | 542 | |
|
543 | 543 | @pushdiscovery(b'phase') |
|
544 | 544 | def _pushdiscoveryphase(pushop): |
|
545 | 545 | """discover the phase that needs to be pushed |
|
546 | 546 | |
|
547 | 547 | (computed for both success and failure case for changesets push)""" |
|
548 | 548 | outgoing = pushop.outgoing |
|
549 | 549 | unfi = pushop.repo.unfiltered() |
|
550 | 550 | remotephases = listkeys(pushop.remote, b'phases') |
|
551 | 551 | |
|
552 | 552 | if ( |
|
553 | 553 | pushop.ui.configbool(b'ui', b'_usedassubrepo') |
|
554 | 554 | and remotephases # server supports phases |
|
555 | 555 | and not pushop.outgoing.missing # no changesets to be pushed |
|
556 | 556 | and remotephases.get(b'publishing', False) |
|
557 | 557 | ): |
|
558 | 558 | # When: |
|
559 | 559 | # - this is a subrepo push |
|
560 | 560 |         # - and remote supports phases

561 | 561 |         # - and no changesets are to be pushed

562 | 562 |         # - and remote is publishing

563 | 563 |         # We may be in the issue 3871 case!

564 | 564 |         # We drop the phase synchronisation normally done as a

565 | 565 |         # courtesy; it could publish changesets that are still

566 | 566 |         # draft locally.
|
567 | 567 | pushop.outdatedphases = [] |
|
568 | 568 | pushop.fallbackoutdatedphases = [] |
|
569 | 569 | return |
|
570 | 570 | |
|
571 | 571 | pushop.remotephases = phases.remotephasessummary( |
|
572 | 572 | pushop.repo, pushop.fallbackheads, remotephases |
|
573 | 573 | ) |
|
574 | 574 | droots = pushop.remotephases.draftroots |
|
575 | 575 | |
|
576 | 576 | extracond = b'' |
|
577 | 577 | if not pushop.remotephases.publishing: |
|
578 | 578 | extracond = b' and public()' |
|
579 | 579 | revset = b'heads((%%ln::%%ln) %s)' % extracond |
|
580 | 580 |     # Get the list of all revs that are draft on the remote but public here.

581 | 581 |     # XXX Beware that the revset breaks if droots is not strictly

582 | 582 |     # XXX roots; we may want to ensure it is, but that is costly
|
583 | 583 | fallback = list(unfi.set(revset, droots, pushop.fallbackheads)) |
|
584 | 584 | if not pushop.remotephases.publishing and pushop.publish: |
|
585 | 585 | future = list( |
|
586 | 586 | unfi.set( |
|
587 | 587 | b'%ln and (not public() or %ln::)', pushop.futureheads, droots |
|
588 | 588 | ) |
|
589 | 589 | ) |
|
590 | 590 | elif not outgoing.missing: |
|
591 | 591 | future = fallback |
|
592 | 592 | else: |
|
593 | 593 |         # adds changesets we are going to push as draft

594 | 594 |         #

595 | 595 |         # should not be necessary for a publishing server, but because of an
|
596 | 596 | # issue fixed in xxxxx we have to do it anyway. |
|
597 | 597 | fdroots = list( |
|
598 | 598 | unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots) |
|
599 | 599 | ) |
|
600 | 600 | fdroots = [f.node() for f in fdroots] |
|
601 | 601 | future = list(unfi.set(revset, fdroots, pushop.futureheads)) |
|
602 | 602 | pushop.outdatedphases = future |
|
603 | 603 | pushop.fallbackoutdatedphases = fallback |
|
604 | 604 | |
|
605 | 605 | |
|
606 | 606 | @pushdiscovery(b'obsmarker') |
|
607 | 607 | def _pushdiscoveryobsmarkers(pushop): |
|
608 | 608 | if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt): |
|
609 | 609 | return |
|
610 | 610 | |
|
611 | 611 | if not pushop.repo.obsstore: |
|
612 | 612 | return |
|
613 | 613 | |
|
614 | 614 | if b'obsolete' not in listkeys(pushop.remote, b'namespaces'): |
|
615 | 615 | return |
|
616 | 616 | |
|
617 | 617 | repo = pushop.repo |
|
618 | 618 |     # very naive computation that can be quite expensive on a big repo.
|
619 | 619 | # However: evolution is currently slow on them anyway. |
|
620 | 620 | nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads)) |
|
621 | 621 | pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes) |
|
622 | 622 | |
|
623 | 623 | |
|
624 | 624 | @pushdiscovery(b'bookmarks') |
|
625 | 625 | def _pushdiscoverybookmarks(pushop): |
|
626 | 626 | ui = pushop.ui |
|
627 | 627 | repo = pushop.repo.unfiltered() |
|
628 | 628 | remote = pushop.remote |
|
629 | 629 | ui.debug(b"checking for updated bookmarks\n") |
|
630 | 630 | ancestors = () |
|
631 | 631 | if pushop.revs: |
|
632 | 632 | revnums = pycompat.maplist(repo.changelog.rev, pushop.revs) |
|
633 | 633 | ancestors = repo.changelog.ancestors(revnums, inclusive=True) |
|
634 | 634 | |
|
635 | 635 | remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks')) |
|
636 | 636 | |
|
637 | 637 | explicit = { |
|
638 | 638 | repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks |
|
639 | 639 | } |
|
640 | 640 | |
|
641 | 641 | comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark) |
|
642 | 642 | return _processcompared(pushop, ancestors, explicit, remotebookmark, comp) |
|
643 | 643 | |
|
644 | 644 | |
|
645 | 645 | def _processcompared(pushop, pushed, explicit, remotebms, comp): |
|
646 | 646 | """take decision on bookmarks to push to the remote repo |
|
647 | 647 | |
|
648 | 648 | Exists to help extensions alter this behavior. |
|
649 | 649 | """ |
|
650 | 650 | addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp |
|
651 | 651 | |
|
652 | 652 | repo = pushop.repo |
|
653 | 653 | |
|
654 | 654 | for b, scid, dcid in advsrc: |
|
655 | 655 | if b in explicit: |
|
656 | 656 | explicit.remove(b) |
|
657 | 657 | if not pushed or repo[scid].rev() in pushed: |
|
658 | 658 | pushop.outbookmarks.append((b, dcid, scid)) |
|
659 | 659 |     # search for added bookmarks
|
660 | 660 | for b, scid, dcid in addsrc: |
|
661 | 661 | if b in explicit: |
|
662 | 662 | explicit.remove(b) |
|
663 | 663 | if bookmod.isdivergent(b): |
|
664 | 664 | pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b) |
|
665 | 665 | pushop.bkresult = 2 |
|
666 | 666 | else: |
|
667 | 667 | pushop.outbookmarks.append((b, b'', scid)) |
|
668 | 668 | # search for overwritten bookmark |
|
669 | 669 | for b, scid, dcid in list(advdst) + list(diverge) + list(differ): |
|
670 | 670 | if b in explicit: |
|
671 | 671 | explicit.remove(b) |
|
672 | 672 | pushop.outbookmarks.append((b, dcid, scid)) |
|
673 | 673 | # search for bookmark to delete |
|
674 | 674 | for b, scid, dcid in adddst: |
|
675 | 675 | if b in explicit: |
|
676 | 676 | explicit.remove(b) |
|
677 | 677 | # treat as "deleted locally" |
|
678 | 678 | pushop.outbookmarks.append((b, dcid, b'')) |
|
679 | 679 | # identical bookmarks shouldn't get reported |
|
680 | 680 | for b, scid, dcid in same: |
|
681 | 681 | if b in explicit: |
|
682 | 682 | explicit.remove(b) |
|
683 | 683 | |
|
684 | 684 | if explicit: |
|
685 | 685 | explicit = sorted(explicit) |
|
686 | 686 | # we should probably list all of them |
|
687 | 687 | pushop.ui.warn( |
|
688 | 688 | _( |
|
689 | 689 | b'bookmark %s does not exist on the local ' |
|
690 | 690 | b'or remote repository!\n' |
|
691 | 691 | ) |
|
692 | 692 | % explicit[0] |
|
693 | 693 | ) |
|
694 | 694 | pushop.bkresult = 2 |
|
695 | 695 | |
|
696 | 696 | pushop.outbookmarks.sort() |
|
697 | 697 | |
|
698 | 698 | |
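# Shape of the result (sketch): each entry appended above is a
# (name, oldnode-or-b'', newnode-or-b'') triple, matching the
# pushoperation.outbookmarks convention, e.g. for a hypothetical
# bookmark b'feature':
#   (b'feature', b'', scid)   -> export a bookmark new to the remote
#   (b'feature', dcid, scid)  -> advance or overwrite the remote bookmark
#   (b'feature', dcid, b'')   -> delete the remote bookmark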
|
699 | 699 | def _pushcheckoutgoing(pushop): |
|
700 | 700 | outgoing = pushop.outgoing |
|
701 | 701 | unfi = pushop.repo.unfiltered() |
|
702 | 702 | if not outgoing.missing: |
|
703 | 703 | # nothing to push |
|
704 | 704 | scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded) |
|
705 | 705 | return False |
|
706 | 706 | # something to push |
|
707 | 707 | if not pushop.force: |
|
708 | 708 |         # if repo.obsstore is empty --> no obsolete markers;

709 | 709 |         # skip the iteration to save time
|
710 | 710 | if unfi.obsstore: |
|
711 | 711 |             # these messages are defined here to stay within the 80-char limit
|
712 | 712 | mso = _(b"push includes obsolete changeset: %s!") |
|
713 | 713 | mspd = _(b"push includes phase-divergent changeset: %s!") |
|
714 | 714 | mscd = _(b"push includes content-divergent changeset: %s!") |
|
715 | 715 | mst = { |
|
716 | 716 | b"orphan": _(b"push includes orphan changeset: %s!"), |
|
717 | 717 | b"phase-divergent": mspd, |
|
718 | 718 | b"content-divergent": mscd, |
|
719 | 719 | } |
|
720 | 720 |             # If there is at least one obsolete or unstable

721 | 721 |             # changeset in missing, at least one of the missing

722 | 722 |             # heads will be obsolete or unstable. So checking

723 | 723 |             # heads only is ok.
|
724 | 724 | for node in outgoing.ancestorsof: |
|
725 | 725 | ctx = unfi[node] |
|
726 | 726 | if ctx.obsolete(): |
|
727 | 727 | raise error.Abort(mso % ctx) |
|
728 | 728 | elif ctx.isunstable(): |
|
729 | 729 | # TODO print more than one instability in the abort |
|
730 | 730 | # message |
|
731 | 731 | raise error.Abort(mst[ctx.instabilities()[0]] % ctx) |
|
732 | 732 | |
|
733 | 733 | discovery.checkheads(pushop) |
|
734 | 734 | return True |
|
735 | 735 | |
|
736 | 736 | |
|
737 | 737 | # List of names of steps to perform for an outgoing bundle2, order matters. |
|
738 | 738 | b2partsgenorder = [] |
|
739 | 739 | |
|
740 | 740 | # Mapping between step name and function |
|
741 | 741 | # |
|
742 | 742 | # This exists to help extensions wrap steps if necessary |
|
743 | 743 | b2partsgenmapping = {} |
|
744 | 744 | |
|
745 | 745 | |
|
746 | 746 | def b2partsgenerator(stepname, idx=None): |
|
747 | 747 | """decorator for function generating bundle2 part |
|
748 | 748 | |
|
749 | 749 | The function is added to the step -> function mapping and appended to the |
|
750 | 750 | list of steps. Beware that decorated functions will be added in order |
|
751 | 751 | (this may matter). |
|
752 | 752 | |
|
753 | 753 |     You can only use this decorator for new steps; if you want to wrap a step

754 | 754 |     from an extension, change the b2partsgenmapping dictionary directly."""
|
755 | 755 | |
|
756 | 756 | def dec(func): |
|
757 | 757 | assert stepname not in b2partsgenmapping |
|
758 | 758 | b2partsgenmapping[stepname] = func |
|
759 | 759 | if idx is None: |
|
760 | 760 | b2partsgenorder.append(stepname) |
|
761 | 761 | else: |
|
762 | 762 | b2partsgenorder.insert(idx, stepname) |
|
763 | 763 | return func |
|
764 | 764 | |
|
765 | 765 | return dec |
|
766 | 766 | |
|
767 | 767 | |
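# Illustrative sketch (not part of the original code): an extension adding
# its own bundle2 part generator. The part type b'example:noop' is
# hypothetical; returning a callable here would register a reply handler
# (see _pushbundle2 below).
@b2partsgenerator(b'example')
def _pushb2example(pushop, bundler):
    if not _pushing(pushop):
        return
    bundler.newpart(b'example:noop', data=b'', mandatory=False)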
|
768 | 768 | def _pushb2ctxcheckheads(pushop, bundler): |
|
769 | 769 | """Generate race condition checking parts |
|
770 | 770 | |
|
771 | 771 | Exists as an independent function to aid extensions |
|
772 | 772 | """ |
|
773 | 773 |     # * 'force' does not check for push races,

774 | 774 |     # * if we don't push anything, there is nothing to check.
|
775 | 775 | if not pushop.force and pushop.outgoing.ancestorsof: |
|
776 | 776 | allowunrelated = b'related' in bundler.capabilities.get( |
|
777 | 777 | b'checkheads', () |
|
778 | 778 | ) |
|
779 | 779 | emptyremote = pushop.pushbranchmap is None |
|
780 | 780 | if not allowunrelated or emptyremote: |
|
781 | 781 | bundler.newpart(b'check:heads', data=iter(pushop.remoteheads)) |
|
782 | 782 | else: |
|
783 | 783 | affected = set() |
|
784 | 784 | for branch, heads in pycompat.iteritems(pushop.pushbranchmap): |
|
785 | 785 | remoteheads, newheads, unsyncedheads, discardedheads = heads |
|
786 | 786 | if remoteheads is not None: |
|
787 | 787 | remote = set(remoteheads) |
|
788 | 788 | affected |= set(discardedheads) & remote |
|
789 | 789 | affected |= remote - set(newheads) |
|
790 | 790 | if affected: |
|
791 | 791 | data = iter(sorted(affected)) |
|
792 | 792 | bundler.newpart(b'check:updated-heads', data=data) |
|
793 | 793 | |
|
794 | 794 | |
|
795 | 795 | def _pushing(pushop): |
|
796 | 796 | """return True if we are pushing anything""" |
|
797 | 797 | return bool( |
|
798 | 798 | pushop.outgoing.missing |
|
799 | 799 | or pushop.outdatedphases |
|
800 | 800 | or pushop.outobsmarkers |
|
801 | 801 | or pushop.outbookmarks |
|
802 | 802 | ) |
|
803 | 803 | |
|
804 | 804 | |
|
805 | 805 | @b2partsgenerator(b'check-bookmarks') |
|
806 | 806 | def _pushb2checkbookmarks(pushop, bundler): |
|
807 | 807 | """insert bookmark move checking""" |
|
808 | 808 | if not _pushing(pushop) or pushop.force: |
|
809 | 809 | return |
|
810 | 810 | b2caps = bundle2.bundle2caps(pushop.remote) |
|
811 | 811 | hasbookmarkcheck = b'bookmarks' in b2caps |
|
812 | 812 | if not (pushop.outbookmarks and hasbookmarkcheck): |
|
813 | 813 | return |
|
814 | 814 | data = [] |
|
815 | 815 | for book, old, new in pushop.outbookmarks: |
|
816 | 816 | data.append((book, old)) |
|
817 | 817 | checkdata = bookmod.binaryencode(data) |
|
818 | 818 | bundler.newpart(b'check:bookmarks', data=checkdata) |
|
819 | 819 | |
|
820 | 820 | |
|
821 | 821 | @b2partsgenerator(b'check-phases') |
|
822 | 822 | def _pushb2checkphases(pushop, bundler): |
|
823 | 823 | """insert phase move checking""" |
|
824 | 824 | if not _pushing(pushop) or pushop.force: |
|
825 | 825 | return |
|
826 | 826 | b2caps = bundle2.bundle2caps(pushop.remote) |
|
827 | 827 | hasphaseheads = b'heads' in b2caps.get(b'phases', ()) |
|
828 | 828 | if pushop.remotephases is not None and hasphaseheads: |
|
829 | 829 | # check that the remote phase has not changed |
|
830 | 830 | checks = {p: [] for p in phases.allphases} |
|
831 | 831 | checks[phases.public].extend(pushop.remotephases.publicheads) |
|
832 | 832 | checks[phases.draft].extend(pushop.remotephases.draftroots) |
|
833 | 833 | if any(pycompat.itervalues(checks)): |
|
834 | 834 | for phase in checks: |
|
835 | 835 | checks[phase].sort() |
|
836 | 836 | checkdata = phases.binaryencode(checks) |
|
837 | 837 | bundler.newpart(b'check:phases', data=checkdata) |
|
838 | 838 | |
|
839 | 839 | |
|
840 | 840 | @b2partsgenerator(b'changeset') |
|
841 | 841 | def _pushb2ctx(pushop, bundler): |
|
842 | 842 | """handle changegroup push through bundle2 |
|
843 | 843 | |
|
844 | 844 | addchangegroup result is stored in the ``pushop.cgresult`` attribute. |
|
845 | 845 | """ |
|
846 | 846 | if b'changesets' in pushop.stepsdone: |
|
847 | 847 | return |
|
848 | 848 | pushop.stepsdone.add(b'changesets') |
|
849 | 849 | # Send known heads to the server for race detection. |
|
850 | 850 | if not _pushcheckoutgoing(pushop): |
|
851 | 851 | return |
|
852 | 852 | pushop.repo.prepushoutgoinghooks(pushop) |
|
853 | 853 | |
|
854 | 854 | _pushb2ctxcheckheads(pushop, bundler) |
|
855 | 855 | |
|
856 | 856 | b2caps = bundle2.bundle2caps(pushop.remote) |
|
857 | 857 | version = b'01' |
|
858 | 858 | cgversions = b2caps.get(b'changegroup') |
|
859 | 859 | if cgversions: # 3.1 and 3.2 ship with an empty value |
|
860 | 860 | cgversions = [ |
|
861 | 861 | v |
|
862 | 862 | for v in cgversions |
|
863 | 863 | if v in changegroup.supportedoutgoingversions(pushop.repo) |
|
864 | 864 | ] |
|
865 | 865 | if not cgversions: |
|
866 | 866 | raise error.Abort(_(b'no common changegroup version')) |
|
867 | 867 | version = max(cgversions) |
|
868 | 868 | cgstream = changegroup.makestream( |
|
869 | 869 | pushop.repo, pushop.outgoing, version, b'push' |
|
870 | 870 | ) |
|
871 | 871 | cgpart = bundler.newpart(b'changegroup', data=cgstream) |
|
872 | 872 | if cgversions: |
|
873 | 873 | cgpart.addparam(b'version', version) |
|
874 | 874 | if scmutil.istreemanifest(pushop.repo): |
|
875 | 875 | cgpart.addparam(b'treemanifest', b'1') |
|
876 | 876 | if b'exp-sidedata-flag' in pushop.repo.requirements: |
|
877 | 877 | cgpart.addparam(b'exp-sidedata', b'1') |
|
878 | 878 | |
|
879 | 879 | def handlereply(op): |
|
880 | 880 | """extract addchangegroup returns from server reply""" |
|
881 | 881 | cgreplies = op.records.getreplies(cgpart.id) |
|
882 | 882 | assert len(cgreplies[b'changegroup']) == 1 |
|
883 | 883 | pushop.cgresult = cgreplies[b'changegroup'][0][b'return'] |
|
884 | 884 | |
|
885 | 885 | return handlereply |
|
886 | 886 | |
|
887 | 887 | |
|
888 | 888 | @b2partsgenerator(b'phase') |
|
889 | 889 | def _pushb2phases(pushop, bundler): |
|
890 | 890 | """handle phase push through bundle2""" |
|
891 | 891 | if b'phases' in pushop.stepsdone: |
|
892 | 892 | return |
|
893 | 893 | b2caps = bundle2.bundle2caps(pushop.remote) |
|
894 | 894 | ui = pushop.repo.ui |
|
895 | 895 | |
|
896 | 896 | legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange') |
|
897 | 897 | haspushkey = b'pushkey' in b2caps |
|
898 | 898 | hasphaseheads = b'heads' in b2caps.get(b'phases', ()) |
|
899 | 899 | |
|
900 | 900 | if hasphaseheads and not legacyphase: |
|
901 | 901 | return _pushb2phaseheads(pushop, bundler) |
|
902 | 902 | elif haspushkey: |
|
903 | 903 | return _pushb2phasespushkey(pushop, bundler) |
|
904 | 904 | |
|
905 | 905 | |
|
906 | 906 | def _pushb2phaseheads(pushop, bundler): |
|
907 | 907 | """push phase information through a bundle2 - binary part""" |
|
908 | 908 | pushop.stepsdone.add(b'phases') |
|
909 | 909 | if pushop.outdatedphases: |
|
910 | 910 | updates = {p: [] for p in phases.allphases} |
|
911 | 911 | updates[0].extend(h.node() for h in pushop.outdatedphases) |
|
912 | 912 | phasedata = phases.binaryencode(updates) |
|
913 | 913 | bundler.newpart(b'phase-heads', data=phasedata) |
|
914 | 914 | |
|
915 | 915 | |
|
916 | 916 | def _pushb2phasespushkey(pushop, bundler): |
|
917 | 917 | """push phase information through a bundle2 - pushkey part""" |
|
918 | 918 | pushop.stepsdone.add(b'phases') |
|
919 | 919 | part2node = [] |
|
920 | 920 | |
|
921 | 921 | def handlefailure(pushop, exc): |
|
922 | 922 | targetid = int(exc.partid) |
|
923 | 923 | for partid, node in part2node: |
|
924 | 924 | if partid == targetid: |
|
925 | 925 | raise error.Abort(_(b'updating %s to public failed') % node) |
|
926 | 926 | |
|
927 | 927 | enc = pushkey.encode |
|
928 | 928 | for newremotehead in pushop.outdatedphases: |
|
929 | 929 | part = bundler.newpart(b'pushkey') |
|
930 | 930 | part.addparam(b'namespace', enc(b'phases')) |
|
931 | 931 | part.addparam(b'key', enc(newremotehead.hex())) |
|
932 | 932 | part.addparam(b'old', enc(b'%d' % phases.draft)) |
|
933 | 933 | part.addparam(b'new', enc(b'%d' % phases.public)) |
|
934 | 934 | part2node.append((part.id, newremotehead)) |
|
935 | 935 | pushop.pkfailcb[part.id] = handlefailure |
|
936 | 936 | |
|
937 | 937 | def handlereply(op): |
|
938 | 938 | for partid, node in part2node: |
|
939 | 939 | partrep = op.records.getreplies(partid) |
|
940 | 940 | results = partrep[b'pushkey'] |
|
941 | 941 | assert len(results) <= 1 |
|
942 | 942 | msg = None |
|
943 | 943 | if not results: |
|
944 | 944 | msg = _(b'server ignored update of %s to public!\n') % node |
|
945 | 945 | elif not int(results[0][b'return']): |
|
946 | 946 | msg = _(b'updating %s to public failed!\n') % node |
|
947 | 947 | if msg is not None: |
|
948 | 948 | pushop.ui.warn(msg) |
|
949 | 949 | |
|
950 | 950 | return handlereply |
|
951 | 951 | |
|
952 | 952 | |
|
953 | 953 | @b2partsgenerator(b'obsmarkers') |
|
954 | 954 | def _pushb2obsmarkers(pushop, bundler): |
|
955 | 955 | if b'obsmarkers' in pushop.stepsdone: |
|
956 | 956 | return |
|
957 | 957 | remoteversions = bundle2.obsmarkersversion(bundler.capabilities) |
|
958 | 958 | if obsolete.commonversion(remoteversions) is None: |
|
959 | 959 | return |
|
960 | 960 | pushop.stepsdone.add(b'obsmarkers') |
|
961 | 961 | if pushop.outobsmarkers: |
|
962 | 962 | markers = obsutil.sortedmarkers(pushop.outobsmarkers) |
|
963 | 963 | bundle2.buildobsmarkerspart(bundler, markers) |
|
964 | 964 | |
|
965 | 965 | |
|
966 | 966 | @b2partsgenerator(b'bookmarks') |
|
967 | 967 | def _pushb2bookmarks(pushop, bundler): |
|
968 | 968 | """handle bookmark push through bundle2""" |
|
969 | 969 | if b'bookmarks' in pushop.stepsdone: |
|
970 | 970 | return |
|
971 | 971 | b2caps = bundle2.bundle2caps(pushop.remote) |
|
972 | 972 | |
|
973 | 973 | legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange') |
|
974 | 974 | legacybooks = b'bookmarks' in legacy |
|
975 | 975 | |
|
976 | 976 | if not legacybooks and b'bookmarks' in b2caps: |
|
977 | 977 | return _pushb2bookmarkspart(pushop, bundler) |
|
978 | 978 | elif b'pushkey' in b2caps: |
|
979 | 979 | return _pushb2bookmarkspushkey(pushop, bundler) |
|
980 | 980 | |
|
981 | 981 | |
|
982 | 982 | def _bmaction(old, new): |
|
983 | 983 | """small utility for bookmark pushing""" |
|
984 | 984 | if not old: |
|
985 | 985 | return b'export' |
|
986 | 986 | elif not new: |
|
987 | 987 | return b'delete' |
|
988 | 988 | return b'update' |
|
989 | 989 | |
|
990 | 990 | |
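# Truth table for _bmaction (sketch; old/new are nodes, or b'' when absent):
#   _bmaction(b'', new)  -> b'export'   bookmark is new on the remote
#   _bmaction(old, b'')  -> b'delete'   bookmark is being removed
#   _bmaction(old, new)  -> b'update'   both sides set: a normal move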
|
991 | 991 | def _abortonsecretctx(pushop, node, b): |
|
992 | 992 | """abort if a given bookmark points to a secret changeset""" |
|
993 | 993 | if node and pushop.repo[node].phase() == phases.secret: |
|
994 | 994 | raise error.Abort( |
|
995 | 995 | _(b'cannot push bookmark %s as it points to a secret changeset') % b |
|
996 | 996 | ) |
|
997 | 997 | |
|
998 | 998 | |
|
999 | 999 | def _pushb2bookmarkspart(pushop, bundler): |
|
1000 | 1000 | pushop.stepsdone.add(b'bookmarks') |
|
1001 | 1001 | if not pushop.outbookmarks: |
|
1002 | 1002 | return |
|
1003 | 1003 | |
|
1004 | 1004 | allactions = [] |
|
1005 | 1005 | data = [] |
|
1006 | 1006 | for book, old, new in pushop.outbookmarks: |
|
1007 | 1007 | _abortonsecretctx(pushop, new, book) |
|
1008 | 1008 | data.append((book, new)) |
|
1009 | 1009 | allactions.append((book, _bmaction(old, new))) |
|
1010 | 1010 | checkdata = bookmod.binaryencode(data) |
|
1011 | 1011 | bundler.newpart(b'bookmarks', data=checkdata) |
|
1012 | 1012 | |
|
1013 | 1013 | def handlereply(op): |
|
1014 | 1014 | ui = pushop.ui |
|
1015 | 1015 | # if success |
|
1016 | 1016 | for book, action in allactions: |
|
1017 | 1017 | ui.status(bookmsgmap[action][0] % book) |
|
1018 | 1018 | |
|
1019 | 1019 | return handlereply |
|
1020 | 1020 | |
|
1021 | 1021 | |
|
1022 | 1022 | def _pushb2bookmarkspushkey(pushop, bundler): |
|
1023 | 1023 | pushop.stepsdone.add(b'bookmarks') |
|
1024 | 1024 | part2book = [] |
|
1025 | 1025 | enc = pushkey.encode |
|
1026 | 1026 | |
|
1027 | 1027 | def handlefailure(pushop, exc): |
|
1028 | 1028 | targetid = int(exc.partid) |
|
1029 | 1029 | for partid, book, action in part2book: |
|
1030 | 1030 | if partid == targetid: |
|
1031 | 1031 | raise error.Abort(bookmsgmap[action][1].rstrip() % book) |
|
1032 | 1032 |         # we should not be called for a part we did not generate
|
1033 | 1033 | assert False |
|
1034 | 1034 | |
|
1035 | 1035 | for book, old, new in pushop.outbookmarks: |
|
1036 | 1036 | _abortonsecretctx(pushop, new, book) |
|
1037 | 1037 | part = bundler.newpart(b'pushkey') |
|
1038 | 1038 | part.addparam(b'namespace', enc(b'bookmarks')) |
|
1039 | 1039 | part.addparam(b'key', enc(book)) |
|
1040 | 1040 | part.addparam(b'old', enc(hex(old))) |
|
1041 | 1041 | part.addparam(b'new', enc(hex(new))) |
|
1042 | 1042 | action = b'update' |
|
1043 | 1043 | if not old: |
|
1044 | 1044 | action = b'export' |
|
1045 | 1045 | elif not new: |
|
1046 | 1046 | action = b'delete' |
|
1047 | 1047 | part2book.append((part.id, book, action)) |
|
1048 | 1048 | pushop.pkfailcb[part.id] = handlefailure |
|
1049 | 1049 | |
|
1050 | 1050 | def handlereply(op): |
|
1051 | 1051 | ui = pushop.ui |
|
1052 | 1052 | for partid, book, action in part2book: |
|
1053 | 1053 | partrep = op.records.getreplies(partid) |
|
1054 | 1054 | results = partrep[b'pushkey'] |
|
1055 | 1055 | assert len(results) <= 1 |
|
1056 | 1056 | if not results: |
|
1057 | 1057 | pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book) |
|
1058 | 1058 | else: |
|
1059 | 1059 | ret = int(results[0][b'return']) |
|
1060 | 1060 | if ret: |
|
1061 | 1061 | ui.status(bookmsgmap[action][0] % book) |
|
1062 | 1062 | else: |
|
1063 | 1063 | ui.warn(bookmsgmap[action][1] % book) |
|
1064 | 1064 | if pushop.bkresult is not None: |
|
1065 | 1065 | pushop.bkresult = 1 |
|
1066 | 1066 | |
|
1067 | 1067 | return handlereply |
|
1068 | 1068 | |
|
1069 | 1069 | |
|
1070 | 1070 | @b2partsgenerator(b'pushvars', idx=0) |
|
1071 | 1071 | def _getbundlesendvars(pushop, bundler): |
|
1072 | 1072 | '''send shellvars via bundle2''' |
|
1073 | 1073 | pushvars = pushop.pushvars |
|
1074 | 1074 | if pushvars: |
|
1075 | 1075 | shellvars = {} |
|
1076 | 1076 | for raw in pushvars: |
|
1077 | 1077 | if b'=' not in raw: |
|
1078 | 1078 | msg = ( |
|
1079 | 1079 | b"unable to parse variable '%s', should follow " |
|
1080 | 1080 | b"'KEY=VALUE' or 'KEY=' format" |
|
1081 | 1081 | ) |
|
1082 | 1082 | raise error.Abort(msg % raw) |
|
1083 | 1083 | k, v = raw.split(b'=', 1) |
|
1084 | 1084 | shellvars[k] = v |
|
1085 | 1085 | |
|
1086 | 1086 | part = bundler.newpart(b'pushvars') |
|
1087 | 1087 | |
|
1088 | 1088 | for key, value in pycompat.iteritems(shellvars): |
|
1089 | 1089 | part.addparam(key, value, mandatory=False) |
|
1090 | 1090 | |
|
1091 | 1091 | |
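# Example of what feeds this generator (sketch): running
#   hg push --pushvars "DEBUG=1" --pushvars "REASON=hotfix"
# arrives here as pushop.pushvars = [b'DEBUG=1', b'REASON=hotfix']; each
# KEY=VALUE pair becomes an advisory parameter on the b'pushvars' part.
# (The variable names DEBUG and REASON are made up for illustration.)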
|
1092 | 1092 | def _pushbundle2(pushop): |
|
1093 | 1093 | """push data to the remote using bundle2 |
|
1094 | 1094 | |
|
1095 | 1095 | The only currently supported type of data is changegroup but this will |
|
1096 | 1096 | evolve in the future.""" |
|
1097 | 1097 | bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote)) |
|
1098 | 1098 | pushback = pushop.trmanager and pushop.ui.configbool( |
|
1099 | 1099 | b'experimental', b'bundle2.pushback' |
|
1100 | 1100 | ) |
|
1101 | 1101 | |
|
1102 | 1102 | # create reply capability |
|
1103 | 1103 | capsblob = bundle2.encodecaps( |
|
1104 | 1104 | bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client') |
|
1105 | 1105 | ) |
|
1106 | 1106 | bundler.newpart(b'replycaps', data=capsblob) |
|
1107 | 1107 | replyhandlers = [] |
|
1108 | 1108 | for partgenname in b2partsgenorder: |
|
1109 | 1109 | partgen = b2partsgenmapping[partgenname] |
|
1110 | 1110 | ret = partgen(pushop, bundler) |
|
1111 | 1111 | if callable(ret): |
|
1112 | 1112 | replyhandlers.append(ret) |
|
1113 | 1113 | # do not push if nothing to push |
|
1114 | 1114 | if bundler.nbparts <= 1: |
|
1115 | 1115 | return |
|
1116 | 1116 | stream = util.chunkbuffer(bundler.getchunks()) |
|
1117 | 1117 | try: |
|
1118 | 1118 | try: |
|
1119 | 1119 | with pushop.remote.commandexecutor() as e: |
|
1120 | 1120 | reply = e.callcommand( |
|
1121 | 1121 | b'unbundle', |
|
1122 | 1122 | { |
|
1123 | 1123 | b'bundle': stream, |
|
1124 | 1124 | b'heads': [b'force'], |
|
1125 | 1125 | b'url': pushop.remote.url(), |
|
1126 | 1126 | }, |
|
1127 | 1127 | ).result() |
|
1128 | 1128 | except error.BundleValueError as exc: |
|
1129 | 1129 | raise error.Abort(_(b'missing support for %s') % exc) |
|
1130 | 1130 | try: |
|
1131 | 1131 | trgetter = None |
|
1132 | 1132 | if pushback: |
|
1133 | 1133 | trgetter = pushop.trmanager.transaction |
|
1134 | 1134 | op = bundle2.processbundle(pushop.repo, reply, trgetter) |
|
1135 | 1135 | except error.BundleValueError as exc: |
|
1136 | 1136 | raise error.Abort(_(b'missing support for %s') % exc) |
|
1137 | 1137 | except bundle2.AbortFromPart as exc: |
|
1138 | 1138 | pushop.ui.error(_(b'remote: %s\n') % exc) |
|
1139 | 1139 | if exc.hint is not None: |
|
1140 | 1140 | pushop.ui.error(_(b'remote: %s\n') % (b'(%s)' % exc.hint)) |
|
1141 | 1141 | raise error.Abort(_(b'push failed on remote')) |
|
1142 | 1142 | except error.PushkeyFailed as exc: |
|
1143 | 1143 | partid = int(exc.partid) |
|
1144 | 1144 | if partid not in pushop.pkfailcb: |
|
1145 | 1145 | raise |
|
1146 | 1146 | pushop.pkfailcb[partid](pushop, exc) |
|
1147 | 1147 | for rephand in replyhandlers: |
|
1148 | 1148 | rephand(op) |
|
1149 | 1149 | |
|
1150 | 1150 | |
|
1151 | 1151 | def _pushchangeset(pushop): |
|
1152 | 1152 | """Make the actual push of changeset bundle to remote repo""" |
|
1153 | 1153 | if b'changesets' in pushop.stepsdone: |
|
1154 | 1154 | return |
|
1155 | 1155 | pushop.stepsdone.add(b'changesets') |
|
1156 | 1156 | if not _pushcheckoutgoing(pushop): |
|
1157 | 1157 | return |
|
1158 | 1158 | |
|
1159 | 1159 | # Should have verified this in push(). |
|
1160 | 1160 | assert pushop.remote.capable(b'unbundle') |
|
1161 | 1161 | |
|
1162 | 1162 | pushop.repo.prepushoutgoinghooks(pushop) |
|
1163 | 1163 | outgoing = pushop.outgoing |
|
1164 | 1164 | # TODO: get bundlecaps from remote |
|
1165 | 1165 | bundlecaps = None |
|
1166 | 1166 | # create a changegroup from local |
|
1167 | 1167 | if pushop.revs is None and not ( |
|
1168 | 1168 | outgoing.excluded or pushop.repo.changelog.filteredrevs |
|
1169 | 1169 | ): |
|
1170 | 1170 | # push everything, |
|
1171 | 1171 | # use the fast path, no race possible on push |
|
1172 | 1172 | cg = changegroup.makechangegroup( |
|
1173 | 1173 | pushop.repo, |
|
1174 | 1174 | outgoing, |
|
1175 | 1175 | b'01', |
|
1176 | 1176 | b'push', |
|
1177 | 1177 | fastpath=True, |
|
1178 | 1178 | bundlecaps=bundlecaps, |
|
1179 | 1179 | ) |
|
1180 | 1180 | else: |
|
1181 | 1181 | cg = changegroup.makechangegroup( |
|
1182 | 1182 | pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps |
|
1183 | 1183 | ) |
|
1184 | 1184 | |
|
1185 | 1185 | # apply changegroup to remote |
|
1186 | 1186 | # local repo finds heads on server, finds out what |
|
1187 | 1187 | # revs it must push. once revs transferred, if server |
|
1188 | 1188 |     # revs it must push. Once revs are transferred, if the server
|
1189 | 1189 | # commit/push race), server aborts. |
|
1190 | 1190 | if pushop.force: |
|
1191 | 1191 | remoteheads = [b'force'] |
|
1192 | 1192 | else: |
|
1193 | 1193 | remoteheads = pushop.remoteheads |
|
1194 | 1194 | # ssh: return remote's addchangegroup() |
|
1195 | 1195 | # http: return remote's addchangegroup() or 0 for error |
|
1196 | 1196 | pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url()) |
|
1197 | 1197 | |
|
1198 | 1198 | |
|
1199 | 1199 | def _pushsyncphase(pushop): |
|
1200 | 1200 | """synchronise phase information locally and remotely""" |
|
1201 | 1201 | cheads = pushop.commonheads |
|
1202 | 1202 | # even when we don't push, exchanging phase data is useful |
|
1203 | 1203 | remotephases = listkeys(pushop.remote, b'phases') |
|
1204 | 1204 | if ( |
|
1205 | 1205 | pushop.ui.configbool(b'ui', b'_usedassubrepo') |
|
1206 | 1206 | and remotephases # server supports phases |
|
1207 | 1207 | and pushop.cgresult is None # nothing was pushed |
|
1208 | 1208 | and remotephases.get(b'publishing', False) |
|
1209 | 1209 | ): |
|
1210 | 1210 | # When: |
|
1211 | 1211 | # - this is a subrepo push |
|
1212 | 1212 |         # - and remote supports phases

1213 | 1213 |         # - and no changeset was pushed

1214 | 1214 |         # - and remote is publishing

1215 | 1215 |         # We may be in the issue 3871 case!

1216 | 1216 |         # We drop the phase synchronisation normally done as a

1217 | 1217 |         # courtesy; it could publish changesets that are still

1218 | 1218 |         # draft locally.
|
1219 | 1219 | remotephases = {b'publishing': b'True'} |
|
1220 | 1220 |     if not remotephases:  # old server or public-only reply from a non-publishing one
|
1221 | 1221 | _localphasemove(pushop, cheads) |
|
1222 | 1222 | # don't push any phase data as there is nothing to push |
|
1223 | 1223 | else: |
|
1224 | 1224 | ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases) |
|
1225 | 1225 | pheads, droots = ana |
|
1226 | 1226 | ### Apply remote phase on local |
|
1227 | 1227 | if remotephases.get(b'publishing', False): |
|
1228 | 1228 | _localphasemove(pushop, cheads) |
|
1229 | 1229 | else: # publish = False |
|
1230 | 1230 | _localphasemove(pushop, pheads) |
|
1231 | 1231 | _localphasemove(pushop, cheads, phases.draft) |
|
1232 | 1232 | ### Apply local phase on remote |
|
1233 | 1233 | |
|
1234 | 1234 | if pushop.cgresult: |
|
1235 | 1235 | if b'phases' in pushop.stepsdone: |
|
1236 | 1236 | # phases already pushed though bundle2 |
|
1237 | 1237 | return |
|
1238 | 1238 | outdated = pushop.outdatedphases |
|
1239 | 1239 | else: |
|
1240 | 1240 | outdated = pushop.fallbackoutdatedphases |
|
1241 | 1241 | |
|
1242 | 1242 | pushop.stepsdone.add(b'phases') |
|
1243 | 1243 | |
|
1244 | 1244 | # filter heads already turned public by the push |
|
1245 | 1245 | outdated = [c for c in outdated if c.node() not in pheads] |
|
1246 | 1246 | # fallback to independent pushkey command |
|
1247 | 1247 | for newremotehead in outdated: |
|
1248 | 1248 | with pushop.remote.commandexecutor() as e: |
|
1249 | 1249 | r = e.callcommand( |
|
1250 | 1250 | b'pushkey', |
|
1251 | 1251 | { |
|
1252 | 1252 | b'namespace': b'phases', |
|
1253 | 1253 | b'key': newremotehead.hex(), |
|
1254 | 1254 | b'old': b'%d' % phases.draft, |
|
1255 | 1255 | b'new': b'%d' % phases.public, |
|
1256 | 1256 | }, |
|
1257 | 1257 | ).result() |
|
1258 | 1258 | |
|
1259 | 1259 | if not r: |
|
1260 | 1260 | pushop.ui.warn( |
|
1261 | 1261 | _(b'updating %s to public failed!\n') % newremotehead |
|
1262 | 1262 | ) |
|
1263 | 1263 | |
|
1264 | 1264 | |
|
1265 | 1265 | def _localphasemove(pushop, nodes, phase=phases.public): |
|
1266 | 1266 | """move <nodes> to <phase> in the local source repo""" |
|
1267 | 1267 | if pushop.trmanager: |
|
1268 | 1268 | phases.advanceboundary( |
|
1269 | 1269 | pushop.repo, pushop.trmanager.transaction(), phase, nodes |
|
1270 | 1270 | ) |
|
1271 | 1271 | else: |
|
1272 | 1272 | # repo is not locked, do not change any phases! |
|
1273 | 1273 | # Informs the user that phases should have been moved when |
|
1274 | 1274 | # applicable. |
|
1275 | 1275 | actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()] |
|
1276 | 1276 | phasestr = phases.phasenames[phase] |
|
1277 | 1277 | if actualmoves: |
|
1278 | 1278 | pushop.ui.status( |
|
1279 | 1279 | _( |
|
1280 | 1280 | b'cannot lock source repo, skipping ' |
|
1281 | 1281 | b'local %s phase update\n' |
|
1282 | 1282 | ) |
|
1283 | 1283 | % phasestr |
|
1284 | 1284 | ) |
|
1285 | 1285 | |
|
1286 | 1286 | |
|
1287 | 1287 | def _pushobsolete(pushop): |
|
1288 | 1288 | """utility function to push obsolete markers to a remote""" |
|
1289 | 1289 | if b'obsmarkers' in pushop.stepsdone: |
|
1290 | 1290 | return |
|
1291 | 1291 | repo = pushop.repo |
|
1292 | 1292 | remote = pushop.remote |
|
1293 | 1293 | pushop.stepsdone.add(b'obsmarkers') |
|
1294 | 1294 | if pushop.outobsmarkers: |
|
1295 | 1295 | pushop.ui.debug(b'try to push obsolete markers to remote\n') |
|
1296 | 1296 | rslts = [] |
|
1297 | 1297 | markers = obsutil.sortedmarkers(pushop.outobsmarkers) |
|
1298 | 1298 | remotedata = obsolete._pushkeyescape(markers) |
|
1299 | 1299 | for key in sorted(remotedata, reverse=True): |
|
1300 | 1300 | # reverse sort to ensure we end with dump0 |
|
1301 | 1301 | data = remotedata[key] |
|
1302 | 1302 | rslts.append(remote.pushkey(b'obsolete', key, b'', data)) |
|
1303 | 1303 | if [r for r in rslts if not r]: |
|
1304 | 1304 | msg = _(b'failed to push some obsolete markers!\n') |
|
1305 | 1305 | repo.ui.warn(msg) |
|
1306 | 1306 | |
|
1307 | 1307 | |
|
1308 | 1308 | def _pushbookmark(pushop): |
|
1309 | 1309 | """Update bookmark position on remote""" |
|
1310 | 1310 | if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone: |
|
1311 | 1311 | return |
|
1312 | 1312 | pushop.stepsdone.add(b'bookmarks') |
|
1313 | 1313 | ui = pushop.ui |
|
1314 | 1314 | remote = pushop.remote |
|
1315 | 1315 | |
|
1316 | 1316 | for b, old, new in pushop.outbookmarks: |
|
1317 | 1317 | action = b'update' |
|
1318 | 1318 | if not old: |
|
1319 | 1319 | action = b'export' |
|
1320 | 1320 | elif not new: |
|
1321 | 1321 | action = b'delete' |
|
1322 | 1322 | |
|
1323 | 1323 | with remote.commandexecutor() as e: |
|
1324 | 1324 | r = e.callcommand( |
|
1325 | 1325 | b'pushkey', |
|
1326 | 1326 | { |
|
1327 | 1327 | b'namespace': b'bookmarks', |
|
1328 | 1328 | b'key': b, |
|
1329 | 1329 | b'old': hex(old), |
|
1330 | 1330 | b'new': hex(new), |
|
1331 | 1331 | }, |
|
1332 | 1332 | ).result() |
|
1333 | 1333 | |
|
1334 | 1334 | if r: |
|
1335 | 1335 | ui.status(bookmsgmap[action][0] % b) |
|
1336 | 1336 | else: |
|
1337 | 1337 | ui.warn(bookmsgmap[action][1] % b) |
|
1338 | 1338 |         # discovery can have set the value from an invalid entry
|
1339 | 1339 | if pushop.bkresult is not None: |
|
1340 | 1340 | pushop.bkresult = 1 |
|
1341 | 1341 | |
|
1342 | 1342 | |
|
1343 | 1343 | class pulloperation(object): |
|
1344 | 1344 |     """An object that represents a single pull operation

1345 | 1345 | 

1346 | 1346 |     Its purpose is to carry pull-related state and very common operations.

1347 | 1347 | 

1348 | 1348 |     A new one should be created at the beginning of each pull and discarded
|
1349 | 1349 | afterward. |
|
1350 | 1350 | """ |
|
1351 | 1351 | |
|
1352 | 1352 | def __init__( |
|
1353 | 1353 | self, |
|
1354 | 1354 | repo, |
|
1355 | 1355 | remote, |
|
1356 | 1356 | heads=None, |
|
1357 | 1357 | force=False, |
|
1358 | 1358 | bookmarks=(), |
|
1359 | 1359 | remotebookmarks=None, |
|
1360 | 1360 | streamclonerequested=None, |
|
1361 | 1361 | includepats=None, |
|
1362 | 1362 | excludepats=None, |
|
1363 | 1363 | depth=None, |
|
1364 | 1364 | ): |
|
1365 | 1365 | # repo we pull into |
|
1366 | 1366 | self.repo = repo |
|
1367 | 1367 | # repo we pull from |
|
1368 | 1368 | self.remote = remote |
|
1369 | 1369 | # revision we try to pull (None is "all") |
|
1370 | 1370 | self.heads = heads |
|
1371 | 1371 |         # bookmarks pulled explicitly
|
1372 | 1372 | self.explicitbookmarks = [ |
|
1373 | 1373 | repo._bookmarks.expandname(bookmark) for bookmark in bookmarks |
|
1374 | 1374 | ] |
|
1375 | 1375 | # do we force pull? |
|
1376 | 1376 | self.force = force |
|
1377 | 1377 | # whether a streaming clone was requested |
|
1378 | 1378 | self.streamclonerequested = streamclonerequested |
|
1379 | 1379 | # transaction manager |
|
1380 | 1380 | self.trmanager = None |
|
1381 | 1381 |         # set of common changesets between local and remote before pull

1382 | 1382 |         self.common = None

1383 | 1383 |         # set of pulled heads

1384 | 1384 |         self.rheads = None

1385 | 1385 |         # list of missing changesets to fetch remotely

1386 | 1386 |         self.fetch = None

1387 | 1387 |         # remote bookmarks data

1388 | 1388 |         self.remotebookmarks = remotebookmarks

1389 | 1389 |         # result of changegroup pulling (used as return code by pull)

1390 | 1390 |         self.cgresult = None

1391 | 1391 |         # list of steps already done
|
1392 | 1392 | self.stepsdone = set() |
|
1393 | 1393 | # Whether we attempted a clone from pre-generated bundles. |
|
1394 | 1394 | self.clonebundleattempted = False |
|
1395 | 1395 | # Set of file patterns to include. |
|
1396 | 1396 | self.includepats = includepats |
|
1397 | 1397 | # Set of file patterns to exclude. |
|
1398 | 1398 | self.excludepats = excludepats |
|
1399 | 1399 | # Number of ancestor changesets to pull from each pulled head. |
|
1400 | 1400 | self.depth = depth |
|
1401 | 1401 | |
|
1402 | 1402 | @util.propertycache |
|
1403 | 1403 | def pulledsubset(self): |
|
1404 | 1404 |         """heads of the set of changesets targeted by the pull"""

1405 | 1405 |         # compute target subset

1406 | 1406 |         if self.heads is None:

1407 | 1407 |             # We pulled everything possible
|
1408 | 1408 | # sync on everything common |
|
1409 | 1409 | c = set(self.common) |
|
1410 | 1410 | ret = list(self.common) |
|
1411 | 1411 | for n in self.rheads: |
|
1412 | 1412 | if n not in c: |
|
1413 | 1413 | ret.append(n) |
|
1414 | 1414 | return ret |
|
1415 | 1415 | else: |
|
1416 | 1416 | # We pulled a specific subset |
|
1417 | 1417 | # sync on this subset |
|
1418 | 1418 | return self.heads |
|
1419 | 1419 | |
|
1420 | 1420 | @util.propertycache |
|
1421 | 1421 | def canusebundle2(self): |
|
1422 | 1422 | return not _forcebundle1(self) |
|
1423 | 1423 | |
|
1424 | 1424 | @util.propertycache |
|
1425 | 1425 | def remotebundle2caps(self): |
|
1426 | 1426 | return bundle2.bundle2caps(self.remote) |
|
1427 | 1427 | |
|
1428 | 1428 | def gettransaction(self): |
|
1429 | 1429 | # deprecated; talk to trmanager directly |
|
1430 | 1430 | return self.trmanager.transaction() |
|
1431 | 1431 | |
|
1432 | 1432 | |
|
1433 | 1433 | class transactionmanager(util.transactional): |
|
1434 | 1434 | """An object to manage the life cycle of a transaction |
|
1435 | 1435 | |
|
1436 | 1436 | It creates the transaction on demand and calls the appropriate hooks when |
|
1437 | 1437 | closing the transaction.""" |
|
1438 | 1438 | |
|
1439 | 1439 | def __init__(self, repo, source, url): |
|
1440 | 1440 | self.repo = repo |
|
1441 | 1441 | self.source = source |
|
1442 | 1442 | self.url = url |
|
1443 | 1443 | self._tr = None |
|
1444 | 1444 | |
|
1445 | 1445 | def transaction(self): |
|
1446 | 1446 | """Return an open transaction object, constructing if necessary""" |
|
1447 | 1447 | if not self._tr: |
|
1448 | 1448 | trname = b'%s\n%s' % (self.source, util.hidepassword(self.url)) |
|
1449 | 1449 | self._tr = self.repo.transaction(trname) |
|
1450 | 1450 | self._tr.hookargs[b'source'] = self.source |
|
1451 | 1451 | self._tr.hookargs[b'url'] = self.url |
|
1452 | 1452 | return self._tr |
|
1453 | 1453 | |
|
1454 | 1454 | def close(self): |
|
1455 | 1455 | """close transaction if created""" |
|
1456 | 1456 | if self._tr is not None: |
|
1457 | 1457 | self._tr.close() |
|
1458 | 1458 | |
|
1459 | 1459 | def release(self): |
|
1460 | 1460 | """release transaction if created""" |
|
1461 | 1461 | if self._tr is not None: |
|
1462 | 1462 | self._tr.release() |
|
1463 | 1463 | |
|
1464 | 1464 | |
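# Usage sketch (hedged): transactionmanager derives from util.transactional,
# so the usual close/release discipline applies; the b'pull' source and URL
# below are hypothetical.
#
#     trmanager = transactionmanager(repo, b'pull', remote.url())
#     try:
#         tr = trmanager.transaction()   # created lazily on first use
#         ...                            # apply incoming data under tr
#         trmanager.close()              # commits and fires hooks
#     finally:
#         trmanager.release()            # rolls back if close() never ran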
|
1465 | 1465 | def listkeys(remote, namespace): |
|
1466 | 1466 | with remote.commandexecutor() as e: |
|
1467 | 1467 | return e.callcommand(b'listkeys', {b'namespace': namespace}).result() |
|
1468 | 1468 | |
|
1469 | 1469 | |
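# Usage sketch, mirroring the call sites above: fetch the remote phase map
# and check whether the server is publishing.
#
#     remotephases = listkeys(remote, b'phases')
#     publishing = remotephases.get(b'publishing', False)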
|
1470 | 1470 | def _fullpullbundle2(repo, pullop): |
|
1471 | 1471 | # The server may send a partial reply, i.e. when inlining |
|
1472 | 1472 | # pre-computed bundles. In that case, update the common |
|
1473 | 1473 | # set based on the results and pull another bundle. |
|
1474 | 1474 | # |
|
1475 | 1475 | # There are two indicators that the process is finished: |
|
1476 | 1476 | # - no changeset has been added, or |
|
1477 | 1477 | # - all remote heads are known locally. |
|
1478 | 1478 | # The head check must use the unfiltered view as obsoletion |
|
1479 | 1479 | # markers can hide heads. |
|
1480 | 1480 | unfi = repo.unfiltered() |
|
1481 | 1481 | unficl = unfi.changelog |
|
1482 | 1482 | |
|
1483 | 1483 | def headsofdiff(h1, h2): |
|
1484 | 1484 | """Returns heads(h1 % h2)""" |
|
1485 | 1485 | res = unfi.set(b'heads(%ln %% %ln)', h1, h2) |
|
1486 | 1486 | return {ctx.node() for ctx in res} |
|
1487 | 1487 | |
|
1488 | 1488 | def headsofunion(h1, h2): |
|
1489 | 1489 | """Returns heads((h1 + h2) - null)""" |
|
1490 | 1490 | res = unfi.set(b'heads((%ln + %ln - null))', h1, h2) |
|
1491 | 1491 | return {ctx.node() for ctx in res} |
|
1492 | 1492 | |
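    # Revset note for the two helpers above: '%' is the "only" operator,
    # so heads(h1 % h2) yields the heads among ancestors of h1 that are
    # not ancestors of h2.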
|
1493 | 1493 | while True: |
|
1494 | 1494 | old_heads = unficl.heads() |
|
1495 | 1495 | clstart = len(unficl) |
|
1496 | 1496 | _pullbundle2(pullop) |
|
1497 | 1497 | if requirements.NARROW_REQUIREMENT in repo.requirements: |
|
1498 | 1498 | # XXX narrow clones filter the heads on the server side during |
|
1499 | 1499 | # XXX getbundle and result in partial replies as well. |
|
1500 | 1500 | # XXX Disable pull bundles in this case as band aid to avoid |
|
1501 | 1501 | # XXX extra round trips. |
|
1502 | 1502 | break |
|
1503 | 1503 | if clstart == len(unficl): |
|
1504 | 1504 | break |
|
1505 | 1505 | if all(unficl.hasnode(n) for n in pullop.rheads): |
|
1506 | 1506 | break |
|
1507 | 1507 | new_heads = headsofdiff(unficl.heads(), old_heads) |
|
1508 | 1508 | pullop.common = headsofunion(new_heads, pullop.common) |
|
1509 | 1509 | pullop.rheads = set(pullop.rheads) - pullop.common |
|
1510 | 1510 | |
|
1511 | 1511 | |
|
1512 | 1512 | def add_confirm_callback(repo, pullop): |
|
1513 | 1513 | """adds a finalize callback to transaction which can be used to show stats |
|
1514 | 1514 | to user and confirm the pull before committing transaction""" |
|
1515 | 1515 | |
|
1516 | 1516 | tr = pullop.trmanager.transaction() |
|
1517 | 1517 | scmutil.registersummarycallback( |
|
1518 | 1518 | repo, tr, txnname=b'pull', as_validator=True |
|
1519 | 1519 | ) |
|
1520 | 1520 | reporef = weakref.ref(repo.unfiltered()) |
|
1521 | 1521 | |
|
1522 | 1522 | def prompt(tr): |
|
1523 | 1523 | repo = reporef() |
|
1524 | 1524 | cm = _(b'accept incoming changes (yn)?$$ &Yes $$ &No') |
|
1525 | 1525 | if repo.ui.promptchoice(cm): |
|
1526 | 1526 | raise error.Abort(b"user aborted") |
|
1527 | 1527 | |
|
1528 | 1528 | tr.addvalidator(b'900-pull-prompt', prompt) |
|
1529 | 1529 | |
|
1530 | 1530 | |
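
# Validators registered on the transaction run right before it commits, and
# raising from one vetoes the entire pull. A minimal sketch of the same hook
# point with a hypothetical validator:

def _refuse_when_frozen(tr):
    raise error.Abort(b"repository is frozen, rejecting incoming changes")

# registered the same way as the prompt above:
#     tr.addvalidator(b'900-freeze-check', _refuse_when_frozen)
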
|
1531 | 1531 | def pull( |
|
1532 | 1532 | repo, |
|
1533 | 1533 | remote, |
|
1534 | 1534 | heads=None, |
|
1535 | 1535 | force=False, |
|
1536 | 1536 | bookmarks=(), |
|
1537 | 1537 | opargs=None, |
|
1538 | 1538 | streamclonerequested=None, |
|
1539 | 1539 | includepats=None, |
|
1540 | 1540 | excludepats=None, |
|
1541 | 1541 | depth=None, |
|
1542 | 1542 | confirm=None, |
|
1543 | 1543 | ): |
|
1544 | 1544 | """Fetch repository data from a remote. |
|
1545 | 1545 | |
|
1546 | 1546 | This is the main function used to retrieve data from a remote repository. |
|
1547 | 1547 | |
|
1548 | 1548 | ``repo`` is the local repository to clone into. |
|
1549 | 1549 | ``remote`` is a peer instance. |
|
1550 | 1550 | ``heads`` is an iterable of revisions we want to pull. ``None`` (the |
|
1551 | 1551 | default) means to pull everything from the remote. |
|
1552 | 1552 | ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By |
|
1553 | 1553 | default, all remote bookmarks are pulled. |
|
1554 | 1554 | ``opargs`` are additional keyword arguments to pass to ``pulloperation`` |
|
1555 | 1555 | initialization. |
|
1556 | 1556 | ``streamclonerequested`` is a boolean indicating whether a "streaming |
|
1557 | 1557 | clone" is requested. A "streaming clone" is essentially a raw file copy |
|
1558 | 1558 | of revlogs from the server. This only works when the local repository is |
|
1559 | 1559 | empty. The default value of ``None`` means to respect the server |
|
1560 | 1560 | configuration for preferring stream clones. |
|
1561 | 1561 | ``includepats`` and ``excludepats`` define explicit file patterns to |
|
1562 | 1562 | include and exclude in storage, respectively. If not defined, narrow |
|
1563 | 1563 | patterns from the repo instance are used, if available. |
|
1564 | 1564 | ``depth`` is an integer indicating the DAG depth of history we're |
|
1565 | 1565 | interested in. If defined, for each revision specified in ``heads``, we |
|
1566 | 1566 | will fetch up to this many of its ancestors and data associated with them. |
|
1567 | 1567 | ``confirm`` is a boolean indicating whether the pull should be confirmed |
|
1568 | 1568 | before committing the transaction. This overrides HGPLAIN. |
|
1569 | 1569 | |
|
1570 | 1570 | Returns the ``pulloperation`` created for this pull. |
|
1571 | 1571 | """ |
|
1572 | 1572 | if opargs is None: |
|
1573 | 1573 | opargs = {} |
|
1574 | 1574 | |
|
1575 | 1575 | # We allow the narrow patterns to be passed in explicitly to provide more |
|
1576 | 1576 | # flexibility for API consumers. |
|
1577 | 1577 | if includepats or excludepats: |
|
1578 | 1578 | includepats = includepats or set() |
|
1579 | 1579 | excludepats = excludepats or set() |
|
1580 | 1580 | else: |
|
1581 | 1581 | includepats, excludepats = repo.narrowpats |
|
1582 | 1582 | |
|
1583 | 1583 | narrowspec.validatepatterns(includepats) |
|
1584 | 1584 | narrowspec.validatepatterns(excludepats) |
|
1585 | 1585 | |
|
1586 | 1586 | pullop = pulloperation( |
|
1587 | 1587 | repo, |
|
1588 | 1588 | remote, |
|
1589 | 1589 | heads, |
|
1590 | 1590 | force, |
|
1591 | 1591 | bookmarks=bookmarks, |
|
1592 | 1592 | streamclonerequested=streamclonerequested, |
|
1593 | 1593 | includepats=includepats, |
|
1594 | 1594 | excludepats=excludepats, |
|
1595 | 1595 | depth=depth, |
|
1596 | 1596 | **pycompat.strkwargs(opargs) |
|
1597 | 1597 | ) |
|
1598 | 1598 | |
|
1599 | 1599 | peerlocal = pullop.remote.local() |
|
1600 | 1600 | if peerlocal: |
|
1601 | 1601 | missing = set(peerlocal.requirements) - pullop.repo.supported |
|
1602 | 1602 | if missing: |
|
1603 | 1603 | msg = _( |
|
1604 | 1604 | b"required features are not" |
|
1605 | 1605 | b" supported in the destination:" |
|
1606 | 1606 | b" %s" |
|
1607 | 1607 | ) % (b', '.join(sorted(missing))) |
|
1608 | 1608 | raise error.Abort(msg) |
|
1609 | 1609 | |
|
1610 | 1610 | pullop.trmanager = transactionmanager(repo, b'pull', remote.url()) |
|
1611 | 1611 | wlock = util.nullcontextmanager() |
|
1612 | 1612 | if not bookmod.bookmarksinstore(repo): |
|
1613 | 1613 | wlock = repo.wlock() |
|
1614 | 1614 | with wlock, repo.lock(), pullop.trmanager: |
|
1615 | 1615 | if confirm or ( |
|
1616 | 1616 | repo.ui.configbool(b"pull", b"confirm") and not repo.ui.plain() |
|
1617 | 1617 | ): |
|
1618 | 1618 | add_confirm_callback(repo, pullop) |
|
1619 | 1619 | |
|
1620 | 1620 | # Use the modern wire protocol, if available. |
|
1621 | 1621 | if remote.capable(b'command-changesetdata'): |
|
1622 | 1622 | exchangev2.pull(pullop) |
|
1623 | 1623 | else: |
|
1624 | 1624 | # This should ideally be in _pullbundle2(). However, it needs to run |
|
1625 | 1625 | # before discovery to avoid extra work. |
|
1626 | 1626 | _maybeapplyclonebundle(pullop) |
|
1627 | 1627 | streamclone.maybeperformlegacystreamclone(pullop) |
|
1628 | 1628 | _pulldiscovery(pullop) |
|
1629 | 1629 | if pullop.canusebundle2: |
|
1630 | 1630 | _fullpullbundle2(repo, pullop) |
|
1631 | 1631 | _pullchangeset(pullop) |
|
1632 | 1632 | _pullphase(pullop) |
|
1633 | 1633 | _pullbookmarks(pullop) |
|
1634 | 1634 | _pullobsolete(pullop) |
|
1635 | 1635 | |
|
1636 | 1636 | # storing remotenames |
|
1637 | 1637 | if repo.ui.configbool(b'experimental', b'remotenames'): |
|
1638 | 1638 | logexchange.pullremotenames(repo, remote) |
|
1639 | 1639 | |
|
1640 | 1640 | return pullop |
|
1641 | 1641 | |
|
1642 | 1642 | |
|
1643 | 1643 | # list of steps to perform discovery before pull |
|
1644 | 1644 | pulldiscoveryorder = [] |
|
1645 | 1645 | |
|
1646 | 1646 | # Mapping between step name and function |
|
1647 | 1647 | # |
|
1648 | 1648 | # This exists to help extensions wrap steps if necessary |
|
1649 | 1649 | pulldiscoverymapping = {} |
|
1650 | 1650 | |
|
1651 | 1651 | |
|
1652 | 1652 | def pulldiscovery(stepname): |
|
1653 | 1653 | """decorator for function performing discovery before pull |
|
1654 | 1654 | |
|
1655 | 1655 | The function is added to the step -> function mapping and appended to the |
|
1656 | 1656 | list of steps. Beware that decorated functions will be added in order (this

1657 | 1657 | may matter).

1658 | 1658 |

1659 | 1659 | You can only use this decorator for a new step; to wrap a step from an

1660 | 1660 | extension, modify the pulldiscoverymapping dictionary directly."""
|
1661 | 1661 | |
|
1662 | 1662 | def dec(func): |
|
1663 | 1663 | assert stepname not in pulldiscoverymapping |
|
1664 | 1664 | pulldiscoverymapping[stepname] = func |
|
1665 | 1665 | pulldiscoveryorder.append(stepname) |
|
1666 | 1666 | return func |
|
1667 | 1667 | |
|
1668 | 1668 | return dec |
|
1669 | 1669 | |
|
1670 | 1670 | |
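
# A sketch of registering an extra discovery step with the decorator above
# (the step name and body are hypothetical):

@pulldiscovery(b'my-extension:extra')
def _pulldiscoveryextra(pullop):
    """gather extension-specific data before changegroup discovery"""
    pullop.repo.ui.debug(b'running extra pull discovery step\n')
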
|
1671 | 1671 | def _pulldiscovery(pullop): |
|
1672 | 1672 | """Run all discovery steps""" |
|
1673 | 1673 | for stepname in pulldiscoveryorder: |
|
1674 | 1674 | step = pulldiscoverymapping[stepname] |
|
1675 | 1675 | step(pullop) |
|
1676 | 1676 | |
|
1677 | 1677 | |
|
1678 | 1678 | @pulldiscovery(b'b1:bookmarks') |
|
1679 | 1679 | def _pullbookmarkbundle1(pullop): |
|
1680 | 1680 | """fetch bookmark data in bundle1 case |
|
1681 | 1681 | |
|
1682 | 1682 | If not using bundle2, we have to fetch bookmarks before changeset |
|
1683 | 1683 | discovery to reduce the chance and impact of race conditions.""" |
|
1684 | 1684 | if pullop.remotebookmarks is not None: |
|
1685 | 1685 | return |
|
1686 | 1686 | if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps: |
|
1687 | 1687 | # all known bundle2 servers now support listkeys, but let's be nice with

1688 | 1688 | # new implementations.
|
1689 | 1689 | return |
|
1690 | 1690 | books = listkeys(pullop.remote, b'bookmarks') |
|
1691 | 1691 | pullop.remotebookmarks = bookmod.unhexlifybookmarks(books) |
|
1692 | 1692 | |
|
1693 | 1693 | |
|
1694 | 1694 | @pulldiscovery(b'changegroup') |
|
1695 | 1695 | def _pulldiscoverychangegroup(pullop): |
|
1696 | 1696 | """discovery phase for the pull |
|
1697 | 1697 | |
|
1698 | 1698 | Currently handles changeset discovery only; will change to handle all

1699 | 1699 | discovery at some point."""
|
1700 | 1700 | tmp = discovery.findcommonincoming( |
|
1701 | 1701 | pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force |
|
1702 | 1702 | ) |
|
1703 | 1703 | common, fetch, rheads = tmp |
|
1704 | 1704 | has_node = pullop.repo.unfiltered().changelog.index.has_node |
|
1705 | 1705 | if fetch and rheads: |
|
1706 | 1706 | # If a remote head is filtered locally, put it back in common.
|
1707 | 1707 | # |
|
1708 | 1708 | # This is a hackish solution to catch most "common but locally

1709 | 1709 | # hidden" situations. We do not perform discovery on the unfiltered

1710 | 1710 | # repository because it ends up doing a pathological number of round

1711 | 1711 | # trips for a huge number of changesets we do not care about.

1712 | 1712 | #

1713 | 1713 | # If a set of such "common but filtered" changesets exists on the

1714 | 1714 | # server but does not include a remote head, we will not detect it.
|
1715 | 1715 | scommon = set(common) |
|
1716 | 1716 | for n in rheads: |
|
1717 | 1717 | if has_node(n): |
|
1718 | 1718 | if n not in scommon: |
|
1719 | 1719 | common.append(n) |
|
1720 | 1720 | if set(rheads).issubset(set(common)): |
|
1721 | 1721 | fetch = [] |
|
1722 | 1722 | pullop.common = common |
|
1723 | 1723 | pullop.fetch = fetch |
|
1724 | 1724 | pullop.rheads = rheads |
|
1725 | 1725 | |
|
1726 | 1726 | |
|
1727 | 1727 | def _pullbundle2(pullop): |
|
1728 | 1728 | """pull data using bundle2 |
|
1729 | 1729 | |
|
1730 | 1730 | For now, the only supported data are changegroup.""" |
|
1731 | 1731 | kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')} |
|
1732 | 1732 | |
|
1733 | 1733 | # make ui easier to access |
|
1734 | 1734 | ui = pullop.repo.ui |
|
1735 | 1735 | |
|
1736 | 1736 | # At the moment we don't do stream clones over bundle2. If that is |
|
1737 | 1737 | # implemented then here's where the check for that will go. |
|
1738 | 1738 | streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0] |
|
1739 | 1739 | |
|
1740 | 1740 | # declare pull perimeters |
|
1741 | 1741 | kwargs[b'common'] = pullop.common |
|
1742 | 1742 | kwargs[b'heads'] = pullop.heads or pullop.rheads |
|
1743 | 1743 | |
|
1744 | 1744 | # check server supports narrow and then adding includepats and excludepats |
|
1745 | 1745 | servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP) |
|
1746 | 1746 | if servernarrow and pullop.includepats: |
|
1747 | 1747 | kwargs[b'includepats'] = pullop.includepats |
|
1748 | 1748 | if servernarrow and pullop.excludepats: |
|
1749 | 1749 | kwargs[b'excludepats'] = pullop.excludepats |
|
1750 | 1750 | |
|
1751 | 1751 | if streaming: |
|
1752 | 1752 | kwargs[b'cg'] = False |
|
1753 | 1753 | kwargs[b'stream'] = True |
|
1754 | 1754 | pullop.stepsdone.add(b'changegroup') |
|
1755 | 1755 | pullop.stepsdone.add(b'phases') |
|
1756 | 1756 | |
|
1757 | 1757 | else: |
|
1758 | 1758 | # pulling changegroup |
|
1759 | 1759 | pullop.stepsdone.add(b'changegroup') |
|
1760 | 1760 | |
|
1761 | 1761 | kwargs[b'cg'] = pullop.fetch |
|
1762 | 1762 | |
|
1763 | 1763 | legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange') |
|
1764 | 1764 | hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ()) |
|
1765 | 1765 | if not legacyphase and hasbinaryphase: |
|
1766 | 1766 | kwargs[b'phases'] = True |
|
1767 | 1767 | pullop.stepsdone.add(b'phases') |
|
1768 | 1768 | |
|
1769 | 1769 | if b'listkeys' in pullop.remotebundle2caps: |
|
1770 | 1770 | if b'phases' not in pullop.stepsdone: |
|
1771 | 1771 | kwargs[b'listkeys'] = [b'phases'] |
|
1772 | 1772 | |
|
1773 | 1773 | bookmarksrequested = False |
|
1774 | 1774 | legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange') |
|
1775 | 1775 | hasbinarybook = b'bookmarks' in pullop.remotebundle2caps |
|
1776 | 1776 | |
|
1777 | 1777 | if pullop.remotebookmarks is not None: |
|
1778 | 1778 | pullop.stepsdone.add(b'request-bookmarks') |
|
1779 | 1779 | |
|
1780 | 1780 | if ( |
|
1781 | 1781 | b'request-bookmarks' not in pullop.stepsdone |
|
1782 | 1782 | and pullop.remotebookmarks is None |
|
1783 | 1783 | and not legacybookmark |
|
1784 | 1784 | and hasbinarybook |
|
1785 | 1785 | ): |
|
1786 | 1786 | kwargs[b'bookmarks'] = True |
|
1787 | 1787 | bookmarksrequested = True |
|
1788 | 1788 | |
|
1789 | 1789 | if b'listkeys' in pullop.remotebundle2caps: |
|
1790 | 1790 | if b'request-bookmarks' not in pullop.stepsdone: |
|
1791 | 1791 | # make sure to always include bookmark data when migrating
|
1792 | 1792 | # `hg incoming --bundle` to using this function. |
|
1793 | 1793 | pullop.stepsdone.add(b'request-bookmarks') |
|
1794 | 1794 | kwargs.setdefault(b'listkeys', []).append(b'bookmarks') |
|
1795 | 1795 | |
|
1796 | 1796 | # If this is a full pull / clone and the server supports the clone bundles |
|
1797 | 1797 | # feature, tell the server whether we attempted a clone bundle. The |
|
1798 | 1798 | # presence of this flag indicates the client supports clone bundles. This |
|
1799 | 1799 | # will enable the server to treat clients that support clone bundles |
|
1800 | 1800 | # differently from those that don't. |
|
1801 | 1801 | if ( |
|
1802 | 1802 | pullop.remote.capable(b'clonebundles') |
|
1803 | 1803 | and pullop.heads is None |
|
1804 | 1804 | and list(pullop.common) == [nullid] |
|
1805 | 1805 | ): |
|
1806 | 1806 | kwargs[b'cbattempted'] = pullop.clonebundleattempted |
|
1807 | 1807 | |
|
1808 | 1808 | if streaming: |
|
1809 | 1809 | pullop.repo.ui.status(_(b'streaming all changes\n')) |
|
1810 | 1810 | elif not pullop.fetch: |
|
1811 | 1811 | pullop.repo.ui.status(_(b"no changes found\n")) |
|
1812 | 1812 | pullop.cgresult = 0 |
|
1813 | 1813 | else: |
|
1814 | 1814 | if pullop.heads is None and list(pullop.common) == [nullid]: |
|
1815 | 1815 | pullop.repo.ui.status(_(b"requesting all changes\n")) |
|
1816 | 1816 | if obsolete.isenabled(pullop.repo, obsolete.exchangeopt): |
|
1817 | 1817 | remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps) |
|
1818 | 1818 | if obsolete.commonversion(remoteversions) is not None: |
|
1819 | 1819 | kwargs[b'obsmarkers'] = True |
|
1820 | 1820 | pullop.stepsdone.add(b'obsmarkers') |
|
1821 | 1821 | _pullbundle2extraprepare(pullop, kwargs) |
|
1822 | 1822 | |
|
1823 | 1823 | with pullop.remote.commandexecutor() as e: |
|
1824 | 1824 | args = dict(kwargs) |
|
1825 | 1825 | args[b'source'] = b'pull' |
|
1826 | 1826 | bundle = e.callcommand(b'getbundle', args).result() |
|
1827 | 1827 | |
|
1828 | 1828 | try: |
|
1829 | 1829 | op = bundle2.bundleoperation( |
|
1830 | 1830 | pullop.repo, pullop.gettransaction, source=b'pull' |
|
1831 | 1831 | ) |
|
1832 | 1832 | op.modes[b'bookmarks'] = b'records' |
|
1833 | 1833 | bundle2.processbundle(pullop.repo, bundle, op=op) |
|
1834 | 1834 | except bundle2.AbortFromPart as exc: |
|
1835 | 1835 | pullop.repo.ui.error(_(b'remote: abort: %s\n') % exc) |
|
1836 | 1836 | raise error.Abort(_(b'pull failed on remote'), hint=exc.hint) |
|
1837 | 1837 | except error.BundleValueError as exc: |
|
1838 | 1838 | raise error.Abort(_(b'missing support for %s') % exc) |
|
1839 | 1839 | |
|
1840 | 1840 | if pullop.fetch: |
|
1841 | 1841 | pullop.cgresult = bundle2.combinechangegroupresults(op) |
|
1842 | 1842 | |
|
1843 | 1843 | # processing phases change |
|
1844 | 1844 | for namespace, value in op.records[b'listkeys']: |
|
1845 | 1845 | if namespace == b'phases': |
|
1846 | 1846 | _pullapplyphases(pullop, value) |
|
1847 | 1847 | |
|
1848 | 1848 | # processing bookmark update |
|
1849 | 1849 | if bookmarksrequested: |
|
1850 | 1850 | books = {} |
|
1851 | 1851 | for record in op.records[b'bookmarks']: |
|
1852 | 1852 | books[record[b'bookmark']] = record[b"node"] |
|
1853 | 1853 | pullop.remotebookmarks = books |
|
1854 | 1854 | else: |
|
1855 | 1855 | for namespace, value in op.records[b'listkeys']: |
|
1856 | 1856 | if namespace == b'bookmarks': |
|
1857 | 1857 | pullop.remotebookmarks = bookmod.unhexlifybookmarks(value) |
|
1858 | 1858 | |
|
1859 | 1859 | # bookmark data were either already there or pulled in the bundle |
|
1860 | 1860 | if pullop.remotebookmarks is not None: |
|
1861 | 1861 | _pullbookmarks(pullop) |
|
1862 | 1862 | |
|
1863 | 1863 | |
|
1864 | 1864 | def _pullbundle2extraprepare(pullop, kwargs): |
|
1865 | 1865 | """hook function so that extensions can extend the getbundle call""" |
|
1866 | 1866 | |
|
1867 | 1867 | |
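
# Extensions extend the getbundle call by wrapping the hook above. A sketch
# of an extension-side wrapper (the argument name is hypothetical, and the
# extension would import `extensions` and `exchange` itself):

def _mypullbundle2extraprepare(orig, pullop, kwargs):
    kwargs[b'myextensionarg'] = b'1'  # extra argument forwarded to getbundle
    return orig(pullop, kwargs)

# registered with:
#     extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
#                             _mypullbundle2extraprepare)
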
|
1868 | 1868 | def _pullchangeset(pullop): |
|
1869 | 1869 | """pull changeset from unbundle into the local repo""" |
|
1870 | 1870 | # We delay the open of the transaction as late as possible so we |
|
1871 | 1871 | # don't open transaction for nothing or you break future useful |
|
1872 | 1872 | # rollback call |
|
1873 | 1873 | if b'changegroup' in pullop.stepsdone: |
|
1874 | 1874 | return |
|
1875 | 1875 | pullop.stepsdone.add(b'changegroup') |
|
1876 | 1876 | if not pullop.fetch: |
|
1877 | 1877 | pullop.repo.ui.status(_(b"no changes found\n")) |
|
1878 | 1878 | pullop.cgresult = 0 |
|
1879 | 1879 | return |
|
1880 | 1880 | tr = pullop.gettransaction() |
|
1881 | 1881 | if pullop.heads is None and list(pullop.common) == [nullid]: |
|
1882 | 1882 | pullop.repo.ui.status(_(b"requesting all changes\n")) |
|
1883 | 1883 | elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'): |
|
1884 | 1884 | # issue1320, avoid a race if remote changed after discovery |
|
1885 | 1885 | pullop.heads = pullop.rheads |
|
1886 | 1886 | |
|
1887 | 1887 | if pullop.remote.capable(b'getbundle'): |
|
1888 | 1888 | # TODO: get bundlecaps from remote |
|
1889 | 1889 | cg = pullop.remote.getbundle( |
|
1890 | 1890 | b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads |
|
1891 | 1891 | ) |
|
1892 | 1892 | elif pullop.heads is None: |
|
1893 | 1893 | with pullop.remote.commandexecutor() as e: |
|
1894 | 1894 | cg = e.callcommand( |
|
1895 | 1895 | b'changegroup', |
|
1896 | 1896 | { |
|
1897 | 1897 | b'nodes': pullop.fetch, |
|
1898 | 1898 | b'source': b'pull', |
|
1899 | 1899 | }, |
|
1900 | 1900 | ).result() |
|
1901 | 1901 | |
|
1902 | 1902 | elif not pullop.remote.capable(b'changegroupsubset'): |
|
1903 | 1903 | raise error.Abort( |
|
1904 | 1904 | _( |
|
1905 | 1905 | b"partial pull cannot be done because " |
|
1906 | 1906 | b"other repository doesn't support " |
|
1907 | 1907 | b"changegroupsubset." |
|
1908 | 1908 | ) |
|
1909 | 1909 | ) |
|
1910 | 1910 | else: |
|
1911 | 1911 | with pullop.remote.commandexecutor() as e: |
|
1912 | 1912 | cg = e.callcommand( |
|
1913 | 1913 | b'changegroupsubset', |
|
1914 | 1914 | { |
|
1915 | 1915 | b'bases': pullop.fetch, |
|
1916 | 1916 | b'heads': pullop.heads, |
|
1917 | 1917 | b'source': b'pull', |
|
1918 | 1918 | }, |
|
1919 | 1919 | ).result() |
|
1920 | 1920 | |
|
1921 | 1921 | bundleop = bundle2.applybundle( |
|
1922 | 1922 | pullop.repo, cg, tr, b'pull', pullop.remote.url() |
|
1923 | 1923 | ) |
|
1924 | 1924 | pullop.cgresult = bundle2.combinechangegroupresults(bundleop) |
|
1925 | 1925 | |
|
1926 | 1926 | |
|
1927 | 1927 | def _pullphase(pullop): |
|
1928 | 1928 | # Get remote phases data from remote |
|
1929 | 1929 | if b'phases' in pullop.stepsdone: |
|
1930 | 1930 | return |
|
1931 | 1931 | remotephases = listkeys(pullop.remote, b'phases') |
|
1932 | 1932 | _pullapplyphases(pullop, remotephases) |
|
1933 | 1933 | |
|
1934 | 1934 | |
|
1935 | 1935 | def _pullapplyphases(pullop, remotephases): |
|
1936 | 1936 | """apply phase movement from observed remote state""" |
|
1937 | 1937 | if b'phases' in pullop.stepsdone: |
|
1938 | 1938 | return |
|
1939 | 1939 | pullop.stepsdone.add(b'phases') |
|
1940 | 1940 | publishing = bool(remotephases.get(b'publishing', False)) |
|
1941 | 1941 | if remotephases and not publishing: |
|
1942 | 1942 | # remote is new and non-publishing |
|
1943 | 1943 | pheads, _dr = phases.analyzeremotephases( |
|
1944 | 1944 | pullop.repo, pullop.pulledsubset, remotephases |
|
1945 | 1945 | ) |
|
1946 | 1946 | dheads = pullop.pulledsubset |
|
1947 | 1947 | else: |
|
1948 | 1948 | # Remote is old or publishing all common changesets |
|
1949 | 1949 | # should be seen as public |
|
1950 | 1950 | pheads = pullop.pulledsubset |
|
1951 | 1951 | dheads = [] |
|
1952 | 1952 | unfi = pullop.repo.unfiltered() |
|
1953 | 1953 | phase = unfi._phasecache.phase |
|
1954 | 1954 | rev = unfi.changelog.index.get_rev |
|
1955 | 1955 | public = phases.public |
|
1956 | 1956 | draft = phases.draft |
|
1957 | 1957 | |
|
1958 | 1958 | # exclude changesets already public locally and update the others |
|
1959 | 1959 | pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public] |
|
1960 | 1960 | if pheads: |
|
1961 | 1961 | tr = pullop.gettransaction() |
|
1962 | 1962 | phases.advanceboundary(pullop.repo, tr, public, pheads) |
|
1963 | 1963 | |
|
1964 | 1964 | # exclude changesets already draft locally and update the others |
|
1965 | 1965 | dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft] |
|
1966 | 1966 | if dheads: |
|
1967 | 1967 | tr = pullop.gettransaction() |
|
1968 | 1968 | phases.advanceboundary(pullop.repo, tr, draft, dheads) |
|
1969 | 1969 | |
|
1970 | 1970 | |
|
1971 | 1971 | def _pullbookmarks(pullop): |
|
1972 | 1972 | """process the remote bookmark information to update the local one""" |
|
1973 | 1973 | if b'bookmarks' in pullop.stepsdone: |
|
1974 | 1974 | return |
|
1975 | 1975 | pullop.stepsdone.add(b'bookmarks') |
|
1976 | 1976 | repo = pullop.repo |
|
1977 | 1977 | remotebookmarks = pullop.remotebookmarks |
|
1978 | 1978 | bookmod.updatefromremote( |
|
1979 | 1979 | repo.ui, |
|
1980 | 1980 | repo, |
|
1981 | 1981 | remotebookmarks, |
|
1982 | 1982 | pullop.remote.url(), |
|
1983 | 1983 | pullop.gettransaction, |
|
1984 | 1984 | explicit=pullop.explicitbookmarks, |
|
1985 | 1985 | ) |
|
1986 | 1986 | |
|
1987 | 1987 | |
|
1988 | 1988 | def _pullobsolete(pullop): |
|
1989 | 1989 | """utility function to pull obsolete markers from a remote |
|
1990 | 1990 | |
|
1991 | 1991 | `gettransaction` is a function that returns the pull transaction, creating

1992 | 1992 | one if necessary. We return the transaction to inform the calling code that

1993 | 1993 | a new transaction has been created (when applicable).

1994 | 1994 |

1995 | 1995 | Exists mostly to allow overriding for experimentation purposes."""
|
1996 | 1996 | if b'obsmarkers' in pullop.stepsdone: |
|
1997 | 1997 | return |
|
1998 | 1998 | pullop.stepsdone.add(b'obsmarkers') |
|
1999 | 1999 | tr = None |
|
2000 | 2000 | if obsolete.isenabled(pullop.repo, obsolete.exchangeopt): |
|
2001 | 2001 | pullop.repo.ui.debug(b'fetching remote obsolete markers\n') |
|
2002 | 2002 | remoteobs = listkeys(pullop.remote, b'obsolete') |
|
2003 | 2003 | if b'dump0' in remoteobs: |
|
2004 | 2004 | tr = pullop.gettransaction() |
|
2005 | 2005 | markers = [] |
|
2006 | 2006 | for key in sorted(remoteobs, reverse=True): |
|
2007 | 2007 | if key.startswith(b'dump'): |
|
2008 | 2008 | data = util.b85decode(remoteobs[key]) |
|
2009 | 2009 | version, newmarks = obsolete._readmarkers(data) |
|
2010 | 2010 | markers += newmarks |
|
2011 | 2011 | if markers: |
|
2012 | 2012 | pullop.repo.obsstore.add(tr, markers) |
|
2013 | 2013 | pullop.repo.invalidatevolatilesets() |
|
2014 | 2014 | return tr |
|
2015 | 2015 | |
|
2016 | 2016 | |
|
2017 | 2017 | def applynarrowacl(repo, kwargs): |
|
2018 | 2018 | """Apply narrow fetch access control. |
|
2019 | 2019 | |
|
2020 | 2020 | This massages the named arguments for getbundle wire protocol commands |
|
2021 | 2021 | so requested data is filtered through access control rules. |
|
2022 | 2022 | """ |
|
2023 | 2023 | ui = repo.ui |
|
2024 | 2024 | # TODO this assumes existence of HTTP and is a layering violation. |
|
2025 | 2025 | username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username()) |
|
2026 | 2026 | user_includes = ui.configlist( |
|
2027 | 2027 | _NARROWACL_SECTION, |
|
2028 | 2028 | username + b'.includes', |
|
2029 | 2029 | ui.configlist(_NARROWACL_SECTION, b'default.includes'), |
|
2030 | 2030 | ) |
|
2031 | 2031 | user_excludes = ui.configlist( |
|
2032 | 2032 | _NARROWACL_SECTION, |
|
2033 | 2033 | username + b'.excludes', |
|
2034 | 2034 | ui.configlist(_NARROWACL_SECTION, b'default.excludes'), |
|
2035 | 2035 | ) |
|
2036 | 2036 | if not user_includes: |
|
2037 | 2037 | raise error.Abort( |
|
2038 | 2038 | _(b"%s configuration for user %s is empty") |
|
2039 | 2039 | % (_NARROWACL_SECTION, username) |
|
2040 | 2040 | ) |
|
2041 | 2041 | |
|
2042 | 2042 | user_includes = [ |
|
2043 | 2043 | b'path:.' if p == b'*' else b'path:' + p for p in user_includes |
|
2044 | 2044 | ] |
|
2045 | 2045 | user_excludes = [ |
|
2046 | 2046 | b'path:.' if p == b'*' else b'path:' + p for p in user_excludes |
|
2047 | 2047 | ] |
|
2048 | 2048 | |
|
2049 | 2049 | req_includes = set(kwargs.get('includepats', [])) |
|
2050 | 2050 | req_excludes = set(kwargs.get('excludepats', [])) |
|
2051 | 2051 | |
|
2052 | 2052 | req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns( |
|
2053 | 2053 | req_includes, req_excludes, user_includes, user_excludes |
|
2054 | 2054 | ) |
|
2055 | 2055 | |
|
2056 | 2056 | if invalid_includes: |
|
2057 | 2057 | raise error.Abort( |
|
2058 | 2058 | _(b"The following includes are not accessible for %s: %s") |
|
2059 | 2059 | % (username, stringutil.pprint(invalid_includes)) |
|
2060 | 2060 | ) |
|
2061 | 2061 | |
|
2062 | 2062 | new_args = {} |
|
2063 | 2063 | new_args.update(kwargs) |
|
2064 | 2064 | new_args['narrow'] = True |
|
2065 | 2065 | new_args['narrow_acl'] = True |
|
2066 | 2066 | new_args['includepats'] = req_includes |
|
2067 | 2067 | if req_excludes: |
|
2068 | 2068 | new_args['excludepats'] = req_excludes |
|
2069 | 2069 | |
|
2070 | 2070 | return new_args |
|
2071 | 2071 | |
|
2072 | 2072 | |
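
# The per-user lookup above reads from the server's configuration. A sketch
# of such a section in the server-side hgrc, assuming _NARROWACL_SECTION
# (defined elsewhere in this module) resolves to "narrowacl":
#
#     [narrowacl]
#     default.includes = *
#     alice.includes = src, docs
#     alice.excludes = src/secret
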
|
2073 | 2073 | def _computeellipsis(repo, common, heads, known, match, depth=None): |
|
2074 | 2074 | """Compute the shape of a narrowed DAG. |
|
2075 | 2075 | |
|
2076 | 2076 | Args: |
|
2077 | 2077 | repo: The repository we're transferring. |
|
2078 | 2078 | common: The roots of the DAG range we're transferring. |
|
2079 | 2079 | May be just [nullid], which means all ancestors of heads. |
|
2080 | 2080 | heads: The heads of the DAG range we're transferring. |
|
2081 | 2081 | match: The narrowmatcher that allows us to identify relevant changes. |
|
2082 | 2082 | depth: If not None, only consider nodes to be full nodes if they are at |
|
2083 | 2083 | most depth changesets away from one of heads. |
|
2084 | 2084 | |
|
2085 | 2085 | Returns: |
|
2086 | 2086 | A tuple of (visitnodes, relevant_nodes, ellipsisroots) where: |
|
2087 | 2087 | |
|
2088 | 2088 | visitnodes: The list of nodes (either full or ellipsis) which |
|
2089 | 2089 | need to be sent to the client. |
|
2090 | 2090 | relevant_nodes: The set of changelog nodes which change a file inside |
|
2091 | 2091 | the narrowspec. The client needs these as non-ellipsis nodes. |
|
2092 | 2092 | ellipsisroots: A dict of {rev: parents} that is used in |
|
2093 | 2093 | narrowchangegroup to produce ellipsis nodes with the |
|
2094 | 2094 | correct parents. |
|
2095 | 2095 | """ |
|
2096 | 2096 | cl = repo.changelog |
|
2097 | 2097 | mfl = repo.manifestlog |
|
2098 | 2098 | |
|
2099 | 2099 | clrev = cl.rev |
|
2100 | 2100 | |
|
2101 | 2101 | commonrevs = {clrev(n) for n in common} | {nullrev} |
|
2102 | 2102 | headsrevs = {clrev(n) for n in heads} |
|
2103 | 2103 | |
|
2104 | 2104 | if depth: |
|
2105 | 2105 | revdepth = {h: 0 for h in headsrevs} |
|
2106 | 2106 | |
|
2107 | 2107 | ellipsisheads = collections.defaultdict(set) |
|
2108 | 2108 | ellipsisroots = collections.defaultdict(set) |
|
2109 | 2109 | |
|
2110 | 2110 | def addroot(head, curchange): |
|
2111 | 2111 | """Add a root to an ellipsis head, splitting heads with 3 roots.""" |
|
2112 | 2112 | ellipsisroots[head].add(curchange) |
|
2113 | 2113 | # Recursively split ellipsis heads with 3 roots by finding the |
|
2114 | 2114 | # roots' youngest common descendant which is an elided merge commit. |
|
2115 | 2115 | # That descendant takes 2 of the 3 roots as its own, and becomes a |
|
2116 | 2116 | # root of the head. |
|
2117 | 2117 | while len(ellipsisroots[head]) > 2: |
|
2118 | 2118 | child, roots = splithead(head) |
|
2119 | 2119 | splitroots(head, child, roots) |
|
2120 | 2120 | head = child # Recurse in case we just added a 3rd root |
|
2121 | 2121 | |
|
2122 | 2122 | def splitroots(head, child, roots): |
|
2123 | 2123 | ellipsisroots[head].difference_update(roots) |
|
2124 | 2124 | ellipsisroots[head].add(child) |
|
2125 | 2125 | ellipsisroots[child].update(roots) |
|
2126 | 2126 | ellipsisroots[child].discard(child) |
|
2127 | 2127 | |
|
2128 | 2128 | def splithead(head): |
|
2129 | 2129 | r1, r2, r3 = sorted(ellipsisroots[head]) |
|
2130 | 2130 | for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)): |
|
2131 | 2131 | mid = repo.revs( |
|
2132 | 2132 | b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head |
|
2133 | 2133 | ) |
|
2134 | 2134 | for j in mid: |
|
2135 | 2135 | if j == nr2: |
|
2136 | 2136 | return nr2, (nr1, nr2) |
|
2137 | 2137 | if j not in ellipsisroots or len(ellipsisroots[j]) < 2: |
|
2138 | 2138 | return j, (nr1, nr2) |
|
2139 | 2139 | raise error.Abort( |
|
2140 | 2140 | _( |
|
2141 | 2141 | b'Failed to split up ellipsis node! head: %d, ' |
|
2142 | 2142 | b'roots: %d %d %d' |
|
2143 | 2143 | ) |
|
2144 | 2144 | % (head, r1, r2, r3) |
|
2145 | 2145 | ) |
|
2146 | 2146 | |
|
2147 | 2147 | missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs)) |
|
2148 | 2148 | visit = reversed(missing) |
|
2149 | 2149 | relevant_nodes = set() |
|
2150 | 2150 | visitnodes = [cl.node(m) for m in missing] |
|
2151 | 2151 | required = set(headsrevs) | known |
|
2152 | 2152 | for rev in visit: |
|
2153 | 2153 | clrev = cl.changelogrevision(rev) |
|
2154 | 2154 | ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev] |
|
2155 | 2155 | if depth is not None: |
|
2156 | 2156 | curdepth = revdepth[rev] |
|
2157 | 2157 | for p in ps: |
|
2158 | 2158 | revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1)) |
|
2159 | 2159 | needed = False |
|
2160 | 2160 | shallow_enough = depth is None or revdepth[rev] <= depth |
|
2161 | 2161 | if shallow_enough: |
|
2162 | 2162 | curmf = mfl[clrev.manifest].read() |
|
2163 | 2163 | if ps: |
|
2164 | 2164 | # We choose to not trust the changed files list in |
|
2165 | 2165 | # changesets because it's not always correct. TODO: could |
|
2166 | 2166 | # we trust it for the non-merge case? |
|
2167 | 2167 | p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read() |
|
2168 | 2168 | needed = bool(curmf.diff(p1mf, match)) |
|
2169 | 2169 | if not needed and len(ps) > 1: |
|
2170 | 2170 | # For merge changes, the list of changed files is not |
|
2171 | 2171 | # helpful, since we need to emit the merge if a file |
|
2172 | 2172 | # in the narrow spec has changed on either side of the |
|
2173 | 2173 | # merge. As a result, we do a manifest diff to check. |
|
2174 | 2174 | p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read() |
|
2175 | 2175 | needed = bool(curmf.diff(p2mf, match)) |
|
2176 | 2176 | else: |
|
2177 | 2177 | # For a root node, we need to include the node if any |
|
2178 | 2178 | # files in the node match the narrowspec. |
|
2179 | 2179 | needed = any(curmf.walk(match)) |
|
2180 | 2180 | |
|
2181 | 2181 | if needed: |
|
2182 | 2182 | for head in ellipsisheads[rev]: |
|
2183 | 2183 | addroot(head, rev) |
|
2184 | 2184 | for p in ps: |
|
2185 | 2185 | required.add(p) |
|
2186 | 2186 | relevant_nodes.add(cl.node(rev)) |
|
2187 | 2187 | else: |
|
2188 | 2188 | if not ps: |
|
2189 | 2189 | ps = [nullrev] |
|
2190 | 2190 | if rev in required: |
|
2191 | 2191 | for head in ellipsisheads[rev]: |
|
2192 | 2192 | addroot(head, rev) |
|
2193 | 2193 | for p in ps: |
|
2194 | 2194 | ellipsisheads[p].add(rev) |
|
2195 | 2195 | else: |
|
2196 | 2196 | for p in ps: |
|
2197 | 2197 | ellipsisheads[p] |= ellipsisheads[rev] |
|
2198 | 2198 | |
|
2199 | 2199 | # add common changesets as roots of their reachable ellipsis heads |
|
2200 | 2200 | for c in commonrevs: |
|
2201 | 2201 | for head in ellipsisheads[c]: |
|
2202 | 2202 | addroot(head, c) |
|
2203 | 2203 | return visitnodes, relevant_nodes, ellipsisroots |
|
2204 | 2204 | |
|
2205 | 2205 | |
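
# The revdepth bookkeeping in the function above is a reverse-topological
# relaxation: a rev's depth is its minimum distance from any requested head,
# and anything deeper than `depth` is never emitted as a full node. The same
# computation over a plain parent map, as a standalone sketch (toy data
# structures, for illustration only):

def compute_depths(revs_desc, parents, heads, limit):
    """revs_desc: candidate revs in descending (reverse topological) order;
    parents: {rev: [parent revs]}. Returns {rev: distance from nearest head},
    capped at limit + 1 for revs that are too deep."""
    depth = {h: 0 for h in heads}
    for rev in revs_desc:
        if rev not in depth:
            continue  # not reachable from a requested head
        for p in parents.get(rev, []):
            depth[p] = min(depth[rev] + 1, depth.get(p, limit + 1))
    return depth
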
|
2206 | 2206 | def caps20to10(repo, role): |
|
2207 | 2207 | """return a set with appropriate options to use bundle20 during getbundle""" |
|
2208 | 2208 | caps = {b'HG20'} |
|
2209 | 2209 | capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role)) |
|
2210 | 2210 | caps.add(b'bundle2=' + urlreq.quote(capsblob)) |
|
2211 | 2211 | return caps |
|
2212 | 2212 | |
|
2213 | 2213 | |
|
2214 | 2214 | # List of names of steps to perform for a bundle2 for getbundle, order matters. |
|
2215 | 2215 | getbundle2partsorder = [] |
|
2216 | 2216 | |
|
2217 | 2217 | # Mapping between step name and function |
|
2218 | 2218 | # |
|
2219 | 2219 | # This exists to help extensions wrap steps if necessary |
|
2220 | 2220 | getbundle2partsmapping = {} |
|
2221 | 2221 | |
|
2222 | 2222 | |
|
2223 | 2223 | def getbundle2partsgenerator(stepname, idx=None): |
|
2224 | 2224 | """decorator for function generating bundle2 part for getbundle |
|
2225 | 2225 | |
|
2226 | 2226 | The function is added to the step -> function mapping and appended to the |
|
2227 | 2227 | list of steps. Beware that decorated functions will be added in order |
|
2228 | 2228 | (this may matter). |
|
2229 | 2229 | |
|
2230 | 2230 | You can only use this decorator for new steps; to wrap a step from an

2231 | 2231 | extension, modify the getbundle2partsmapping dictionary directly."""
|
2232 | 2232 | |
|
2233 | 2233 | def dec(func): |
|
2234 | 2234 | assert stepname not in getbundle2partsmapping |
|
2235 | 2235 | getbundle2partsmapping[stepname] = func |
|
2236 | 2236 | if idx is None: |
|
2237 | 2237 | getbundle2partsorder.append(stepname) |
|
2238 | 2238 | else: |
|
2239 | 2239 | getbundle2partsorder.insert(idx, stepname) |
|
2240 | 2240 | return func |
|
2241 | 2241 | |
|
2242 | 2242 | return dec |
|
2243 | 2243 | |
|
2244 | 2244 | |
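
# A sketch of registering an additional part generator with the decorator
# above (the part name and payload are hypothetical):

@getbundle2partsgenerator(b'my-extension-part')
def _getbundlemypart(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """add a hypothetical extension part to the requested bundle"""
    bundler.newpart(b'my-extension-part', data=b'payload', mandatory=False)
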
|
2245 | 2245 | def bundle2requested(bundlecaps): |
|
2246 | 2246 | if bundlecaps is not None: |
|
2247 | 2247 | return any(cap.startswith(b'HG2') for cap in bundlecaps) |
|
2248 | 2248 | return False |
|
2249 | 2249 | |
|
2250 | 2250 | |
|
2251 | 2251 | def getbundlechunks( |
|
2252 | repo, source, heads=None, common=None, bundlecaps=None, **kwargs | |
|
2252 | repo, | |
|
2253 | source, | |
|
2254 | heads=None, | |
|
2255 | common=None, | |
|
2256 | bundlecaps=None, | |
|
2257 | remote_sidedata=None, | |
|
2258 | **kwargs | |
|
2253 | 2259 | ): |
|
2254 | 2260 | """Return chunks constituting a bundle's raw data. |
|
2255 | 2261 | |
|
2256 | 2262 | Could be a bundle HG10 or a bundle HG20 depending on bundlecaps |
|
2257 | 2263 | passed. |
|
2258 | 2264 | |
|
2259 | 2265 | Returns a 2-tuple of a dict with metadata about the generated bundle |
|
2260 | 2266 | and an iterator over raw chunks (of varying sizes). |
|
2261 | 2267 | """ |
|
2262 | 2268 | kwargs = pycompat.byteskwargs(kwargs) |
|
2263 | 2269 | info = {} |
|
2264 | 2270 | usebundle2 = bundle2requested(bundlecaps) |
|
2265 | 2271 | # bundle10 case |
|
2266 | 2272 | if not usebundle2: |
|
2267 | 2273 | if bundlecaps and not kwargs.get(b'cg', True): |
|
2268 | 2274 | raise ValueError( |
|
2269 | 2275 | _(b'request for bundle10 must include changegroup') |
|
2270 | 2276 | ) |
|
2271 | 2277 | |
|
2272 | 2278 | if kwargs: |
|
2273 | 2279 | raise ValueError( |
|
2274 | 2280 | _(b'unsupported getbundle arguments: %s') |
|
2275 | 2281 | % b', '.join(sorted(kwargs.keys())) |
|
2276 | 2282 | ) |
|
2277 | 2283 | outgoing = _computeoutgoing(repo, heads, common) |
|
2278 | 2284 | info[b'bundleversion'] = 1 |
|
2279 | 2285 | return ( |
|
2280 | 2286 | info, |
|
2281 | 2287 | changegroup.makestream( |
|
2282 | repo, outgoing, b'01', source, bundlecaps=bundlecaps | |
|
2288 | repo, | |
|
2289 | outgoing, | |
|
2290 | b'01', | |
|
2291 | source, | |
|
2292 | bundlecaps=bundlecaps, | |
|
2293 | remote_sidedata=remote_sidedata, | |
|
2283 | 2294 | ), |
|
2284 | 2295 | ) |
|
2285 | 2296 | |
|
2286 | 2297 | # bundle20 case |
|
2287 | 2298 | info[b'bundleversion'] = 2 |
|
2288 | 2299 | b2caps = {} |
|
2289 | 2300 | for bcaps in bundlecaps: |
|
2290 | 2301 | if bcaps.startswith(b'bundle2='): |
|
2291 | 2302 | blob = urlreq.unquote(bcaps[len(b'bundle2=') :]) |
|
2292 | 2303 | b2caps.update(bundle2.decodecaps(blob)) |
|
2293 | 2304 | bundler = bundle2.bundle20(repo.ui, b2caps) |
|
2294 | 2305 | |
|
2295 | 2306 | kwargs[b'heads'] = heads |
|
2296 | 2307 | kwargs[b'common'] = common |
|
2297 | 2308 | |
|
2298 | 2309 | for name in getbundle2partsorder: |
|
2299 | 2310 | func = getbundle2partsmapping[name] |
|
2300 | 2311 | func( |
|
2301 | 2312 | bundler, |
|
2302 | 2313 | repo, |
|
2303 | 2314 | source, |
|
2304 | 2315 | bundlecaps=bundlecaps, |
|
2305 | 2316 | b2caps=b2caps, |
|
2317 | remote_sidedata=remote_sidedata, | |
|
2306 | 2318 | **pycompat.strkwargs(kwargs) |
|
2307 | 2319 | ) |
|
2308 | 2320 | |
|
2309 | 2321 | info[b'prefercompressed'] = bundler.prefercompressed |
|
2310 | 2322 | |
|
2311 | 2323 | return info, bundler.getchunks() |
|
2312 | 2324 | |
|
2313 | 2325 | |
|
2314 | 2326 | @getbundle2partsgenerator(b'stream2') |
|
2315 | 2327 | def _getbundlestream2(bundler, repo, *args, **kwargs): |
|
2316 | 2328 | return bundle2.addpartbundlestream2(bundler, repo, **kwargs) |
|
2317 | 2329 | |
|
2318 | 2330 | |
|
2319 | 2331 | @getbundle2partsgenerator(b'changegroup') |
|
2320 | 2332 | def _getbundlechangegrouppart( |
|
2321 | 2333 | bundler, |
|
2322 | 2334 | repo, |
|
2323 | 2335 | source, |
|
2324 | 2336 | bundlecaps=None, |
|
2325 | 2337 | b2caps=None, |
|
2326 | 2338 | heads=None, |
|
2327 | 2339 | common=None, |
|
2340 | remote_sidedata=None, | |
|
2328 | 2341 | **kwargs |
|
2329 | 2342 | ): |
|
2330 | 2343 | """add a changegroup part to the requested bundle""" |
|
2331 | 2344 | if not kwargs.get('cg', True) or not b2caps: |
|
2332 | 2345 | return |
|
2333 | 2346 | |
|
2334 | 2347 | version = b'01' |
|
2335 | 2348 | cgversions = b2caps.get(b'changegroup') |
|
2336 | 2349 | if cgversions: # 3.1 and 3.2 ship with an empty value |
|
2337 | 2350 | cgversions = [ |
|
2338 | 2351 | v |
|
2339 | 2352 | for v in cgversions |
|
2340 | 2353 | if v in changegroup.supportedoutgoingversions(repo) |
|
2341 | 2354 | ] |
|
2342 | 2355 | if not cgversions: |
|
2343 | 2356 | raise error.Abort(_(b'no common changegroup version')) |
|
2344 | 2357 | version = max(cgversions) |
|
2345 | 2358 | |
|
2346 | 2359 | outgoing = _computeoutgoing(repo, heads, common) |
|
2347 | 2360 | if not outgoing.missing: |
|
2348 | 2361 | return |
|
2349 | 2362 | |
|
2350 | 2363 | if kwargs.get('narrow', False): |
|
2351 | 2364 | include = sorted(filter(bool, kwargs.get('includepats', []))) |
|
2352 | 2365 | exclude = sorted(filter(bool, kwargs.get('excludepats', []))) |
|
2353 | 2366 | matcher = narrowspec.match(repo.root, include=include, exclude=exclude) |
|
2354 | 2367 | else: |
|
2355 | 2368 | matcher = None |
|
2356 | 2369 | |
|
2357 | 2370 | cgstream = changegroup.makestream( |
|
2358 | repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher | |
|
2371 | repo, | |
|
2372 | outgoing, | |
|
2373 | version, | |
|
2374 | source, | |
|
2375 | bundlecaps=bundlecaps, | |
|
2376 | matcher=matcher, | |
|
2377 | remote_sidedata=remote_sidedata, | |
|
2359 | 2378 | ) |
|
2360 | 2379 | |
|
2361 | 2380 | part = bundler.newpart(b'changegroup', data=cgstream) |
|
2362 | 2381 | if cgversions: |
|
2363 | 2382 | part.addparam(b'version', version) |
|
2364 | 2383 | |
|
2365 | 2384 | part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False) |
|
2366 | 2385 | |
|
2367 | 2386 | if scmutil.istreemanifest(repo): |
|
2368 | 2387 | part.addparam(b'treemanifest', b'1') |
|
2369 | 2388 | |
|
2370 | 2389 | if b'exp-sidedata-flag' in repo.requirements: |
|
2371 | 2390 | part.addparam(b'exp-sidedata', b'1') |
|
2372 | 2391 | |
|
2373 | 2392 | if ( |
|
2374 | 2393 | kwargs.get('narrow', False) |
|
2375 | 2394 | and kwargs.get('narrow_acl', False) |
|
2376 | 2395 | and (include or exclude) |
|
2377 | 2396 | ): |
|
2378 | 2397 | # this is mandatory because otherwise ACL clients won't work |
|
2379 | 2398 | narrowspecpart = bundler.newpart(b'Narrow:responsespec') |
|
2380 | 2399 | narrowspecpart.data = b'%s\0%s' % ( |
|
2381 | 2400 | b'\n'.join(include), |
|
2382 | 2401 | b'\n'.join(exclude), |
|
2383 | 2402 | ) |
|
2384 | 2403 | |
|
2385 | 2404 | |
|
2386 | 2405 | @getbundle2partsgenerator(b'bookmarks') |
|
2387 | 2406 | def _getbundlebookmarkpart( |
|
2388 | 2407 | bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs |
|
2389 | 2408 | ): |
|
2390 | 2409 | """add a bookmark part to the requested bundle""" |
|
2391 | 2410 | if not kwargs.get('bookmarks', False): |
|
2392 | 2411 | return |
|
2393 | 2412 | if not b2caps or b'bookmarks' not in b2caps: |
|
2394 | 2413 | raise error.Abort(_(b'no common bookmarks exchange method')) |
|
2395 | 2414 | books = bookmod.listbinbookmarks(repo) |
|
2396 | 2415 | data = bookmod.binaryencode(books) |
|
2397 | 2416 | if data: |
|
2398 | 2417 | bundler.newpart(b'bookmarks', data=data) |
|
2399 | 2418 | |
|
2400 | 2419 | |
|
2401 | 2420 | @getbundle2partsgenerator(b'listkeys') |
|
2402 | 2421 | def _getbundlelistkeysparts( |
|
2403 | 2422 | bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs |
|
2404 | 2423 | ): |
|
2405 | 2424 | """add parts containing listkeys namespaces to the requested bundle""" |
|
2406 | 2425 | listkeys = kwargs.get('listkeys', ()) |
|
2407 | 2426 | for namespace in listkeys: |
|
2408 | 2427 | part = bundler.newpart(b'listkeys') |
|
2409 | 2428 | part.addparam(b'namespace', namespace) |
|
2410 | 2429 | keys = repo.listkeys(namespace).items() |
|
2411 | 2430 | part.data = pushkey.encodekeys(keys) |
|
2412 | 2431 | |
|
2413 | 2432 | |
|
2414 | 2433 | @getbundle2partsgenerator(b'obsmarkers') |
|
2415 | 2434 | def _getbundleobsmarkerpart( |
|
2416 | 2435 | bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs |
|
2417 | 2436 | ): |
|
2418 | 2437 | """add an obsolescence markers part to the requested bundle""" |
|
2419 | 2438 | if kwargs.get('obsmarkers', False): |
|
2420 | 2439 | if heads is None: |
|
2421 | 2440 | heads = repo.heads() |
|
2422 | 2441 | subset = [c.node() for c in repo.set(b'::%ln', heads)] |
|
2423 | 2442 | markers = repo.obsstore.relevantmarkers(subset) |
|
2424 | 2443 | markers = obsutil.sortedmarkers(markers) |
|
2425 | 2444 | bundle2.buildobsmarkerspart(bundler, markers) |
|
2426 | 2445 | |
|
2427 | 2446 | |
|
2428 | 2447 | @getbundle2partsgenerator(b'phases') |
|
2429 | 2448 | def _getbundlephasespart( |
|
2430 | 2449 | bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs |
|
2431 | 2450 | ): |
|
2432 | 2451 | """add phase heads part to the requested bundle""" |
|
2433 | 2452 | if kwargs.get('phases', False): |
|
2434 | 2453 | if not b2caps or b'heads' not in b2caps.get(b'phases', ()):
|
2435 | 2454 | raise error.Abort(_(b'no common phases exchange method')) |
|
2436 | 2455 | if heads is None: |
|
2437 | 2456 | heads = repo.heads() |
|
2438 | 2457 | |
|
2439 | 2458 | headsbyphase = collections.defaultdict(set) |
|
2440 | 2459 | if repo.publishing(): |
|
2441 | 2460 | headsbyphase[phases.public] = heads |
|
2442 | 2461 | else: |
|
2443 | 2462 | # find the appropriate heads to move |
|
2444 | 2463 | |
|
2445 | 2464 | phase = repo._phasecache.phase |
|
2446 | 2465 | node = repo.changelog.node |
|
2447 | 2466 | rev = repo.changelog.rev |
|
2448 | 2467 | for h in heads: |
|
2449 | 2468 | headsbyphase[phase(repo, rev(h))].add(h) |
|
2450 | 2469 | seenphases = list(headsbyphase.keys()) |
|
2451 | 2470 | |
|
2452 | 2471 | # We do not handle anything but public and draft phases for now.
|
2453 | 2472 | if seenphases: |
|
2454 | 2473 | assert max(seenphases) <= phases.draft |
|
2455 | 2474 | |
|
2456 | 2475 | # if client is pulling non-public changesets, we need to find |
|
2457 | 2476 | # intermediate public heads. |
|
2458 | 2477 | draftheads = headsbyphase.get(phases.draft, set()) |
|
2459 | 2478 | if draftheads: |
|
2460 | 2479 | publicheads = headsbyphase.get(phases.public, set()) |
|
2461 | 2480 | |
|
2462 | 2481 | revset = b'heads(only(%ln, %ln) and public())' |
|
2463 | 2482 | extraheads = repo.revs(revset, draftheads, publicheads) |
|
2464 | 2483 | for r in extraheads: |
|
2465 | 2484 | headsbyphase[phases.public].add(node(r)) |
|
2466 | 2485 | |
|
2467 | 2486 | # transform data in a format used by the encoding function |
|
2468 | 2487 | phasemapping = { |
|
2469 | 2488 | phase: sorted(headsbyphase[phase]) for phase in phases.allphases |
|
2470 | 2489 | } |
|
2471 | 2490 | |
|
2472 | 2491 | # generate the actual part |
|
2473 | 2492 | phasedata = phases.binaryencode(phasemapping) |
|
2474 | 2493 | bundler.newpart(b'phase-heads', data=phasedata) |
|
2475 | 2494 | |
|
2476 | 2495 | |
|
2477 | 2496 | @getbundle2partsgenerator(b'hgtagsfnodes') |
|
2478 | 2497 | def _getbundletagsfnodes( |
|
2479 | 2498 | bundler, |
|
2480 | 2499 | repo, |
|
2481 | 2500 | source, |
|
2482 | 2501 | bundlecaps=None, |
|
2483 | 2502 | b2caps=None, |
|
2484 | 2503 | heads=None, |
|
2485 | 2504 | common=None, |
|
2486 | 2505 | **kwargs |
|
2487 | 2506 | ): |
|
2488 | 2507 | """Transfer the .hgtags filenodes mapping. |
|
2489 | 2508 | |
|
2490 | 2509 | Only values for heads in this bundle will be transferred. |
|
2491 | 2510 | |
|
2492 | 2511 | The part data consists of pairs of 20 byte changeset node and .hgtags |
|
2493 | 2512 | filenodes raw values. |
|
2494 | 2513 | """ |
|
2495 | 2514 | # Don't send unless: |
|
2496 | 2515 | # - changesets are being exchanged,
|
2497 | 2516 | # - the client supports it. |
|
2498 | 2517 | if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps): |
|
2499 | 2518 | return |
|
2500 | 2519 | |
|
2501 | 2520 | outgoing = _computeoutgoing(repo, heads, common) |
|
2502 | 2521 | bundle2.addparttagsfnodescache(repo, bundler, outgoing) |
|
2503 | 2522 | |
|
2504 | 2523 | |
|
2505 | 2524 | @getbundle2partsgenerator(b'cache:rev-branch-cache') |
|
2506 | 2525 | def _getbundlerevbranchcache( |
|
2507 | 2526 | bundler, |
|
2508 | 2527 | repo, |
|
2509 | 2528 | source, |
|
2510 | 2529 | bundlecaps=None, |
|
2511 | 2530 | b2caps=None, |
|
2512 | 2531 | heads=None, |
|
2513 | 2532 | common=None, |
|
2514 | 2533 | **kwargs |
|
2515 | 2534 | ): |
|
2516 | 2535 | """Transfer the rev-branch-cache mapping |
|
2517 | 2536 | |
|
2518 | 2537 | The payload is a series of data blocks, one per branch:
|
2519 | 2538 | |
|
2520 | 2539 | 1) branch name length |
|
2521 | 2540 | 2) number of open heads |
|
2522 | 2541 | 3) number of closed heads |
|
2523 | 2542 | 4) open heads nodes |
|
2524 | 2543 | 5) closed heads nodes |
|
2525 | 2544 | """ |
|
2526 | 2545 | # Don't send unless: |
|
2527 | 2546 | # - changesets are being exchanged,
|
2528 | 2547 | # - the client supports it. |
|
2529 | 2548 | # - narrow bundle isn't in play (not currently compatible). |
|
2530 | 2549 | if ( |
|
2531 | 2550 | not kwargs.get('cg', True) |
|
2532 | 2551 | or not b2caps |
|
2533 | 2552 | or b'rev-branch-cache' not in b2caps |
|
2534 | 2553 | or kwargs.get('narrow', False) |
|
2535 | 2554 | or repo.ui.has_section(_NARROWACL_SECTION) |
|
2536 | 2555 | ): |
|
2537 | 2556 | return |
|
2538 | 2557 | |
|
2539 | 2558 | outgoing = _computeoutgoing(repo, heads, common) |
|
2540 | 2559 | bundle2.addpartrevbranchcache(repo, bundler, outgoing) |
|
2541 | 2560 | |
|
2542 | 2561 | |
|
2543 | 2562 | def check_heads(repo, their_heads, context): |
|
2544 | 2563 | """check if the heads of a repo have been modified |
|
2545 | 2564 | |
|
2546 | 2565 | Used by peer for unbundling. |
|
2547 | 2566 | """ |
|
2548 | 2567 | heads = repo.heads() |
|
2549 | 2568 | heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest() |
|
2550 | 2569 | if not ( |
|
2551 | 2570 | their_heads == [b'force'] |
|
2552 | 2571 | or their_heads == heads |
|
2553 | 2572 | or their_heads == [b'hashed', heads_hash] |
|
2554 | 2573 | ): |
|
2555 | 2574 | # someone else committed/pushed/unbundled while we |
|
2556 | 2575 | # were transferring data |
|
2557 | 2576 | raise error.PushRaced( |
|
2558 | 2577 | b'repository changed while %s - please try again' % context |
|
2559 | 2578 | ) |
|
2560 | 2579 | |
|
2561 | 2580 | |
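
# The b'hashed' form accepted above is how a client commits to the heads it
# observed at discovery time. A self-contained sketch of producing it on the
# client side, using stdlib hashlib in place of Mercurial's hashutil wrapper:

import hashlib

def hashed_heads(heads):
    """heads: iterable of 20-byte binary nodes -> [b'hashed', sha1 digest]"""
    return [b'hashed', hashlib.sha1(b''.join(sorted(heads))).digest()]
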
|
2562 | 2581 | def unbundle(repo, cg, heads, source, url): |
|
2563 | 2582 | """Apply a bundle to a repo. |
|
2564 | 2583 | |
|
2565 | 2584 | This function makes sure the repo is locked during the application and has

2566 | 2585 | a mechanism to check that no push race occurred between the creation of the

2567 | 2586 | bundle and its application.

2568 | 2587 |

2569 | 2588 | If the push was raced, a PushRaced exception is raised."""
|
2570 | 2589 | r = 0 |
|
2571 | 2590 | # need a transaction when processing a bundle2 stream |
|
2572 | 2591 | # [wlock, lock, tr] - needs to be an array so nested functions can modify it |
|
2573 | 2592 | lockandtr = [None, None, None] |
|
2574 | 2593 | recordout = None |
|
2575 | 2594 | # quick fix for output mismatch with bundle2 in 3.4 |
|
2576 | 2595 | captureoutput = repo.ui.configbool( |
|
2577 | 2596 | b'experimental', b'bundle2-output-capture' |
|
2578 | 2597 | ) |
|
2579 | 2598 | if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'): |
|
2580 | 2599 | captureoutput = True |
|
2581 | 2600 | try: |
|
2582 | 2601 | # note: outside bundle1, 'heads' is expected to be empty and this |
|
2583 | 2602 | # 'check_heads' call will be a no-op
|
2584 | 2603 | check_heads(repo, heads, b'uploading changes') |
|
2585 | 2604 | # push can proceed |
|
2586 | 2605 | if not isinstance(cg, bundle2.unbundle20): |
|
2587 | 2606 | # legacy case: bundle1 (changegroup 01) |
|
2588 | 2607 | txnname = b"\n".join([source, util.hidepassword(url)]) |
|
2589 | 2608 | with repo.lock(), repo.transaction(txnname) as tr: |
|
2590 | 2609 | op = bundle2.applybundle(repo, cg, tr, source, url) |
|
2591 | 2610 | r = bundle2.combinechangegroupresults(op) |
|
2592 | 2611 | else: |
|
2593 | 2612 | r = None |
|
2594 | 2613 | try: |
|
2595 | 2614 | |
|
2596 | 2615 | def gettransaction(): |
|
2597 | 2616 | if not lockandtr[2]: |
|
2598 | 2617 | if not bookmod.bookmarksinstore(repo): |
|
2599 | 2618 | lockandtr[0] = repo.wlock() |
|
2600 | 2619 | lockandtr[1] = repo.lock() |
|
2601 | 2620 | lockandtr[2] = repo.transaction(source) |
|
2602 | 2621 | lockandtr[2].hookargs[b'source'] = source |
|
2603 | 2622 | lockandtr[2].hookargs[b'url'] = url |
|
2604 | 2623 | lockandtr[2].hookargs[b'bundle2'] = b'1' |
|
2605 | 2624 | return lockandtr[2] |
|
2606 | 2625 | |
|
2607 | 2626 | # Do greedy locking by default until we're satisfied with lazy |
|
2608 | 2627 | # locking. |
|
2609 | 2628 | if not repo.ui.configbool( |
|
2610 | 2629 | b'experimental', b'bundle2lazylocking' |
|
2611 | 2630 | ): |
|
2612 | 2631 | gettransaction() |
|
2613 | 2632 | |
|
2614 | 2633 | op = bundle2.bundleoperation( |
|
2615 | 2634 | repo, |
|
2616 | 2635 | gettransaction, |
|
2617 | 2636 | captureoutput=captureoutput, |
|
2618 | 2637 | source=b'push', |
|
2619 | 2638 | ) |
|
2620 | 2639 | try: |
|
2621 | 2640 | op = bundle2.processbundle(repo, cg, op=op) |
|
2622 | 2641 | finally: |
|
2623 | 2642 | r = op.reply |
|
2624 | 2643 | if captureoutput and r is not None: |
|
2625 | 2644 | repo.ui.pushbuffer(error=True, subproc=True) |
|
2626 | 2645 | |
|
2627 | 2646 | def recordout(output): |
|
2628 | 2647 | r.newpart(b'output', data=output, mandatory=False) |
|
2629 | 2648 | |
|
2630 | 2649 | if lockandtr[2] is not None: |
|
2631 | 2650 | lockandtr[2].close() |
|
2632 | 2651 | except BaseException as exc: |
|
2633 | 2652 | exc.duringunbundle2 = True |
|
2634 | 2653 | if captureoutput and r is not None: |
|
2635 | 2654 | parts = exc._bundle2salvagedoutput = r.salvageoutput() |
|
2636 | 2655 | |
|
2637 | 2656 | def recordout(output): |
|
2638 | 2657 | part = bundle2.bundlepart( |
|
2639 | 2658 | b'output', data=output, mandatory=False |
|
2640 | 2659 | ) |
|
2641 | 2660 | parts.append(part) |
|
2642 | 2661 | |
|
2643 | 2662 | raise |
|
2644 | 2663 | finally: |
|
2645 | 2664 | lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0]) |
|
2646 | 2665 | if recordout is not None: |
|
2647 | 2666 | recordout(repo.ui.popbuffer()) |
|
2648 | 2667 | return r |
|
2649 | 2668 | |
|
2650 | 2669 | |
|
2651 | 2670 | def _maybeapplyclonebundle(pullop): |
|
2652 | 2671 | """Apply a clone bundle from a remote, if possible.""" |
|
2653 | 2672 | |
|
2654 | 2673 | repo = pullop.repo |
|
2655 | 2674 | remote = pullop.remote |
|
2656 | 2675 | |
|
2657 | 2676 | if not repo.ui.configbool(b'ui', b'clonebundles'): |
|
2658 | 2677 | return |
|
2659 | 2678 | |
|
2660 | 2679 | # Only run if local repo is empty. |
|
2661 | 2680 | if len(repo): |
|
2662 | 2681 | return |
|
2663 | 2682 | |
|
2664 | 2683 | if pullop.heads: |
|
2665 | 2684 | return |
|
2666 | 2685 | |
|
2667 | 2686 | if not remote.capable(b'clonebundles'): |
|
2668 | 2687 | return |
|
2669 | 2688 | |
|
2670 | 2689 | with remote.commandexecutor() as e: |
|
2671 | 2690 | res = e.callcommand(b'clonebundles', {}).result() |
|
2672 | 2691 | |
|
2673 | 2692 | # If we call the wire protocol command, that's good enough to record the |
|
2674 | 2693 | # attempt. |
|
2675 | 2694 | pullop.clonebundleattempted = True |
|
2676 | 2695 | |
|
2677 | 2696 | entries = bundlecaches.parseclonebundlesmanifest(repo, res) |
|
2678 | 2697 | if not entries: |
|
2679 | 2698 | repo.ui.note( |
|
2680 | 2699 | _( |
|
2681 | 2700 | b'no clone bundles available on remote; ' |
|
2682 | 2701 | b'falling back to regular clone\n' |
|
2683 | 2702 | ) |
|
2684 | 2703 | ) |
|
2685 | 2704 | return |
|
2686 | 2705 | |
|
2687 | 2706 | entries = bundlecaches.filterclonebundleentries( |
|
2688 | 2707 | repo, entries, streamclonerequested=pullop.streamclonerequested |
|
2689 | 2708 | ) |
|
2690 | 2709 | |
|
2691 | 2710 | if not entries: |
|
2692 | 2711 | # There is a thundering herd concern here. However, if a server |
|
2693 | 2712 | # operator doesn't advertise bundles appropriate for its clients, |
|
2694 | 2713 | # they deserve what's coming. Furthermore, from a client's |
|
2695 | 2714 | # perspective, no automatic fallback would mean not being able to |
|
2696 | 2715 | # clone! |
|
2697 | 2716 | repo.ui.warn( |
|
2698 | 2717 | _( |
|
2699 | 2718 | b'no compatible clone bundles available on server; ' |
|
2700 | 2719 | b'falling back to regular clone\n' |
|
2701 | 2720 | ) |
|
2702 | 2721 | ) |
|
2703 | 2722 | repo.ui.warn( |
|
2704 | 2723 | _(b'(you may want to report this to the server operator)\n') |
|
2705 | 2724 | ) |
|
2706 | 2725 | return |
|
2707 | 2726 | |
|
2708 | 2727 | entries = bundlecaches.sortclonebundleentries(repo.ui, entries) |
|
2709 | 2728 | |
|
2710 | 2729 | url = entries[0][b'URL'] |
|
2711 | 2730 | repo.ui.status(_(b'applying clone bundle from %s\n') % url) |
|
2712 | 2731 | if trypullbundlefromurl(repo.ui, repo, url): |
|
2713 | 2732 | repo.ui.status(_(b'finished applying clone bundle\n')) |
|
2714 | 2733 | # Bundle failed. |
|
2715 | 2734 | # |
|
2716 | 2735 | # We abort by default to avoid the thundering herd of |
|
2717 | 2736 | # clients flooding a server that was expecting expensive |
|
2718 | 2737 | # clone load to be offloaded. |
|
2719 | 2738 | elif repo.ui.configbool(b'ui', b'clonebundlefallback'): |
|
2720 | 2739 | repo.ui.warn(_(b'falling back to normal clone\n')) |
|
2721 | 2740 | else: |
|
2722 | 2741 | raise error.Abort( |
|
2723 | 2742 | _(b'error applying bundle'), |
|
2724 | 2743 | hint=_( |
|
2725 | 2744 | b'if this error persists, consider contacting ' |
|
2726 | 2745 | b'the server operator or disable clone ' |
|
2727 | 2746 | b'bundles via ' |
|
2728 | 2747 | b'"--config ui.clonebundles=false"' |
|
2729 | 2748 | ), |
|
2730 | 2749 | ) |
|
2731 | 2750 | |
|
2732 | 2751 | |
|
2733 | 2752 | def trypullbundlefromurl(ui, repo, url): |
|
2734 | 2753 | """Attempt to apply a bundle from a URL.""" |
|
2735 | 2754 | with repo.lock(), repo.transaction(b'bundleurl') as tr: |
|
2736 | 2755 | try: |
|
2737 | 2756 | fh = urlmod.open(ui, url) |
|
2738 | 2757 | cg = readbundle(ui, fh, b'stream') |
|
2739 | 2758 | |
|
2740 | 2759 | if isinstance(cg, streamclone.streamcloneapplier): |
|
2741 | 2760 | cg.apply(repo) |
|
2742 | 2761 | else: |
|
2743 | 2762 | bundle2.applybundle(repo, cg, tr, b'clonebundles', url) |
|
2744 | 2763 | return True |
|
2745 | 2764 | except urlerr.httperror as e: |
|
2746 | 2765 | ui.warn( |
|
2747 | 2766 | _(b'HTTP error fetching bundle: %s\n') |
|
2748 | 2767 | % stringutil.forcebytestr(e) |
|
2749 | 2768 | ) |
|
2750 | 2769 | except urlerr.urlerror as e: |
|
2751 | 2770 | ui.warn( |
|
2752 | 2771 | _(b'error fetching bundle: %s\n') |
|
2753 | 2772 | % stringutil.forcebytestr(e.reason) |
|
2754 | 2773 | ) |
|
2755 | 2774 | |
|
2756 | 2775 | return False |
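The flow above — fetch the manifest, parse and filter its entries, apply the best URL, then either fall back or abort — can be summarized in a small, self-contained sketch. Everything below (parse_manifest, clone_via_bundle, the apply_bundle callback) is a hypothetical stand-in for illustration, not Mercurial's API; only the one-entry-per-line "URL KEY=VALUE ..." manifest shape mirrors what parseclonebundlesmanifest() consumes.

    import urllib.request

    def parse_manifest(text):
        # One entry per line: a URL followed by optional KEY=VALUE attributes.
        entries = []
        for line in text.splitlines():
            fields = line.split()
            if not fields:
                continue
            entry = {'URL': fields[0]}
            for keyvalue in fields[1:]:
                key, _, value = keyvalue.partition('=')
                entry[key] = value
            entries.append(entry)
        return entries

    def clone_via_bundle(manifest_text, apply_bundle, fallback_allowed=True):
        entries = parse_manifest(manifest_text)
        if not entries:
            return False  # nothing advertised; do a regular clone
        url = entries[0]['URL']  # the real code filters and sorts first
        try:
            with urllib.request.urlopen(url) as fh:
                apply_bundle(fh)  # caller-supplied bundle application
            return True
        except OSError:  # URLError and HTTPError both subclass OSError
            if fallback_allowed:  # mirrors ui.clonebundlefallback
                return False  # caller falls back to a regular clone
            raise  # abort rather than flood the server with full clones

The abort-by-default choice is deliberate: a silent fallback would defeat the whole point of offloading clone traffic to pre-generated bundles.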
@@ -1,799 +1,807 b'' | |||
|
1 | 1 | # exchangev2.py - repository exchange for wire protocol version 2 |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import collections |
|
11 | 11 | import weakref |
|
12 | 12 | |
|
13 | 13 | from .i18n import _ |
|
14 | 14 | from .node import ( |
|
15 | 15 | nullid, |
|
16 | 16 | short, |
|
17 | 17 | ) |
|
18 | 18 | from . import ( |
|
19 | 19 | bookmarks, |
|
20 | 20 | error, |
|
21 | 21 | mdiff, |
|
22 | 22 | narrowspec, |
|
23 | 23 | phases, |
|
24 | 24 | pycompat, |
|
25 | 25 | requirements as requirementsmod, |
|
26 | 26 | setdiscovery, |
|
27 | 27 | ) |
|
28 | 28 | from .interfaces import repository |
|
29 | 29 | |
|
30 | 30 | |
|
31 | 31 | def pull(pullop): |
|
32 | 32 | """Pull using wire protocol version 2.""" |
|
33 | 33 | repo = pullop.repo |
|
34 | 34 | remote = pullop.remote |
|
35 | 35 | |
|
36 | 36 | usingrawchangelogandmanifest = _checkuserawstorefiledata(pullop) |
|
37 | 37 | |
|
38 | 38 | # If this is a clone and it was requested to perform a "stream clone", |
|
39 | 39 | # we obtain the raw file data from the remote, then fall back to an
|
40 | 40 | # incremental pull. This is somewhat hacky and is not nearly robust enough |
|
41 | 41 | # for long-term usage. |
|
42 | 42 | if usingrawchangelogandmanifest: |
|
43 | 43 | with repo.transaction(b'clone'): |
|
44 | 44 | _fetchrawstorefiles(repo, remote) |
|
45 | 45 | repo.invalidate(clearfilecache=True) |
|
46 | 46 | |
|
47 | 47 | tr = pullop.trmanager.transaction() |
|
48 | 48 | |
|
49 | 49 | # We don't use the repo's narrow matcher here because the patterns passed |
|
50 | 50 | # to exchange.pull() could be different. |
|
51 | 51 | narrowmatcher = narrowspec.match( |
|
52 | 52 | repo.root, |
|
53 | 53 | # An empty include set maps to nevermatcher, so always
|
54 | 54 | # set includes if missing. |
|
55 | 55 | pullop.includepats or {b'path:.'}, |
|
56 | 56 | pullop.excludepats, |
|
57 | 57 | ) |
|
58 | 58 | |
|
59 | 59 | if pullop.includepats or pullop.excludepats: |
|
60 | 60 | pathfilter = {} |
|
61 | 61 | if pullop.includepats: |
|
62 | 62 | pathfilter[b'include'] = sorted(pullop.includepats) |
|
63 | 63 | if pullop.excludepats: |
|
64 | 64 | pathfilter[b'exclude'] = sorted(pullop.excludepats) |
|
65 | 65 | else: |
|
66 | 66 | pathfilter = None |
|
67 | 67 | |
|
68 | 68 | # Figure out what needs to be fetched. |
|
69 | 69 | common, fetch, remoteheads = _pullchangesetdiscovery( |
|
70 | 70 | repo, remote, pullop.heads, abortwhenunrelated=pullop.force |
|
71 | 71 | ) |
|
72 | 72 | |
|
73 | 73 | # And fetch the data. |
|
74 | 74 | pullheads = pullop.heads or remoteheads |
|
75 | 75 | csetres = _fetchchangesets(repo, tr, remote, common, fetch, pullheads) |
|
76 | 76 | |
|
77 | 77 | # New revisions are written to the changelog. But all other updates |
|
78 | 78 | # are deferred. Do those now. |
|
79 | 79 | |
|
80 | 80 | # Ensure all new changesets are draft by default. If the repo is |
|
81 | 81 | # publishing, the phase will be adjusted by the loop below. |
|
82 | 82 | if csetres[b'added']: |
|
83 | 83 | phases.registernew( |
|
84 | 84 | repo, tr, phases.draft, [repo[n].rev() for n in csetres[b'added']] |
|
85 | 85 | ) |
|
86 | 86 | |
|
87 | 87 | # And adjust the phase of all changesets accordingly. |
|
88 | 88 | for phasenumber, phase in phases.phasenames.items(): |
|
89 | 89 | if phase == b'secret' or not csetres[b'nodesbyphase'][phase]: |
|
90 | 90 | continue |
|
91 | 91 | |
|
92 | 92 | phases.advanceboundary( |
|
93 | 93 | repo, |
|
94 | 94 | tr, |
|
95 | 95 | phasenumber, |
|
96 | 96 | csetres[b'nodesbyphase'][phase], |
|
97 | 97 | ) |
|
98 | 98 | |
|
99 | 99 | # Write bookmark updates. |
|
100 | 100 | bookmarks.updatefromremote( |
|
101 | 101 | repo.ui, |
|
102 | 102 | repo, |
|
103 | 103 | csetres[b'bookmarks'], |
|
104 | 104 | remote.url(), |
|
105 | 105 | pullop.gettransaction, |
|
106 | 106 | explicit=pullop.explicitbookmarks, |
|
107 | 107 | ) |
|
108 | 108 | |
|
109 | 109 | manres = _fetchmanifests(repo, tr, remote, csetres[b'manifestnodes']) |
|
110 | 110 | |
|
111 | 111 | # We don't properly support shallow changesets and manifests yet. So we apply
|
112 | 112 | # depth limiting locally. |
|
113 | 113 | if pullop.depth: |
|
114 | 114 | relevantcsetnodes = set() |
|
115 | 115 | clnode = repo.changelog.node |
|
116 | 116 | |
|
117 | 117 | for rev in repo.revs( |
|
118 | 118 | b'ancestors(%ln, %s)', pullheads, pullop.depth - 1 |
|
119 | 119 | ): |
|
120 | 120 | relevantcsetnodes.add(clnode(rev)) |
|
121 | 121 | |
|
122 | 122 | csetrelevantfilter = lambda n: n in relevantcsetnodes |
|
123 | 123 | |
|
124 | 124 | else: |
|
125 | 125 | csetrelevantfilter = lambda n: True |
|
126 | 126 | |
|
127 | 127 | # If obtaining the raw store files, we need to scan the full repo to |
|
128 | 128 | # derive all the changesets, manifests, and linkrevs. |
|
129 | 129 | if usingrawchangelogandmanifest: |
|
130 | 130 | csetsforfiles = [] |
|
131 | 131 | mnodesforfiles = [] |
|
132 | 132 | manifestlinkrevs = {} |
|
133 | 133 | |
|
134 | 134 | for rev in repo: |
|
135 | 135 | ctx = repo[rev] |
|
136 | 136 | node = ctx.node() |
|
137 | 137 | |
|
138 | 138 | if not csetrelevantfilter(node): |
|
139 | 139 | continue |
|
140 | 140 | |
|
141 | 141 | mnode = ctx.manifestnode() |
|
142 | 142 | |
|
143 | 143 | csetsforfiles.append(node) |
|
144 | 144 | mnodesforfiles.append(mnode) |
|
145 | 145 | manifestlinkrevs[mnode] = rev |
|
146 | 146 | |
|
147 | 147 | else: |
|
148 | 148 | csetsforfiles = [n for n in csetres[b'added'] if csetrelevantfilter(n)] |
|
149 | 149 | mnodesforfiles = manres[b'added'] |
|
150 | 150 | manifestlinkrevs = manres[b'linkrevs'] |
|
151 | 151 | |
|
152 | 152 | # Find all file nodes referenced by added manifests and fetch those |
|
153 | 153 | # revisions. |
|
154 | 154 | fnodes = _derivefilesfrommanifests(repo, narrowmatcher, mnodesforfiles) |
|
155 | 155 | _fetchfilesfromcsets( |
|
156 | 156 | repo, |
|
157 | 157 | tr, |
|
158 | 158 | remote, |
|
159 | 159 | pathfilter, |
|
160 | 160 | fnodes, |
|
161 | 161 | csetsforfiles, |
|
162 | 162 | manifestlinkrevs, |
|
163 | 163 | shallow=bool(pullop.depth), |
|
164 | 164 | ) |
|
165 | 165 | |
|
166 | 166 | |
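The depth-limiting step inside pull() above keeps only changesets within pullop.depth - 1 ancestor steps of the pulled heads (the ancestors(%ln, %s) revset). A toy equivalent over a plain parent map, purely for illustration; relevant_nodes and the parents dict are stand-ins, not repository APIs:

    def relevant_nodes(parents, heads, depth):
        # parents: node -> iterable of parent nodes (a stand-in for the DAG)
        seen = set(heads)
        frontier = set(heads)
        for _ in range(depth - 1):
            nxt = set()
            for node in frontier:
                for parent in parents.get(node, ()):
                    if parent not in seen:
                        nxt.add(parent)
            seen |= nxt
            frontier = nxt
        return seen

    # With parents = {'c': ['b'], 'b': ['a'], 'a': []} and depth=2,
    # relevant_nodes(parents, ['c'], 2) keeps {'c', 'b'} and drops 'a'.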
|
167 | 167 | def _checkuserawstorefiledata(pullop): |
|
168 | 168 | """Check whether we should use rawstorefiledata command to retrieve data.""" |
|
169 | 169 | |
|
170 | 170 | repo = pullop.repo |
|
171 | 171 | remote = pullop.remote |
|
172 | 172 | |
|
173 | 173 | # Command to obtain raw store data isn't available. |
|
174 | 174 | if b'rawstorefiledata' not in remote.apidescriptor[b'commands']: |
|
175 | 175 | return False |
|
176 | 176 | |
|
177 | 177 | # Only honor if user requested stream clone operation. |
|
178 | 178 | if not pullop.streamclonerequested: |
|
179 | 179 | return False |
|
180 | 180 | |
|
181 | 181 | # Only works on empty repos. |
|
182 | 182 | if len(repo): |
|
183 | 183 | return False |
|
184 | 184 | |
|
185 | 185 | # TODO This is super hacky. There needs to be a storage API for this. We |
|
186 | 186 | # also need to check for compatibility with the remote. |
|
187 | 187 | if requirementsmod.REVLOGV1_REQUIREMENT not in repo.requirements: |
|
188 | 188 | return False |
|
189 | 189 | |
|
190 | 190 | return True |
|
191 | 191 | |
|
192 | 192 | |
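The checks above reduce to a conjunction of four gates. A condensed, illustrative predicate; b'revlogv1' is assumed to be the value behind REVLOGV1_REQUIREMENT, as the TODO notes this gating is a stopgap anyway:

    def can_use_rawstorefiledata(commands, streamclonerequested, repolen,
                                 requirements):
        # All four gates from _checkuserawstorefiledata(), as a pure predicate.
        return (
            b'rawstorefiledata' in commands   # remote supports the command
            and streamclonerequested          # user asked for a stream clone
            and repolen == 0                  # local repo is empty
            and b'revlogv1' in requirements   # storage format we can copy
        )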
|
193 | 193 | def _fetchrawstorefiles(repo, remote): |
|
194 | 194 | with remote.commandexecutor() as e: |
|
195 | 195 | objs = e.callcommand( |
|
196 | 196 | b'rawstorefiledata', |
|
197 | 197 | { |
|
198 | 198 | b'files': [b'changelog', b'manifestlog'], |
|
199 | 199 | }, |
|
200 | 200 | ).result() |
|
201 | 201 | |
|
202 | 202 | # First object is a summary of files data that follows. |
|
203 | 203 | overall = next(objs) |
|
204 | 204 | |
|
205 | 205 | progress = repo.ui.makeprogress( |
|
206 | 206 | _(b'clone'), total=overall[b'totalsize'], unit=_(b'bytes') |
|
207 | 207 | ) |
|
208 | 208 | with progress: |
|
209 | 209 | progress.update(0) |
|
210 | 210 | |
|
211 | 211 | # Next are pairs of file metadata, data. |
|
212 | 212 | while True: |
|
213 | 213 | try: |
|
214 | 214 | filemeta = next(objs) |
|
215 | 215 | except StopIteration: |
|
216 | 216 | break |
|
217 | 217 | |
|
218 | 218 | for k in (b'location', b'path', b'size'): |
|
219 | 219 | if k not in filemeta: |
|
220 | 220 | raise error.Abort( |
|
221 | 221 | _(b'remote file data missing key: %s') % k |
|
222 | 222 | ) |
|
223 | 223 | |
|
224 | 224 | if filemeta[b'location'] == b'store': |
|
225 | 225 | vfs = repo.svfs |
|
226 | 226 | else: |
|
227 | 227 | raise error.Abort( |
|
228 | 228 | _(b'invalid location for raw file data: %s') |
|
229 | 229 | % filemeta[b'location'] |
|
230 | 230 | ) |
|
231 | 231 | |
|
232 | 232 | bytesremaining = filemeta[b'size'] |
|
233 | 233 | |
|
234 | 234 | with vfs.open(filemeta[b'path'], b'wb') as fh: |
|
235 | 235 | while True: |
|
236 | 236 | try: |
|
237 | 237 | chunk = next(objs) |
|
238 | 238 | except StopIteration: |
|
239 | 239 | break |
|
240 | 240 | |
|
241 | 241 | bytesremaining -= len(chunk) |
|
242 | 242 | |
|
243 | 243 | if bytesremaining < 0: |
|
244 | 244 | raise error.Abort( |
|
245 | 245 | _( |
|
246 | 246 | b'received invalid number of bytes for file ' |
|
247 | 247 | b'data; expected %d, got extra' |
|
248 | 248 | ) |
|
249 | 249 | % filemeta[b'size'] |
|
250 | 250 | ) |
|
251 | 251 | |
|
252 | 252 | progress.increment(step=len(chunk)) |
|
253 | 253 | fh.write(chunk) |
|
254 | 254 | |
|
255 | 255 | try: |
|
256 | 256 | if chunk.islast: |
|
257 | 257 | break |
|
258 | 258 | except AttributeError: |
|
259 | 259 | raise error.Abort( |
|
260 | 260 | _( |
|
261 | 261 | b'did not receive indefinite length bytestring ' |
|
262 | 262 | b'for file data' |
|
263 | 263 | ) |
|
264 | 264 | ) |
|
265 | 265 | |
|
266 | 266 | if bytesremaining: |
|
267 | 267 | raise error.Abort( |
|
268 | 268 | _( |
|
269 | 269 | b'received invalid number of bytes for '

270 | 270 | b'file data; expected %d, got %d'
|
271 | 271 | ) |
|
272 | 272 | % ( |
|
273 | 273 | filemeta[b'size'], |
|
274 | 274 | filemeta[b'size'] - bytesremaining, |
|
275 | 275 | ) |
|
276 | 276 | ) |
|
277 | 277 | |
|
278 | 278 | |
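The wire shape consumed above is: one overall header object, then for each file a metadata object followed by data chunks until a chunk marked as last. A generic consumer of that shape, assuming (as the code above does) that each chunk exposes an islast attribute and that open_output is a caller-supplied factory returning a writable context manager:

    def consume_raw_store_stream(objs, open_output):
        overall = next(objs)  # e.g. {'totalsize': ...}; drives progress only
        for filemeta in objs:  # the object after a file's chunks is metadata
            remaining = filemeta['size']
            with open_output(filemeta['path']) as fh:
                for chunk in objs:
                    remaining -= len(chunk)
                    if remaining < 0:
                        raise ValueError('more bytes than advertised')
                    fh.write(chunk)
                    if chunk.islast:  # indefinite-length bytestring ends here
                        break         # (real code guards AttributeError too)
            if remaining:
                raise ValueError('fewer bytes than advertised')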
|
279 | 279 | def _pullchangesetdiscovery(repo, remote, heads, abortwhenunrelated=True): |
|
280 | 280 | """Determine which changesets need to be pulled.""" |
|
281 | 281 | |
|
282 | 282 | if heads: |
|
283 | 283 | knownnode = repo.changelog.hasnode |
|
284 | 284 | if all(knownnode(head) for head in heads): |
|
285 | 285 | return heads, False, heads |
|
286 | 286 | |
|
287 | 287 | # TODO wire protocol version 2 is capable of more efficient discovery |
|
288 | 288 | # than setdiscovery. Consider implementing something better. |
|
289 | 289 | common, fetch, remoteheads = setdiscovery.findcommonheads( |
|
290 | 290 | repo.ui, repo, remote, abortwhenunrelated=abortwhenunrelated |
|
291 | 291 | ) |
|
292 | 292 | |
|
293 | 293 | common = set(common) |
|
294 | 294 | remoteheads = set(remoteheads) |
|
295 | 295 | |
|
296 | 296 | # If a remote head is filtered locally, put it back in the common set. |
|
297 | 297 | # See the comment in exchange._pulldiscoverychangegroup() for more. |
|
298 | 298 | |
|
299 | 299 | if fetch and remoteheads: |
|
300 | 300 | has_node = repo.unfiltered().changelog.index.has_node |
|
301 | 301 | |
|
302 | 302 | common |= {head for head in remoteheads if has_node(head)} |
|
303 | 303 | |
|
304 | 304 | if set(remoteheads).issubset(common): |
|
305 | 305 | fetch = [] |
|
306 | 306 | |
|
307 | 307 | common.discard(nullid) |
|
308 | 308 | |
|
309 | 309 | return common, fetch, remoteheads |
|
310 | 310 | |
|
311 | 311 | |
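The post-discovery adjustment above is a small set computation: remote heads already present locally (even when hidden by filtering) move into the common set, and if that covers every remote head there is nothing left to fetch. In isolation:

    def adjust_discovery(common, fetch, remoteheads, has_node):
        # has_node: membership test against the *unfiltered* local changelog
        common = set(common)
        remoteheads = set(remoteheads)
        if fetch and remoteheads:
            common |= {head for head in remoteheads if has_node(head)}
            if remoteheads.issubset(common):
                fetch = []  # everything the remote has is already known
        return common, fetch

    # adjust_discovery({'a'}, ['x'], {'x', 'y'}, {'a', 'x'}.__contains__)
    # -> ({'a', 'x'}, ['x']): 'x' becomes common, but 'y' still needs fetching.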
|
312 | 312 | def _fetchchangesets(repo, tr, remote, common, fetch, remoteheads): |
|
313 | 313 | # TODO consider adding a step here where we obtain the DAG shape first |
|
314 | 314 | # (or ask the server to slice changesets into chunks for us) so that |
|
315 | 315 | # we can perform multiple fetches in batches. This will facilitate |
|
316 | 316 | # resuming interrupted clones, higher server-side cache hit rates due |
|
317 | 317 | # to smaller segments, etc. |
|
318 | 318 | with remote.commandexecutor() as e: |
|
319 | 319 | objs = e.callcommand( |
|
320 | 320 | b'changesetdata', |
|
321 | 321 | { |
|
322 | 322 | b'revisions': [ |
|
323 | 323 | { |
|
324 | 324 | b'type': b'changesetdagrange', |
|
325 | 325 | b'roots': sorted(common), |
|
326 | 326 | b'heads': sorted(remoteheads), |
|
327 | 327 | } |
|
328 | 328 | ], |
|
329 | 329 | b'fields': {b'bookmarks', b'parents', b'phase', b'revision'}, |
|
330 | 330 | }, |
|
331 | 331 | ).result() |
|
332 | 332 | |
|
333 | 333 | # The context manager waits on all response data when exiting. So |
|
334 | 334 | # we need to remain in the context manager in order to stream data. |
|
335 | 335 | return _processchangesetdata(repo, tr, objs) |
|
336 | 336 | |
|
337 | 337 | |
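For reference, the single request issued above, rendered as plain data: one DAG-range descriptor from the common roots to the remote heads, carrying exactly the fields the pull needs. The helper name is hypothetical; the dictionary shape is taken from the call above.

    def changesetdata_request(common, remoteheads):
        # One DAG range: everything between the common roots and the
        # remote heads.
        return {
            b'revisions': [
                {
                    b'type': b'changesetdagrange',
                    b'roots': sorted(common),
                    b'heads': sorted(remoteheads),
                }
            ],
            b'fields': {b'bookmarks', b'parents', b'phase', b'revision'},
        }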
|
338 | 338 | def _processchangesetdata(repo, tr, objs): |
|
339 | 339 | repo.hook(b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs)) |
|
340 | 340 | |
|
341 | 341 | urepo = repo.unfiltered() |
|
342 | 342 | cl = urepo.changelog |
|
343 | 343 | |
|
344 | 344 | cl.delayupdate(tr) |
|
345 | 345 | |
|
346 | 346 | # The first emitted object is a header describing the data that |
|
347 | 347 | # follows. |
|
348 | 348 | meta = next(objs) |
|
349 | 349 | |
|
350 | 350 | progress = repo.ui.makeprogress( |
|
351 | 351 | _(b'changesets'), unit=_(b'chunks'), total=meta.get(b'totalitems') |
|
352 | 352 | ) |
|
353 | 353 | |
|
354 | 354 | manifestnodes = {} |
|
355 | 355 | added = [] |
|
356 | 356 | |
|
357 | 357 | def linkrev(node): |
|
358 | 358 | repo.ui.debug(b'add changeset %s\n' % short(node)) |
|
359 | 359 | # Linkrev for changelog is always self. |
|
360 | 360 | return len(cl) |
|
361 | 361 | |
|
362 | 362 | def ondupchangeset(cl, rev): |
|
363 | 363 | added.append(cl.node(rev)) |
|
364 | 364 | |
|
365 | 365 | def onchangeset(cl, rev): |
|
366 | 366 | progress.increment() |
|
367 | 367 | |
|
368 | 368 | revision = cl.changelogrevision(rev) |
|
369 | 369 | added.append(cl.node(rev)) |
|
370 | 370 | |
|
371 | 371 | # We need to preserve the mapping of changelog revision to node |
|
372 | 372 | # so we can set the linkrev accordingly when manifests are added. |
|
373 | 373 | manifestnodes[rev] = revision.manifest |
|
374 | 374 | |
|
375 | 375 | repo.register_changeset(rev, revision) |
|
376 | 376 | |
|
377 | 377 | nodesbyphase = {phase: set() for phase in phases.phasenames.values()} |
|
378 | 378 | remotebookmarks = {} |
|
379 | 379 | |
|
380 | 380 | # addgroup() expects an 8-tuple describing revisions. This normalizes
|
381 | 381 | # the wire data to that format. |
|
382 | 382 | # |
|
383 | 383 | # This loop also aggregates non-revision metadata, such as phase |
|
384 | 384 | # data. |
|
385 | 385 | def iterrevisions(): |
|
386 | 386 | for cset in objs: |
|
387 | 387 | node = cset[b'node'] |
|
388 | 388 | |
|
389 | 389 | if b'phase' in cset: |
|
390 | 390 | nodesbyphase[cset[b'phase']].add(node) |
|
391 | 391 | |
|
392 | 392 | for mark in cset.get(b'bookmarks', []): |
|
393 | 393 | remotebookmarks[mark] = node |
|
394 | 394 | |
|
395 | 395 | # TODO add mechanism for extensions to examine records so they |
|
396 | 396 | # can siphon off custom data fields. |
|
397 | 397 | |
|
398 | 398 | extrafields = {} |
|
399 | 399 | |
|
400 | 400 | for field, size in cset.get(b'fieldsfollowing', []): |
|
401 | 401 | extrafields[field] = next(objs) |
|
402 | 402 | |
|
403 | 403 | # Some entries might be metadata-only updates.
|
404 | 404 | if b'revision' not in extrafields: |
|
405 | 405 | continue |
|
406 | 406 | |
|
407 | 407 | data = extrafields[b'revision'] |
|
408 | 408 | |
|
409 | 409 | yield ( |
|
410 | 410 | node, |
|
411 | 411 | cset[b'parents'][0], |
|
412 | 412 | cset[b'parents'][1], |
|
413 | 413 | # Linknode is always itself for changesets. |
|
414 | 414 | cset[b'node'], |
|
415 | 415 | # We always send full revisions. So delta base is not set. |
|
416 | 416 | nullid, |
|
417 | 417 | mdiff.trivialdiffheader(len(data)) + data, |
|
418 | 418 | # Flags not yet supported. |
|
419 | 419 | 0, |
|
420 | # Sidedata not yet supported | |
|
421 | {}, | |
|
420 | 422 | ) |
|
421 | 423 | |
|
422 | 424 | cl.addgroup( |
|
423 | 425 | iterrevisions(), |
|
424 | 426 | linkrev, |
|
425 | 427 | weakref.proxy(tr), |
|
426 | 428 | alwayscache=True, |
|
427 | 429 | addrevisioncb=onchangeset, |
|
428 | 430 | duplicaterevisioncb=ondupchangeset, |
|
429 | 431 | ) |
|
430 | 432 | |
|
431 | 433 | progress.complete() |
|
432 | 434 | |
|
433 | 435 | return { |
|
434 | 436 | b'added': added, |
|
435 | 437 | b'nodesbyphase': nodesbyphase, |
|
436 | 438 | b'bookmarks': remotebookmarks, |
|
437 | 439 | b'manifestnodes': manifestnodes, |
|
438 | 440 | } |
|
439 | 441 | |
|
440 | 442 | |
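The 8-tuple yielded by iterrevisions() above can be built from first principles: a full revision is shipped as a "delta" against the null revision by prefixing a trivial diff header. A hedged sketch — trivial_diff_header reproduces mdiff.trivialdiffheader's big-endian (start, end, length) packing, while the record layout and helper names are simplified stand-ins:

    import struct

    NULLID = b'\x00' * 20  # the null node

    def trivial_diff_header(length):
        # "replace bytes 0..0 with `length` new bytes" -- a full-text delta
        return struct.pack('>lll', 0, 0, length)

    def normalize_changeset(record, revision_text):
        return (
            record['node'],
            record['parents'][0],
            record['parents'][1],
            record['node'],   # a changeset's linknode is always itself
            NULLID,           # full revision, so the delta base is null
            trivial_diff_header(len(revision_text)) + revision_text,
            0,                # flags not yet supported
            {},               # sidedata not yet supported
        )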
|
441 | 443 | def _fetchmanifests(repo, tr, remote, manifestnodes): |
|
442 | 444 | rootmanifest = repo.manifestlog.getstorage(b'') |
|
443 | 445 | |
|
444 | 446 | # Some manifests can be shared between changesets. Filter out revisions |
|
445 | 447 | # we already know about. |
|
446 | 448 | fetchnodes = [] |
|
447 | 449 | linkrevs = {} |
|
448 | 450 | seen = set() |
|
449 | 451 | |
|
450 | 452 | for clrev, node in sorted(pycompat.iteritems(manifestnodes)): |
|
451 | 453 | if node in seen: |
|
452 | 454 | continue |
|
453 | 455 | |
|
454 | 456 | try: |
|
455 | 457 | rootmanifest.rev(node) |
|
456 | 458 | except error.LookupError: |
|
457 | 459 | fetchnodes.append(node) |
|
458 | 460 | linkrevs[node] = clrev |
|
459 | 461 | |
|
460 | 462 | seen.add(node) |
|
461 | 463 | |
|
462 | 464 | # TODO handle tree manifests |
|
463 | 465 | |
|
464 | 466 | # addgroup() expects an 8-tuple describing revisions. This normalizes
|
465 | 467 | # the wire data to that format. |
|
466 | 468 | def iterrevisions(objs, progress): |
|
467 | 469 | for manifest in objs: |
|
468 | 470 | node = manifest[b'node'] |
|
469 | 471 | |
|
470 | 472 | extrafields = {} |
|
471 | 473 | |
|
472 | 474 | for field, size in manifest.get(b'fieldsfollowing', []): |
|
473 | 475 | extrafields[field] = next(objs) |
|
474 | 476 | |
|
475 | 477 | if b'delta' in extrafields: |
|
476 | 478 | basenode = manifest[b'deltabasenode'] |
|
477 | 479 | delta = extrafields[b'delta'] |
|
478 | 480 | elif b'revision' in extrafields: |
|
479 | 481 | basenode = nullid |
|
480 | 482 | revision = extrafields[b'revision'] |
|
481 | 483 | delta = mdiff.trivialdiffheader(len(revision)) + revision |
|
482 | 484 | else: |
|
483 | 485 | continue |
|
484 | 486 | |
|
485 | 487 | yield ( |
|
486 | 488 | node, |
|
487 | 489 | manifest[b'parents'][0], |
|
488 | 490 | manifest[b'parents'][1], |
|
489 | 491 | # The value passed in is passed to the lookup function passed |
|
490 | 492 | # to addgroup(). We already have a map of manifest node to |
|
491 | 493 | # changelog revision number. So we just pass in the |
|
492 | 494 | # manifest node here and use linkrevs.__getitem__ as the |
|
493 | 495 | # resolution function. |
|
494 | 496 | node, |
|
495 | 497 | basenode, |
|
496 | 498 | delta, |
|
497 | 499 | # Flags not yet supported. |
|
498 | 500 | 0, |
|
501 | # Sidedata not yet supported. | |
|
502 | {}, | |
|
499 | 503 | ) |
|
500 | 504 | |
|
501 | 505 | progress.increment() |
|
502 | 506 | |
|
503 | 507 | progress = repo.ui.makeprogress( |
|
504 | 508 | _(b'manifests'), unit=_(b'chunks'), total=len(fetchnodes) |
|
505 | 509 | ) |
|
506 | 510 | |
|
507 | 511 | commandmeta = remote.apidescriptor[b'commands'][b'manifestdata'] |
|
508 | 512 | batchsize = commandmeta.get(b'recommendedbatchsize', 10000) |
|
509 | 513 | # TODO make size configurable on client? |
|
510 | 514 | |
|
511 | 515 | # We send commands 1 at a time to the remote. This is not the most |
|
512 | 516 | # efficient because we incur a round trip at the end of each batch. |
|
513 | 517 | # However, the existing frame-based reactor keeps consuming server |
|
514 | 518 | # data in the background. And this results in response data buffering |
|
515 | 519 | # in memory. This can consume gigabytes of memory. |
|
516 | 520 | # TODO send multiple commands in a request once background buffering |
|
517 | 521 | # issues are resolved. |
|
518 | 522 | |
|
519 | 523 | added = [] |
|
520 | 524 | |
|
521 | 525 | for i in pycompat.xrange(0, len(fetchnodes), batchsize): |
|
522 | 526 | batch = [node for node in fetchnodes[i : i + batchsize]] |
|
523 | 527 | if not batch: |
|
524 | 528 | continue |
|
525 | 529 | |
|
526 | 530 | with remote.commandexecutor() as e: |
|
527 | 531 | objs = e.callcommand( |
|
528 | 532 | b'manifestdata', |
|
529 | 533 | { |
|
530 | 534 | b'tree': b'', |
|
531 | 535 | b'nodes': batch, |
|
532 | 536 | b'fields': {b'parents', b'revision'}, |
|
533 | 537 | b'haveparents': True, |
|
534 | 538 | }, |
|
535 | 539 | ).result() |
|
536 | 540 | |
|
537 | 541 | # Chomp off header object. |
|
538 | 542 | next(objs) |
|
539 | 543 | |
|
540 | 544 | def onchangeset(cl, rev): |
|
541 | 545 | added.append(cl.node(rev)) |
|
542 | 546 | |
|
543 | 547 | rootmanifest.addgroup( |
|
544 | 548 | iterrevisions(objs, progress), |
|
545 | 549 | linkrevs.__getitem__, |
|
546 | 550 | weakref.proxy(tr), |
|
547 | 551 | addrevisioncb=onchangeset, |
|
548 | 552 | duplicaterevisioncb=onchangeset, |
|
549 | 553 | ) |
|
550 | 554 | |
|
551 | 555 | progress.complete() |
|
552 | 556 | |
|
553 | 557 | return { |
|
554 | 558 | b'added': added, |
|
555 | 559 | b'linkrevs': linkrevs, |
|
556 | 560 | } |
|
557 | 561 | |
|
558 | 562 | |
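The batching loop above trades throughput for bounded memory: one round trip per slice keeps the frame-based reactor from buffering an unbounded response. Its skeleton, with the fetch callable standing in for a callcommand(b'manifestdata', ...) round trip:

    def fetch_in_batches(nodes, batchsize, fetch):
        # One remote command per slice; fetch() sees at most batchsize nodes.
        results = []
        for i in range(0, len(nodes), batchsize):
            batch = nodes[i : i + batchsize]
            if batch:
                results.extend(fetch(batch))
        return results

    # fetch_in_batches(list(range(25)), 10, lambda b: [len(b)]) -> [10, 10, 5]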
|
559 | 563 | def _derivefilesfrommanifests(repo, matcher, manifestnodes): |
|
560 | 564 | """Determine what file nodes are relevant given a set of manifest nodes. |
|
561 | 565 | |
|
562 | 566 | Returns a dict mapping file paths to dicts of file node to first manifest |
|
563 | 567 | node. |
|
564 | 568 | """ |
|
565 | 569 | ml = repo.manifestlog |
|
566 | 570 | fnodes = collections.defaultdict(dict) |
|
567 | 571 | |
|
568 | 572 | progress = repo.ui.makeprogress( |
|
569 | 573 | _(b'scanning manifests'), total=len(manifestnodes) |
|
570 | 574 | ) |
|
571 | 575 | |
|
572 | 576 | with progress: |
|
573 | 577 | for manifestnode in manifestnodes: |
|
574 | 578 | m = ml.get(b'', manifestnode) |
|
575 | 579 | |
|
576 | 580 | # TODO this will pull in unwanted nodes because it takes the storage |
|
577 | 581 | # delta into consideration. What we really want is something that |
|
578 | 582 | # takes the delta between the manifest's parents. And ideally we |
|
579 | 583 | # would ignore file nodes that are known locally. For now, ignore |
|
580 | 584 | # both these limitations. This will result in incremental fetches |
|
581 | 585 | # requesting data we already have. So this is far from ideal. |
|
582 | 586 | md = m.readfast() |
|
583 | 587 | |
|
584 | 588 | for path, fnode in md.items(): |
|
585 | 589 | if matcher(path): |
|
586 | 590 | fnodes[path].setdefault(fnode, manifestnode) |
|
587 | 591 | |
|
588 | 592 | progress.increment() |
|
589 | 593 | |
|
590 | 594 | return fnodes |
|
591 | 595 | |
|
592 | 596 | |
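The mapping built above can be shown with manifests modeled as plain path -> filenode dicts; setdefault() is what records only the *first* manifest to introduce each file node. A toy version (derive_filenodes and manifests_by_node are illustrative names):

    import collections

    def derive_filenodes(manifests_by_node, matcher=lambda path: True):
        fnodes = collections.defaultdict(dict)
        for manifestnode, md in manifests_by_node.items():
            for path, fnode in md.items():
                if matcher(path):
                    # keep the first manifest that introduced this file node
                    fnodes[path].setdefault(fnode, manifestnode)
        return fnodes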
|
593 | 597 | def _fetchfiles(repo, tr, remote, fnodes, linkrevs): |
|
594 | 598 | """Fetch file data from explicit file revisions.""" |
|
595 | 599 | |
|
596 | 600 | def iterrevisions(objs, progress): |
|
597 | 601 | for filerevision in objs: |
|
598 | 602 | node = filerevision[b'node'] |
|
599 | 603 | |
|
600 | 604 | extrafields = {} |
|
601 | 605 | |
|
602 | 606 | for field, size in filerevision.get(b'fieldsfollowing', []): |
|
603 | 607 | extrafields[field] = next(objs) |
|
604 | 608 | |
|
605 | 609 | if b'delta' in extrafields: |
|
606 | 610 | basenode = filerevision[b'deltabasenode'] |
|
607 | 611 | delta = extrafields[b'delta'] |
|
608 | 612 | elif b'revision' in extrafields: |
|
609 | 613 | basenode = nullid |
|
610 | 614 | revision = extrafields[b'revision'] |
|
611 | 615 | delta = mdiff.trivialdiffheader(len(revision)) + revision |
|
612 | 616 | else: |
|
613 | 617 | continue |
|
614 | 618 | |
|
615 | 619 | yield ( |
|
616 | 620 | node, |
|
617 | 621 | filerevision[b'parents'][0], |
|
618 | 622 | filerevision[b'parents'][1], |
|
619 | 623 | node, |
|
620 | 624 | basenode, |
|
621 | 625 | delta, |
|
622 | 626 | # Flags not yet supported. |
|
623 | 627 | 0, |
|
628 | # Sidedata not yet supported. | |
|
629 | {}, | |
|
624 | 630 | ) |
|
625 | 631 | |
|
626 | 632 | progress.increment() |
|
627 | 633 | |
|
628 | 634 | progress = repo.ui.makeprogress( |
|
629 | 635 | _(b'files'), |
|
630 | 636 | unit=_(b'chunks'), |
|
631 | 637 | total=sum(len(v) for v in pycompat.itervalues(fnodes)), |
|
632 | 638 | ) |
|
633 | 639 | |
|
634 | 640 | # TODO make batch size configurable |
|
635 | 641 | batchsize = 10000 |
|
636 | 642 | fnodeslist = [x for x in sorted(fnodes.items())] |
|
637 | 643 | |
|
638 | 644 | for i in pycompat.xrange(0, len(fnodeslist), batchsize): |
|
639 | 645 | batch = [x for x in fnodeslist[i : i + batchsize]] |
|
640 | 646 | if not batch: |
|
641 | 647 | continue |
|
642 | 648 | |
|
643 | 649 | with remote.commandexecutor() as e: |
|
644 | 650 | fs = [] |
|
645 | 651 | locallinkrevs = {} |
|
646 | 652 | |
|
647 | 653 | for path, nodes in batch: |
|
648 | 654 | fs.append( |
|
649 | 655 | ( |
|
650 | 656 | path, |
|
651 | 657 | e.callcommand( |
|
652 | 658 | b'filedata', |
|
653 | 659 | { |
|
654 | 660 | b'path': path, |
|
655 | 661 | b'nodes': sorted(nodes), |
|
656 | 662 | b'fields': {b'parents', b'revision'}, |
|
657 | 663 | b'haveparents': True, |
|
658 | 664 | }, |
|
659 | 665 | ), |
|
660 | 666 | ) |
|
661 | 667 | ) |
|
662 | 668 | |
|
663 | 669 | locallinkrevs[path] = { |
|
664 | 670 | node: linkrevs[manifestnode] |
|
665 | 671 | for node, manifestnode in pycompat.iteritems(nodes) |
|
666 | 672 | } |
|
667 | 673 | |
|
668 | 674 | for path, f in fs: |
|
669 | 675 | objs = f.result() |
|
670 | 676 | |
|
671 | 677 | # Chomp off header objects. |
|
672 | 678 | next(objs) |
|
673 | 679 | |
|
674 | 680 | store = repo.file(path) |
|
675 | 681 | store.addgroup( |
|
676 | 682 | iterrevisions(objs, progress), |
|
677 | 683 | locallinkrevs[path].__getitem__, |
|
678 | 684 | weakref.proxy(tr), |
|
679 | 685 | ) |
|
680 | 686 | |
|
681 | 687 | |
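One convention recurs in every fetcher above: a record's b'fieldsfollowing' entry lists (field, size) pairs, and each listed field's payload arrives as the next object on the stream. Extracted into a helper for clarity — the real code inlines this pattern:

    def read_extrafields(record, objs):
        extrafields = {}
        for field, size in record.get(b'fieldsfollowing', []):
            # `size` advertises the payload length; the payload itself is
            # simply the next object yielded by the stream.
            extrafields[field] = next(objs)
        return extrafields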
|
682 | 688 | def _fetchfilesfromcsets( |
|
683 | 689 | repo, tr, remote, pathfilter, fnodes, csets, manlinkrevs, shallow=False |
|
684 | 690 | ): |
|
685 | 691 | """Fetch file data from explicit changeset revisions.""" |
|
686 | 692 | |
|
687 | 693 | def iterrevisions(objs, remaining, progress): |
|
688 | 694 | while remaining: |
|
689 | 695 | filerevision = next(objs) |
|
690 | 696 | |
|
691 | 697 | node = filerevision[b'node'] |
|
692 | 698 | |
|
693 | 699 | extrafields = {} |
|
694 | 700 | |
|
695 | 701 | for field, size in filerevision.get(b'fieldsfollowing', []): |
|
696 | 702 | extrafields[field] = next(objs) |
|
697 | 703 | |
|
698 | 704 | if b'delta' in extrafields: |
|
699 | 705 | basenode = filerevision[b'deltabasenode'] |
|
700 | 706 | delta = extrafields[b'delta'] |
|
701 | 707 | elif b'revision' in extrafields: |
|
702 | 708 | basenode = nullid |
|
703 | 709 | revision = extrafields[b'revision'] |
|
704 | 710 | delta = mdiff.trivialdiffheader(len(revision)) + revision |
|
705 | 711 | else: |
|
706 | 712 | continue |
|
707 | 713 | |
|
708 | 714 | if b'linknode' in filerevision: |
|
709 | 715 | linknode = filerevision[b'linknode'] |
|
710 | 716 | else: |
|
711 | 717 | linknode = node |
|
712 | 718 | |
|
713 | 719 | yield ( |
|
714 | 720 | node, |
|
715 | 721 | filerevision[b'parents'][0], |
|
716 | 722 | filerevision[b'parents'][1], |
|
717 | 723 | linknode, |
|
718 | 724 | basenode, |
|
719 | 725 | delta, |
|
720 | 726 | # Flags not yet supported. |
|
721 | 727 | 0, |
|
728 | # Sidedata not yet supported. | |
|
729 | {}, | |
|
722 | 730 | ) |
|
723 | 731 | |
|
724 | 732 | progress.increment() |
|
725 | 733 | remaining -= 1 |
|
726 | 734 | |
|
727 | 735 | progress = repo.ui.makeprogress( |
|
728 | 736 | _(b'files'), |
|
729 | 737 | unit=_(b'chunks'), |
|
730 | 738 | total=sum(len(v) for v in pycompat.itervalues(fnodes)), |
|
731 | 739 | ) |
|
732 | 740 | |
|
733 | 741 | commandmeta = remote.apidescriptor[b'commands'][b'filesdata'] |
|
734 | 742 | batchsize = commandmeta.get(b'recommendedbatchsize', 50000) |
|
735 | 743 | |
|
736 | 744 | shallowfiles = repository.REPO_FEATURE_SHALLOW_FILE_STORAGE in repo.features |
|
737 | 745 | fields = {b'parents', b'revision'} |
|
738 | 746 | clrev = repo.changelog.rev |
|
739 | 747 | |
|
740 | 748 | # There are no guarantees that we'll have ancestor revisions if |
|
741 | 749 | # a) this repo has shallow file storage or b) shallow data fetching is enabled.
|
742 | 750 | # Force remote to not delta against possibly unknown revisions when these |
|
743 | 751 | # conditions hold. |
|
744 | 752 | haveparents = not (shallowfiles or shallow) |
|
745 | 753 | |
|
746 | 754 | # Similarly, we may not have calculated linkrevs for all incoming file |
|
747 | 755 | # revisions. Ask the remote to do work for us in this case. |
|
748 | 756 | if not haveparents: |
|
749 | 757 | fields.add(b'linknode') |
|
750 | 758 | |
|
751 | 759 | for i in pycompat.xrange(0, len(csets), batchsize): |
|
752 | 760 | batch = [x for x in csets[i : i + batchsize]] |
|
753 | 761 | if not batch: |
|
754 | 762 | continue |
|
755 | 763 | |
|
756 | 764 | with remote.commandexecutor() as e: |
|
757 | 765 | args = { |
|
758 | 766 | b'revisions': [ |
|
759 | 767 | { |
|
760 | 768 | b'type': b'changesetexplicit', |
|
761 | 769 | b'nodes': batch, |
|
762 | 770 | } |
|
763 | 771 | ], |
|
764 | 772 | b'fields': fields, |
|
765 | 773 | b'haveparents': haveparents, |
|
766 | 774 | } |
|
767 | 775 | |
|
768 | 776 | if pathfilter: |
|
769 | 777 | args[b'pathfilter'] = pathfilter |
|
770 | 778 | |
|
771 | 779 | objs = e.callcommand(b'filesdata', args).result() |
|
772 | 780 | |
|
773 | 781 | # First object is an overall header. |
|
774 | 782 | overall = next(objs) |
|
775 | 783 | |
|
776 | 784 | # We have overall['totalpaths'] segments. |
|
777 | 785 | for i in pycompat.xrange(overall[b'totalpaths']): |
|
778 | 786 | header = next(objs) |
|
779 | 787 | |
|
780 | 788 | path = header[b'path'] |
|
781 | 789 | store = repo.file(path) |
|
782 | 790 | |
|
783 | 791 | linkrevs = { |
|
784 | 792 | fnode: manlinkrevs[mnode] |
|
785 | 793 | for fnode, mnode in pycompat.iteritems(fnodes[path]) |
|
786 | 794 | } |
|
787 | 795 | |
|
788 | 796 | def getlinkrev(node): |
|
789 | 797 | if node in linkrevs: |
|
790 | 798 | return linkrevs[node] |
|
791 | 799 | else: |
|
792 | 800 | return clrev(node) |
|
793 | 801 | |
|
794 | 802 | store.addgroup( |
|
795 | 803 | iterrevisions(objs, header[b'totalitems'], progress), |
|
796 | 804 | getlinkrev, |
|
797 | 805 | weakref.proxy(tr), |
|
798 | 806 | maybemissingparents=shallow, |
|
799 | 807 | ) |
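The shallow-related decisions above condense to: if either the local file storage or the requested fetch is shallow, ancestor revisions may be missing locally, so we cannot promise parents to the remote (no deltas against them) and we ask it to compute linknodes for us. As a pure function, mirroring the haveparents/fields logic in _fetchfilesfromcsets():

    def file_fetch_options(shallow_store, shallow_fetch):
        haveparents = not (shallow_store or shallow_fetch)
        fields = {b'parents', b'revision'}
        if not haveparents:
            # Without local ancestors we cannot map file nodes to linkrevs
            # ourselves, so request linknodes from the remote.
            fields.add(b'linknode')
        return haveparents, fields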
|