changegroup: add v4 changegroup for revlog v2 exchange...
Raphaël Gomès
r47445:a41565be default

The requested changes are too big and content was truncated.

@@ -1,513 +1,516 @@
# remotefilelog.py - filelog implementation where filelog history is stored
# remotely
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import

import collections
import os

from mercurial.node import (
    bin,
    nullid,
    wdirfilenodeids,
    wdirid,
)
from mercurial.i18n import _
from mercurial import (
    ancestor,
    error,
    mdiff,
    pycompat,
    revlog,
    util,
)
from mercurial.utils import storageutil
from mercurial.revlogutils import flagutil

from . import (
    constants,
    fileserverclient,
    shallowutil,
)


class remotefilelognodemap(object):
    def __init__(self, filename, store):
        self._filename = filename
        self._store = store

    def __contains__(self, node):
        missing = self._store.getmissing([(self._filename, node)])
        return not bool(missing)

    def __get__(self, node):
        if node not in self:
            raise KeyError(node)
        return node


class remotefilelog(object):

    _generaldelta = True
    _flagserrorclass = error.RevlogError

    def __init__(self, opener, path, repo):
        self.opener = opener
        self.filename = path
        self.repo = repo
        self.nodemap = remotefilelognodemap(self.filename, repo.contentstore)

        self.version = 1

        self._flagprocessors = dict(flagutil.flagprocessors)

    def read(self, node):
        """returns the file contents at this node"""
        t = self.revision(node)
        if not t.startswith(b'\1\n'):
            return t
        s = t.index(b'\1\n', 2)
        return t[s + 2 :]

    def add(self, text, meta, transaction, linknode, p1=None, p2=None):
        # hash with the metadata, like in vanilla filelogs
        hashtext = shallowutil.createrevlogtext(
            text, meta.get(b'copy'), meta.get(b'copyrev')
        )
        node = storageutil.hashrevisionsha1(hashtext, p1, p2)
        return self.addrevision(
            hashtext, transaction, linknode, p1, p2, node=node
        )

    def _createfileblob(self, text, meta, flags, p1, p2, node, linknode):
        # text passed to "_createfileblob" does not include filelog metadata
        header = shallowutil.buildfileblobheader(len(text), flags)
        data = b"%s\0%s" % (header, text)

        realp1 = p1
        copyfrom = b""
        if meta and b'copy' in meta:
            copyfrom = meta[b'copy']
            realp1 = bin(meta[b'copyrev'])

        data += b"%s%s%s%s%s\0" % (node, realp1, p2, linknode, copyfrom)

        visited = set()

        pancestors = {}
        queue = []
        if realp1 != nullid:
            p1flog = self
            if copyfrom:
                p1flog = remotefilelog(self.opener, copyfrom, self.repo)

            pancestors.update(p1flog.ancestormap(realp1))
            queue.append(realp1)
            visited.add(realp1)
        if p2 != nullid:
            pancestors.update(self.ancestormap(p2))
            queue.append(p2)
            visited.add(p2)

        ancestortext = b""

        # add the ancestors in topological order
        while queue:
            c = queue.pop(0)
            pa1, pa2, ancestorlinknode, pacopyfrom = pancestors[c]

            pacopyfrom = pacopyfrom or b''
            ancestortext += b"%s%s%s%s%s\0" % (
                c,
                pa1,
                pa2,
                ancestorlinknode,
                pacopyfrom,
            )

            if pa1 != nullid and pa1 not in visited:
                queue.append(pa1)
                visited.add(pa1)
            if pa2 != nullid and pa2 not in visited:
                queue.append(pa2)
                visited.add(pa2)

        data += ancestortext

        return data

    def addrevision(
        self,
        text,
        transaction,
        linknode,
        p1,
        p2,
        cachedelta=None,
        node=None,
        flags=revlog.REVIDX_DEFAULT_FLAGS,
        sidedata=None,
    ):
        # text passed to "addrevision" includes hg filelog metadata header
        if node is None:
            node = storageutil.hashrevisionsha1(text, p1, p2)

        meta, metaoffset = storageutil.parsemeta(text)
        rawtext, validatehash = flagutil.processflagswrite(
            self,
            text,
            flags,
        )
        return self.addrawrevision(
            rawtext,
            transaction,
            linknode,
            p1,
            p2,
            node,
            flags,
            cachedelta,
            _metatuple=(meta, metaoffset),
        )

    def addrawrevision(
        self,
        rawtext,
        transaction,
        linknode,
        p1,
        p2,
        node,
        flags,
        cachedelta=None,
        _metatuple=None,
    ):
        if _metatuple:
            # _metatuple: used by "addrevision" internally by remotefilelog
            # meta was parsed confidently
            meta, metaoffset = _metatuple
        else:
            # not from self.addrevision, but something else (repo._filecommit)
            # calls addrawrevision directly. remotefilelog needs to get and
            # strip filelog metadata.
            # we don't have confidence about whether rawtext contains filelog
            # metadata or not (flag processor could replace it), so we just
            # parse it as best-effort.
            # in LFS (flags != 0)'s case, the best way is to call LFS code to
            # get the meta information, instead of storageutil.parsemeta.
            meta, metaoffset = storageutil.parsemeta(rawtext)
        if flags != 0:
            # when flags != 0, be conservative and do not mangle rawtext, since
            # a read flag processor expects the text not being mangled at all.
            metaoffset = 0
        if metaoffset:
            # remotefilelog fileblob stores copy metadata in its ancestortext,
            # not its main blob. so we need to remove filelog metadata
            # (containing copy information) from text.
            blobtext = rawtext[metaoffset:]
        else:
            blobtext = rawtext
        data = self._createfileblob(
            blobtext, meta, flags, p1, p2, node, linknode
        )
        self.repo.contentstore.addremotefilelognode(self.filename, node, data)

        return node

    def renamed(self, node):
        ancestors = self.repo.metadatastore.getancestors(self.filename, node)
        p1, p2, linknode, copyfrom = ancestors[node]
        if copyfrom:
            return (copyfrom, p1)

        return False

    def size(self, node):
        """return the size of a given revision"""
        return len(self.read(node))

    rawsize = size

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """

        if node == nullid:
            return True

        nodetext = self.read(node)
        return nodetext != text

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        if self.filename == b'.hgtags':
            # The length of .hgtags is used to fast path tag checking.
            # remotefilelog doesn't support .hgtags since the entire .hgtags
            # history is needed. Use the excludepattern setting to make
            # .hgtags a normal filelog.
            return 0

        raise RuntimeError(b"len not supported")

    def heads(self):
        # Fake heads of the filelog to satisfy hgweb.
        return []

    def empty(self):
        return False

    def flags(self, node):
        if isinstance(node, int):
            raise error.ProgrammingError(
                b'remotefilelog does not accept integer rev for flags'
            )
        store = self.repo.contentstore
        return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)

    def parents(self, node):
        if node == nullid:
            return nullid, nullid

        ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
        p1, p2, linknode, copyfrom = ancestormap[node]
        if copyfrom:
            p1 = nullid

        return p1, p2

    def parentrevs(self, rev):
        # TODO(augie): this is a node and should be a rev, but for now
        # nothing in core seems to actually break.
        return self.parents(rev)

    def linknode(self, node):
        ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
        p1, p2, linknode, copyfrom = ancestormap[node]
        return linknode

    def linkrev(self, node):
        return self.repo.unfiltered().changelog.rev(self.linknode(node))

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltaprevious=False,
        deltamode=None,
+       sidedata_helpers=None,
    ):
        # we don't use any of these parameters here
        del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious
        del deltamode
        prevnode = None
        for node in nodes:
            p1, p2 = self.parents(node)
            if prevnode is None:
                basenode = prevnode = p1
            if basenode == node:
                basenode = nullid
            if basenode != nullid:
                revision = None
                delta = self.revdiff(basenode, node)
            else:
                revision = self.rawdata(node)
                delta = None
            yield revlog.revlogrevisiondelta(
                node=node,
                p1node=p1,
                p2node=p2,
                linknode=self.linknode(node),
                basenode=basenode,
                flags=self.flags(node),
                baserevisionsize=None,
                revision=revision,
                delta=delta,
+               # Sidedata is not supported yet
+               sidedata=None,
            )

    def revdiff(self, node1, node2):
        return mdiff.textdiff(self.rawdata(node1), self.rawdata(node2))

    def lookup(self, node):
        if len(node) == 40:
            node = bin(node)
        if len(node) != 20:
            raise error.LookupError(
                node, self.filename, _(b'invalid lookup input')
            )

        return node

    def rev(self, node):
        # This is a hack to make TortoiseHG work.
        return node

    def node(self, rev):
        # This is a hack.
        if isinstance(rev, int):
            raise error.ProgrammingError(
                b'remotefilelog does not convert integer rev to node'
            )
        return rev

    def _processflags(self, text, flags, operation, raw=False):
        """deprecated entry point to access flag processors"""
        msg = b'_processflag(...) use the specialized variant'
        util.nouideprecwarn(msg, b'5.2', stacklevel=2)
        if raw:
            return text, flagutil.processflagsraw(self, text, flags)
        elif operation == b'read':
            return flagutil.processflagsread(self, text, flags)
        else:  # write operation
            return flagutil.processflagswrite(self, text, flags)

    def revision(self, node, raw=False):
        """returns the revlog contents at this node.
        this includes the meta data traditionally included in file revlogs.
        this is generally only used for bundling and communicating with vanilla
        hg clients.
        """
        if node == nullid:
            return b""
        if len(node) != 20:
            raise error.LookupError(
                node, self.filename, _(b'invalid revision input')
            )
        if node == wdirid or node in wdirfilenodeids:
            raise error.WdirUnsupported

        store = self.repo.contentstore
        rawtext = store.get(self.filename, node)
        if raw:
            return rawtext
        flags = store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
        if flags == 0:
            return rawtext
        return flagutil.processflagsread(self, rawtext, flags)[0]

    def rawdata(self, node):
        return self.revision(node, raw=False)

    def _read(self, id):
        """reads the raw file blob from disk, cache, or server"""
        fileservice = self.repo.fileservice
        localcache = fileservice.localcache
        cachekey = fileserverclient.getcachekey(
            self.repo.name, self.filename, id
        )
        try:
            return localcache.read(cachekey)
        except KeyError:
            pass

        localkey = fileserverclient.getlocalkey(self.filename, id)
        localpath = os.path.join(self.localpath, localkey)
        try:
            return shallowutil.readfile(localpath)
        except IOError:
            pass

        fileservice.prefetch([(self.filename, id)])
        try:
            return localcache.read(cachekey)
        except KeyError:
            pass

        raise error.LookupError(id, self.filename, _(b'no node'))

    def ancestormap(self, node):
        return self.repo.metadatastore.getancestors(self.filename, node)

    def ancestor(self, a, b):
        if a == nullid or b == nullid:
            return nullid

        revmap, parentfunc = self._buildrevgraph(a, b)
        nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}

        ancs = ancestor.ancestors(parentfunc, revmap[a], revmap[b])
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(nodemap.__getitem__, ancs))
        return nullid

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""

        if a == nullid or b == nullid:
            return nullid

        revmap, parentfunc = self._buildrevgraph(a, b)
        nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}

        ancs = ancestor.commonancestorsheads(parentfunc, revmap[a], revmap[b])
        return map(nodemap.__getitem__, ancs)

    def _buildrevgraph(self, a, b):
        """Builds a numeric revision graph for the given two nodes.
        Returns a node->rev map and a rev->[revs] parent function.
        """
        amap = self.ancestormap(a)
        bmap = self.ancestormap(b)

        # Union the two maps
        parentsmap = collections.defaultdict(list)
        allparents = set()
        for mapping in (amap, bmap):
            for node, pdata in pycompat.iteritems(mapping):
                parents = parentsmap[node]
                p1, p2, linknode, copyfrom = pdata
                # Don't follow renames (copyfrom).
                # remotefilectx.ancestor does that.
                if p1 != nullid and not copyfrom:
                    parents.append(p1)
                    allparents.add(p1)
                if p2 != nullid:
                    parents.append(p2)
                    allparents.add(p2)

        # Breadth first traversal to build linkrev graph
        parentrevs = collections.defaultdict(list)
        revmap = {}
        queue = collections.deque(
            ((None, n) for n in parentsmap if n not in allparents)
        )
        while queue:
            prevrev, current = queue.pop()
            if current in revmap:
                if prevrev:
                    parentrevs[prevrev].append(revmap[current])
                continue

            # Assign linkrevs in reverse order, so start at
            # len(parentsmap) and work backwards.
            currentrev = len(parentsmap) - len(revmap) - 1
            revmap[current] = currentrev

            if prevrev:
                parentrevs[prevrev].append(currentrev)

            for parent in parentsmap.get(current):
                queue.appendleft((currentrev, parent))

        return revmap, parentrevs.__getitem__

    def strip(self, minlink, transaction):
        pass

    # misc unused things
    def files(self):
        return []

    def checksize(self):
        return 0, 0
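The emitrevisions() change above is pure plumbing: remotefilelog now accepts the sidedata_helpers argument that newer changegroup code passes in, and each emitted revlogrevisiondelta carries a sidedata attribute that is always None because this store does not support sidedata yet. As a rough illustration, the sketch below shows how a changegroup-style consumer might read that attribute and treat None as "no sidedata"; EmittedDelta and encode_entries are hypothetical stand-ins for this page, not Mercurial APIs.

from dataclasses import dataclass
from typing import Optional

@dataclass
class EmittedDelta:
    # Mirrors the fields populated in the yield above; names are illustrative.
    node: bytes
    basenode: bytes
    revision: Optional[bytes]
    delta: Optional[bytes]
    sidedata: Optional[dict]  # remotefilelog always emits None for now

def encode_entries(entries):
    """Yield (node, payload, sidedata) the way a cg4-style packer might."""
    for e in entries:
        payload = e.delta if e.delta is not None else e.revision
        yield e.node, payload, e.sidedata or {}  # None means "no sidedata"

if __name__ == '__main__':
    sample = [EmittedDelta(b'\x11' * 20, b'\x00' * 20, b'file content', None, None)]
    for node, payload, sidedata in encode_entries(sample):
        print(node.hex(), payload, sidedata)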
@@ -1,303 +1,307 @@
# shallowbundle.py - bundle10 implementation for use with shallow repositories
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import

from mercurial.i18n import _
from mercurial.node import bin, hex, nullid
from mercurial import (
    bundlerepo,
    changegroup,
    error,
    match,
    mdiff,
    pycompat,
)
from . import (
    constants,
    remotefilelog,
    shallowutil,
)

NoFiles = 0
LocalFiles = 1
AllFiles = 2


def shallowgroup(cls, self, nodelist, rlog, lookup, units=None, reorder=None):
    if not isinstance(rlog, remotefilelog.remotefilelog):
        for c in super(cls, self).group(nodelist, rlog, lookup, units=units):
            yield c
        return

    if len(nodelist) == 0:
        yield self.close()
        return

    nodelist = shallowutil.sortnodes(nodelist, rlog.parents)

    # add the parent of the first rev
    p = rlog.parents(nodelist[0])[0]
    nodelist.insert(0, p)

    # build deltas
    for i in pycompat.xrange(len(nodelist) - 1):
        prev, curr = nodelist[i], nodelist[i + 1]
        linknode = lookup(curr)
        for c in self.nodechunk(rlog, curr, prev, linknode):
            yield c

    yield self.close()


class shallowcg1packer(changegroup.cgpacker):
    def generate(self, commonrevs, clnodes, fastpathlinkrev, source, **kwargs):
        if shallowutil.isenabled(self._repo):
            fastpathlinkrev = False

        return super(shallowcg1packer, self).generate(
            commonrevs, clnodes, fastpathlinkrev, source, **kwargs
        )

    def group(self, nodelist, rlog, lookup, units=None, reorder=None):
        return shallowgroup(
            shallowcg1packer, self, nodelist, rlog, lookup, units=units
        )

-   def generatefiles(self, changedfiles, *args):
+   def generatefiles(self, changedfiles, *args, **kwargs):
        try:
            linknodes, commonrevs, source = args
        except ValueError:
            commonrevs, source, mfdicts, fastpathlinkrev, fnodes, clrevs = args
        if shallowutil.isenabled(self._repo):
            repo = self._repo
            if isinstance(repo, bundlerepo.bundlerepository):
                # If the bundle contains filelogs, we can't pull from it, since
                # bundlerepo is heavily tied to revlogs. Instead require that
                # the user use unbundle instead.
                # Force load the filelog data.
                bundlerepo.bundlerepository.file(repo, b'foo')
                if repo._cgfilespos:
                    raise error.Abort(
                        b"cannot pull from full bundles",
                        hint=b"use `hg unbundle` instead",
                    )
                return []
            filestosend = self.shouldaddfilegroups(source)
            if filestosend == NoFiles:
                changedfiles = list(
                    [f for f in changedfiles if not repo.shallowmatch(f)]
                )

-       return super(shallowcg1packer, self).generatefiles(changedfiles, *args)
+       return super(shallowcg1packer, self).generatefiles(
+           changedfiles, *args, **kwargs
+       )

    def shouldaddfilegroups(self, source):
        repo = self._repo
        if not shallowutil.isenabled(repo):
            return AllFiles

        if source == b"push" or source == b"bundle":
            return AllFiles

        caps = self._bundlecaps or []
        if source == b"serve" or source == b"pull":
            if constants.BUNDLE2_CAPABLITY in caps:
                return LocalFiles
            else:
                # Serving to a full repo requires us to serve everything
                repo.ui.warn(_(b"pulling from a shallow repo\n"))
                return AllFiles

        return NoFiles

    def prune(self, rlog, missing, commonrevs):
        if not isinstance(rlog, remotefilelog.remotefilelog):
            return super(shallowcg1packer, self).prune(
                rlog, missing, commonrevs
            )

        repo = self._repo
        results = []
        for fnode in missing:
            fctx = repo.filectx(rlog.filename, fileid=fnode)
            if fctx.linkrev() not in commonrevs:
                results.append(fnode)
        return results

    def nodechunk(self, revlog, node, prevnode, linknode):
        prefix = b''
        if prevnode == nullid:
            delta = revlog.rawdata(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            # Actually uses remotefilelog.revdiff which works on nodes, not revs
            delta = revlog.revdiff(prevnode, node)
        p1, p2 = revlog.parents(node)
        flags = revlog.flags(node)
        meta = self.builddeltaheader(node, p1, p2, prevnode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield changegroup.chunkheader(l)
        yield meta
        yield delta


def makechangegroup(orig, repo, outgoing, version, source, *args, **kwargs):
    if not shallowutil.isenabled(repo):
        return orig(repo, outgoing, version, source, *args, **kwargs)

    original = repo.shallowmatch
    try:
        # if serving, only send files the clients has patterns for
        if source == b'serve':
            bundlecaps = kwargs.get('bundlecaps')
            includepattern = None
            excludepattern = None
            for cap in bundlecaps or []:
                if cap.startswith(b"includepattern="):
                    raw = cap[len(b"includepattern=") :]
                    if raw:
                        includepattern = raw.split(b'\0')
                elif cap.startswith(b"excludepattern="):
                    raw = cap[len(b"excludepattern=") :]
                    if raw:
                        excludepattern = raw.split(b'\0')
            if includepattern or excludepattern:
                repo.shallowmatch = match.match(
                    repo.root, b'', None, includepattern, excludepattern
                )
            else:
                repo.shallowmatch = match.always()
        return orig(repo, outgoing, version, source, *args, **kwargs)
    finally:
        repo.shallowmatch = original


-def addchangegroupfiles(orig, repo, source, revmap, trp, expectedfiles, *args):
+def addchangegroupfiles(
+   orig, repo, source, revmap, trp, expectedfiles, *args, **kwargs
+):
    if not shallowutil.isenabled(repo):
-       return orig(repo, source, revmap, trp, expectedfiles, *args)
+       return orig(repo, source, revmap, trp, expectedfiles, *args, **kwargs)

    newfiles = 0
    visited = set()
    revisiondatas = {}
    queue = []

    # Normal Mercurial processes each file one at a time, adding all
    # the new revisions for that file at once. In remotefilelog a file
    # revision may depend on a different file's revision (in the case
    # of a rename/copy), so we must lay all revisions down across all
    # files in topological order.

    # read all the file chunks but don't add them
    progress = repo.ui.makeprogress(_(b'files'), total=expectedfiles)
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            break
        f = chunkdata[b"filename"]
        repo.ui.debug(b"adding %s revisions\n" % f)
        progress.increment()

        if not repo.shallowmatch(f):
            fl = repo.file(f)
            deltas = source.deltaiter()
            fl.addgroup(deltas, revmap, trp)
            continue

        chain = None
        while True:
            # returns: (node, p1, p2, cs, deltabase, delta, flags) or None
            revisiondata = source.deltachunk(chain)
            if not revisiondata:
                break

            chain = revisiondata[0]

            revisiondatas[(f, chain)] = revisiondata
            queue.append((f, chain))

            if f not in visited:
                newfiles += 1
                visited.add(f)

        if chain is None:
            raise error.Abort(_(b"received file revlog group is empty"))

    processed = set()

    def available(f, node, depf, depnode):
        if depnode != nullid and (depf, depnode) not in processed:
            if not (depf, depnode) in revisiondatas:
                # It's not in the changegroup, assume it's already
                # in the repo
                return True
            # re-add self to queue
            queue.insert(0, (f, node))
            # add dependency in front
            queue.insert(0, (depf, depnode))
            return False
        return True

    skipcount = 0

    # Prefetch the non-bundled revisions that we will need
    prefetchfiles = []
    for f, node in queue:
        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]

        for dependent in dependents:
            if dependent == nullid or (f, dependent) in revisiondatas:
                continue
            prefetchfiles.append((f, hex(dependent)))

    repo.fileservice.prefetch(prefetchfiles)

    # Apply the revisions in topological order such that a revision
    # is only written once it's deltabase and parents have been written.
    while queue:
        f, node = queue.pop(0)
        if (f, node) in processed:
            continue

        skipcount += 1
        if skipcount > len(queue) + 1:
            raise error.Abort(_(b"circular node dependency"))

        fl = repo.file(f)

        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
-       node, p1, p2, linknode, deltabase, delta, flags = revisiondata
+       node, p1, p2, linknode, deltabase, delta, flags, sidedata = revisiondata

        if not available(f, node, f, deltabase):
            continue

        base = fl.rawdata(deltabase)
        text = mdiff.patch(base, delta)
        if not isinstance(text, bytes):
            text = bytes(text)

        meta, text = shallowutil.parsemeta(text)
        if b'copy' in meta:
            copyfrom = meta[b'copy']
            copynode = bin(meta[b'copyrev'])
            if not available(f, node, copyfrom, copynode):
                continue

        for p in [p1, p2]:
            if p != nullid:
                if not available(f, node, f, p):
                    continue

        fl.add(text, meta, trp, linknode, p1, p2)
        processed.add((f, node))
        skipcount = 0

    progress.complete()

    return len(revisiondatas), newfiles
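The shallowbundle changes follow one pattern: the wrappers grow *args/**kwargs and forward them unchanged, so they keep working as the wrapped changegroup functions gain new parameters (such as sidedata helpers), and the per-revision tuple unpacked from deltachunk() now carries a trailing sidedata element. Below is a minimal sketch of that forwarding idea; the function names are made up for illustration and are not the real extension hooks.

def upstream_addfiles(repo, source, revmap, trp, expectedfiles, sidedata_helpers=None):
    # Stand-in for the wrapped core function; it may grow keyword arguments over time.
    return expectedfiles, sidedata_helpers

def shallow_wrapper(orig, repo, source, revmap, trp, expectedfiles, *args, **kwargs):
    # Shallow-repo specific work would go here; unknown extra arguments are
    # passed through untouched so new upstream parameters keep flowing.
    return orig(repo, source, revmap, trp, expectedfiles, *args, **kwargs)

# The per-revision tuple now carries a trailing sidedata member as well:
revisiondata = (b'node', b'p1', b'p2', b'cs', b'base', b'delta', 0, {})
node, p1, p2, linknode, deltabase, delta, flags, sidedata = revisiondata

print(shallow_wrapper(upstream_addfiles, 'repo', 'src', {}, None, 3, sidedata_helpers=object()))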
@@ -1,1301 +1,1310 @@
1 # sqlitestore.py - Storage backend that uses SQLite
1 # sqlitestore.py - Storage backend that uses SQLite
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """store repository data in SQLite (EXPERIMENTAL)
8 """store repository data in SQLite (EXPERIMENTAL)
9
9
10 The sqlitestore extension enables the storage of repository data in SQLite.
10 The sqlitestore extension enables the storage of repository data in SQLite.
11
11
12 This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
12 This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
13 GUARANTEES. This means that repositories created with this extension may
13 GUARANTEES. This means that repositories created with this extension may
14 only be usable with the exact version of this extension/Mercurial that was
14 only be usable with the exact version of this extension/Mercurial that was
15 used. The extension attempts to enforce this in order to prevent repository
15 used. The extension attempts to enforce this in order to prevent repository
16 corruption.
16 corruption.
17
17
18 In addition, several features are not yet supported or have known bugs:
18 In addition, several features are not yet supported or have known bugs:
19
19
20 * Only some data is stored in SQLite. Changeset, manifest, and other repository
20 * Only some data is stored in SQLite. Changeset, manifest, and other repository
21 data is not yet stored in SQLite.
21 data is not yet stored in SQLite.
22 * Transactions are not robust. If the process is aborted at the right time
22 * Transactions are not robust. If the process is aborted at the right time
23 during transaction close/rollback, the repository could be in an inconsistent
23 during transaction close/rollback, the repository could be in an inconsistent
24 state. This problem will diminish once all repository data is tracked by
24 state. This problem will diminish once all repository data is tracked by
25 SQLite.
25 SQLite.
26 * Bundle repositories do not work (the ability to use e.g.
26 * Bundle repositories do not work (the ability to use e.g.
27 `hg -R <bundle-file> log` to automatically overlay a bundle on top of the
27 `hg -R <bundle-file> log` to automatically overlay a bundle on top of the
28 existing repository).
28 existing repository).
29 * Various other features don't work.
29 * Various other features don't work.
30
30
31 This extension should work for basic clone/pull, update, and commit workflows.
31 This extension should work for basic clone/pull, update, and commit workflows.
32 Some history rewriting operations may fail due to lack of support for bundle
32 Some history rewriting operations may fail due to lack of support for bundle
33 repositories.
33 repositories.
34
34
35 To use, activate the extension and set the ``storage.new-repo-backend`` config
35 To use, activate the extension and set the ``storage.new-repo-backend`` config
36 option to ``sqlite`` to enable new repositories to use SQLite for storage.
36 option to ``sqlite`` to enable new repositories to use SQLite for storage.
37 """
37 """
38
38
39 # To run the test suite with repos using SQLite by default, execute the
39 # To run the test suite with repos using SQLite by default, execute the
40 # following:
40 # following:
41 #
41 #
42 # HGREPOFEATURES="sqlitestore" run-tests.py \
42 # HGREPOFEATURES="sqlitestore" run-tests.py \
43 # --extra-config-opt extensions.sqlitestore= \
43 # --extra-config-opt extensions.sqlitestore= \
44 # --extra-config-opt storage.new-repo-backend=sqlite
44 # --extra-config-opt storage.new-repo-backend=sqlite
45
45
46 from __future__ import absolute_import
46 from __future__ import absolute_import
47
47
48 import sqlite3
48 import sqlite3
49 import struct
49 import struct
50 import threading
50 import threading
51 import zlib
51 import zlib
52
52
53 from mercurial.i18n import _
53 from mercurial.i18n import _
54 from mercurial.node import (
54 from mercurial.node import (
55 nullid,
55 nullid,
56 nullrev,
56 nullrev,
57 short,
57 short,
58 )
58 )
59 from mercurial.thirdparty import attr
59 from mercurial.thirdparty import attr
60 from mercurial import (
60 from mercurial import (
61 ancestor,
61 ancestor,
62 dagop,
62 dagop,
63 encoding,
63 encoding,
64 error,
64 error,
65 extensions,
65 extensions,
66 localrepo,
66 localrepo,
67 mdiff,
67 mdiff,
68 pycompat,
68 pycompat,
69 registrar,
69 registrar,
70 requirements,
70 requirements,
71 util,
71 util,
72 verify,
72 verify,
73 )
73 )
74 from mercurial.interfaces import (
74 from mercurial.interfaces import (
75 repository,
75 repository,
76 util as interfaceutil,
76 util as interfaceutil,
77 )
77 )
78 from mercurial.utils import (
78 from mercurial.utils import (
79 hashutil,
79 hashutil,
80 storageutil,
80 storageutil,
81 )
81 )
82
82
83 try:
83 try:
84 from mercurial import zstd
84 from mercurial import zstd
85
85
86 zstd.__version__
86 zstd.__version__
87 except ImportError:
87 except ImportError:
88 zstd = None
88 zstd = None
89
89
90 configtable = {}
90 configtable = {}
91 configitem = registrar.configitem(configtable)
91 configitem = registrar.configitem(configtable)
92
92
93 # experimental config: storage.sqlite.compression
93 # experimental config: storage.sqlite.compression
94 configitem(
94 configitem(
95 b'storage',
95 b'storage',
96 b'sqlite.compression',
96 b'sqlite.compression',
97 default=b'zstd' if zstd else b'zlib',
97 default=b'zstd' if zstd else b'zlib',
98 experimental=True,
98 experimental=True,
99 )
99 )
100
100
101 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
101 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
102 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
102 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
103 # be specifying the version(s) of Mercurial they are tested with, or
103 # be specifying the version(s) of Mercurial they are tested with, or
104 # leave the attribute unspecified.
104 # leave the attribute unspecified.
105 testedwith = b'ships-with-hg-core'
105 testedwith = b'ships-with-hg-core'
106
106
107 REQUIREMENT = b'exp-sqlite-001'
107 REQUIREMENT = b'exp-sqlite-001'
108 REQUIREMENT_ZSTD = b'exp-sqlite-comp-001=zstd'
108 REQUIREMENT_ZSTD = b'exp-sqlite-comp-001=zstd'
109 REQUIREMENT_ZLIB = b'exp-sqlite-comp-001=zlib'
109 REQUIREMENT_ZLIB = b'exp-sqlite-comp-001=zlib'
110 REQUIREMENT_NONE = b'exp-sqlite-comp-001=none'
110 REQUIREMENT_NONE = b'exp-sqlite-comp-001=none'
111 REQUIREMENT_SHALLOW_FILES = b'exp-sqlite-shallow-files'
111 REQUIREMENT_SHALLOW_FILES = b'exp-sqlite-shallow-files'
112
112
113 CURRENT_SCHEMA_VERSION = 1
113 CURRENT_SCHEMA_VERSION = 1
114
114
115 COMPRESSION_NONE = 1
115 COMPRESSION_NONE = 1
116 COMPRESSION_ZSTD = 2
116 COMPRESSION_ZSTD = 2
117 COMPRESSION_ZLIB = 3
117 COMPRESSION_ZLIB = 3
118
118
119 FLAG_CENSORED = 1
119 FLAG_CENSORED = 1
120 FLAG_MISSING_P1 = 2
120 FLAG_MISSING_P1 = 2
121 FLAG_MISSING_P2 = 4
121 FLAG_MISSING_P2 = 4
122
122
123 CREATE_SCHEMA = [
123 CREATE_SCHEMA = [
124 # Deltas are stored as content-indexed blobs.
124 # Deltas are stored as content-indexed blobs.
125 # compression column holds COMPRESSION_* constant for how the
125 # compression column holds COMPRESSION_* constant for how the
126 # delta is encoded.
126 # delta is encoded.
127 'CREATE TABLE delta ('
127 'CREATE TABLE delta ('
128 ' id INTEGER PRIMARY KEY, '
128 ' id INTEGER PRIMARY KEY, '
129 ' compression INTEGER NOT NULL, '
129 ' compression INTEGER NOT NULL, '
130 ' hash BLOB UNIQUE ON CONFLICT ABORT, '
130 ' hash BLOB UNIQUE ON CONFLICT ABORT, '
131 ' delta BLOB NOT NULL '
131 ' delta BLOB NOT NULL '
132 ')',
132 ')',
133 # Tracked paths are denormalized to integers to avoid redundant
133 # Tracked paths are denormalized to integers to avoid redundant
134 # storage of the path name.
134 # storage of the path name.
135 'CREATE TABLE filepath ('
135 'CREATE TABLE filepath ('
136 ' id INTEGER PRIMARY KEY, '
136 ' id INTEGER PRIMARY KEY, '
137 ' path BLOB NOT NULL '
137 ' path BLOB NOT NULL '
138 ')',
138 ')',
139 'CREATE UNIQUE INDEX filepath_path ON filepath (path)',
139 'CREATE UNIQUE INDEX filepath_path ON filepath (path)',
140 # We have a single table for all file revision data.
140 # We have a single table for all file revision data.
141 # Each file revision is uniquely described by a (path, rev) and
141 # Each file revision is uniquely described by a (path, rev) and
142 # (path, node).
142 # (path, node).
143 #
143 #
144 # Revision data is stored as a pointer to the delta producing this
144 # Revision data is stored as a pointer to the delta producing this
145 # revision and the file revision whose delta should be applied before
145 # revision and the file revision whose delta should be applied before
146 # that one. One can reconstruct the delta chain by recursively following
146 # that one. One can reconstruct the delta chain by recursively following
147 # the delta base revision pointers until one encounters NULL.
147 # the delta base revision pointers until one encounters NULL.
148 #
148 #
149 # flags column holds bitwise integer flags controlling storage options.
149 # flags column holds bitwise integer flags controlling storage options.
150 # These flags are defined by the FLAG_* constants.
150 # These flags are defined by the FLAG_* constants.
151 'CREATE TABLE fileindex ('
151 'CREATE TABLE fileindex ('
152 ' id INTEGER PRIMARY KEY, '
152 ' id INTEGER PRIMARY KEY, '
153 ' pathid INTEGER REFERENCES filepath(id), '
153 ' pathid INTEGER REFERENCES filepath(id), '
154 ' revnum INTEGER NOT NULL, '
154 ' revnum INTEGER NOT NULL, '
155 ' p1rev INTEGER NOT NULL, '
155 ' p1rev INTEGER NOT NULL, '
156 ' p2rev INTEGER NOT NULL, '
156 ' p2rev INTEGER NOT NULL, '
157 ' linkrev INTEGER NOT NULL, '
157 ' linkrev INTEGER NOT NULL, '
158 ' flags INTEGER NOT NULL, '
158 ' flags INTEGER NOT NULL, '
159 ' deltaid INTEGER REFERENCES delta(id), '
159 ' deltaid INTEGER REFERENCES delta(id), '
160 ' deltabaseid INTEGER REFERENCES fileindex(id), '
160 ' deltabaseid INTEGER REFERENCES fileindex(id), '
161 ' node BLOB NOT NULL '
161 ' node BLOB NOT NULL '
162 ')',
162 ')',
163 'CREATE UNIQUE INDEX fileindex_pathrevnum '
163 'CREATE UNIQUE INDEX fileindex_pathrevnum '
164 ' ON fileindex (pathid, revnum)',
164 ' ON fileindex (pathid, revnum)',
165 'CREATE UNIQUE INDEX fileindex_pathnode ON fileindex (pathid, node)',
165 'CREATE UNIQUE INDEX fileindex_pathnode ON fileindex (pathid, node)',
166 # Provide a view over all file data for convenience.
166 # Provide a view over all file data for convenience.
167 'CREATE VIEW filedata AS '
167 'CREATE VIEW filedata AS '
168 'SELECT '
168 'SELECT '
169 ' fileindex.id AS id, '
169 ' fileindex.id AS id, '
170 ' filepath.id AS pathid, '
170 ' filepath.id AS pathid, '
171 ' filepath.path AS path, '
171 ' filepath.path AS path, '
172 ' fileindex.revnum AS revnum, '
172 ' fileindex.revnum AS revnum, '
173 ' fileindex.node AS node, '
173 ' fileindex.node AS node, '
174 ' fileindex.p1rev AS p1rev, '
174 ' fileindex.p1rev AS p1rev, '
175 ' fileindex.p2rev AS p2rev, '
175 ' fileindex.p2rev AS p2rev, '
176 ' fileindex.linkrev AS linkrev, '
176 ' fileindex.linkrev AS linkrev, '
177 ' fileindex.flags AS flags, '
177 ' fileindex.flags AS flags, '
178 ' fileindex.deltaid AS deltaid, '
178 ' fileindex.deltaid AS deltaid, '
179 ' fileindex.deltabaseid AS deltabaseid '
179 ' fileindex.deltabaseid AS deltabaseid '
180 'FROM filepath, fileindex '
180 'FROM filepath, fileindex '
181 'WHERE fileindex.pathid=filepath.id',
181 'WHERE fileindex.pathid=filepath.id',
182 'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
182 'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
183 ]
183 ]
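A minimal illustrative sketch, not part of the original file, of how the deltabaseid pointers described in the schema comments can be walked with plain sqlite3. Here ``conn`` is assumed to be an open connection to a database created with the schema above; the returned list holds delta ids, newest first, with the chain's fulltext base last.

def walk_delta_chain(conn, pathid, node):
    # Follow deltabaseid pointers until NULL, collecting delta ids.
    chain = []
    row = conn.execute(
        'SELECT deltaid, deltabaseid FROM fileindex '
        'WHERE pathid=? AND node=?', (pathid, node)).fetchone()
    while row is not None:
        chain.append(row[0])
        if row[1] is None:
            break
        row = conn.execute(
            'SELECT deltaid, deltabaseid FROM fileindex WHERE id=?',
            (row[1],)).fetchone()
    return chain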
184
184
185
185
186 def resolvedeltachain(db, pathid, node, revisioncache, stoprids, zstddctx=None):
186 def resolvedeltachain(db, pathid, node, revisioncache, stoprids, zstddctx=None):
187 """Resolve a delta chain for a file node."""
187 """Resolve a delta chain for a file node."""
188
188
189 # TODO the "not in ({stops})" here is possibly slowing down the query
189 # TODO the "not in ({stops})" here is possibly slowing down the query
190 # because it needs to perform the lookup on every recursive invocation.
190 # because it needs to perform the lookup on every recursive invocation.
191 # This could possibly be faster if we created a temporary query with
191 # This could possibly be faster if we created a temporary query with
192 # baseid "poisoned" to null and limited the recursive filter to
192 # baseid "poisoned" to null and limited the recursive filter to
193 # "is not null".
193 # "is not null".
194 res = db.execute(
194 res = db.execute(
195 'WITH RECURSIVE '
195 'WITH RECURSIVE '
196 ' deltachain(deltaid, baseid) AS ('
196 ' deltachain(deltaid, baseid) AS ('
197 ' SELECT deltaid, deltabaseid FROM fileindex '
197 ' SELECT deltaid, deltabaseid FROM fileindex '
198 ' WHERE pathid=? AND node=? '
198 ' WHERE pathid=? AND node=? '
199 ' UNION ALL '
199 ' UNION ALL '
200 ' SELECT fileindex.deltaid, deltabaseid '
200 ' SELECT fileindex.deltaid, deltabaseid '
201 ' FROM fileindex, deltachain '
201 ' FROM fileindex, deltachain '
202 ' WHERE '
202 ' WHERE '
203 ' fileindex.id=deltachain.baseid '
203 ' fileindex.id=deltachain.baseid '
204 ' AND deltachain.baseid IS NOT NULL '
204 ' AND deltachain.baseid IS NOT NULL '
205 ' AND fileindex.id NOT IN ({stops}) '
205 ' AND fileindex.id NOT IN ({stops}) '
206 ' ) '
206 ' ) '
207 'SELECT deltachain.baseid, compression, delta '
207 'SELECT deltachain.baseid, compression, delta '
208 'FROM deltachain, delta '
208 'FROM deltachain, delta '
209 'WHERE delta.id=deltachain.deltaid'.format(
209 'WHERE delta.id=deltachain.deltaid'.format(
210 stops=','.join(['?'] * len(stoprids))
210 stops=','.join(['?'] * len(stoprids))
211 ),
211 ),
212 tuple([pathid, node] + list(stoprids.keys())),
212 tuple([pathid, node] + list(stoprids.keys())),
213 )
213 )
214
214
215 deltas = []
215 deltas = []
216 lastdeltabaseid = None
216 lastdeltabaseid = None
217
217
218 for deltabaseid, compression, delta in res:
218 for deltabaseid, compression, delta in res:
219 lastdeltabaseid = deltabaseid
219 lastdeltabaseid = deltabaseid
220
220
221 if compression == COMPRESSION_ZSTD:
221 if compression == COMPRESSION_ZSTD:
222 delta = zstddctx.decompress(delta)
222 delta = zstddctx.decompress(delta)
223 elif compression == COMPRESSION_NONE:
223 elif compression == COMPRESSION_NONE:
224 delta = delta
224 delta = delta
225 elif compression == COMPRESSION_ZLIB:
225 elif compression == COMPRESSION_ZLIB:
226 delta = zlib.decompress(delta)
226 delta = zlib.decompress(delta)
227 else:
227 else:
228 raise SQLiteStoreError(
228 raise SQLiteStoreError(
229 b'unhandled compression type: %d' % compression
229 b'unhandled compression type: %d' % compression
230 )
230 )
231
231
232 deltas.append(delta)
232 deltas.append(delta)
233
233
234 if lastdeltabaseid in stoprids:
234 if lastdeltabaseid in stoprids:
235 basetext = revisioncache[stoprids[lastdeltabaseid]]
235 basetext = revisioncache[stoprids[lastdeltabaseid]]
236 else:
236 else:
237 basetext = deltas.pop()
237 basetext = deltas.pop()
238
238
239 deltas.reverse()
239 deltas.reverse()
240 fulltext = mdiff.patches(basetext, deltas)
240 fulltext = mdiff.patches(basetext, deltas)
241
241
242 # SQLite returns buffer instances for blob columns on Python 2. This
242 # SQLite returns buffer instances for blob columns on Python 2. This
243 # type can propagate through the delta application layer. Because
243 # type can propagate through the delta application layer. Because
244 # downstream callers assume revisions are bytes, cast as needed.
244 # downstream callers assume revisions are bytes, cast as needed.
245 if not isinstance(fulltext, bytes):
245 if not isinstance(fulltext, bytes):
246 fulltext = bytes(fulltext)
246 fulltext = bytes(fulltext)
247
247
248 return fulltext
248 return fulltext
249
249
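A small sketch, assuming mercurial.mdiff is importable, of the application order resolvedeltachain() above relies on: the recursive query yields deltas newest-first, so the list is reversed and the oldest delta is applied to the base text first.

from mercurial import mdiff

def apply_chain(basetext, deltas_newest_first):
    deltas = list(deltas_newest_first)
    deltas.reverse()  # oldest delta must be applied first
    return mdiff.patches(basetext, deltas)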
250
250
251 def insertdelta(db, compression, hash, delta):
251 def insertdelta(db, compression, hash, delta):
252 try:
252 try:
253 return db.execute(
253 return db.execute(
254 'INSERT INTO delta (compression, hash, delta) VALUES (?, ?, ?)',
254 'INSERT INTO delta (compression, hash, delta) VALUES (?, ?, ?)',
255 (compression, hash, delta),
255 (compression, hash, delta),
256 ).lastrowid
256 ).lastrowid
257 except sqlite3.IntegrityError:
257 except sqlite3.IntegrityError:
258 return db.execute(
258 return db.execute(
259 'SELECT id FROM delta WHERE hash=?', (hash,)
259 'SELECT id FROM delta WHERE hash=?', (hash,)
260 ).fetchone()[0]
260 ).fetchone()[0]
261
261
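A hedged usage example for insertdelta(): inserting a blob whose hash already exists returns the existing row id instead of creating a duplicate. The in-memory table below mirrors the delta table from CREATE_SCHEMA; COMPRESSION_NONE is assumed to be this module's constant for uncompressed blobs.

import hashlib
import sqlite3

db = sqlite3.connect(':memory:')
db.execute(
    'CREATE TABLE delta ('
    ' id INTEGER PRIMARY KEY, compression INTEGER NOT NULL, '
    ' hash BLOB UNIQUE ON CONFLICT ABORT, delta BLOB NOT NULL)')
blob = b'example delta payload'
digest = hashlib.sha1(blob).digest()
rid1 = insertdelta(db, COMPRESSION_NONE, digest, blob)
rid2 = insertdelta(db, COMPRESSION_NONE, digest, blob)
assert rid1 == rid2  # de-duplicated via the UNIQUE hash column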
262
262
263 class SQLiteStoreError(error.StorageError):
263 class SQLiteStoreError(error.StorageError):
264 pass
264 pass
265
265
266
266
267 @attr.s
267 @attr.s
268 class revisionentry(object):
268 class revisionentry(object):
269 rid = attr.ib()
269 rid = attr.ib()
270 rev = attr.ib()
270 rev = attr.ib()
271 node = attr.ib()
271 node = attr.ib()
272 p1rev = attr.ib()
272 p1rev = attr.ib()
273 p2rev = attr.ib()
273 p2rev = attr.ib()
274 p1node = attr.ib()
274 p1node = attr.ib()
275 p2node = attr.ib()
275 p2node = attr.ib()
276 linkrev = attr.ib()
276 linkrev = attr.ib()
277 flags = attr.ib()
277 flags = attr.ib()
278
278
279
279
280 @interfaceutil.implementer(repository.irevisiondelta)
280 @interfaceutil.implementer(repository.irevisiondelta)
281 @attr.s(slots=True)
281 @attr.s(slots=True)
282 class sqliterevisiondelta(object):
282 class sqliterevisiondelta(object):
283 node = attr.ib()
283 node = attr.ib()
284 p1node = attr.ib()
284 p1node = attr.ib()
285 p2node = attr.ib()
285 p2node = attr.ib()
286 basenode = attr.ib()
286 basenode = attr.ib()
287 flags = attr.ib()
287 flags = attr.ib()
288 baserevisionsize = attr.ib()
288 baserevisionsize = attr.ib()
289 revision = attr.ib()
289 revision = attr.ib()
290 delta = attr.ib()
290 delta = attr.ib()
291 linknode = attr.ib(default=None)
291 linknode = attr.ib(default=None)
292
292
293
293
294 @interfaceutil.implementer(repository.iverifyproblem)
294 @interfaceutil.implementer(repository.iverifyproblem)
295 @attr.s(frozen=True)
295 @attr.s(frozen=True)
296 class sqliteproblem(object):
296 class sqliteproblem(object):
297 warning = attr.ib(default=None)
297 warning = attr.ib(default=None)
298 error = attr.ib(default=None)
298 error = attr.ib(default=None)
299 node = attr.ib(default=None)
299 node = attr.ib(default=None)
300
300
301
301
302 @interfaceutil.implementer(repository.ifilestorage)
302 @interfaceutil.implementer(repository.ifilestorage)
303 class sqlitefilestore(object):
303 class sqlitefilestore(object):
304 """Implements storage for an individual tracked path."""
304 """Implements storage for an individual tracked path."""
305
305
306 def __init__(self, db, path, compression):
306 def __init__(self, db, path, compression):
307 self._db = db
307 self._db = db
308 self._path = path
308 self._path = path
309
309
310 self._pathid = None
310 self._pathid = None
311
311
312 # revnum -> node
312 # revnum -> node
313 self._revtonode = {}
313 self._revtonode = {}
314 # node -> revnum
314 # node -> revnum
315 self._nodetorev = {}
315 self._nodetorev = {}
316 # node -> data structure
316 # node -> data structure
317 self._revisions = {}
317 self._revisions = {}
318
318
319 self._revisioncache = util.lrucachedict(10)
319 self._revisioncache = util.lrucachedict(10)
320
320
321 self._compengine = compression
321 self._compengine = compression
322
322
323 if compression == b'zstd':
323 if compression == b'zstd':
324 self._cctx = zstd.ZstdCompressor(level=3)
324 self._cctx = zstd.ZstdCompressor(level=3)
325 self._dctx = zstd.ZstdDecompressor()
325 self._dctx = zstd.ZstdDecompressor()
326 else:
326 else:
327 self._cctx = None
327 self._cctx = None
328 self._dctx = None
328 self._dctx = None
329
329
330 self._refreshindex()
330 self._refreshindex()
331
331
332 def _refreshindex(self):
332 def _refreshindex(self):
333 self._revtonode = {}
333 self._revtonode = {}
334 self._nodetorev = {}
334 self._nodetorev = {}
335 self._revisions = {}
335 self._revisions = {}
336
336
337 res = list(
337 res = list(
338 self._db.execute(
338 self._db.execute(
339 'SELECT id FROM filepath WHERE path=?', (self._path,)
339 'SELECT id FROM filepath WHERE path=?', (self._path,)
340 )
340 )
341 )
341 )
342
342
343 if not res:
343 if not res:
344 self._pathid = None
344 self._pathid = None
345 return
345 return
346
346
347 self._pathid = res[0][0]
347 self._pathid = res[0][0]
348
348
349 res = self._db.execute(
349 res = self._db.execute(
350 'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags '
350 'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags '
351 'FROM fileindex '
351 'FROM fileindex '
352 'WHERE pathid=? '
352 'WHERE pathid=? '
353 'ORDER BY revnum ASC',
353 'ORDER BY revnum ASC',
354 (self._pathid,),
354 (self._pathid,),
355 )
355 )
356
356
357 for i, row in enumerate(res):
357 for i, row in enumerate(res):
358 rid, rev, node, p1rev, p2rev, linkrev, flags = row
358 rid, rev, node, p1rev, p2rev, linkrev, flags = row
359
359
360 if i != rev:
360 if i != rev:
361 raise SQLiteStoreError(
361 raise SQLiteStoreError(
362 _(b'sqlite database has inconsistent revision numbers')
362 _(b'sqlite database has inconsistent revision numbers')
363 )
363 )
364
364
365 if p1rev == nullrev:
365 if p1rev == nullrev:
366 p1node = nullid
366 p1node = nullid
367 else:
367 else:
368 p1node = self._revtonode[p1rev]
368 p1node = self._revtonode[p1rev]
369
369
370 if p2rev == nullrev:
370 if p2rev == nullrev:
371 p2node = nullid
371 p2node = nullid
372 else:
372 else:
373 p2node = self._revtonode[p2rev]
373 p2node = self._revtonode[p2rev]
374
374
375 entry = revisionentry(
375 entry = revisionentry(
376 rid=rid,
376 rid=rid,
377 rev=rev,
377 rev=rev,
378 node=node,
378 node=node,
379 p1rev=p1rev,
379 p1rev=p1rev,
380 p2rev=p2rev,
380 p2rev=p2rev,
381 p1node=p1node,
381 p1node=p1node,
382 p2node=p2node,
382 p2node=p2node,
383 linkrev=linkrev,
383 linkrev=linkrev,
384 flags=flags,
384 flags=flags,
385 )
385 )
386
386
387 self._revtonode[rev] = node
387 self._revtonode[rev] = node
388 self._nodetorev[node] = rev
388 self._nodetorev[node] = rev
389 self._revisions[node] = entry
389 self._revisions[node] = entry
390
390
391 # Start of ifileindex interface.
391 # Start of ifileindex interface.
392
392
393 def __len__(self):
393 def __len__(self):
394 return len(self._revisions)
394 return len(self._revisions)
395
395
396 def __iter__(self):
396 def __iter__(self):
397 return iter(pycompat.xrange(len(self._revisions)))
397 return iter(pycompat.xrange(len(self._revisions)))
398
398
399 def hasnode(self, node):
399 def hasnode(self, node):
400 if node == nullid:
400 if node == nullid:
401 return False
401 return False
402
402
403 return node in self._nodetorev
403 return node in self._nodetorev
404
404
405 def revs(self, start=0, stop=None):
405 def revs(self, start=0, stop=None):
406 return storageutil.iterrevs(
406 return storageutil.iterrevs(
407 len(self._revisions), start=start, stop=stop
407 len(self._revisions), start=start, stop=stop
408 )
408 )
409
409
410 def parents(self, node):
410 def parents(self, node):
411 if node == nullid:
411 if node == nullid:
412 return nullid, nullid
412 return nullid, nullid
413
413
414 if node not in self._revisions:
414 if node not in self._revisions:
415 raise error.LookupError(node, self._path, _(b'no node'))
415 raise error.LookupError(node, self._path, _(b'no node'))
416
416
417 entry = self._revisions[node]
417 entry = self._revisions[node]
418 return entry.p1node, entry.p2node
418 return entry.p1node, entry.p2node
419
419
420 def parentrevs(self, rev):
420 def parentrevs(self, rev):
421 if rev == nullrev:
421 if rev == nullrev:
422 return nullrev, nullrev
422 return nullrev, nullrev
423
423
424 if rev not in self._revtonode:
424 if rev not in self._revtonode:
425 raise IndexError(rev)
425 raise IndexError(rev)
426
426
427 entry = self._revisions[self._revtonode[rev]]
427 entry = self._revisions[self._revtonode[rev]]
428 return entry.p1rev, entry.p2rev
428 return entry.p1rev, entry.p2rev
429
429
430 def rev(self, node):
430 def rev(self, node):
431 if node == nullid:
431 if node == nullid:
432 return nullrev
432 return nullrev
433
433
434 if node not in self._nodetorev:
434 if node not in self._nodetorev:
435 raise error.LookupError(node, self._path, _(b'no node'))
435 raise error.LookupError(node, self._path, _(b'no node'))
436
436
437 return self._nodetorev[node]
437 return self._nodetorev[node]
438
438
439 def node(self, rev):
439 def node(self, rev):
440 if rev == nullrev:
440 if rev == nullrev:
441 return nullid
441 return nullid
442
442
443 if rev not in self._revtonode:
443 if rev not in self._revtonode:
444 raise IndexError(rev)
444 raise IndexError(rev)
445
445
446 return self._revtonode[rev]
446 return self._revtonode[rev]
447
447
448 def lookup(self, node):
448 def lookup(self, node):
449 return storageutil.fileidlookup(self, node, self._path)
449 return storageutil.fileidlookup(self, node, self._path)
450
450
451 def linkrev(self, rev):
451 def linkrev(self, rev):
452 if rev == nullrev:
452 if rev == nullrev:
453 return nullrev
453 return nullrev
454
454
455 if rev not in self._revtonode:
455 if rev not in self._revtonode:
456 raise IndexError(rev)
456 raise IndexError(rev)
457
457
458 entry = self._revisions[self._revtonode[rev]]
458 entry = self._revisions[self._revtonode[rev]]
459 return entry.linkrev
459 return entry.linkrev
460
460
461 def iscensored(self, rev):
461 def iscensored(self, rev):
462 if rev == nullrev:
462 if rev == nullrev:
463 return False
463 return False
464
464
465 if rev not in self._revtonode:
465 if rev not in self._revtonode:
466 raise IndexError(rev)
466 raise IndexError(rev)
467
467
468 return self._revisions[self._revtonode[rev]].flags & FLAG_CENSORED
468 return self._revisions[self._revtonode[rev]].flags & FLAG_CENSORED
469
469
470 def commonancestorsheads(self, node1, node2):
470 def commonancestorsheads(self, node1, node2):
471 rev1 = self.rev(node1)
471 rev1 = self.rev(node1)
472 rev2 = self.rev(node2)
472 rev2 = self.rev(node2)
473
473
474 ancestors = ancestor.commonancestorsheads(self.parentrevs, rev1, rev2)
474 ancestors = ancestor.commonancestorsheads(self.parentrevs, rev1, rev2)
475 return pycompat.maplist(self.node, ancestors)
475 return pycompat.maplist(self.node, ancestors)
476
476
477 def descendants(self, revs):
477 def descendants(self, revs):
478 # TODO we could implement this using a recursive SQL query, which
478 # TODO we could implement this using a recursive SQL query, which
479 # might be faster.
479 # might be faster.
480 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
480 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
481
481
482 def heads(self, start=None, stop=None):
482 def heads(self, start=None, stop=None):
483 if start is None and stop is None:
483 if start is None and stop is None:
484 if not len(self):
484 if not len(self):
485 return [nullid]
485 return [nullid]
486
486
487 startrev = self.rev(start) if start is not None else nullrev
487 startrev = self.rev(start) if start is not None else nullrev
488 stoprevs = {self.rev(n) for n in stop or []}
488 stoprevs = {self.rev(n) for n in stop or []}
489
489
490 revs = dagop.headrevssubset(
490 revs = dagop.headrevssubset(
491 self.revs, self.parentrevs, startrev=startrev, stoprevs=stoprevs
491 self.revs, self.parentrevs, startrev=startrev, stoprevs=stoprevs
492 )
492 )
493
493
494 return [self.node(rev) for rev in revs]
494 return [self.node(rev) for rev in revs]
495
495
496 def children(self, node):
496 def children(self, node):
497 rev = self.rev(node)
497 rev = self.rev(node)
498
498
499 res = self._db.execute(
499 res = self._db.execute(
500 'SELECT'
500 'SELECT'
501 ' node '
501 ' node '
502 ' FROM filedata '
502 ' FROM filedata '
503 ' WHERE path=? AND (p1rev=? OR p2rev=?) '
503 ' WHERE path=? AND (p1rev=? OR p2rev=?) '
504 ' ORDER BY revnum ASC',
504 ' ORDER BY revnum ASC',
505 (self._path, rev, rev),
505 (self._path, rev, rev),
506 )
506 )
507
507
508 return [row[0] for row in res]
508 return [row[0] for row in res]
509
509
510 # End of ifileindex interface.
510 # End of ifileindex interface.
511
511
512 # Start of ifiledata interface.
512 # Start of ifiledata interface.
513
513
514 def size(self, rev):
514 def size(self, rev):
515 if rev == nullrev:
515 if rev == nullrev:
516 return 0
516 return 0
517
517
518 if rev not in self._revtonode:
518 if rev not in self._revtonode:
519 raise IndexError(rev)
519 raise IndexError(rev)
520
520
521 node = self._revtonode[rev]
521 node = self._revtonode[rev]
522
522
523 if self.renamed(node):
523 if self.renamed(node):
524 return len(self.read(node))
524 return len(self.read(node))
525
525
526 return len(self.revision(node))
526 return len(self.revision(node))
527
527
528 def revision(self, node, raw=False, _verifyhash=True):
528 def revision(self, node, raw=False, _verifyhash=True):
529 if node in (nullid, nullrev):
529 if node in (nullid, nullrev):
530 return b''
530 return b''
531
531
532 if isinstance(node, int):
532 if isinstance(node, int):
533 node = self.node(node)
533 node = self.node(node)
534
534
535 if node not in self._nodetorev:
535 if node not in self._nodetorev:
536 raise error.LookupError(node, self._path, _(b'no node'))
536 raise error.LookupError(node, self._path, _(b'no node'))
537
537
538 if node in self._revisioncache:
538 if node in self._revisioncache:
539 return self._revisioncache[node]
539 return self._revisioncache[node]
540
540
541 # Because we have a fulltext revision cache, we are able to
541 # Because we have a fulltext revision cache, we are able to
542 # short-circuit delta chain traversal and decompression as soon as
542 # short-circuit delta chain traversal and decompression as soon as
543 # we encounter a revision in the cache.
543 # we encounter a revision in the cache.
544
544
545 stoprids = {self._revisions[n].rid: n for n in self._revisioncache}
545 stoprids = {self._revisions[n].rid: n for n in self._revisioncache}
546
546
547 if not stoprids:
547 if not stoprids:
548 stoprids[-1] = None
548 stoprids[-1] = None
549
549
550 fulltext = resolvedeltachain(
550 fulltext = resolvedeltachain(
551 self._db,
551 self._db,
552 self._pathid,
552 self._pathid,
553 node,
553 node,
554 self._revisioncache,
554 self._revisioncache,
555 stoprids,
555 stoprids,
556 zstddctx=self._dctx,
556 zstddctx=self._dctx,
557 )
557 )
558
558
559 # Don't verify hashes if parent nodes were rewritten, as the hash
559 # Don't verify hashes if parent nodes were rewritten, as the hash
560 # wouldn't verify.
560 # wouldn't verify.
561 if self._revisions[node].flags & (FLAG_MISSING_P1 | FLAG_MISSING_P2):
561 if self._revisions[node].flags & (FLAG_MISSING_P1 | FLAG_MISSING_P2):
562 _verifyhash = False
562 _verifyhash = False
563
563
564 if _verifyhash:
564 if _verifyhash:
565 self._checkhash(fulltext, node)
565 self._checkhash(fulltext, node)
566 self._revisioncache[node] = fulltext
566 self._revisioncache[node] = fulltext
567
567
568 return fulltext
568 return fulltext
569
569
570 def rawdata(self, *args, **kwargs):
570 def rawdata(self, *args, **kwargs):
571 return self.revision(*args, **kwargs)
571 return self.revision(*args, **kwargs)
572
572
573 def read(self, node):
573 def read(self, node):
574 return storageutil.filtermetadata(self.revision(node))
574 return storageutil.filtermetadata(self.revision(node))
575
575
576 def renamed(self, node):
576 def renamed(self, node):
577 return storageutil.filerevisioncopied(self, node)
577 return storageutil.filerevisioncopied(self, node)
578
578
579 def cmp(self, node, fulltext):
579 def cmp(self, node, fulltext):
580 return not storageutil.filedataequivalent(self, node, fulltext)
580 return not storageutil.filedataequivalent(self, node, fulltext)
581
581
582 def emitrevisions(
582 def emitrevisions(
583 self,
583 self,
584 nodes,
584 nodes,
585 nodesorder=None,
585 nodesorder=None,
586 revisiondata=False,
586 revisiondata=False,
587 assumehaveparentrevisions=False,
587 assumehaveparentrevisions=False,
588 deltamode=repository.CG_DELTAMODE_STD,
588 deltamode=repository.CG_DELTAMODE_STD,
589 ):
589 ):
590 if nodesorder not in (b'nodes', b'storage', b'linear', None):
590 if nodesorder not in (b'nodes', b'storage', b'linear', None):
591 raise error.ProgrammingError(
591 raise error.ProgrammingError(
592 b'unhandled value for nodesorder: %s' % nodesorder
592 b'unhandled value for nodesorder: %s' % nodesorder
593 )
593 )
594
594
595 nodes = [n for n in nodes if n != nullid]
595 nodes = [n for n in nodes if n != nullid]
596
596
597 if not nodes:
597 if not nodes:
598 return
598 return
599
599
600 # TODO perform in a single query.
600 # TODO perform in a single query.
601 res = self._db.execute(
601 res = self._db.execute(
602 'SELECT revnum, deltaid FROM fileindex '
602 'SELECT revnum, deltaid FROM fileindex '
603 'WHERE pathid=? '
603 'WHERE pathid=? '
604 ' AND node in (%s)' % (','.join(['?'] * len(nodes))),
604 ' AND node in (%s)' % (','.join(['?'] * len(nodes))),
605 tuple([self._pathid] + nodes),
605 tuple([self._pathid] + nodes),
606 )
606 )
607
607
608 deltabases = {}
608 deltabases = {}
609
609
610 for rev, deltaid in res:
610 for rev, deltaid in res:
611 res = self._db.execute(
611 res = self._db.execute(
612 'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
612 'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
613 (self._pathid, deltaid),
613 (self._pathid, deltaid),
614 )
614 )
615 deltabases[rev] = res.fetchone()[0]
615 deltabases[rev] = res.fetchone()[0]
616
616
617 # TODO define revdifffn so we can use delta from storage.
617 # TODO define revdifffn so we can use delta from storage.
618 for delta in storageutil.emitrevisions(
618 for delta in storageutil.emitrevisions(
619 self,
619 self,
620 nodes,
620 nodes,
621 nodesorder,
621 nodesorder,
622 sqliterevisiondelta,
622 sqliterevisiondelta,
623 deltaparentfn=deltabases.__getitem__,
623 deltaparentfn=deltabases.__getitem__,
624 revisiondata=revisiondata,
624 revisiondata=revisiondata,
625 assumehaveparentrevisions=assumehaveparentrevisions,
625 assumehaveparentrevisions=assumehaveparentrevisions,
626 deltamode=deltamode,
626 deltamode=deltamode,
627 ):
627 ):
628
628
629 yield delta
629 yield delta
630
630
631 # End of ifiledata interface.
631 # End of ifiledata interface.
632
632
633 # Start of ifilemutation interface.
633 # Start of ifilemutation interface.
634
634
635 def add(self, filedata, meta, transaction, linkrev, p1, p2):
635 def add(self, filedata, meta, transaction, linkrev, p1, p2):
636 if meta or filedata.startswith(b'\x01\n'):
636 if meta or filedata.startswith(b'\x01\n'):
637 filedata = storageutil.packmeta(meta, filedata)
637 filedata = storageutil.packmeta(meta, filedata)
638
638
639 rev = self.addrevision(filedata, transaction, linkrev, p1, p2)
639 rev = self.addrevision(filedata, transaction, linkrev, p1, p2)
640 return self.node(rev)
640 return self.node(rev)
641
641
642 def addrevision(
642 def addrevision(
643 self,
643 self,
644 revisiondata,
644 revisiondata,
645 transaction,
645 transaction,
646 linkrev,
646 linkrev,
647 p1,
647 p1,
648 p2,
648 p2,
649 node=None,
649 node=None,
650 flags=0,
650 flags=0,
651 cachedelta=None,
651 cachedelta=None,
652 ):
652 ):
653 if flags:
653 if flags:
654 raise SQLiteStoreError(_(b'flags not supported on revisions'))
654 raise SQLiteStoreError(_(b'flags not supported on revisions'))
655
655
656 validatehash = node is not None
656 validatehash = node is not None
657 node = node or storageutil.hashrevisionsha1(revisiondata, p1, p2)
657 node = node or storageutil.hashrevisionsha1(revisiondata, p1, p2)
658
658
659 if validatehash:
659 if validatehash:
660 self._checkhash(revisiondata, node, p1, p2)
660 self._checkhash(revisiondata, node, p1, p2)
661
661
662 rev = self._nodetorev.get(node)
662 rev = self._nodetorev.get(node)
663 if rev is not None:
663 if rev is not None:
664 return rev
664 return rev
665
665
666 rev = self._addrawrevision(
666 rev = self._addrawrevision(
667 node, revisiondata, transaction, linkrev, p1, p2
667 node, revisiondata, transaction, linkrev, p1, p2
668 )
668 )
669
669
670 self._revisioncache[node] = revisiondata
670 self._revisioncache[node] = revisiondata
671 return rev
671 return rev
672
672
673 def addgroup(
673 def addgroup(
674 self,
674 self,
675 deltas,
675 deltas,
676 linkmapper,
676 linkmapper,
677 transaction,
677 transaction,
678 addrevisioncb=None,
678 addrevisioncb=None,
679 duplicaterevisioncb=None,
679 duplicaterevisioncb=None,
680 maybemissingparents=False,
680 maybemissingparents=False,
681 ):
681 ):
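# Annotation (not in the original file): each entry consumed from
# ``deltas`` below is now an 8-tuple of
# (node, p1, p2, linknode, deltabase, delta, wireflags, sidedata);
# the trailing sidedata element is unpacked but not persisted by this
# SQLite backend.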
682 empty = True
682 empty = True
683
683
684 for node, p1, p2, linknode, deltabase, delta, wireflags in deltas:
684 for (
685 node,
686 p1,
687 p2,
688 linknode,
689 deltabase,
690 delta,
691 wireflags,
692 sidedata,
693 ) in deltas:
685 storeflags = 0
694 storeflags = 0
686
695
687 if wireflags & repository.REVISION_FLAG_CENSORED:
696 if wireflags & repository.REVISION_FLAG_CENSORED:
688 storeflags |= FLAG_CENSORED
697 storeflags |= FLAG_CENSORED
689
698
690 if wireflags & ~repository.REVISION_FLAG_CENSORED:
699 if wireflags & ~repository.REVISION_FLAG_CENSORED:
691 raise SQLiteStoreError(b'unhandled revision flag')
700 raise SQLiteStoreError(b'unhandled revision flag')
692
701
693 if maybemissingparents:
702 if maybemissingparents:
694 if p1 != nullid and not self.hasnode(p1):
703 if p1 != nullid and not self.hasnode(p1):
695 p1 = nullid
704 p1 = nullid
696 storeflags |= FLAG_MISSING_P1
705 storeflags |= FLAG_MISSING_P1
697
706
698 if p2 != nullid and not self.hasnode(p2):
707 if p2 != nullid and not self.hasnode(p2):
699 p2 = nullid
708 p2 = nullid
700 storeflags |= FLAG_MISSING_P2
709 storeflags |= FLAG_MISSING_P2
701
710
702 baserev = self.rev(deltabase)
711 baserev = self.rev(deltabase)
703
712
704 # If base is censored, delta must be full replacement in a single
713 # If base is censored, delta must be full replacement in a single
705 # patch operation.
714 # patch operation.
706 if baserev != nullrev and self.iscensored(baserev):
715 if baserev != nullrev and self.iscensored(baserev):
707 hlen = struct.calcsize(b'>lll')
716 hlen = struct.calcsize(b'>lll')
708 oldlen = len(self.rawdata(deltabase, _verifyhash=False))
717 oldlen = len(self.rawdata(deltabase, _verifyhash=False))
709 newlen = len(delta) - hlen
718 newlen = len(delta) - hlen
710
719
711 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
720 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
712 raise error.CensoredBaseError(self._path, deltabase)
721 raise error.CensoredBaseError(self._path, deltabase)
713
722
714 if not (storeflags & FLAG_CENSORED) and storageutil.deltaiscensored(
723 if not (storeflags & FLAG_CENSORED) and storageutil.deltaiscensored(
715 delta, baserev, lambda x: len(self.rawdata(x))
724 delta, baserev, lambda x: len(self.rawdata(x))
716 ):
725 ):
717 storeflags |= FLAG_CENSORED
726 storeflags |= FLAG_CENSORED
718
727
719 linkrev = linkmapper(linknode)
728 linkrev = linkmapper(linknode)
720
729
721 if node in self._revisions:
730 if node in self._revisions:
722 # Possibly reset parents to make them proper.
731 # Possibly reset parents to make them proper.
723 entry = self._revisions[node]
732 entry = self._revisions[node]
724
733
725 if entry.flags & FLAG_MISSING_P1 and p1 != nullid:
734 if entry.flags & FLAG_MISSING_P1 and p1 != nullid:
726 entry.p1node = p1
735 entry.p1node = p1
727 entry.p1rev = self._nodetorev[p1]
736 entry.p1rev = self._nodetorev[p1]
728 entry.flags &= ~FLAG_MISSING_P1
737 entry.flags &= ~FLAG_MISSING_P1
729
738
730 self._db.execute(
739 self._db.execute(
731 'UPDATE fileindex SET p1rev=?, flags=? WHERE id=?',
740 'UPDATE fileindex SET p1rev=?, flags=? WHERE id=?',
732 (self._nodetorev[p1], entry.flags, entry.rid),
741 (self._nodetorev[p1], entry.flags, entry.rid),
733 )
742 )
734
743
735 if entry.flags & FLAG_MISSING_P2 and p2 != nullid:
744 if entry.flags & FLAG_MISSING_P2 and p2 != nullid:
736 entry.p2node = p2
745 entry.p2node = p2
737 entry.p2rev = self._nodetorev[p2]
746 entry.p2rev = self._nodetorev[p2]
738 entry.flags &= ~FLAG_MISSING_P2
747 entry.flags &= ~FLAG_MISSING_P2
739
748
740 self._db.execute(
749 self._db.execute(
741 'UPDATE fileindex SET p2rev=?, flags=? WHERE id=?',
750 'UPDATE fileindex SET p2rev=?, flags=? WHERE id=?',
742 (self._nodetorev[p2], entry.flags, entry.rid),
751 (self._nodetorev[p2], entry.flags, entry.rid),
743 )
752 )
744
753
745 if duplicaterevisioncb:
754 if duplicaterevisioncb:
746 duplicaterevisioncb(self, self.rev(node))
755 duplicaterevisioncb(self, self.rev(node))
747 empty = False
756 empty = False
748 continue
757 continue
749
758
750 if deltabase == nullid:
759 if deltabase == nullid:
751 text = mdiff.patch(b'', delta)
760 text = mdiff.patch(b'', delta)
752 storedelta = None
761 storedelta = None
753 else:
762 else:
754 text = None
763 text = None
755 storedelta = (deltabase, delta)
764 storedelta = (deltabase, delta)
756
765
757 rev = self._addrawrevision(
766 rev = self._addrawrevision(
758 node,
767 node,
759 text,
768 text,
760 transaction,
769 transaction,
761 linkrev,
770 linkrev,
762 p1,
771 p1,
763 p2,
772 p2,
764 storedelta=storedelta,
773 storedelta=storedelta,
765 flags=storeflags,
774 flags=storeflags,
766 )
775 )
767
776
768 if addrevisioncb:
777 if addrevisioncb:
769 addrevisioncb(self, rev)
778 addrevisioncb(self, rev)
770 empty = False
779 empty = False
771
780
772 return not empty
781 return not empty
773
782
774 def censorrevision(self, tr, censornode, tombstone=b''):
783 def censorrevision(self, tr, censornode, tombstone=b''):
775 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
784 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
776
785
777 # This restriction is cargo culted from revlogs and makes no sense for
786 # This restriction is cargo culted from revlogs and makes no sense for
778 # SQLite, since columns can be resized at will.
787 # SQLite, since columns can be resized at will.
779 if len(tombstone) > len(self.rawdata(censornode)):
788 if len(tombstone) > len(self.rawdata(censornode)):
780 raise error.Abort(
789 raise error.Abort(
781 _(b'censor tombstone must be no longer than censored data')
790 _(b'censor tombstone must be no longer than censored data')
782 )
791 )
783
792
784 # We need to replace the censored revision's data with the tombstone.
793 # We need to replace the censored revision's data with the tombstone.
785 # But replacing that data will have implications for delta chains that
794 # But replacing that data will have implications for delta chains that
786 # reference it.
795 # reference it.
787 #
796 #
788 # While "better," more complex strategies are possible, we do something
797 # While "better," more complex strategies are possible, we do something
789 # simple: we find delta chain children of the censored revision and we
798 # simple: we find delta chain children of the censored revision and we
790 # replace those incremental deltas with fulltexts of their corresponding
799 # replace those incremental deltas with fulltexts of their corresponding
791 # revision. Then we delete the now-unreferenced delta and original
800 # revision. Then we delete the now-unreferenced delta and original
792 # revision and insert a replacement.
801 # revision and insert a replacement.
793
802
794 # Find the delta to be censored.
803 # Find the delta to be censored.
795 censoreddeltaid = self._db.execute(
804 censoreddeltaid = self._db.execute(
796 'SELECT deltaid FROM fileindex WHERE id=?',
805 'SELECT deltaid FROM fileindex WHERE id=?',
797 (self._revisions[censornode].rid,),
806 (self._revisions[censornode].rid,),
798 ).fetchone()[0]
807 ).fetchone()[0]
799
808
800 # Find all its delta chain children.
809 # Find all its delta chain children.
801 # TODO once we support storing deltas for !files, we'll need to look
810 # TODO once we support storing deltas for !files, we'll need to look
802 # for those delta chains too.
811 # for those delta chains too.
803 rows = list(
812 rows = list(
804 self._db.execute(
813 self._db.execute(
805 'SELECT id, pathid, node FROM fileindex '
814 'SELECT id, pathid, node FROM fileindex '
806 'WHERE deltabaseid=? OR deltaid=?',
815 'WHERE deltabaseid=? OR deltaid=?',
807 (censoreddeltaid, censoreddeltaid),
816 (censoreddeltaid, censoreddeltaid),
808 )
817 )
809 )
818 )
810
819
811 for row in rows:
820 for row in rows:
812 rid, pathid, node = row
821 rid, pathid, node = row
813
822
814 fulltext = resolvedeltachain(
823 fulltext = resolvedeltachain(
815 self._db, pathid, node, {}, {-1: None}, zstddctx=self._dctx
824 self._db, pathid, node, {}, {-1: None}, zstddctx=self._dctx
816 )
825 )
817
826
818 deltahash = hashutil.sha1(fulltext).digest()
827 deltahash = hashutil.sha1(fulltext).digest()
819
828
820 if self._compengine == b'zstd':
829 if self._compengine == b'zstd':
821 deltablob = self._cctx.compress(fulltext)
830 deltablob = self._cctx.compress(fulltext)
822 compression = COMPRESSION_ZSTD
831 compression = COMPRESSION_ZSTD
823 elif self._compengine == b'zlib':
832 elif self._compengine == b'zlib':
824 deltablob = zlib.compress(fulltext)
833 deltablob = zlib.compress(fulltext)
825 compression = COMPRESSION_ZLIB
834 compression = COMPRESSION_ZLIB
826 elif self._compengine == b'none':
835 elif self._compengine == b'none':
827 deltablob = fulltext
836 deltablob = fulltext
828 compression = COMPRESSION_NONE
837 compression = COMPRESSION_NONE
829 else:
838 else:
830 raise error.ProgrammingError(
839 raise error.ProgrammingError(
831 b'unhandled compression engine: %s' % self._compengine
840 b'unhandled compression engine: %s' % self._compengine
832 )
841 )
833
842
834 if len(deltablob) >= len(fulltext):
843 if len(deltablob) >= len(fulltext):
835 deltablob = fulltext
844 deltablob = fulltext
836 compression = COMPRESSION_NONE
845 compression = COMPRESSION_NONE
837
846
838 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
847 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
839
848
840 self._db.execute(
849 self._db.execute(
841 'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
850 'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
842 'WHERE id=?',
851 'WHERE id=?',
843 (deltaid, rid),
852 (deltaid, rid),
844 )
853 )
845
854
846 # Now create the tombstone delta and replace the delta on the censored
855 # Now create the tombstone delta and replace the delta on the censored
847 # node.
856 # node.
848 deltahash = hashutil.sha1(tombstone).digest()
857 deltahash = hashutil.sha1(tombstone).digest()
849 tombstonedeltaid = insertdelta(
858 tombstonedeltaid = insertdelta(
850 self._db, COMPRESSION_NONE, deltahash, tombstone
859 self._db, COMPRESSION_NONE, deltahash, tombstone
851 )
860 )
852
861
853 flags = self._revisions[censornode].flags
862 flags = self._revisions[censornode].flags
854 flags |= FLAG_CENSORED
863 flags |= FLAG_CENSORED
855
864
856 self._db.execute(
865 self._db.execute(
857 'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
866 'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
858 'WHERE pathid=? AND node=?',
867 'WHERE pathid=? AND node=?',
859 (flags, tombstonedeltaid, self._pathid, censornode),
868 (flags, tombstonedeltaid, self._pathid, censornode),
860 )
869 )
861
870
862 self._db.execute('DELETE FROM delta WHERE id=?', (censoreddeltaid,))
871 self._db.execute('DELETE FROM delta WHERE id=?', (censoreddeltaid,))
863
872
864 self._refreshindex()
873 self._refreshindex()
865 self._revisioncache.clear()
874 self._revisioncache.clear()
866
875
867 def getstrippoint(self, minlink):
876 def getstrippoint(self, minlink):
868 return storageutil.resolvestripinfo(
877 return storageutil.resolvestripinfo(
869 minlink,
878 minlink,
870 len(self) - 1,
879 len(self) - 1,
871 [self.rev(n) for n in self.heads()],
880 [self.rev(n) for n in self.heads()],
872 self.linkrev,
881 self.linkrev,
873 self.parentrevs,
882 self.parentrevs,
874 )
883 )
875
884
876 def strip(self, minlink, transaction):
885 def strip(self, minlink, transaction):
877 if not len(self):
886 if not len(self):
878 return
887 return
879
888
880 rev, _ignored = self.getstrippoint(minlink)
889 rev, _ignored = self.getstrippoint(minlink)
881
890
882 if rev == len(self):
891 if rev == len(self):
883 return
892 return
884
893
885 for rev in self.revs(rev):
894 for rev in self.revs(rev):
886 self._db.execute(
895 self._db.execute(
887 'DELETE FROM fileindex WHERE pathid=? AND node=?',
896 'DELETE FROM fileindex WHERE pathid=? AND node=?',
888 (self._pathid, self.node(rev)),
897 (self._pathid, self.node(rev)),
889 )
898 )
890
899
891 # TODO how should we garbage collect data in delta table?
900 # TODO how should we garbage collect data in delta table?
892
901
893 self._refreshindex()
902 self._refreshindex()
894
903
895 # End of ifilemutation interface.
904 # End of ifilemutation interface.
896
905
897 # Start of ifilestorage interface.
906 # Start of ifilestorage interface.
898
907
899 def files(self):
908 def files(self):
900 return []
909 return []
901
910
902 def storageinfo(
911 def storageinfo(
903 self,
912 self,
904 exclusivefiles=False,
913 exclusivefiles=False,
905 sharedfiles=False,
914 sharedfiles=False,
906 revisionscount=False,
915 revisionscount=False,
907 trackedsize=False,
916 trackedsize=False,
908 storedsize=False,
917 storedsize=False,
909 ):
918 ):
910 d = {}
919 d = {}
911
920
912 if exclusivefiles:
921 if exclusivefiles:
913 d[b'exclusivefiles'] = []
922 d[b'exclusivefiles'] = []
914
923
915 if sharedfiles:
924 if sharedfiles:
916 # TODO list sqlite file(s) here.
925 # TODO list sqlite file(s) here.
917 d[b'sharedfiles'] = []
926 d[b'sharedfiles'] = []
918
927
919 if revisionscount:
928 if revisionscount:
920 d[b'revisionscount'] = len(self)
929 d[b'revisionscount'] = len(self)
921
930
922 if trackedsize:
931 if trackedsize:
923 d[b'trackedsize'] = sum(
932 d[b'trackedsize'] = sum(
924 len(self.revision(node)) for node in self._nodetorev
933 len(self.revision(node)) for node in self._nodetorev
925 )
934 )
926
935
927 if storedsize:
936 if storedsize:
928 # TODO implement this?
937 # TODO implement this?
929 d[b'storedsize'] = None
938 d[b'storedsize'] = None
930
939
931 return d
940 return d
932
941
933 def verifyintegrity(self, state):
942 def verifyintegrity(self, state):
934 state[b'skipread'] = set()
943 state[b'skipread'] = set()
935
944
936 for rev in self:
945 for rev in self:
937 node = self.node(rev)
946 node = self.node(rev)
938
947
939 try:
948 try:
940 self.revision(node)
949 self.revision(node)
941 except Exception as e:
950 except Exception as e:
942 yield sqliteproblem(
951 yield sqliteproblem(
943 error=_(b'unpacking %s: %s') % (short(node), e), node=node
952 error=_(b'unpacking %s: %s') % (short(node), e), node=node
944 )
953 )
945
954
946 state[b'skipread'].add(node)
955 state[b'skipread'].add(node)
947
956
948 # End of ifilestorage interface.
957 # End of ifilestorage interface.
949
958
950 def _checkhash(self, fulltext, node, p1=None, p2=None):
959 def _checkhash(self, fulltext, node, p1=None, p2=None):
951 if p1 is None and p2 is None:
960 if p1 is None and p2 is None:
952 p1, p2 = self.parents(node)
961 p1, p2 = self.parents(node)
953
962
954 if node == storageutil.hashrevisionsha1(fulltext, p1, p2):
963 if node == storageutil.hashrevisionsha1(fulltext, p1, p2):
955 return
964 return
956
965
957 try:
966 try:
958 del self._revisioncache[node]
967 del self._revisioncache[node]
959 except KeyError:
968 except KeyError:
960 pass
969 pass
961
970
962 if storageutil.iscensoredtext(fulltext):
971 if storageutil.iscensoredtext(fulltext):
963 raise error.CensoredNodeError(self._path, node, fulltext)
972 raise error.CensoredNodeError(self._path, node, fulltext)
964
973
965 raise SQLiteStoreError(_(b'integrity check failed on %s') % self._path)
974 raise SQLiteStoreError(_(b'integrity check failed on %s') % self._path)
966
975
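A brief sketch, assuming the standard SHA-1 revlog convention implemented by storageutil.hashrevisionsha1, of the node that _checkhash() above expects: the digest of the two 20-byte parent nodes in sorted order followed by the fulltext.

import hashlib

def expected_node(fulltext, p1, p2):
    # parents are concatenated in sorted (low..high) binary order
    a, b = sorted((p1, p2))
    return hashlib.sha1(a + b + fulltext).digest()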
967 def _addrawrevision(
976 def _addrawrevision(
968 self,
977 self,
969 node,
978 node,
970 revisiondata,
979 revisiondata,
971 transaction,
980 transaction,
972 linkrev,
981 linkrev,
973 p1,
982 p1,
974 p2,
983 p2,
975 storedelta=None,
984 storedelta=None,
976 flags=0,
985 flags=0,
977 ):
986 ):
978 if self._pathid is None:
987 if self._pathid is None:
979 res = self._db.execute(
988 res = self._db.execute(
980 'INSERT INTO filepath (path) VALUES (?)', (self._path,)
989 'INSERT INTO filepath (path) VALUES (?)', (self._path,)
981 )
990 )
982 self._pathid = res.lastrowid
991 self._pathid = res.lastrowid
983
992
984 # For simplicity, always store a delta against p1.
993 # For simplicity, always store a delta against p1.
985 # TODO we need a lot more logic here to make behavior reasonable.
994 # TODO we need a lot more logic here to make behavior reasonable.
986
995
987 if storedelta:
996 if storedelta:
988 deltabase, delta = storedelta
997 deltabase, delta = storedelta
989
998
990 if isinstance(deltabase, int):
999 if isinstance(deltabase, int):
991 deltabase = self.node(deltabase)
1000 deltabase = self.node(deltabase)
992
1001
993 else:
1002 else:
994 assert revisiondata is not None
1003 assert revisiondata is not None
995 deltabase = p1
1004 deltabase = p1
996
1005
997 if deltabase == nullid:
1006 if deltabase == nullid:
998 delta = revisiondata
1007 delta = revisiondata
999 else:
1008 else:
1000 delta = mdiff.textdiff(
1009 delta = mdiff.textdiff(
1001 self.revision(self.rev(deltabase)), revisiondata
1010 self.revision(self.rev(deltabase)), revisiondata
1002 )
1011 )
1003
1012
1004 # File index stores a pointer to its delta and the parent delta.
1013 # File index stores a pointer to its delta and the parent delta.
1005 # The parent delta is stored via a pointer to the fileindex PK.
1014 # The parent delta is stored via a pointer to the fileindex PK.
1006 if deltabase == nullid:
1015 if deltabase == nullid:
1007 baseid = None
1016 baseid = None
1008 else:
1017 else:
1009 baseid = self._revisions[deltabase].rid
1018 baseid = self._revisions[deltabase].rid
1010
1019
1011 # Deltas are stored with a hash of their content so identical deltas
1020 # Deltas are stored with a hash of their content so identical deltas
1012 # are de-duplicated. The hash column aborts on conflicting inserts and
1021 # are de-duplicated. The hash column aborts on conflicting inserts and
1013 # insertdelta() falls back to reusing the existing row, which is faster
1022 # insertdelta() falls back to reusing the existing row, which is faster
1014 # than looking first.
1023 # than looking first.
1015 deltahash = hashutil.sha1(delta).digest()
1024 deltahash = hashutil.sha1(delta).digest()
1016
1025
1017 if self._compengine == b'zstd':
1026 if self._compengine == b'zstd':
1018 deltablob = self._cctx.compress(delta)
1027 deltablob = self._cctx.compress(delta)
1019 compression = COMPRESSION_ZSTD
1028 compression = COMPRESSION_ZSTD
1020 elif self._compengine == b'zlib':
1029 elif self._compengine == b'zlib':
1021 deltablob = zlib.compress(delta)
1030 deltablob = zlib.compress(delta)
1022 compression = COMPRESSION_ZLIB
1031 compression = COMPRESSION_ZLIB
1023 elif self._compengine == b'none':
1032 elif self._compengine == b'none':
1024 deltablob = delta
1033 deltablob = delta
1025 compression = COMPRESSION_NONE
1034 compression = COMPRESSION_NONE
1026 else:
1035 else:
1027 raise error.ProgrammingError(
1036 raise error.ProgrammingError(
1028 b'unhandled compression engine: %s' % self._compengine
1037 b'unhandled compression engine: %s' % self._compengine
1029 )
1038 )
1030
1039
1031 # Don't store compressed data if it isn't practical.
1040 # Don't store compressed data if it isn't practical.
1032 if len(deltablob) >= len(delta):
1041 if len(deltablob) >= len(delta):
1033 deltablob = delta
1042 deltablob = delta
1034 compression = COMPRESSION_NONE
1043 compression = COMPRESSION_NONE
1035
1044
1036 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
1045 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
1037
1046
1038 rev = len(self)
1047 rev = len(self)
1039
1048
1040 if p1 == nullid:
1049 if p1 == nullid:
1041 p1rev = nullrev
1050 p1rev = nullrev
1042 else:
1051 else:
1043 p1rev = self._nodetorev[p1]
1052 p1rev = self._nodetorev[p1]
1044
1053
1045 if p2 == nullid:
1054 if p2 == nullid:
1046 p2rev = nullrev
1055 p2rev = nullrev
1047 else:
1056 else:
1048 p2rev = self._nodetorev[p2]
1057 p2rev = self._nodetorev[p2]
1049
1058
1050 rid = self._db.execute(
1059 rid = self._db.execute(
1051 'INSERT INTO fileindex ('
1060 'INSERT INTO fileindex ('
1052 ' pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
1061 ' pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
1053 ' deltaid, deltabaseid) '
1062 ' deltaid, deltabaseid) '
1054 ' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
1063 ' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
1055 (
1064 (
1056 self._pathid,
1065 self._pathid,
1057 rev,
1066 rev,
1058 node,
1067 node,
1059 p1rev,
1068 p1rev,
1060 p2rev,
1069 p2rev,
1061 linkrev,
1070 linkrev,
1062 flags,
1071 flags,
1063 deltaid,
1072 deltaid,
1064 baseid,
1073 baseid,
1065 ),
1074 ),
1066 ).lastrowid
1075 ).lastrowid
1067
1076
1068 entry = revisionentry(
1077 entry = revisionentry(
1069 rid=rid,
1078 rid=rid,
1070 rev=rev,
1079 rev=rev,
1071 node=node,
1080 node=node,
1072 p1rev=p1rev,
1081 p1rev=p1rev,
1073 p2rev=p2rev,
1082 p2rev=p2rev,
1074 p1node=p1,
1083 p1node=p1,
1075 p2node=p2,
1084 p2node=p2,
1076 linkrev=linkrev,
1085 linkrev=linkrev,
1077 flags=flags,
1086 flags=flags,
1078 )
1087 )
1079
1088
1080 self._nodetorev[node] = rev
1089 self._nodetorev[node] = rev
1081 self._revtonode[rev] = node
1090 self._revtonode[rev] = node
1082 self._revisions[node] = entry
1091 self._revisions[node] = entry
1083
1092
1084 return rev
1093 return rev
1085
1094
1086
1095
1087 class sqliterepository(localrepo.localrepository):
1096 class sqliterepository(localrepo.localrepository):
1088 def cancopy(self):
1097 def cancopy(self):
1089 return False
1098 return False
1090
1099
1091 def transaction(self, *args, **kwargs):
1100 def transaction(self, *args, **kwargs):
1092 current = self.currenttransaction()
1101 current = self.currenttransaction()
1093
1102
1094 tr = super(sqliterepository, self).transaction(*args, **kwargs)
1103 tr = super(sqliterepository, self).transaction(*args, **kwargs)
1095
1104
1096 if current:
1105 if current:
1097 return tr
1106 return tr
1098
1107
1099 self._dbconn.execute('BEGIN TRANSACTION')
1108 self._dbconn.execute('BEGIN TRANSACTION')
1100
1109
1101 def committransaction(_):
1110 def committransaction(_):
1102 self._dbconn.commit()
1111 self._dbconn.commit()
1103
1112
1104 tr.addfinalize(b'sqlitestore', committransaction)
1113 tr.addfinalize(b'sqlitestore', committransaction)
1105
1114
1106 return tr
1115 return tr
1107
1116
1108 @property
1117 @property
1109 def _dbconn(self):
1118 def _dbconn(self):
1110 # SQLite connections can only be used on the thread that created
1119 # SQLite connections can only be used on the thread that created
1111 # them. In most cases, this "just works." However, hgweb uses
1120 # them. In most cases, this "just works." However, hgweb uses
1112 # multiple threads.
1121 # multiple threads.
1113 tid = threading.current_thread().ident
1122 tid = threading.current_thread().ident
1114
1123
1115 if self._db:
1124 if self._db:
1116 if self._db[0] == tid:
1125 if self._db[0] == tid:
1117 return self._db[1]
1126 return self._db[1]
1118
1127
1119 db = makedb(self.svfs.join(b'db.sqlite'))
1128 db = makedb(self.svfs.join(b'db.sqlite'))
1120 self._db = (tid, db)
1129 self._db = (tid, db)
1121
1130
1122 return db
1131 return db
1123
1132
1124
1133
1125 def makedb(path):
1134 def makedb(path):
1126 """Construct a database handle for a database at path."""
1135 """Construct a database handle for a database at path."""
1127
1136
1128 db = sqlite3.connect(encoding.strfromlocal(path))
1137 db = sqlite3.connect(encoding.strfromlocal(path))
1129 db.text_factory = bytes
1138 db.text_factory = bytes
1130
1139
1131 res = db.execute('PRAGMA user_version').fetchone()[0]
1140 res = db.execute('PRAGMA user_version').fetchone()[0]
1132
1141
1133 # New database.
1142 # New database.
1134 if res == 0:
1143 if res == 0:
1135 for statement in CREATE_SCHEMA:
1144 for statement in CREATE_SCHEMA:
1136 db.execute(statement)
1145 db.execute(statement)
1137
1146
1138 db.commit()
1147 db.commit()
1139
1148
1140 elif res == CURRENT_SCHEMA_VERSION:
1149 elif res == CURRENT_SCHEMA_VERSION:
1141 pass
1150 pass
1142
1151
1143 else:
1152 else:
1144 raise error.Abort(_(b'sqlite database has unrecognized version'))
1153 raise error.Abort(_(b'sqlite database has unrecognized version'))
1145
1154
1146 db.execute('PRAGMA journal_mode=WAL')
1155 db.execute('PRAGMA journal_mode=WAL')
1147
1156
1148 return db
1157 return db
1149
1158
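A hedged usage sketch for makedb(): the extension keeps its database at db.sqlite inside the store vfs, so a path like the illustrative one below can be opened directly for inspection (the concrete path is an assumption, not taken from this file).

db = makedb(b'/path/to/repo/.hg/store/db.sqlite')
print(db.execute('PRAGMA user_version').fetchone()[0])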
1150
1159
1151 def featuresetup(ui, supported):
1160 def featuresetup(ui, supported):
1152 supported.add(REQUIREMENT)
1161 supported.add(REQUIREMENT)
1153
1162
1154 if zstd:
1163 if zstd:
1155 supported.add(REQUIREMENT_ZSTD)
1164 supported.add(REQUIREMENT_ZSTD)
1156
1165
1157 supported.add(REQUIREMENT_ZLIB)
1166 supported.add(REQUIREMENT_ZLIB)
1158 supported.add(REQUIREMENT_NONE)
1167 supported.add(REQUIREMENT_NONE)
1159 supported.add(REQUIREMENT_SHALLOW_FILES)
1168 supported.add(REQUIREMENT_SHALLOW_FILES)
1160 supported.add(requirements.NARROW_REQUIREMENT)
1169 supported.add(requirements.NARROW_REQUIREMENT)
1161
1170
1162
1171
1163 def newreporequirements(orig, ui, createopts):
1172 def newreporequirements(orig, ui, createopts):
1164 if createopts[b'backend'] != b'sqlite':
1173 if createopts[b'backend'] != b'sqlite':
1165 return orig(ui, createopts)
1174 return orig(ui, createopts)
1166
1175
1167 # This restriction can be lifted once we have more confidence.
1176 # This restriction can be lifted once we have more confidence.
1168 if b'sharedrepo' in createopts:
1177 if b'sharedrepo' in createopts:
1169 raise error.Abort(
1178 raise error.Abort(
1170 _(b'shared repositories not supported with SQLite store')
1179 _(b'shared repositories not supported with SQLite store')
1171 )
1180 )
1172
1181
1173 # This filtering is out of an abundance of caution: we want to ensure
1182 # This filtering is out of an abundance of caution: we want to ensure
1174 # we honor creation options and we do that by annotating exactly the
1183 # we honor creation options and we do that by annotating exactly the
1175 # creation options we recognize.
1184 # creation options we recognize.
1176 known = {
1185 known = {
1177 b'narrowfiles',
1186 b'narrowfiles',
1178 b'backend',
1187 b'backend',
1179 b'shallowfilestore',
1188 b'shallowfilestore',
1180 }
1189 }
1181
1190
1182 unsupported = set(createopts) - known
1191 unsupported = set(createopts) - known
1183 if unsupported:
1192 if unsupported:
1184 raise error.Abort(
1193 raise error.Abort(
1185 _(b'SQLite store does not support repo creation option: %s')
1194 _(b'SQLite store does not support repo creation option: %s')
1186 % b', '.join(sorted(unsupported))
1195 % b', '.join(sorted(unsupported))
1187 )
1196 )
1188
1197
1189 # Since we're a hybrid store that still relies on revlogs, we fall back
1198 # Since we're a hybrid store that still relies on revlogs, we fall back
1190 # to using the revlogv1 backend's storage requirements then adding our
1199 # to using the revlogv1 backend's storage requirements then adding our
1191 # own requirement.
1200 # own requirement.
1192 createopts[b'backend'] = b'revlogv1'
1201 createopts[b'backend'] = b'revlogv1'
1193 requirements = orig(ui, createopts)
1202 requirements = orig(ui, createopts)
1194 requirements.add(REQUIREMENT)
1203 requirements.add(REQUIREMENT)
1195
1204
1196 compression = ui.config(b'storage', b'sqlite.compression')
1205 compression = ui.config(b'storage', b'sqlite.compression')
1197
1206
1198 if compression == b'zstd' and not zstd:
1207 if compression == b'zstd' and not zstd:
1199 raise error.Abort(
1208 raise error.Abort(
1200 _(
1209 _(
1201 b'storage.sqlite.compression set to "zstd" but '
1210 b'storage.sqlite.compression set to "zstd" but '
1202 b'zstandard compression not available to this '
1211 b'zstandard compression not available to this '
1203 b'Mercurial install'
1212 b'Mercurial install'
1204 )
1213 )
1205 )
1214 )
1206
1215
1207 if compression == b'zstd':
1216 if compression == b'zstd':
1208 requirements.add(REQUIREMENT_ZSTD)
1217 requirements.add(REQUIREMENT_ZSTD)
1209 elif compression == b'zlib':
1218 elif compression == b'zlib':
1210 requirements.add(REQUIREMENT_ZLIB)
1219 requirements.add(REQUIREMENT_ZLIB)
1211 elif compression == b'none':
1220 elif compression == b'none':
1212 requirements.add(REQUIREMENT_NONE)
1221 requirements.add(REQUIREMENT_NONE)
1213 else:
1222 else:
1214 raise error.Abort(
1223 raise error.Abort(
1215 _(
1224 _(
1216 b'unknown compression engine defined in '
1225 b'unknown compression engine defined in '
1217 b'storage.sqlite.compression: %s'
1226 b'storage.sqlite.compression: %s'
1218 )
1227 )
1219 % compression
1228 % compression
1220 )
1229 )
1221
1230
1222 if createopts.get(b'shallowfilestore'):
1231 if createopts.get(b'shallowfilestore'):
1223 requirements.add(REQUIREMENT_SHALLOW_FILES)
1232 requirements.add(REQUIREMENT_SHALLOW_FILES)
1224
1233
1225 return requirements
1234 return requirements
1226
1235
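For illustration, a repository using this backend would typically be created with configuration along these lines; storage.new-repo-backend selects the backend value seen by newreporequirements() above, and storage.sqlite.compression is the option it reads (the exact hgrc layout here is an example, not quoted from this file):

[storage]
new-repo-backend = sqlite
sqlite.compression = zstd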
1227
1236
1228 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1237 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1229 class sqlitefilestorage(object):
1238 class sqlitefilestorage(object):
1230 """Repository file storage backed by SQLite."""
1239 """Repository file storage backed by SQLite."""
1231
1240
1232 def file(self, path):
1241 def file(self, path):
1233 if path[0] == b'/':
1242 if path[0] == b'/':
1234 path = path[1:]
1243 path = path[1:]
1235
1244
1236 if REQUIREMENT_ZSTD in self.requirements:
1245 if REQUIREMENT_ZSTD in self.requirements:
1237 compression = b'zstd'
1246 compression = b'zstd'
1238 elif REQUIREMENT_ZLIB in self.requirements:
1247 elif REQUIREMENT_ZLIB in self.requirements:
1239 compression = b'zlib'
1248 compression = b'zlib'
1240 elif REQUIREMENT_NONE in self.requirements:
1249 elif REQUIREMENT_NONE in self.requirements:
1241 compression = b'none'
1250 compression = b'none'
1242 else:
1251 else:
1243 raise error.Abort(
1252 raise error.Abort(
1244 _(
1253 _(
1245 b'unable to determine what compression engine '
1254 b'unable to determine what compression engine '
1246 b'to use for SQLite storage'
1255 b'to use for SQLite storage'
1247 )
1256 )
1248 )
1257 )
1249
1258
1250 return sqlitefilestore(self._dbconn, path, compression)
1259 return sqlitefilestore(self._dbconn, path, compression)
1251
1260
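Illustrative only: once a repository carries the SQLite requirement, per-file storage is reached through the normal repository API, so something along these lines would return a sqlitefilestore (``repo`` is assumed to be an open sqliterepository):

fl = repo.file(b'README')          # -> sqlitefilestore
text = fl.revision(fl.node(0))     # fulltext of the first stored revision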
1252
1261
1253 def makefilestorage(orig, requirements, features, **kwargs):
1262 def makefilestorage(orig, requirements, features, **kwargs):
1254 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1263 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1255 if REQUIREMENT in requirements:
1264 if REQUIREMENT in requirements:
1256 if REQUIREMENT_SHALLOW_FILES in requirements:
1265 if REQUIREMENT_SHALLOW_FILES in requirements:
1257 features.add(repository.REPO_FEATURE_SHALLOW_FILE_STORAGE)
1266 features.add(repository.REPO_FEATURE_SHALLOW_FILE_STORAGE)
1258
1267
1259 return sqlitefilestorage
1268 return sqlitefilestorage
1260 else:
1269 else:
1261 return orig(requirements=requirements, features=features, **kwargs)
1270 return orig(requirements=requirements, features=features, **kwargs)
1262
1271
1263
1272
1264 def makemain(orig, ui, requirements, **kwargs):
1273 def makemain(orig, ui, requirements, **kwargs):
1265 if REQUIREMENT in requirements:
1274 if REQUIREMENT in requirements:
1266 if REQUIREMENT_ZSTD in requirements and not zstd:
1275 if REQUIREMENT_ZSTD in requirements and not zstd:
1267 raise error.Abort(
1276 raise error.Abort(
1268 _(
1277 _(
1269 b'repository uses zstandard compression, which '
1278 b'repository uses zstandard compression, which '
1270 b'is not available to this Mercurial install'
1279 b'is not available to this Mercurial install'
1271 )
1280 )
1272 )
1281 )
1273
1282
1274 return sqliterepository
1283 return sqliterepository
1275
1284
1276 return orig(requirements=requirements, **kwargs)
1285 return orig(requirements=requirements, **kwargs)
1277
1286
1278
1287
1279 def verifierinit(orig, self, *args, **kwargs):
1288 def verifierinit(orig, self, *args, **kwargs):
1280 orig(self, *args, **kwargs)
1289 orig(self, *args, **kwargs)
1281
1290
1282 # We don't care that files in the store don't align with what is
1291 # We don't care that files in the store don't align with what is
1283 # advertised. So suppress these warnings.
1292 # advertised. So suppress these warnings.
1284 self.warnorphanstorefiles = False
1293 self.warnorphanstorefiles = False
1285
1294
1286
1295
1287 def extsetup(ui):
1296 def extsetup(ui):
1288 localrepo.featuresetupfuncs.add(featuresetup)
1297 localrepo.featuresetupfuncs.add(featuresetup)
1289 extensions.wrapfunction(
1298 extensions.wrapfunction(
1290 localrepo, b'newreporequirements', newreporequirements
1299 localrepo, b'newreporequirements', newreporequirements
1291 )
1300 )
1292 extensions.wrapfunction(localrepo, b'makefilestorage', makefilestorage)
1301 extensions.wrapfunction(localrepo, b'makefilestorage', makefilestorage)
1293 extensions.wrapfunction(localrepo, b'makemain', makemain)
1302 extensions.wrapfunction(localrepo, b'makemain', makemain)
1294 extensions.wrapfunction(verify.verifier, b'__init__', verifierinit)
1303 extensions.wrapfunction(verify.verifier, b'__init__', verifierinit)
1295
1304
1296
1305
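
The extsetup() above hooks the extension in through extensions.wrapfunction(): each wrapper (newreporequirements, makefilestorage, makemain, verifierinit) receives the original callable as its first argument and decides whether to take over or to delegate. A minimal, self-contained sketch of that delegation pattern follows; makewidget() is a made-up stand-in for the real factory functions, and the final partial() line only imitates what wrapfunction() does internally.

import functools


def makewidget(size):
    """Stand-in for a factory function an extension wants to interpose on."""
    return {'size': size}


def wrappedmakewidget(orig, size):
    # Same shape as makefilestorage()/makemain() above: the original
    # callable arrives first, and the wrapper may adjust the result or
    # simply delegate.
    widget = orig(size)
    widget['wrapped'] = True
    return widget


# extensions.wrapfunction() essentially performs this binding, replacing the
# attribute on the wrapped module with the partially applied wrapper.
makewidget = functools.partial(wrappedmakewidget, makewidget)

assert makewidget(3) == {'size': 3, 'wrapped': True}
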
1297 def reposetup(ui, repo):
1306 def reposetup(ui, repo):
1298 if isinstance(repo, sqliterepository):
1307 if isinstance(repo, sqliterepository):
1299 repo._db = None
1308 repo._db = None
1300
1309
1301 # TODO check for bundlerepository?
1310 # TODO check for bundlerepository?
@@ -1,680 +1,680 b''
1 # bundlerepo.py - repository class for viewing uncompressed bundles
1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 #
2 #
3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Repository class for viewing uncompressed bundles.
8 """Repository class for viewing uncompressed bundles.
9
9
10 This provides a read-only repository interface to bundles as if they
10 This provides a read-only repository interface to bundles as if they
11 were part of the actual repository.
11 were part of the actual repository.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import os
16 import os
17 import shutil
17 import shutil
18
18
19 from .i18n import _
19 from .i18n import _
20 from .node import (
20 from .node import (
21 hex,
21 hex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 )
24 )
25
25
26 from . import (
26 from . import (
27 bundle2,
27 bundle2,
28 changegroup,
28 changegroup,
29 changelog,
29 changelog,
30 cmdutil,
30 cmdutil,
31 discovery,
31 discovery,
32 encoding,
32 encoding,
33 error,
33 error,
34 exchange,
34 exchange,
35 filelog,
35 filelog,
36 localrepo,
36 localrepo,
37 manifest,
37 manifest,
38 mdiff,
38 mdiff,
39 pathutil,
39 pathutil,
40 phases,
40 phases,
41 pycompat,
41 pycompat,
42 revlog,
42 revlog,
43 util,
43 util,
44 vfs as vfsmod,
44 vfs as vfsmod,
45 )
45 )
46
46
47
47
48 class bundlerevlog(revlog.revlog):
48 class bundlerevlog(revlog.revlog):
49 def __init__(self, opener, indexfile, cgunpacker, linkmapper):
49 def __init__(self, opener, indexfile, cgunpacker, linkmapper):
50 # How it works:
50 # How it works:
51 # To retrieve a revision, we need to know the offset of the revision in
51 # To retrieve a revision, we need to know the offset of the revision in
52 # the bundle (an unbundle object). We store this offset in the index
52 # the bundle (an unbundle object). We store this offset in the index
53 # (start). The base of the delta is stored in the base field.
53 # (start). The base of the delta is stored in the base field.
54 #
54 #
55 # To differentiate a rev in the bundle from a rev in the revlog, we
55 # To differentiate a rev in the bundle from a rev in the revlog, we
56 # check revision against repotiprev.
56 # check revision against repotiprev.
57 opener = vfsmod.readonlyvfs(opener)
57 opener = vfsmod.readonlyvfs(opener)
58 revlog.revlog.__init__(self, opener, indexfile)
58 revlog.revlog.__init__(self, opener, indexfile)
59 self.bundle = cgunpacker
59 self.bundle = cgunpacker
60 n = len(self)
60 n = len(self)
61 self.repotiprev = n - 1
61 self.repotiprev = n - 1
62 self.bundlerevs = set() # used by 'bundle()' revset expression
62 self.bundlerevs = set() # used by 'bundle()' revset expression
63 for deltadata in cgunpacker.deltaiter():
63 for deltadata in cgunpacker.deltaiter():
64 node, p1, p2, cs, deltabase, delta, flags = deltadata
64 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
65
65
66 size = len(delta)
66 size = len(delta)
67 start = cgunpacker.tell() - size
67 start = cgunpacker.tell() - size
68
68
69 if self.index.has_node(node):
69 if self.index.has_node(node):
70 # this can happen if two branches make the same change
70 # this can happen if two branches make the same change
71 self.bundlerevs.add(self.index.rev(node))
71 self.bundlerevs.add(self.index.rev(node))
72 continue
72 continue
73 if cs == node:
73 if cs == node:
74 linkrev = nullrev
74 linkrev = nullrev
75 else:
75 else:
76 linkrev = linkmapper(cs)
76 linkrev = linkmapper(cs)
77
77
78 for p in (p1, p2):
78 for p in (p1, p2):
79 if not self.index.has_node(p):
79 if not self.index.has_node(p):
80 raise error.LookupError(
80 raise error.LookupError(
81 p, self.indexfile, _(b"unknown parent")
81 p, self.indexfile, _(b"unknown parent")
82 )
82 )
83
83
84 if not self.index.has_node(deltabase):
84 if not self.index.has_node(deltabase):
85 raise LookupError(
85 raise LookupError(
86 deltabase, self.indexfile, _(b'unknown delta base')
86 deltabase, self.indexfile, _(b'unknown delta base')
87 )
87 )
88
88
89 baserev = self.rev(deltabase)
89 baserev = self.rev(deltabase)
90 # start, size, full unc. size, base (unused), link, p1, p2, node
90 # start, size, full unc. size, base (unused), link, p1, p2, node
91 e = (
91 e = (
92 revlog.offset_type(start, flags),
92 revlog.offset_type(start, flags),
93 size,
93 size,
94 -1,
94 -1,
95 baserev,
95 baserev,
96 linkrev,
96 linkrev,
97 self.rev(p1),
97 self.rev(p1),
98 self.rev(p2),
98 self.rev(p2),
99 node,
99 node,
100 )
100 )
101 self.index.append(e)
101 self.index.append(e)
102 self.bundlerevs.add(n)
102 self.bundlerevs.add(n)
103 n += 1
103 n += 1
104
104
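
The loop above now unpacks an eight-element tuple from deltaiter(); the trailing sidedata mapping is what the new cg4 format carries alongside each delta. A consumer that might still see the older seven-element tuples could normalise both shapes with a helper along these lines (purely an illustrative sketch, not an API this module provides):

def normalizedelta(deltadata):
    """Return (node, p1, p2, cs, deltabase, delta, flags, sidedata).

    Older changegroup unpackers yielded 7-tuples; cg4-aware ones append a
    (possibly empty) sidedata mapping, as deltaiter() does above.
    """
    if len(deltadata) == 7:
        node, p1, p2, cs, deltabase, delta, flags = deltadata
        sidedata = {}
    else:
        node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
    return node, p1, p2, cs, deltabase, delta, flags, sidedata
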
105 def _chunk(self, rev, df=None):
105 def _chunk(self, rev, df=None):
106 # Warning: in the case of a bundle, the diff is against what we stored as
106 # Warning: in the case of a bundle, the diff is against what we stored as
107 # delta base, not against rev - 1
107 # delta base, not against rev - 1
108 # XXX: could use some caching
108 # XXX: could use some caching
109 if rev <= self.repotiprev:
109 if rev <= self.repotiprev:
110 return revlog.revlog._chunk(self, rev)
110 return revlog.revlog._chunk(self, rev)
111 self.bundle.seek(self.start(rev))
111 self.bundle.seek(self.start(rev))
112 return self.bundle.read(self.length(rev))
112 return self.bundle.read(self.length(rev))
113
113
114 def revdiff(self, rev1, rev2):
114 def revdiff(self, rev1, rev2):
115 """return or calculate a delta between two revisions"""
115 """return or calculate a delta between two revisions"""
116 if rev1 > self.repotiprev and rev2 > self.repotiprev:
116 if rev1 > self.repotiprev and rev2 > self.repotiprev:
117 # hot path for bundle
117 # hot path for bundle
118 revb = self.index[rev2][3]
118 revb = self.index[rev2][3]
119 if revb == rev1:
119 if revb == rev1:
120 return self._chunk(rev2)
120 return self._chunk(rev2)
121 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
121 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
122 return revlog.revlog.revdiff(self, rev1, rev2)
122 return revlog.revlog.revdiff(self, rev1, rev2)
123
123
124 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
124 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
125
125
126 def _rawtext(self, node, rev, _df=None):
126 def _rawtext(self, node, rev, _df=None):
127 if rev is None:
127 if rev is None:
128 rev = self.rev(node)
128 rev = self.rev(node)
129 validated = False
129 validated = False
130 rawtext = None
130 rawtext = None
131 chain = []
131 chain = []
132 iterrev = rev
132 iterrev = rev
133 # reconstruct the revision if it is from a changegroup
133 # reconstruct the revision if it is from a changegroup
134 while iterrev > self.repotiprev:
134 while iterrev > self.repotiprev:
135 if self._revisioncache and self._revisioncache[1] == iterrev:
135 if self._revisioncache and self._revisioncache[1] == iterrev:
136 rawtext = self._revisioncache[2]
136 rawtext = self._revisioncache[2]
137 break
137 break
138 chain.append(iterrev)
138 chain.append(iterrev)
139 iterrev = self.index[iterrev][3]
139 iterrev = self.index[iterrev][3]
140 if iterrev == nullrev:
140 if iterrev == nullrev:
141 rawtext = b''
141 rawtext = b''
142 elif rawtext is None:
142 elif rawtext is None:
143 r = super(bundlerevlog, self)._rawtext(
143 r = super(bundlerevlog, self)._rawtext(
144 self.node(iterrev), iterrev, _df=_df
144 self.node(iterrev), iterrev, _df=_df
145 )
145 )
146 __, rawtext, validated = r
146 __, rawtext, validated = r
147 if chain:
147 if chain:
148 validated = False
148 validated = False
149 while chain:
149 while chain:
150 delta = self._chunk(chain.pop())
150 delta = self._chunk(chain.pop())
151 rawtext = mdiff.patches(rawtext, [delta])
151 rawtext = mdiff.patches(rawtext, [delta])
152 return rev, rawtext, validated
152 return rev, rawtext, validated
153
153
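
_rawtext() above rebuilds a revision by following delta-base links until it reaches text that is already materialised (cached or stored in the underlying revlog), then replays the collected deltas in order. A toy, standalone version of that walk, with deltas modelled as plain callables rather than the binary mdiff patches Mercurial really applies:

NULLREV = -1


def rebuildtext(basemap, deltamap, rev):
    """Toy delta-chain walk: basemap[rev] is the delta-base revision and
    deltamap[rev] is a callable turning the base text into this revision's
    text."""
    chain = []
    while rev != NULLREV:
        chain.append(rev)
        rev = basemap[rev]
    text = b''
    while chain:
        text = deltamap[chain.pop()](text)
    return text


# rev 0 is a full snapshot, rev 1 is a delta on top of it.
basemap = {0: NULLREV, 1: 0}
deltamap = {0: lambda base: b'hello', 1: lambda base: base + b' world'}
assert rebuildtext(basemap, deltamap, 1) == b'hello world'
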
154 def addrevision(self, *args, **kwargs):
154 def addrevision(self, *args, **kwargs):
155 raise NotImplementedError
155 raise NotImplementedError
156
156
157 def addgroup(self, *args, **kwargs):
157 def addgroup(self, *args, **kwargs):
158 raise NotImplementedError
158 raise NotImplementedError
159
159
160 def strip(self, *args, **kwargs):
160 def strip(self, *args, **kwargs):
161 raise NotImplementedError
161 raise NotImplementedError
162
162
163 def checksize(self):
163 def checksize(self):
164 raise NotImplementedError
164 raise NotImplementedError
165
165
166
166
167 class bundlechangelog(bundlerevlog, changelog.changelog):
167 class bundlechangelog(bundlerevlog, changelog.changelog):
168 def __init__(self, opener, cgunpacker):
168 def __init__(self, opener, cgunpacker):
169 changelog.changelog.__init__(self, opener)
169 changelog.changelog.__init__(self, opener)
170 linkmapper = lambda x: x
170 linkmapper = lambda x: x
171 bundlerevlog.__init__(
171 bundlerevlog.__init__(
172 self, opener, self.indexfile, cgunpacker, linkmapper
172 self, opener, self.indexfile, cgunpacker, linkmapper
173 )
173 )
174
174
175
175
176 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
176 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
177 def __init__(
177 def __init__(
178 self, opener, cgunpacker, linkmapper, dirlogstarts=None, dir=b''
178 self, opener, cgunpacker, linkmapper, dirlogstarts=None, dir=b''
179 ):
179 ):
180 manifest.manifestrevlog.__init__(self, opener, tree=dir)
180 manifest.manifestrevlog.__init__(self, opener, tree=dir)
181 bundlerevlog.__init__(
181 bundlerevlog.__init__(
182 self, opener, self.indexfile, cgunpacker, linkmapper
182 self, opener, self.indexfile, cgunpacker, linkmapper
183 )
183 )
184 if dirlogstarts is None:
184 if dirlogstarts is None:
185 dirlogstarts = {}
185 dirlogstarts = {}
186 if self.bundle.version == b"03":
186 if self.bundle.version == b"03":
187 dirlogstarts = _getfilestarts(self.bundle)
187 dirlogstarts = _getfilestarts(self.bundle)
188 self._dirlogstarts = dirlogstarts
188 self._dirlogstarts = dirlogstarts
189 self._linkmapper = linkmapper
189 self._linkmapper = linkmapper
190
190
191 def dirlog(self, d):
191 def dirlog(self, d):
192 if d in self._dirlogstarts:
192 if d in self._dirlogstarts:
193 self.bundle.seek(self._dirlogstarts[d])
193 self.bundle.seek(self._dirlogstarts[d])
194 return bundlemanifest(
194 return bundlemanifest(
195 self.opener,
195 self.opener,
196 self.bundle,
196 self.bundle,
197 self._linkmapper,
197 self._linkmapper,
198 self._dirlogstarts,
198 self._dirlogstarts,
199 dir=d,
199 dir=d,
200 )
200 )
201 return super(bundlemanifest, self).dirlog(d)
201 return super(bundlemanifest, self).dirlog(d)
202
202
203
203
204 class bundlefilelog(filelog.filelog):
204 class bundlefilelog(filelog.filelog):
205 def __init__(self, opener, path, cgunpacker, linkmapper):
205 def __init__(self, opener, path, cgunpacker, linkmapper):
206 filelog.filelog.__init__(self, opener, path)
206 filelog.filelog.__init__(self, opener, path)
207 self._revlog = bundlerevlog(
207 self._revlog = bundlerevlog(
208 opener, self.indexfile, cgunpacker, linkmapper
208 opener, self.indexfile, cgunpacker, linkmapper
209 )
209 )
210
210
211
211
212 class bundlepeer(localrepo.localpeer):
212 class bundlepeer(localrepo.localpeer):
213 def canpush(self):
213 def canpush(self):
214 return False
214 return False
215
215
216
216
217 class bundlephasecache(phases.phasecache):
217 class bundlephasecache(phases.phasecache):
218 def __init__(self, *args, **kwargs):
218 def __init__(self, *args, **kwargs):
219 super(bundlephasecache, self).__init__(*args, **kwargs)
219 super(bundlephasecache, self).__init__(*args, **kwargs)
220 if util.safehasattr(self, 'opener'):
220 if util.safehasattr(self, 'opener'):
221 self.opener = vfsmod.readonlyvfs(self.opener)
221 self.opener = vfsmod.readonlyvfs(self.opener)
222
222
223 def write(self):
223 def write(self):
224 raise NotImplementedError
224 raise NotImplementedError
225
225
226 def _write(self, fp):
226 def _write(self, fp):
227 raise NotImplementedError
227 raise NotImplementedError
228
228
229 def _updateroots(self, phase, newroots, tr):
229 def _updateroots(self, phase, newroots, tr):
230 self.phaseroots[phase] = newroots
230 self.phaseroots[phase] = newroots
231 self.invalidate()
231 self.invalidate()
232 self.dirty = True
232 self.dirty = True
233
233
234
234
235 def _getfilestarts(cgunpacker):
235 def _getfilestarts(cgunpacker):
236 filespos = {}
236 filespos = {}
237 for chunkdata in iter(cgunpacker.filelogheader, {}):
237 for chunkdata in iter(cgunpacker.filelogheader, {}):
238 fname = chunkdata[b'filename']
238 fname = chunkdata[b'filename']
239 filespos[fname] = cgunpacker.tell()
239 filespos[fname] = cgunpacker.tell()
240 for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
240 for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
241 pass
241 pass
242 return filespos
242 return filespos
243
243
244
244
245 class bundlerepository(object):
245 class bundlerepository(object):
246 """A repository instance that is a union of a local repo and a bundle.
246 """A repository instance that is a union of a local repo and a bundle.
247
247
248 Instances represent a read-only repository composed of a local repository
248 Instances represent a read-only repository composed of a local repository
249 with the contents of a bundle file applied. The repository instance is
249 with the contents of a bundle file applied. The repository instance is
250 conceptually similar to the state of a repository after an
250 conceptually similar to the state of a repository after an
251 ``hg unbundle`` operation. However, the contents of the bundle are never
251 ``hg unbundle`` operation. However, the contents of the bundle are never
252 applied to the actual base repository.
252 applied to the actual base repository.
253
253
254 Instances constructed directly are not usable as repository objects.
254 Instances constructed directly are not usable as repository objects.
255 Use instance() or makebundlerepository() to create instances.
255 Use instance() or makebundlerepository() to create instances.
256 """
256 """
257
257
258 def __init__(self, bundlepath, url, tempparent):
258 def __init__(self, bundlepath, url, tempparent):
259 self._tempparent = tempparent
259 self._tempparent = tempparent
260 self._url = url
260 self._url = url
261
261
262 self.ui.setconfig(b'phases', b'publish', False, b'bundlerepo')
262 self.ui.setconfig(b'phases', b'publish', False, b'bundlerepo')
263
263
264 self.tempfile = None
264 self.tempfile = None
265 f = util.posixfile(bundlepath, b"rb")
265 f = util.posixfile(bundlepath, b"rb")
266 bundle = exchange.readbundle(self.ui, f, bundlepath)
266 bundle = exchange.readbundle(self.ui, f, bundlepath)
267
267
268 if isinstance(bundle, bundle2.unbundle20):
268 if isinstance(bundle, bundle2.unbundle20):
269 self._bundlefile = bundle
269 self._bundlefile = bundle
270 self._cgunpacker = None
270 self._cgunpacker = None
271
271
272 cgpart = None
272 cgpart = None
273 for part in bundle.iterparts(seekable=True):
273 for part in bundle.iterparts(seekable=True):
274 if part.type == b'changegroup':
274 if part.type == b'changegroup':
275 if cgpart:
275 if cgpart:
276 raise NotImplementedError(
276 raise NotImplementedError(
277 b"can't process multiple changegroups"
277 b"can't process multiple changegroups"
278 )
278 )
279 cgpart = part
279 cgpart = part
280
280
281 self._handlebundle2part(bundle, part)
281 self._handlebundle2part(bundle, part)
282
282
283 if not cgpart:
283 if not cgpart:
284 raise error.Abort(_(b"No changegroups found"))
284 raise error.Abort(_(b"No changegroups found"))
285
285
286 # This is required to placate a later consumer, which expects
286 # This is required to placate a later consumer, which expects
287 # the payload offset to be at the beginning of the changegroup.
287 # the payload offset to be at the beginning of the changegroup.
288 # We need to do this after the iterparts() generator advances
288 # We need to do this after the iterparts() generator advances
289 # because iterparts() will seek to end of payload after the
289 # because iterparts() will seek to end of payload after the
290 # generator returns control to iterparts().
290 # generator returns control to iterparts().
291 cgpart.seek(0, os.SEEK_SET)
291 cgpart.seek(0, os.SEEK_SET)
292
292
293 elif isinstance(bundle, changegroup.cg1unpacker):
293 elif isinstance(bundle, changegroup.cg1unpacker):
294 if bundle.compressed():
294 if bundle.compressed():
295 f = self._writetempbundle(
295 f = self._writetempbundle(
296 bundle.read, b'.hg10un', header=b'HG10UN'
296 bundle.read, b'.hg10un', header=b'HG10UN'
297 )
297 )
298 bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)
298 bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)
299
299
300 self._bundlefile = bundle
300 self._bundlefile = bundle
301 self._cgunpacker = bundle
301 self._cgunpacker = bundle
302 else:
302 else:
303 raise error.Abort(
303 raise error.Abort(
304 _(b'bundle type %s cannot be read') % type(bundle)
304 _(b'bundle type %s cannot be read') % type(bundle)
305 )
305 )
306
306
307 # dict with the mapping 'filename' -> position in the changegroup.
307 # dict with the mapping 'filename' -> position in the changegroup.
308 self._cgfilespos = {}
308 self._cgfilespos = {}
309
309
310 self.firstnewrev = self.changelog.repotiprev + 1
310 self.firstnewrev = self.changelog.repotiprev + 1
311 phases.retractboundary(
311 phases.retractboundary(
312 self,
312 self,
313 None,
313 None,
314 phases.draft,
314 phases.draft,
315 [ctx.node() for ctx in self[self.firstnewrev :]],
315 [ctx.node() for ctx in self[self.firstnewrev :]],
316 )
316 )
317
317
318 def _handlebundle2part(self, bundle, part):
318 def _handlebundle2part(self, bundle, part):
319 if part.type != b'changegroup':
319 if part.type != b'changegroup':
320 return
320 return
321
321
322 cgstream = part
322 cgstream = part
323 version = part.params.get(b'version', b'01')
323 version = part.params.get(b'version', b'01')
324 legalcgvers = changegroup.supportedincomingversions(self)
324 legalcgvers = changegroup.supportedincomingversions(self)
325 if version not in legalcgvers:
325 if version not in legalcgvers:
326 msg = _(b'Unsupported changegroup version: %s')
326 msg = _(b'Unsupported changegroup version: %s')
327 raise error.Abort(msg % version)
327 raise error.Abort(msg % version)
328 if bundle.compressed():
328 if bundle.compressed():
329 cgstream = self._writetempbundle(part.read, b'.cg%sun' % version)
329 cgstream = self._writetempbundle(part.read, b'.cg%sun' % version)
330
330
331 self._cgunpacker = changegroup.getunbundler(version, cgstream, b'UN')
331 self._cgunpacker = changegroup.getunbundler(version, cgstream, b'UN')
332
332
333 def _writetempbundle(self, readfn, suffix, header=b''):
333 def _writetempbundle(self, readfn, suffix, header=b''):
334 """Write a temporary file to disk"""
334 """Write a temporary file to disk"""
335 fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix)
335 fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix)
336 self.tempfile = temp
336 self.tempfile = temp
337
337
338 with os.fdopen(fdtemp, 'wb') as fptemp:
338 with os.fdopen(fdtemp, 'wb') as fptemp:
339 fptemp.write(header)
339 fptemp.write(header)
340 while True:
340 while True:
341 chunk = readfn(2 ** 18)
341 chunk = readfn(2 ** 18)
342 if not chunk:
342 if not chunk:
343 break
343 break
344 fptemp.write(chunk)
344 fptemp.write(chunk)
345
345
346 return self.vfs.open(self.tempfile, mode=b"rb")
346 return self.vfs.open(self.tempfile, mode=b"rb")
347
347
348 @localrepo.unfilteredpropertycache
348 @localrepo.unfilteredpropertycache
349 def _phasecache(self):
349 def _phasecache(self):
350 return bundlephasecache(self, self._phasedefaults)
350 return bundlephasecache(self, self._phasedefaults)
351
351
352 @localrepo.unfilteredpropertycache
352 @localrepo.unfilteredpropertycache
353 def changelog(self):
353 def changelog(self):
354 # consume the header if it exists
354 # consume the header if it exists
355 self._cgunpacker.changelogheader()
355 self._cgunpacker.changelogheader()
356 c = bundlechangelog(self.svfs, self._cgunpacker)
356 c = bundlechangelog(self.svfs, self._cgunpacker)
357 self.manstart = self._cgunpacker.tell()
357 self.manstart = self._cgunpacker.tell()
358 return c
358 return c
359
359
360 def _refreshchangelog(self):
360 def _refreshchangelog(self):
361 # the changelog for a bundle repo is not a filecache, so this method is
361 # the changelog for a bundle repo is not a filecache, so this method is
362 # not applicable.
362 # not applicable.
363 pass
363 pass
364
364
365 @localrepo.unfilteredpropertycache
365 @localrepo.unfilteredpropertycache
366 def manifestlog(self):
366 def manifestlog(self):
367 self._cgunpacker.seek(self.manstart)
367 self._cgunpacker.seek(self.manstart)
368 # consume the header if it exists
368 # consume the header if it exists
369 self._cgunpacker.manifestheader()
369 self._cgunpacker.manifestheader()
370 linkmapper = self.unfiltered().changelog.rev
370 linkmapper = self.unfiltered().changelog.rev
371 rootstore = bundlemanifest(self.svfs, self._cgunpacker, linkmapper)
371 rootstore = bundlemanifest(self.svfs, self._cgunpacker, linkmapper)
372 self.filestart = self._cgunpacker.tell()
372 self.filestart = self._cgunpacker.tell()
373
373
374 return manifest.manifestlog(
374 return manifest.manifestlog(
375 self.svfs, self, rootstore, self.narrowmatch()
375 self.svfs, self, rootstore, self.narrowmatch()
376 )
376 )
377
377
378 def _consumemanifest(self):
378 def _consumemanifest(self):
379 """Consumes the manifest portion of the bundle, setting filestart so the
379 """Consumes the manifest portion of the bundle, setting filestart so the
380 file portion can be read."""
380 file portion can be read."""
381 self._cgunpacker.seek(self.manstart)
381 self._cgunpacker.seek(self.manstart)
382 self._cgunpacker.manifestheader()
382 self._cgunpacker.manifestheader()
383 for delta in self._cgunpacker.deltaiter():
383 for delta in self._cgunpacker.deltaiter():
384 pass
384 pass
385 self.filestart = self._cgunpacker.tell()
385 self.filestart = self._cgunpacker.tell()
386
386
387 @localrepo.unfilteredpropertycache
387 @localrepo.unfilteredpropertycache
388 def manstart(self):
388 def manstart(self):
389 self.changelog
389 self.changelog
390 return self.manstart
390 return self.manstart
391
391
392 @localrepo.unfilteredpropertycache
392 @localrepo.unfilteredpropertycache
393 def filestart(self):
393 def filestart(self):
394 self.manifestlog
394 self.manifestlog
395
395
396 # If filestart was not set by self.manifestlog, that means the
396 # If filestart was not set by self.manifestlog, that means the
397 # manifestlog implementation did not consume the manifests from the
397 # manifestlog implementation did not consume the manifests from the
398 # changegroup (ex: it might be consuming trees from a separate bundle2
398 # changegroup (ex: it might be consuming trees from a separate bundle2
399 # part instead). So we need to manually consume it.
399 # part instead). So we need to manually consume it.
400 if 'filestart' not in self.__dict__:
400 if 'filestart' not in self.__dict__:
401 self._consumemanifest()
401 self._consumemanifest()
402
402
403 return self.filestart
403 return self.filestart
404
404
405 def url(self):
405 def url(self):
406 return self._url
406 return self._url
407
407
408 def file(self, f):
408 def file(self, f):
409 if not self._cgfilespos:
409 if not self._cgfilespos:
410 self._cgunpacker.seek(self.filestart)
410 self._cgunpacker.seek(self.filestart)
411 self._cgfilespos = _getfilestarts(self._cgunpacker)
411 self._cgfilespos = _getfilestarts(self._cgunpacker)
412
412
413 if f in self._cgfilespos:
413 if f in self._cgfilespos:
414 self._cgunpacker.seek(self._cgfilespos[f])
414 self._cgunpacker.seek(self._cgfilespos[f])
415 linkmapper = self.unfiltered().changelog.rev
415 linkmapper = self.unfiltered().changelog.rev
416 return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
416 return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
417 else:
417 else:
418 return super(bundlerepository, self).file(f)
418 return super(bundlerepository, self).file(f)
419
419
420 def close(self):
420 def close(self):
421 """Close assigned bundle file immediately."""
421 """Close assigned bundle file immediately."""
422 self._bundlefile.close()
422 self._bundlefile.close()
423 if self.tempfile is not None:
423 if self.tempfile is not None:
424 self.vfs.unlink(self.tempfile)
424 self.vfs.unlink(self.tempfile)
425 if self._tempparent:
425 if self._tempparent:
426 shutil.rmtree(self._tempparent, True)
426 shutil.rmtree(self._tempparent, True)
427
427
428 def cancopy(self):
428 def cancopy(self):
429 return False
429 return False
430
430
431 def peer(self):
431 def peer(self):
432 return bundlepeer(self)
432 return bundlepeer(self)
433
433
434 def getcwd(self):
434 def getcwd(self):
435 return encoding.getcwd() # always outside the repo
435 return encoding.getcwd() # always outside the repo
436
436
437 # Check if parents exist in localrepo before setting
437 # Check if parents exist in localrepo before setting
438 def setparents(self, p1, p2=nullid):
438 def setparents(self, p1, p2=nullid):
439 p1rev = self.changelog.rev(p1)
439 p1rev = self.changelog.rev(p1)
440 p2rev = self.changelog.rev(p2)
440 p2rev = self.changelog.rev(p2)
441 msg = _(b"setting parent to node %s that only exists in the bundle\n")
441 msg = _(b"setting parent to node %s that only exists in the bundle\n")
442 if self.changelog.repotiprev < p1rev:
442 if self.changelog.repotiprev < p1rev:
443 self.ui.warn(msg % hex(p1))
443 self.ui.warn(msg % hex(p1))
444 if self.changelog.repotiprev < p2rev:
444 if self.changelog.repotiprev < p2rev:
445 self.ui.warn(msg % hex(p2))
445 self.ui.warn(msg % hex(p2))
446 return super(bundlerepository, self).setparents(p1, p2)
446 return super(bundlerepository, self).setparents(p1, p2)
447
447
448
448
449 def instance(ui, path, create, intents=None, createopts=None):
449 def instance(ui, path, create, intents=None, createopts=None):
450 if create:
450 if create:
451 raise error.Abort(_(b'cannot create new bundle repository'))
451 raise error.Abort(_(b'cannot create new bundle repository'))
452 # internal config: bundle.mainreporoot
452 # internal config: bundle.mainreporoot
453 parentpath = ui.config(b"bundle", b"mainreporoot")
453 parentpath = ui.config(b"bundle", b"mainreporoot")
454 if not parentpath:
454 if not parentpath:
455 # try to find the correct path to the working directory repo
455 # try to find the correct path to the working directory repo
456 parentpath = cmdutil.findrepo(encoding.getcwd())
456 parentpath = cmdutil.findrepo(encoding.getcwd())
457 if parentpath is None:
457 if parentpath is None:
458 parentpath = b''
458 parentpath = b''
459 if parentpath:
459 if parentpath:
460 # Try to make the full path relative so we get a nice, short URL.
460 # Try to make the full path relative so we get a nice, short URL.
461 # In particular, we don't want temp dir names in test outputs.
461 # In particular, we don't want temp dir names in test outputs.
462 cwd = encoding.getcwd()
462 cwd = encoding.getcwd()
463 if parentpath == cwd:
463 if parentpath == cwd:
464 parentpath = b''
464 parentpath = b''
465 else:
465 else:
466 cwd = pathutil.normasprefix(cwd)
466 cwd = pathutil.normasprefix(cwd)
467 if parentpath.startswith(cwd):
467 if parentpath.startswith(cwd):
468 parentpath = parentpath[len(cwd) :]
468 parentpath = parentpath[len(cwd) :]
469 u = util.url(path)
469 u = util.url(path)
470 path = u.localpath()
470 path = u.localpath()
471 if u.scheme == b'bundle':
471 if u.scheme == b'bundle':
472 s = path.split(b"+", 1)
472 s = path.split(b"+", 1)
473 if len(s) == 1:
473 if len(s) == 1:
474 repopath, bundlename = parentpath, s[0]
474 repopath, bundlename = parentpath, s[0]
475 else:
475 else:
476 repopath, bundlename = s
476 repopath, bundlename = s
477 else:
477 else:
478 repopath, bundlename = parentpath, path
478 repopath, bundlename = parentpath, path
479
479
480 return makebundlerepository(ui, repopath, bundlename)
480 return makebundlerepository(ui, repopath, bundlename)
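
instance() above accepts both a plain bundle path and a bundle:repo+file URL, falling back to the surrounding repository when only the bundle part is given. The split logic reduces to a few lines; a standalone sketch:

def splitbundleurl(path, parentpath=b''):
    """Toy version of the 'bundle:' handling above: b'repo+file' names both
    parts, a bare b'file' falls back to the parent repository path."""
    parts = path.split(b"+", 1)
    if len(parts) == 1:
        return parentpath, parts[0]
    return parts[0], parts[1]


assert splitbundleurl(b'../other+incoming.hg') == (b'../other', b'incoming.hg')
assert splitbundleurl(b'incoming.hg', b'/repo') == (b'/repo', b'incoming.hg')
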
481
481
482
482
483 def makebundlerepository(ui, repopath, bundlepath):
483 def makebundlerepository(ui, repopath, bundlepath):
484 """Make a bundle repository object based on repo and bundle paths."""
484 """Make a bundle repository object based on repo and bundle paths."""
485 if repopath:
485 if repopath:
486 url = b'bundle:%s+%s' % (util.expandpath(repopath), bundlepath)
486 url = b'bundle:%s+%s' % (util.expandpath(repopath), bundlepath)
487 else:
487 else:
488 url = b'bundle:%s' % bundlepath
488 url = b'bundle:%s' % bundlepath
489
489
490 # Because we can't make any guarantees about the type of the base
490 # Because we can't make any guarantees about the type of the base
491 # repository, we can't have a static class representing the bundle
491 # repository, we can't have a static class representing the bundle
492 # repository. We also can't make any guarantees about how to even
492 # repository. We also can't make any guarantees about how to even
493 # call the base repository's constructor!
493 # call the base repository's constructor!
494 #
494 #
495 # So, our strategy is to go through ``localrepo.instance()`` to construct
495 # So, our strategy is to go through ``localrepo.instance()`` to construct
496 # a repo instance. Then, we dynamically create a new type derived from
496 # a repo instance. Then, we dynamically create a new type derived from
497 # both it and our ``bundlerepository`` class which overrides some
497 # both it and our ``bundlerepository`` class which overrides some
498 # functionality. We then change the type of the constructed repository
498 # functionality. We then change the type of the constructed repository
499 # to this new type and initialize the bundle-specific bits of it.
499 # to this new type and initialize the bundle-specific bits of it.
500
500
501 try:
501 try:
502 repo = localrepo.instance(ui, repopath, create=False)
502 repo = localrepo.instance(ui, repopath, create=False)
503 tempparent = None
503 tempparent = None
504 except error.RepoError:
504 except error.RepoError:
505 tempparent = pycompat.mkdtemp()
505 tempparent = pycompat.mkdtemp()
506 try:
506 try:
507 repo = localrepo.instance(ui, tempparent, create=True)
507 repo = localrepo.instance(ui, tempparent, create=True)
508 except Exception:
508 except Exception:
509 shutil.rmtree(tempparent)
509 shutil.rmtree(tempparent)
510 raise
510 raise
511
511
512 class derivedbundlerepository(bundlerepository, repo.__class__):
512 class derivedbundlerepository(bundlerepository, repo.__class__):
513 pass
513 pass
514
514
515 repo.__class__ = derivedbundlerepository
515 repo.__class__ = derivedbundlerepository
516 bundlerepository.__init__(repo, bundlepath, url, tempparent)
516 bundlerepository.__init__(repo, bundlepath, url, tempparent)
517
517
518 return repo
518 return repo
519
519
520
520
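
The comment in makebundlerepository() explains why there is no static bundle repository class: the concrete base type is only known once localrepo.instance() has run. The trick is to derive a new type on the fly and retype the existing instance. A self-contained sketch of that pattern, with toy classes standing in for the real repository types:

class localthing(object):
    """Stand-in for whatever class localrepo.instance() produced."""

    def describe(self):
        return 'local'


class bundlemixin(object):
    """Stand-in for bundlerepository: overrides layered on the base type."""

    def describe(self):
        return 'bundle over ' + super(bundlemixin, self).describe()


thing = localthing()


# Same move as above: derive from both the mixin and the instance's own
# class, then swap __class__ on the already-constructed object.
class derivedthing(bundlemixin, thing.__class__):
    pass


thing.__class__ = derivedthing
assert thing.describe() == 'bundle over local'
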
521 class bundletransactionmanager(object):
521 class bundletransactionmanager(object):
522 def transaction(self):
522 def transaction(self):
523 return None
523 return None
524
524
525 def close(self):
525 def close(self):
526 raise NotImplementedError
526 raise NotImplementedError
527
527
528 def release(self):
528 def release(self):
529 raise NotImplementedError
529 raise NotImplementedError
530
530
531
531
532 def getremotechanges(
532 def getremotechanges(
533 ui, repo, peer, onlyheads=None, bundlename=None, force=False
533 ui, repo, peer, onlyheads=None, bundlename=None, force=False
534 ):
534 ):
535 """obtains a bundle of changes incoming from peer
535 """obtains a bundle of changes incoming from peer
536
536
537 "onlyheads" restricts the returned changes to those reachable from the
537 "onlyheads" restricts the returned changes to those reachable from the
538 specified heads.
538 specified heads.
539 "bundlename", if given, stores the bundle to this file path permanently;
539 "bundlename", if given, stores the bundle to this file path permanently;
540 otherwise it's stored to a temp file and gets deleted again when you call
540 otherwise it's stored to a temp file and gets deleted again when you call
541 the returned "cleanupfn".
541 the returned "cleanupfn".
542 "force" indicates whether to proceed on unrelated repos.
542 "force" indicates whether to proceed on unrelated repos.
543
543
544 Returns a tuple (local, csets, cleanupfn):
544 Returns a tuple (local, csets, cleanupfn):
545
545
546 "local" is a local repo from which to obtain the actual incoming
546 "local" is a local repo from which to obtain the actual incoming
547 changesets; it is a bundlerepo for the obtained bundle when the
547 changesets; it is a bundlerepo for the obtained bundle when the
548 original "peer" is remote.
548 original "peer" is remote.
549 "csets" lists the incoming changeset node ids.
549 "csets" lists the incoming changeset node ids.
550 "cleanupfn" must be called without arguments when you're done processing
550 "cleanupfn" must be called without arguments when you're done processing
551 the changes; it closes both the original "peer" and the one returned
551 the changes; it closes both the original "peer" and the one returned
552 here.
552 here.
553 """
553 """
554 tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads, force=force)
554 tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads, force=force)
555 common, incoming, rheads = tmp
555 common, incoming, rheads = tmp
556 if not incoming:
556 if not incoming:
557 try:
557 try:
558 if bundlename:
558 if bundlename:
559 os.unlink(bundlename)
559 os.unlink(bundlename)
560 except OSError:
560 except OSError:
561 pass
561 pass
562 return repo, [], peer.close
562 return repo, [], peer.close
563
563
564 commonset = set(common)
564 commonset = set(common)
565 rheads = [x for x in rheads if x not in commonset]
565 rheads = [x for x in rheads if x not in commonset]
566
566
567 bundle = None
567 bundle = None
568 bundlerepo = None
568 bundlerepo = None
569 localrepo = peer.local()
569 localrepo = peer.local()
570 if bundlename or not localrepo:
570 if bundlename or not localrepo:
571 # create a bundle (uncompressed if peer repo is not local)
571 # create a bundle (uncompressed if peer repo is not local)
572
572
573 # developer config: devel.legacy.exchange
573 # developer config: devel.legacy.exchange
574 legexc = ui.configlist(b'devel', b'legacy.exchange')
574 legexc = ui.configlist(b'devel', b'legacy.exchange')
575 forcebundle1 = b'bundle2' not in legexc and b'bundle1' in legexc
575 forcebundle1 = b'bundle2' not in legexc and b'bundle1' in legexc
576 canbundle2 = (
576 canbundle2 = (
577 not forcebundle1
577 not forcebundle1
578 and peer.capable(b'getbundle')
578 and peer.capable(b'getbundle')
579 and peer.capable(b'bundle2')
579 and peer.capable(b'bundle2')
580 )
580 )
581 if canbundle2:
581 if canbundle2:
582 with peer.commandexecutor() as e:
582 with peer.commandexecutor() as e:
583 b2 = e.callcommand(
583 b2 = e.callcommand(
584 b'getbundle',
584 b'getbundle',
585 {
585 {
586 b'source': b'incoming',
586 b'source': b'incoming',
587 b'common': common,
587 b'common': common,
588 b'heads': rheads,
588 b'heads': rheads,
589 b'bundlecaps': exchange.caps20to10(
589 b'bundlecaps': exchange.caps20to10(
590 repo, role=b'client'
590 repo, role=b'client'
591 ),
591 ),
592 b'cg': True,
592 b'cg': True,
593 },
593 },
594 ).result()
594 ).result()
595
595
596 fname = bundle = changegroup.writechunks(
596 fname = bundle = changegroup.writechunks(
597 ui, b2._forwardchunks(), bundlename
597 ui, b2._forwardchunks(), bundlename
598 )
598 )
599 else:
599 else:
600 if peer.capable(b'getbundle'):
600 if peer.capable(b'getbundle'):
601 with peer.commandexecutor() as e:
601 with peer.commandexecutor() as e:
602 cg = e.callcommand(
602 cg = e.callcommand(
603 b'getbundle',
603 b'getbundle',
604 {
604 {
605 b'source': b'incoming',
605 b'source': b'incoming',
606 b'common': common,
606 b'common': common,
607 b'heads': rheads,
607 b'heads': rheads,
608 },
608 },
609 ).result()
609 ).result()
610 elif onlyheads is None and not peer.capable(b'changegroupsubset'):
610 elif onlyheads is None and not peer.capable(b'changegroupsubset'):
611 # compat with older servers when pulling all remote heads
611 # compat with older servers when pulling all remote heads
612
612
613 with peer.commandexecutor() as e:
613 with peer.commandexecutor() as e:
614 cg = e.callcommand(
614 cg = e.callcommand(
615 b'changegroup',
615 b'changegroup',
616 {
616 {
617 b'nodes': incoming,
617 b'nodes': incoming,
618 b'source': b'incoming',
618 b'source': b'incoming',
619 },
619 },
620 ).result()
620 ).result()
621
621
622 rheads = None
622 rheads = None
623 else:
623 else:
624 with peer.commandexecutor() as e:
624 with peer.commandexecutor() as e:
625 cg = e.callcommand(
625 cg = e.callcommand(
626 b'changegroupsubset',
626 b'changegroupsubset',
627 {
627 {
628 b'bases': incoming,
628 b'bases': incoming,
629 b'heads': rheads,
629 b'heads': rheads,
630 b'source': b'incoming',
630 b'source': b'incoming',
631 },
631 },
632 ).result()
632 ).result()
633
633
634 if localrepo:
634 if localrepo:
635 bundletype = b"HG10BZ"
635 bundletype = b"HG10BZ"
636 else:
636 else:
637 bundletype = b"HG10UN"
637 bundletype = b"HG10UN"
638 fname = bundle = bundle2.writebundle(ui, cg, bundlename, bundletype)
638 fname = bundle = bundle2.writebundle(ui, cg, bundlename, bundletype)
639 # keep written bundle?
639 # keep written bundle?
640 if bundlename:
640 if bundlename:
641 bundle = None
641 bundle = None
642 if not localrepo:
642 if not localrepo:
643 # use the created uncompressed bundlerepo
643 # use the created uncompressed bundlerepo
644 localrepo = bundlerepo = makebundlerepository(
644 localrepo = bundlerepo = makebundlerepository(
645 repo.baseui, repo.root, fname
645 repo.baseui, repo.root, fname
646 )
646 )
647
647
648 # this repo contains local and peer now, so filter out local again
648 # this repo contains local and peer now, so filter out local again
649 common = repo.heads()
649 common = repo.heads()
650 if localrepo:
650 if localrepo:
651 # Part of common may be remotely filtered
651 # Part of common may be remotely filtered
652 # So use an unfiltered version
652 # So use an unfiltered version
653 # The discovery process probably needs cleanup to avoid that
653 # The discovery process probably needs cleanup to avoid that
654 localrepo = localrepo.unfiltered()
654 localrepo = localrepo.unfiltered()
655
655
656 csets = localrepo.changelog.findmissing(common, rheads)
656 csets = localrepo.changelog.findmissing(common, rheads)
657
657
658 if bundlerepo:
658 if bundlerepo:
659 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev :]]
659 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev :]]
660
660
661 with peer.commandexecutor() as e:
661 with peer.commandexecutor() as e:
662 remotephases = e.callcommand(
662 remotephases = e.callcommand(
663 b'listkeys',
663 b'listkeys',
664 {
664 {
665 b'namespace': b'phases',
665 b'namespace': b'phases',
666 },
666 },
667 ).result()
667 ).result()
668
668
669 pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes)
669 pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes)
670 pullop.trmanager = bundletransactionmanager()
670 pullop.trmanager = bundletransactionmanager()
671 exchange._pullapplyphases(pullop, remotephases)
671 exchange._pullapplyphases(pullop, remotephases)
672
672
673 def cleanup():
673 def cleanup():
674 if bundlerepo:
674 if bundlerepo:
675 bundlerepo.close()
675 bundlerepo.close()
676 if bundle:
676 if bundle:
677 os.unlink(bundle)
677 os.unlink(bundle)
678 peer.close()
678 peer.close()
679
679
680 return (localrepo, csets, cleanup)
680 return (localrepo, csets, cleanup)
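
Per the docstring above, getremotechanges() returns a repo-like object for the incoming changes, the incoming changeset nodes, and a cleanup callable that must always run. A hedged sketch of how a caller might honour that contract; the peer object is assumed to have been opened elsewhere, and the helper name is made up:

def listincoming(ui, repo, peer):
    # Sketch only: walk the incoming changesets, then always release the
    # temporary bundle repository and the peer via the returned cleanup.
    other, csets, cleanupfn = getremotechanges(ui, repo, peer)
    try:
        for node in csets:
            ui.write(b'%s\n' % other[node].hex())
    finally:
        cleanupfn()
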
@@ -1,1710 +1,1784 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import weakref
12 import weakref
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 hex,
16 hex,
17 nullid,
17 nullid,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21 from .pycompat import open
21 from .pycompat import open
22
22
23 from . import (
23 from . import (
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 mdiff,
26 mdiff,
27 phases,
27 phases,
28 pycompat,
28 pycompat,
29 requirements,
29 requirements,
30 scmutil,
30 scmutil,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import repository
34 from .interfaces import repository
35 from .revlogutils import sidedata as sidedatamod
35
36
36 _CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s")
37 _CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s")
37 _CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s")
38 _CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s")
38 _CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH")
39 _CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH")
39
40
40 LFS_REQUIREMENT = b'lfs'
41 LFS_REQUIREMENT = b'lfs'
41
42
42 readexactly = util.readexactly
43 readexactly = util.readexactly
43
44
44
45
45 def getchunk(stream):
46 def getchunk(stream):
46 """return the next chunk from stream as a string"""
47 """return the next chunk from stream as a string"""
47 d = readexactly(stream, 4)
48 d = readexactly(stream, 4)
48 l = struct.unpack(b">l", d)[0]
49 l = struct.unpack(b">l", d)[0]
49 if l <= 4:
50 if l <= 4:
50 if l:
51 if l:
51 raise error.Abort(_(b"invalid chunk length %d") % l)
52 raise error.Abort(_(b"invalid chunk length %d") % l)
52 return b""
53 return b""
53 return readexactly(stream, l - 4)
54 return readexactly(stream, l - 4)
54
55
55
56
56 def chunkheader(length):
57 def chunkheader(length):
57 """return a changegroup chunk header (string)"""
58 """return a changegroup chunk header (string)"""
58 return struct.pack(b">l", length + 4)
59 return struct.pack(b">l", length + 4)
59
60
60
61
61 def closechunk():
62 def closechunk():
62 """return a changegroup chunk header (string) for a zero-length chunk"""
63 """return a changegroup chunk header (string) for a zero-length chunk"""
63 return struct.pack(b">l", 0)
64 return struct.pack(b">l", 0)
64
65
65
66
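
getchunk(), chunkheader() and closechunk() above define the entire chunk framing: a 4-byte big-endian length that counts itself, the payload, and a zero length marking the end of a group. A standalone round-trip sketch of that framing:

import io
import struct


def writechunk(out, data):
    # The length prefix includes its own four bytes, as in chunkheader().
    out.write(struct.pack(">l", len(data) + 4))
    out.write(data)


def readchunks(stream):
    # As in getchunk(): a length of four or less means an empty chunk.
    while True:
        length = struct.unpack(">l", stream.read(4))[0]
        if length <= 4:
            return
        yield stream.read(length - 4)


buf = io.BytesIO()
writechunk(buf, b'first')
writechunk(buf, b'second')
buf.write(struct.pack(">l", 0))  # closechunk()
buf.seek(0)
assert list(readchunks(buf)) == [b'first', b'second']
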
66 def _fileheader(path):
67 def _fileheader(path):
67 """Obtain a changegroup chunk header for a named path."""
68 """Obtain a changegroup chunk header for a named path."""
68 return chunkheader(len(path)) + path
69 return chunkheader(len(path)) + path
69
70
70
71
71 def writechunks(ui, chunks, filename, vfs=None):
72 def writechunks(ui, chunks, filename, vfs=None):
72 """Write chunks to a file and return its filename.
73 """Write chunks to a file and return its filename.
73
74
74 The stream is assumed to be a bundle file.
75 The stream is assumed to be a bundle file.
75 Existing files will not be overwritten.
76 Existing files will not be overwritten.
76 If no filename is specified, a temporary file is created.
77 If no filename is specified, a temporary file is created.
77 """
78 """
78 fh = None
79 fh = None
79 cleanup = None
80 cleanup = None
80 try:
81 try:
81 if filename:
82 if filename:
82 if vfs:
83 if vfs:
83 fh = vfs.open(filename, b"wb")
84 fh = vfs.open(filename, b"wb")
84 else:
85 else:
85 # Increase default buffer size because default is usually
86 # Increase default buffer size because default is usually
86 # small (4k is common on Linux).
87 # small (4k is common on Linux).
87 fh = open(filename, b"wb", 131072)
88 fh = open(filename, b"wb", 131072)
88 else:
89 else:
89 fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
90 fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
90 fh = os.fdopen(fd, "wb")
91 fh = os.fdopen(fd, "wb")
91 cleanup = filename
92 cleanup = filename
92 for c in chunks:
93 for c in chunks:
93 fh.write(c)
94 fh.write(c)
94 cleanup = None
95 cleanup = None
95 return filename
96 return filename
96 finally:
97 finally:
97 if fh is not None:
98 if fh is not None:
98 fh.close()
99 fh.close()
99 if cleanup is not None:
100 if cleanup is not None:
100 if filename and vfs:
101 if filename and vfs:
101 vfs.unlink(cleanup)
102 vfs.unlink(cleanup)
102 else:
103 else:
103 os.unlink(cleanup)
104 os.unlink(cleanup)
104
105
105
106
106 class cg1unpacker(object):
107 class cg1unpacker(object):
107 """Unpacker for cg1 changegroup streams.
108 """Unpacker for cg1 changegroup streams.
108
109
109 A changegroup unpacker handles the framing of the revision data in
110 A changegroup unpacker handles the framing of the revision data in
110 the wire format. Most consumers will want to use the apply()
111 the wire format. Most consumers will want to use the apply()
111 method to add the changes from the changegroup to a repository.
112 method to add the changes from the changegroup to a repository.
112
113
113 If you're forwarding a changegroup unmodified to another consumer,
114 If you're forwarding a changegroup unmodified to another consumer,
114 use getchunks(), which returns an iterator of changegroup
115 use getchunks(), which returns an iterator of changegroup
115 chunks. This is mostly useful for cases where you need to know the
116 chunks. This is mostly useful for cases where you need to know the
116 data stream has ended by observing the end of the changegroup.
117 data stream has ended by observing the end of the changegroup.
117
118
118 deltachunk() is useful only if you're applying delta data. Most
119 deltachunk() is useful only if you're applying delta data. Most
119 consumers should prefer apply() instead.
120 consumers should prefer apply() instead.
120
121
121 A few other public methods exist. Those are used only for
122 A few other public methods exist. Those are used only for
122 bundlerepo and some debug commands - their use is discouraged.
123 bundlerepo and some debug commands - their use is discouraged.
123 """
124 """
124
125
125 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
126 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
126 deltaheadersize = deltaheader.size
127 deltaheadersize = deltaheader.size
127 version = b'01'
128 version = b'01'
128 _grouplistcount = 1 # One list of files after the manifests
129 _grouplistcount = 1 # One list of files after the manifests
129
130
130 def __init__(self, fh, alg, extras=None):
131 def __init__(self, fh, alg, extras=None):
131 if alg is None:
132 if alg is None:
132 alg = b'UN'
133 alg = b'UN'
133 if alg not in util.compengines.supportedbundletypes:
134 if alg not in util.compengines.supportedbundletypes:
134 raise error.Abort(_(b'unknown stream compression type: %s') % alg)
135 raise error.Abort(_(b'unknown stream compression type: %s') % alg)
135 if alg == b'BZ':
136 if alg == b'BZ':
136 alg = b'_truncatedBZ'
137 alg = b'_truncatedBZ'
137
138
138 compengine = util.compengines.forbundletype(alg)
139 compengine = util.compengines.forbundletype(alg)
139 self._stream = compengine.decompressorreader(fh)
140 self._stream = compengine.decompressorreader(fh)
140 self._type = alg
141 self._type = alg
141 self.extras = extras or {}
142 self.extras = extras or {}
142 self.callback = None
143 self.callback = None
143
144
144 # These methods (compressed, read, seek, tell) all appear to only
145 # These methods (compressed, read, seek, tell) all appear to only
145 # be used by bundlerepo, but it's a little hard to tell.
146 # be used by bundlerepo, but it's a little hard to tell.
146 def compressed(self):
147 def compressed(self):
147 return self._type is not None and self._type != b'UN'
148 return self._type is not None and self._type != b'UN'
148
149
149 def read(self, l):
150 def read(self, l):
150 return self._stream.read(l)
151 return self._stream.read(l)
151
152
152 def seek(self, pos):
153 def seek(self, pos):
153 return self._stream.seek(pos)
154 return self._stream.seek(pos)
154
155
155 def tell(self):
156 def tell(self):
156 return self._stream.tell()
157 return self._stream.tell()
157
158
158 def close(self):
159 def close(self):
159 return self._stream.close()
160 return self._stream.close()
160
161
161 def _chunklength(self):
162 def _chunklength(self):
162 d = readexactly(self._stream, 4)
163 d = readexactly(self._stream, 4)
163 l = struct.unpack(b">l", d)[0]
164 l = struct.unpack(b">l", d)[0]
164 if l <= 4:
165 if l <= 4:
165 if l:
166 if l:
166 raise error.Abort(_(b"invalid chunk length %d") % l)
167 raise error.Abort(_(b"invalid chunk length %d") % l)
167 return 0
168 return 0
168 if self.callback:
169 if self.callback:
169 self.callback()
170 self.callback()
170 return l - 4
171 return l - 4
171
172
172 def changelogheader(self):
173 def changelogheader(self):
173 """v10 does not have a changelog header chunk"""
174 """v10 does not have a changelog header chunk"""
174 return {}
175 return {}
175
176
176 def manifestheader(self):
177 def manifestheader(self):
177 """v10 does not have a manifest header chunk"""
178 """v10 does not have a manifest header chunk"""
178 return {}
179 return {}
179
180
180 def filelogheader(self):
181 def filelogheader(self):
181 """return the header of the filelogs chunk, v10 only has the filename"""
182 """return the header of the filelogs chunk, v10 only has the filename"""
182 l = self._chunklength()
183 l = self._chunklength()
183 if not l:
184 if not l:
184 return {}
185 return {}
185 fname = readexactly(self._stream, l)
186 fname = readexactly(self._stream, l)
186 return {b'filename': fname}
187 return {b'filename': fname}
187
188
188 def _deltaheader(self, headertuple, prevnode):
189 def _deltaheader(self, headertuple, prevnode):
189 node, p1, p2, cs = headertuple
190 node, p1, p2, cs = headertuple
190 if prevnode is None:
191 if prevnode is None:
191 deltabase = p1
192 deltabase = p1
192 else:
193 else:
193 deltabase = prevnode
194 deltabase = prevnode
194 flags = 0
195 flags = 0
195 return node, p1, p2, deltabase, cs, flags
196 return node, p1, p2, deltabase, cs, flags
196
197
197 def deltachunk(self, prevnode):
198 def deltachunk(self, prevnode):
198 l = self._chunklength()
199 l = self._chunklength()
199 if not l:
200 if not l:
200 return {}
201 return {}
201 headerdata = readexactly(self._stream, self.deltaheadersize)
202 headerdata = readexactly(self._stream, self.deltaheadersize)
202 header = self.deltaheader.unpack(headerdata)
203 header = self.deltaheader.unpack(headerdata)
203 delta = readexactly(self._stream, l - self.deltaheadersize)
204 delta = readexactly(self._stream, l - self.deltaheadersize)
204 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
205 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
205 return (node, p1, p2, cs, deltabase, delta, flags)
206 # cg4 forward-compat
207 sidedata = {}
208 return (node, p1, p2, cs, deltabase, delta, flags, sidedata)
206
209
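
deltachunk() above reads one framed chunk, unpacks the fixed header described by _CHANGEGROUPV1_DELTA_HEADER (four 20-byte nodes: node, p1, p2, linknode) and treats the rest as the delta; per _deltaheader(), the delta base is p1 for the first entry and the previous node afterwards. A standalone sketch of reading one cg1-style delta group from a byte stream:

import struct

CG1_HEADER = struct.Struct("20s20s20s20s")  # node, p1, p2, linknode


def itercg1deltas(stream):
    """Toy reader for a single cg1 delta group (a sketch, not the real API).

    Each chunk is a 4-byte length counting itself, the 80-byte header, then
    the delta payload; an empty chunk ends the group.
    """
    prevnode = None
    while True:
        length = struct.unpack(">l", stream.read(4))[0]
        if length <= 4:
            return
        header = stream.read(CG1_HEADER.size)
        node, p1, p2, linknode = CG1_HEADER.unpack(header)
        delta = stream.read(length - 4 - CG1_HEADER.size)
        deltabase = p1 if prevnode is None else prevnode
        yield node, p1, p2, linknode, deltabase, delta
        prevnode = node
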
207 def getchunks(self):
210 def getchunks(self):
208 """returns all the chunks contains in the bundle
211 """returns all the chunks contains in the bundle
209
212
210 Used when you need to forward the binary stream to a file or another
213 Used when you need to forward the binary stream to a file or another
211 network API. To do so, it parses the changegroup data, otherwise it will
214 network API. To do so, it parses the changegroup data, otherwise it will
212 block in the case of sshrepo because it doesn't know the end of the stream.
215 block in the case of sshrepo because it doesn't know the end of the stream.
213 """
216 """
214 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
217 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
215 # and a list of filelogs. For changegroup 3, we expect 4 parts:
218 # and a list of filelogs. For changegroup 3, we expect 4 parts:
216 # changelog, manifestlog, a list of tree manifestlogs, and a list of
219 # changelog, manifestlog, a list of tree manifestlogs, and a list of
217 # filelogs.
220 # filelogs.
218 #
221 #
219 # Changelog and manifestlog parts are terminated with empty chunks. The
222 # Changelog and manifestlog parts are terminated with empty chunks. The
220 # tree and file parts are a list of entry sections. Each entry section
223 # tree and file parts are a list of entry sections. Each entry section
221 # is a series of chunks terminating in an empty chunk. The list of these
224 # is a series of chunks terminating in an empty chunk. The list of these
222 # entry sections is terminated in yet another empty chunk, so we know
225 # entry sections is terminated in yet another empty chunk, so we know
223 # we've reached the end of the tree/file list when we reach an empty
226 # we've reached the end of the tree/file list when we reach an empty
224 # chunk that was preceded by no non-empty chunks.
227 # chunk that was preceded by no non-empty chunks.
225
228
226 parts = 0
229 parts = 0
227 while parts < 2 + self._grouplistcount:
230 while parts < 2 + self._grouplistcount:
228 noentries = True
231 noentries = True
229 while True:
232 while True:
230 chunk = getchunk(self)
233 chunk = getchunk(self)
231 if not chunk:
234 if not chunk:
232 # The first two empty chunks represent the end of the
235 # The first two empty chunks represent the end of the
233 # changelog and the manifestlog portions. The remaining
236 # changelog and the manifestlog portions. The remaining
234 # empty chunks represent either A) the end of individual
237 # empty chunks represent either A) the end of individual
235 # tree or file entries in the file list, or B) the end of
238 # tree or file entries in the file list, or B) the end of
236 # the entire list. It's the end of the entire list if there
239 # the entire list. It's the end of the entire list if there
237 # were no entries (i.e. noentries is True).
240 # were no entries (i.e. noentries is True).
238 if parts < 2:
241 if parts < 2:
239 parts += 1
242 parts += 1
240 elif noentries:
243 elif noentries:
241 parts += 1
244 parts += 1
242 break
245 break
243 noentries = False
246 noentries = False
244 yield chunkheader(len(chunk))
247 yield chunkheader(len(chunk))
245 pos = 0
248 pos = 0
246 while pos < len(chunk):
249 while pos < len(chunk):
247 next = pos + 2 ** 20
250 next = pos + 2 ** 20
248 yield chunk[pos:next]
251 yield chunk[pos:next]
249 pos = next
252 pos = next
250 yield closechunk()
253 yield closechunk()
251
254
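A minimal sketch of the chunk framing assumed above: each chunk is preceded by a 4-byte big-endian length that includes the 4 header bytes themselves, and a zero-length header (closechunk) terminates a group. The helper names below are illustrative only, not part of this module.

import io
import struct

def write_chunk(out, data):
    # 4-byte big-endian length that counts the header bytes themselves
    out.write(struct.pack(">l", len(data) + 4))
    out.write(data)

def write_close(out):
    # an empty chunk: length 0, no payload; terminates a group
    out.write(struct.pack(">l", 0))

def read_chunks(stream):
    # yield payloads until the empty terminator chunk
    while True:
        (length,) = struct.unpack(">l", stream.read(4))
        if length <= 4:
            return
        yield stream.read(length - 4)

buf = io.BytesIO()
write_chunk(buf, b"first")
write_chunk(buf, b"second")
write_close(buf)
buf.seek(0)
assert list(read_chunks(buf)) == [b"first", b"second"]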
252 def _unpackmanifests(self, repo, revmap, trp, prog):
255 def _unpackmanifests(self, repo, revmap, trp, prog):
253 self.callback = prog.increment
256 self.callback = prog.increment
254 # no need to check for empty manifest group here:
257 # no need to check for empty manifest group here:
255 # if the result of the merge of 1 and 2 is the same in 3 and 4,
258 # if the result of the merge of 1 and 2 is the same in 3 and 4,
256 # no new manifest will be created and the manifest group will
259 # no new manifest will be created and the manifest group will
257 # be empty during the pull
260 # be empty during the pull
258 self.manifestheader()
261 self.manifestheader()
259 deltas = self.deltaiter()
262 deltas = self.deltaiter()
260 repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp)
263 repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp)
261 prog.complete()
264 prog.complete()
262 self.callback = None
265 self.callback = None
263
266
264 def apply(
267 def apply(
265 self,
268 self,
266 repo,
269 repo,
267 tr,
270 tr,
268 srctype,
271 srctype,
269 url,
272 url,
270 targetphase=phases.draft,
273 targetphase=phases.draft,
271 expectedtotal=None,
274 expectedtotal=None,
272 ):
275 ):
273 """Add the changegroup returned by source.read() to this repo.
276 """Add the changegroup returned by source.read() to this repo.
274 srctype is a string like 'push', 'pull', or 'unbundle'. url is
277 srctype is a string like 'push', 'pull', or 'unbundle'. url is
275 the URL of the repo where this changegroup is coming from.
278 the URL of the repo where this changegroup is coming from.
276
279
277 Return an integer summarizing the change to this repo:
280 Return an integer summarizing the change to this repo:
278 - nothing changed or no source: 0
281 - nothing changed or no source: 0
279 - more heads than before: 1+added heads (2..n)
282 - more heads than before: 1+added heads (2..n)
280 - fewer heads than before: -1-removed heads (-2..-n)
283 - fewer heads than before: -1-removed heads (-2..-n)
281 - number of heads stays the same: 1
284 - number of heads stays the same: 1
282 """
285 """
283 repo = repo.unfiltered()
286 repo = repo.unfiltered()
284
287
285 def csmap(x):
288 def csmap(x):
286 repo.ui.debug(b"add changeset %s\n" % short(x))
289 repo.ui.debug(b"add changeset %s\n" % short(x))
287 return len(cl)
290 return len(cl)
288
291
289 def revmap(x):
292 def revmap(x):
290 return cl.rev(x)
293 return cl.rev(x)
291
294
292 try:
295 try:
293 # The transaction may already carry source information. In this
296 # The transaction may already carry source information. In this
294 # case we use the top level data. We overwrite the argument
297 # case we use the top level data. We overwrite the argument
295 # because we need to use the top level value (if it exists)
298 # because we need to use the top level value (if it exists)
296 # in this function.
299 # in this function.
297 srctype = tr.hookargs.setdefault(b'source', srctype)
300 srctype = tr.hookargs.setdefault(b'source', srctype)
298 tr.hookargs.setdefault(b'url', url)
301 tr.hookargs.setdefault(b'url', url)
299 repo.hook(
302 repo.hook(
300 b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs)
303 b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs)
301 )
304 )
302
305
303 # write changelog data to temp files so concurrent readers
306 # write changelog data to temp files so concurrent readers
304 # will not see an inconsistent view
307 # will not see an inconsistent view
305 cl = repo.changelog
308 cl = repo.changelog
306 cl.delayupdate(tr)
309 cl.delayupdate(tr)
307 oldheads = set(cl.heads())
310 oldheads = set(cl.heads())
308
311
309 trp = weakref.proxy(tr)
312 trp = weakref.proxy(tr)
310 # pull off the changeset group
313 # pull off the changeset group
311 repo.ui.status(_(b"adding changesets\n"))
314 repo.ui.status(_(b"adding changesets\n"))
312 clstart = len(cl)
315 clstart = len(cl)
313 progress = repo.ui.makeprogress(
316 progress = repo.ui.makeprogress(
314 _(b'changesets'), unit=_(b'chunks'), total=expectedtotal
317 _(b'changesets'), unit=_(b'chunks'), total=expectedtotal
315 )
318 )
316 self.callback = progress.increment
319 self.callback = progress.increment
317
320
318 efilesset = set()
321 efilesset = set()
319 duprevs = []
322 duprevs = []
320
323
321 def ondupchangelog(cl, rev):
324 def ondupchangelog(cl, rev):
322 if rev < clstart:
325 if rev < clstart:
323 duprevs.append(rev)
326 duprevs.append(rev)
324
327
325 def onchangelog(cl, rev):
328 def onchangelog(cl, rev):
326 ctx = cl.changelogrevision(rev)
329 ctx = cl.changelogrevision(rev)
327 efilesset.update(ctx.files)
330 efilesset.update(ctx.files)
328 repo.register_changeset(rev, ctx)
331 repo.register_changeset(rev, ctx)
329
332
330 self.changelogheader()
333 self.changelogheader()
331 deltas = self.deltaiter()
334 deltas = self.deltaiter()
332 if not cl.addgroup(
335 if not cl.addgroup(
333 deltas,
336 deltas,
334 csmap,
337 csmap,
335 trp,
338 trp,
336 alwayscache=True,
339 alwayscache=True,
337 addrevisioncb=onchangelog,
340 addrevisioncb=onchangelog,
338 duplicaterevisioncb=ondupchangelog,
341 duplicaterevisioncb=ondupchangelog,
339 ):
342 ):
340 repo.ui.develwarn(
343 repo.ui.develwarn(
341 b'applied empty changelog from changegroup',
344 b'applied empty changelog from changegroup',
342 config=b'warn-empty-changegroup',
345 config=b'warn-empty-changegroup',
343 )
346 )
344 efiles = len(efilesset)
347 efiles = len(efilesset)
345 clend = len(cl)
348 clend = len(cl)
346 changesets = clend - clstart
349 changesets = clend - clstart
347 progress.complete()
350 progress.complete()
348 del deltas
351 del deltas
349 # TODO Python 2.7 removal
352 # TODO Python 2.7 removal
350 # del efilesset
353 # del efilesset
351 efilesset = None
354 efilesset = None
352 self.callback = None
355 self.callback = None
353
356
354 # pull off the manifest group
357 # pull off the manifest group
355 repo.ui.status(_(b"adding manifests\n"))
358 repo.ui.status(_(b"adding manifests\n"))
356 # We know that we'll never have more manifests than we had
359 # We know that we'll never have more manifests than we had
357 # changesets.
360 # changesets.
358 progress = repo.ui.makeprogress(
361 progress = repo.ui.makeprogress(
359 _(b'manifests'), unit=_(b'chunks'), total=changesets
362 _(b'manifests'), unit=_(b'chunks'), total=changesets
360 )
363 )
361 self._unpackmanifests(repo, revmap, trp, progress)
364 self._unpackmanifests(repo, revmap, trp, progress)
362
365
363 needfiles = {}
366 needfiles = {}
364 if repo.ui.configbool(b'server', b'validate'):
367 if repo.ui.configbool(b'server', b'validate'):
365 cl = repo.changelog
368 cl = repo.changelog
366 ml = repo.manifestlog
369 ml = repo.manifestlog
367 # validate incoming csets have their manifests
370 # validate incoming csets have their manifests
368 for cset in pycompat.xrange(clstart, clend):
371 for cset in pycompat.xrange(clstart, clend):
369 mfnode = cl.changelogrevision(cset).manifest
372 mfnode = cl.changelogrevision(cset).manifest
370 mfest = ml[mfnode].readdelta()
373 mfest = ml[mfnode].readdelta()
371 # store file nodes we must see
374 # store file nodes we must see
372 for f, n in pycompat.iteritems(mfest):
375 for f, n in pycompat.iteritems(mfest):
373 needfiles.setdefault(f, set()).add(n)
376 needfiles.setdefault(f, set()).add(n)
374
377
375 # process the files
378 # process the files
376 repo.ui.status(_(b"adding file changes\n"))
379 repo.ui.status(_(b"adding file changes\n"))
377 newrevs, newfiles = _addchangegroupfiles(
380 newrevs, newfiles = _addchangegroupfiles(
378 repo, self, revmap, trp, efiles, needfiles
381 repo, self, revmap, trp, efiles, needfiles
379 )
382 )
380
383
381 # making sure the value exists
384 # making sure the value exists
382 tr.changes.setdefault(b'changegroup-count-changesets', 0)
385 tr.changes.setdefault(b'changegroup-count-changesets', 0)
383 tr.changes.setdefault(b'changegroup-count-revisions', 0)
386 tr.changes.setdefault(b'changegroup-count-revisions', 0)
384 tr.changes.setdefault(b'changegroup-count-files', 0)
387 tr.changes.setdefault(b'changegroup-count-files', 0)
385 tr.changes.setdefault(b'changegroup-count-heads', 0)
388 tr.changes.setdefault(b'changegroup-count-heads', 0)
386
389
387 # some code uses bundle operations for internal purposes. They usually
390 # some code uses bundle operations for internal purposes. They usually
388 # set `ui.quiet` to do this out of the user's sight. Since the report
391 # set `ui.quiet` to do this out of the user's sight. Since the report
389 # of such operations now happens at the end of the transaction,
392 # of such operations now happens at the end of the transaction,
390 # ui.quiet has no direct effect on the output.
393 # ui.quiet has no direct effect on the output.
391 #
394 #
392 # To preserve this intent we use an inelegant hack: we fail to report
395 # To preserve this intent we use an inelegant hack: we fail to report
393 # the change if `quiet` is set. We should probably move to
396 # the change if `quiet` is set. We should probably move to
394 # something better, but this is a good first step to allow the "end
397 # something better, but this is a good first step to allow the "end
395 # of transaction report" to pass tests.
398 # of transaction report" to pass tests.
396 if not repo.ui.quiet:
399 if not repo.ui.quiet:
397 tr.changes[b'changegroup-count-changesets'] += changesets
400 tr.changes[b'changegroup-count-changesets'] += changesets
398 tr.changes[b'changegroup-count-revisions'] += newrevs
401 tr.changes[b'changegroup-count-revisions'] += newrevs
399 tr.changes[b'changegroup-count-files'] += newfiles
402 tr.changes[b'changegroup-count-files'] += newfiles
400
403
401 deltaheads = 0
404 deltaheads = 0
402 if oldheads:
405 if oldheads:
403 heads = cl.heads()
406 heads = cl.heads()
404 deltaheads += len(heads) - len(oldheads)
407 deltaheads += len(heads) - len(oldheads)
405 for h in heads:
408 for h in heads:
406 if h not in oldheads and repo[h].closesbranch():
409 if h not in oldheads and repo[h].closesbranch():
407 deltaheads -= 1
410 deltaheads -= 1
408
411
409 # see previous comment about checking ui.quiet
412 # see previous comment about checking ui.quiet
410 if not repo.ui.quiet:
413 if not repo.ui.quiet:
411 tr.changes[b'changegroup-count-heads'] += deltaheads
414 tr.changes[b'changegroup-count-heads'] += deltaheads
412 repo.invalidatevolatilesets()
415 repo.invalidatevolatilesets()
413
416
414 if changesets > 0:
417 if changesets > 0:
415 if b'node' not in tr.hookargs:
418 if b'node' not in tr.hookargs:
416 tr.hookargs[b'node'] = hex(cl.node(clstart))
419 tr.hookargs[b'node'] = hex(cl.node(clstart))
417 tr.hookargs[b'node_last'] = hex(cl.node(clend - 1))
420 tr.hookargs[b'node_last'] = hex(cl.node(clend - 1))
418 hookargs = dict(tr.hookargs)
421 hookargs = dict(tr.hookargs)
419 else:
422 else:
420 hookargs = dict(tr.hookargs)
423 hookargs = dict(tr.hookargs)
421 hookargs[b'node'] = hex(cl.node(clstart))
424 hookargs[b'node'] = hex(cl.node(clstart))
422 hookargs[b'node_last'] = hex(cl.node(clend - 1))
425 hookargs[b'node_last'] = hex(cl.node(clend - 1))
423 repo.hook(
426 repo.hook(
424 b'pretxnchangegroup',
427 b'pretxnchangegroup',
425 throw=True,
428 throw=True,
426 **pycompat.strkwargs(hookargs)
429 **pycompat.strkwargs(hookargs)
427 )
430 )
428
431
429 added = pycompat.xrange(clstart, clend)
432 added = pycompat.xrange(clstart, clend)
430 phaseall = None
433 phaseall = None
431 if srctype in (b'push', b'serve'):
434 if srctype in (b'push', b'serve'):
432 # Old servers can not push the boundary themselves.
435 # Old servers can not push the boundary themselves.
433 # New servers won't push the boundary if changeset already
436 # New servers won't push the boundary if changeset already
434 # exists locally as secret
437 # exists locally as secret
435 #
438 #
436 # We should not use `added` here, but rather the list of all changes
439 # We should not use `added` here, but rather the list of all changes
437 # in the bundle
440 # in the bundle
438 if repo.publishing():
441 if repo.publishing():
439 targetphase = phaseall = phases.public
442 targetphase = phaseall = phases.public
440 else:
443 else:
441 # closer target phase computation
444 # closer target phase computation
442
445
443 # Those changesets have been pushed from the
446 # Those changesets have been pushed from the
444 # outside, their phases are going to be pushed
447 # outside, their phases are going to be pushed
445 # alongside. Therefore `targetphase` is
448 # alongside. Therefore `targetphase` is
446 # ignored.
449 # ignored.
447 targetphase = phaseall = phases.draft
450 targetphase = phaseall = phases.draft
448 if added:
451 if added:
449 phases.registernew(repo, tr, targetphase, added)
452 phases.registernew(repo, tr, targetphase, added)
450 if phaseall is not None:
453 if phaseall is not None:
451 if duprevs:
454 if duprevs:
452 duprevs.extend(added)
455 duprevs.extend(added)
453 else:
456 else:
454 duprevs = added
457 duprevs = added
455 phases.advanceboundary(repo, tr, phaseall, [], revs=duprevs)
458 phases.advanceboundary(repo, tr, phaseall, [], revs=duprevs)
456 duprevs = []
459 duprevs = []
457
460
458 if changesets > 0:
461 if changesets > 0:
459
462
460 def runhooks(unused_success):
463 def runhooks(unused_success):
461 # These hooks run when the lock releases, not when the
464 # These hooks run when the lock releases, not when the
462 # transaction closes. So it's possible for the changelog
465 # transaction closes. So it's possible for the changelog
463 # to have changed since we last saw it.
466 # to have changed since we last saw it.
464 if clstart >= len(repo):
467 if clstart >= len(repo):
465 return
468 return
466
469
467 repo.hook(b"changegroup", **pycompat.strkwargs(hookargs))
470 repo.hook(b"changegroup", **pycompat.strkwargs(hookargs))
468
471
469 for rev in added:
472 for rev in added:
470 args = hookargs.copy()
473 args = hookargs.copy()
471 args[b'node'] = hex(cl.node(rev))
474 args[b'node'] = hex(cl.node(rev))
472 del args[b'node_last']
475 del args[b'node_last']
473 repo.hook(b"incoming", **pycompat.strkwargs(args))
476 repo.hook(b"incoming", **pycompat.strkwargs(args))
474
477
475 newheads = [h for h in repo.heads() if h not in oldheads]
478 newheads = [h for h in repo.heads() if h not in oldheads]
476 repo.ui.log(
479 repo.ui.log(
477 b"incoming",
480 b"incoming",
478 b"%d incoming changes - new heads: %s\n",
481 b"%d incoming changes - new heads: %s\n",
479 len(added),
482 len(added),
480 b', '.join([hex(c[:6]) for c in newheads]),
483 b', '.join([hex(c[:6]) for c in newheads]),
481 )
484 )
482
485
483 tr.addpostclose(
486 tr.addpostclose(
484 b'changegroup-runhooks-%020i' % clstart,
487 b'changegroup-runhooks-%020i' % clstart,
485 lambda tr: repo._afterlock(runhooks),
488 lambda tr: repo._afterlock(runhooks),
486 )
489 )
487 finally:
490 finally:
488 repo.ui.flush()
491 repo.ui.flush()
489 # never return 0 here:
492 # never return 0 here:
490 if deltaheads < 0:
493 if deltaheads < 0:
491 ret = deltaheads - 1
494 ret = deltaheads - 1
492 else:
495 else:
493 ret = deltaheads + 1
496 ret = deltaheads + 1
494 return ret
497 return ret
495
498
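The integer documented in apply()'s docstring encodes the head-count change; a small, hypothetical helper (not part of this module) shows how a caller could decode it.

def describe_apply_result(ret):
    # mirrors the docstring: 0 = nothing changed, 1 = head count unchanged,
    # 2..n = (n - 1) heads added, -2..-n = (n - 1) heads removed
    if ret == 0:
        return "nothing changed"
    if ret > 0:
        return "%d head(s) added" % (ret - 1)
    return "%d head(s) removed" % (-ret - 1)

assert describe_apply_result(0) == "nothing changed"
assert describe_apply_result(1) == "0 head(s) added"
assert describe_apply_result(3) == "2 head(s) added"
assert describe_apply_result(-2) == "1 head(s) removed"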
496 def deltaiter(self):
499 def deltaiter(self):
497 """
500 """
498 returns an iterator of the deltas in this changegroup
501 returns an iterator of the deltas in this changegroup
499
502
500 Useful for passing to the underlying storage system to be stored.
503 Useful for passing to the underlying storage system to be stored.
501 """
504 """
502 chain = None
505 chain = None
503 for chunkdata in iter(lambda: self.deltachunk(chain), {}):
506 for chunkdata in iter(lambda: self.deltachunk(chain), {}):
504 # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags, sidedata)
507 # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags, sidedata)
505 yield chunkdata
508 yield chunkdata
506 chain = chunkdata[0]
509 chain = chunkdata[0]
507
510
508
511
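deltaiter() threads each yielded node back into deltachunk() as prevnode; for version 1 streams, which carry no explicit delta base, that previous node serves as the implicit base. A toy sketch of the same chaining pattern, with made-up tuples:

def deltas_with_implicit_base(chunks):
    # chunks: iterable of (node, delta) pairs lacking an explicit base;
    # each delta is resolved against the previously seen node
    prevnode = None
    for node, delta in chunks:
        yield node, prevnode, delta
        prevnode = node

stream = [(b"n1", b"d1"), (b"n2", b"d2"), (b"n3", b"d3")]
assert list(deltas_with_implicit_base(stream))[1] == (b"n2", b"n1", b"d2")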
509 class cg2unpacker(cg1unpacker):
512 class cg2unpacker(cg1unpacker):
510 """Unpacker for cg2 streams.
513 """Unpacker for cg2 streams.
511
514
512 cg2 streams add support for generaldelta, so the delta header
515 cg2 streams add support for generaldelta, so the delta header
513 format is slightly different. All other features about the data
516 format is slightly different. All other features about the data
514 remain the same.
517 remain the same.
515 """
518 """
516
519
517 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
520 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
518 deltaheadersize = deltaheader.size
521 deltaheadersize = deltaheader.size
519 version = b'02'
522 version = b'02'
520
523
521 def _deltaheader(self, headertuple, prevnode):
524 def _deltaheader(self, headertuple, prevnode):
522 node, p1, p2, deltabase, cs = headertuple
525 node, p1, p2, deltabase, cs = headertuple
523 flags = 0
526 flags = 0
524 return node, p1, p2, deltabase, cs, flags
527 return node, p1, p2, deltabase, cs, flags
525
528
526
529
527 class cg3unpacker(cg2unpacker):
530 class cg3unpacker(cg2unpacker):
528 """Unpacker for cg3 streams.
531 """Unpacker for cg3 streams.
529
532
530 cg3 streams add support for exchanging treemanifests and revlog
533 cg3 streams add support for exchanging treemanifests and revlog
531 flags. It adds the revlog flags to the delta header and an empty chunk
534 flags. It adds the revlog flags to the delta header and an empty chunk
532 separating manifests and files.
535 separating manifests and files.
533 """
536 """
534
537
535 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
538 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
536 deltaheadersize = deltaheader.size
539 deltaheadersize = deltaheader.size
537 version = b'03'
540 version = b'03'
538 _grouplistcount = 2 # One list of manifests and one list of files
541 _grouplistcount = 2 # One list of manifests and one list of files
539
542
540 def _deltaheader(self, headertuple, prevnode):
543 def _deltaheader(self, headertuple, prevnode):
541 node, p1, p2, deltabase, cs, flags = headertuple
544 node, p1, p2, deltabase, cs, flags = headertuple
542 return node, p1, p2, deltabase, cs, flags
545 return node, p1, p2, deltabase, cs, flags
543
546
544 def _unpackmanifests(self, repo, revmap, trp, prog):
547 def _unpackmanifests(self, repo, revmap, trp, prog):
545 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
548 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
546 for chunkdata in iter(self.filelogheader, {}):
549 for chunkdata in iter(self.filelogheader, {}):
547 # If we get here, there are directory manifests in the changegroup
550 # If we get here, there are directory manifests in the changegroup
548 d = chunkdata[b"filename"]
551 d = chunkdata[b"filename"]
549 repo.ui.debug(b"adding %s revisions\n" % d)
552 repo.ui.debug(b"adding %s revisions\n" % d)
550 deltas = self.deltaiter()
553 deltas = self.deltaiter()
551 if not repo.manifestlog.getstorage(d).addgroup(deltas, revmap, trp):
554 if not repo.manifestlog.getstorage(d).addgroup(deltas, revmap, trp):
552 raise error.Abort(_(b"received dir revlog group is empty"))
555 raise error.Abort(_(b"received dir revlog group is empty"))
553
556
554
557
558 class cg4unpacker(cg3unpacker):
559 """Unpacker for cg4 streams.
560
561 cg4 streams add support for exchanging sidedata.
562 """
563
564 version = b'04'
565
566 def deltachunk(self, prevnode):
567 res = super(cg4unpacker, self).deltachunk(prevnode)
568 if not res:
569 return res
570
571 (node, p1, p2, cs, deltabase, delta, flags, _sidedata) = res
572
573 sidedata_raw = getchunk(self._stream)
574 sidedata = {}
575 if len(sidedata_raw) > 0:
576 sidedata = sidedatamod.deserialize_sidedata(sidedata_raw)
577
578 return node, p1, p2, cs, deltabase, delta, flags, sidedata
579
580
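Summarizing the unpacker hierarchy above (a sketch; the exact struct layouts are assumed from the field order used in _deltaheader): version 1 headers carry node/p1/p2/cs with the previous node as implicit delta base, version 2 adds an explicit deltabase, version 3 appends a 16-bit flags field, and version 4 keeps the version 3 header but follows every delta with one sidedata chunk (possibly empty).

import struct

# Assumed delta header layouts per changegroup version (20-byte binary nodes).
DELTA_HEADER = {
    b'01': struct.Struct(b">20s20s20s20s"),      # node p1 p2 cs; base = previous node
    b'02': struct.Struct(b">20s20s20s20s20s"),   # node p1 p2 deltabase cs
    b'03': struct.Struct(b">20s20s20s20s20sH"),  # ... plus revlog flags
    b'04': struct.Struct(b">20s20s20s20s20sH"),  # same header; a sidedata chunk follows
}

assert DELTA_HEADER[b'01'].size == 80
assert DELTA_HEADER[b'02'].size == 100
assert DELTA_HEADER[b'03'].size == DELTA_HEADER[b'04'].size == 102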
555 class headerlessfixup(object):
581 class headerlessfixup(object):
556 def __init__(self, fh, h):
582 def __init__(self, fh, h):
557 self._h = h
583 self._h = h
558 self._fh = fh
584 self._fh = fh
559
585
560 def read(self, n):
586 def read(self, n):
561 if self._h:
587 if self._h:
562 d, self._h = self._h[:n], self._h[n:]
588 d, self._h = self._h[:n], self._h[n:]
563 if len(d) < n:
589 if len(d) < n:
564 d += readexactly(self._fh, n - len(d))
590 d += readexactly(self._fh, n - len(d))
565 return d
591 return d
566 return readexactly(self._fh, n)
592 return readexactly(self._fh, n)
567
593
568
594
569 def _revisiondeltatochunks(delta, headerfn):
595 def _revisiondeltatochunks(delta, headerfn):
570 """Serialize a revisiondelta to changegroup chunks."""
596 """Serialize a revisiondelta to changegroup chunks."""
571
597
572 # The captured revision delta may be encoded as a delta against
598 # The captured revision delta may be encoded as a delta against
573 # a base revision or as a full revision. The changegroup format
599 # a base revision or as a full revision. The changegroup format
574 # requires that everything on the wire be deltas. So for full
600 # requires that everything on the wire be deltas. So for full
575 # revisions, we need to invent a header that says to rewrite
601 # revisions, we need to invent a header that says to rewrite
576 # data.
602 # data.
577
603
578 if delta.delta is not None:
604 if delta.delta is not None:
579 prefix, data = b'', delta.delta
605 prefix, data = b'', delta.delta
580 elif delta.basenode == nullid:
606 elif delta.basenode == nullid:
581 data = delta.revision
607 data = delta.revision
582 prefix = mdiff.trivialdiffheader(len(data))
608 prefix = mdiff.trivialdiffheader(len(data))
583 else:
609 else:
584 data = delta.revision
610 data = delta.revision
585 prefix = mdiff.replacediffheader(delta.baserevisionsize, len(data))
611 prefix = mdiff.replacediffheader(delta.baserevisionsize, len(data))
586
612
587 meta = headerfn(delta)
613 meta = headerfn(delta)
588
614
589 yield chunkheader(len(meta) + len(prefix) + len(data))
615 yield chunkheader(len(meta) + len(prefix) + len(data))
590 yield meta
616 yield meta
591 if prefix:
617 if prefix:
592 yield prefix
618 yield prefix
593 yield data
619 yield data
594
620
595
621
596 def _sortnodesellipsis(store, nodes, cl, lookup):
622 def _sortnodesellipsis(store, nodes, cl, lookup):
597 """Sort nodes for changegroup generation."""
623 """Sort nodes for changegroup generation."""
598 # Ellipses serving mode.
624 # Ellipses serving mode.
599 #
625 #
600 # In a perfect world, we'd generate better ellipsis-ified graphs
626 # In a perfect world, we'd generate better ellipsis-ified graphs
601 # for non-changelog revlogs. In practice, we haven't started doing
627 # for non-changelog revlogs. In practice, we haven't started doing
602 # that yet, so the resulting DAGs for the manifestlog and filelogs
628 # that yet, so the resulting DAGs for the manifestlog and filelogs
603 # are actually full of bogus parentage on all the ellipsis
629 # are actually full of bogus parentage on all the ellipsis
604 # nodes. This has the side effect that, while the contents are
630 # nodes. This has the side effect that, while the contents are
605 # correct, the individual DAGs might be completely out of whack in
631 # correct, the individual DAGs might be completely out of whack in
606 # a case like 882681bc3166 and its ancestors (back about 10
632 # a case like 882681bc3166 and its ancestors (back about 10
607 # revisions or so) in the main hg repo.
633 # revisions or so) in the main hg repo.
608 #
634 #
609 # The one invariant we *know* holds is that the new (potentially
635 # The one invariant we *know* holds is that the new (potentially
610 # bogus) DAG shape will be valid if we order the nodes in the
636 # bogus) DAG shape will be valid if we order the nodes in the
611 # order that they're introduced in dramatis personae by the
637 # order that they're introduced in dramatis personae by the
612 # changelog, so what we do is we sort the non-changelog histories
638 # changelog, so what we do is we sort the non-changelog histories
613 # by the order in which they are used by the changelog.
639 # by the order in which they are used by the changelog.
614 key = lambda n: cl.rev(lookup(n))
640 key = lambda n: cl.rev(lookup(n))
615 return sorted(nodes, key=key)
641 return sorted(nodes, key=key)
616
642
617
643
618 def _resolvenarrowrevisioninfo(
644 def _resolvenarrowrevisioninfo(
619 cl,
645 cl,
620 store,
646 store,
621 ischangelog,
647 ischangelog,
622 rev,
648 rev,
623 linkrev,
649 linkrev,
624 linknode,
650 linknode,
625 clrevtolocalrev,
651 clrevtolocalrev,
626 fullclnodes,
652 fullclnodes,
627 precomputedellipsis,
653 precomputedellipsis,
628 ):
654 ):
629 linkparents = precomputedellipsis[linkrev]
655 linkparents = precomputedellipsis[linkrev]
630
656
631 def local(clrev):
657 def local(clrev):
632 """Turn a changelog revnum into a local revnum.
658 """Turn a changelog revnum into a local revnum.
633
659
634 The ellipsis dag is stored as revnums on the changelog,
660 The ellipsis dag is stored as revnums on the changelog,
635 but when we're producing ellipsis entries for
661 but when we're producing ellipsis entries for
636 non-changelog revlogs, we need to turn those numbers into
662 non-changelog revlogs, we need to turn those numbers into
637 something local. This does that for us, and during the
663 something local. This does that for us, and during the
638 changelog sending phase will also expand the stored
664 changelog sending phase will also expand the stored
639 mappings as needed.
665 mappings as needed.
640 """
666 """
641 if clrev == nullrev:
667 if clrev == nullrev:
642 return nullrev
668 return nullrev
643
669
644 if ischangelog:
670 if ischangelog:
645 return clrev
671 return clrev
646
672
647 # Walk the ellipsis-ized changelog breadth-first looking for a
673 # Walk the ellipsis-ized changelog breadth-first looking for a
648 # change that has been linked from the current revlog.
674 # change that has been linked from the current revlog.
649 #
675 #
650 # For a flat manifest revlog only a single step should be necessary
676 # For a flat manifest revlog only a single step should be necessary
651 # as all relevant changelog entries are relevant to the flat
677 # as all relevant changelog entries are relevant to the flat
652 # manifest.
678 # manifest.
653 #
679 #
654 # For a filelog or tree manifest dirlog however not every changelog
680 # For a filelog or tree manifest dirlog however not every changelog
655 # entry will have been relevant, so we need to skip some changelog
681 # entry will have been relevant, so we need to skip some changelog
656 # nodes even after ellipsis-izing.
682 # nodes even after ellipsis-izing.
657 walk = [clrev]
683 walk = [clrev]
658 while walk:
684 while walk:
659 p = walk[0]
685 p = walk[0]
660 walk = walk[1:]
686 walk = walk[1:]
661 if p in clrevtolocalrev:
687 if p in clrevtolocalrev:
662 return clrevtolocalrev[p]
688 return clrevtolocalrev[p]
663 elif p in fullclnodes:
689 elif p in fullclnodes:
664 walk.extend([pp for pp in cl.parentrevs(p) if pp != nullrev])
690 walk.extend([pp for pp in cl.parentrevs(p) if pp != nullrev])
665 elif p in precomputedellipsis:
691 elif p in precomputedellipsis:
666 walk.extend(
692 walk.extend(
667 [pp for pp in precomputedellipsis[p] if pp != nullrev]
693 [pp for pp in precomputedellipsis[p] if pp != nullrev]
668 )
694 )
669 else:
695 else:
670 # In this case, we've got an ellipsis with parents
696 # In this case, we've got an ellipsis with parents
671 # outside the current bundle (likely an
697 # outside the current bundle (likely an
672 # incremental pull). We "know" that we can use the
698 # incremental pull). We "know" that we can use the
673 # value of this same revlog at whatever revision
699 # value of this same revlog at whatever revision
674 # is pointed to by linknode. "Know" is in scare
700 # is pointed to by linknode. "Know" is in scare
675 # quotes because I haven't done enough examination
701 # quotes because I haven't done enough examination
676 # of edge cases to convince myself this is really
702 # of edge cases to convince myself this is really
677 # a fact - it works for all the (admittedly
703 # a fact - it works for all the (admittedly
678 # thorough) cases in our testsuite, but I would be
704 # thorough) cases in our testsuite, but I would be
679 # somewhat unsurprised to find a case in the wild
705 # somewhat unsurprised to find a case in the wild
680 # where this breaks down a bit. That said, I don't
706 # where this breaks down a bit. That said, I don't
681 # know if it would hurt anything.
707 # know if it would hurt anything.
682 for i in pycompat.xrange(rev, 0, -1):
708 for i in pycompat.xrange(rev, 0, -1):
683 if store.linkrev(i) == clrev:
709 if store.linkrev(i) == clrev:
684 return i
710 return i
685 # We failed to resolve a parent for this node, so
711 # We failed to resolve a parent for this node, so
686 # we crash the changegroup construction.
712 # we crash the changegroup construction.
687 raise error.Abort(
713 raise error.Abort(
688 b"unable to resolve parent while packing '%s' %r"
714 b"unable to resolve parent while packing '%s' %r"
689 b' for changeset %r' % (store.indexfile, rev, clrev)
715 b' for changeset %r' % (store.indexfile, rev, clrev)
690 )
716 )
691
717
692 return nullrev
718 return nullrev
693
719
694 if not linkparents or (store.parentrevs(rev) == (nullrev, nullrev)):
720 if not linkparents or (store.parentrevs(rev) == (nullrev, nullrev)):
695 p1, p2 = nullrev, nullrev
721 p1, p2 = nullrev, nullrev
696 elif len(linkparents) == 1:
722 elif len(linkparents) == 1:
697 (p1,) = sorted(local(p) for p in linkparents)
723 (p1,) = sorted(local(p) for p in linkparents)
698 p2 = nullrev
724 p2 = nullrev
699 else:
725 else:
700 p1, p2 = sorted(local(p) for p in linkparents)
726 p1, p2 = sorted(local(p) for p in linkparents)
701
727
702 p1node, p2node = store.node(p1), store.node(p2)
728 p1node, p2node = store.node(p1), store.node(p2)
703
729
704 return p1node, p2node, linknode
730 return p1node, p2node, linknode
705
731
706
732
707 def deltagroup(
733 def deltagroup(
708 repo,
734 repo,
709 store,
735 store,
710 nodes,
736 nodes,
711 ischangelog,
737 ischangelog,
712 lookup,
738 lookup,
713 forcedeltaparentprev,
739 forcedeltaparentprev,
714 topic=None,
740 topic=None,
715 ellipses=False,
741 ellipses=False,
716 clrevtolocalrev=None,
742 clrevtolocalrev=None,
717 fullclnodes=None,
743 fullclnodes=None,
718 precomputedellipsis=None,
744 precomputedellipsis=None,
719 ):
745 ):
720 """Calculate deltas for a set of revisions.
746 """Calculate deltas for a set of revisions.
721
747
722 Is a generator of ``revisiondelta`` instances.
748 Is a generator of ``revisiondelta`` instances.
723
749
724 If topic is not None, progress detail will be generated using this
750 If topic is not None, progress detail will be generated using this
725 topic name (e.g. changesets, manifests, etc).
751 topic name (e.g. changesets, manifests, etc).
726 """
752 """
727 if not nodes:
753 if not nodes:
728 return
754 return
729
755
730 cl = repo.changelog
756 cl = repo.changelog
731
757
732 if ischangelog:
758 if ischangelog:
733 # `hg log` shows changesets in storage order. To preserve order
759 # `hg log` shows changesets in storage order. To preserve order
734 # across clones, send out changesets in storage order.
760 # across clones, send out changesets in storage order.
735 nodesorder = b'storage'
761 nodesorder = b'storage'
736 elif ellipses:
762 elif ellipses:
737 nodes = _sortnodesellipsis(store, nodes, cl, lookup)
763 nodes = _sortnodesellipsis(store, nodes, cl, lookup)
738 nodesorder = b'nodes'
764 nodesorder = b'nodes'
739 else:
765 else:
740 nodesorder = None
766 nodesorder = None
741
767
742 # Perform ellipses filtering and revision massaging. We do this before
768 # Perform ellipses filtering and revision massaging. We do this before
743 # emitrevisions() because a) filtering out revisions creates less work
769 # emitrevisions() because a) filtering out revisions creates less work
744 # for emitrevisions() b) dropping revisions would break emitrevisions()'s
770 # for emitrevisions() b) dropping revisions would break emitrevisions()'s
745 # assumptions about delta choices and we would possibly send a delta
771 # assumptions about delta choices and we would possibly send a delta
746 # referencing a missing base revision.
772 # referencing a missing base revision.
747 #
773 #
748 # Also, calling lookup() has side-effects with regards to populating
774 # Also, calling lookup() has side-effects with regards to populating
749 # data structures. If we don't call lookup() for each node or if we call
775 # data structures. If we don't call lookup() for each node or if we call
750 # lookup() after the first pass through each node, things can break -
776 # lookup() after the first pass through each node, things can break -
751 # possibly intermittently depending on the python hash seed! For that
777 # possibly intermittently depending on the python hash seed! For that
752 # reason, we store a mapping of all linknodes during the initial node
778 # reason, we store a mapping of all linknodes during the initial node
753 # pass rather than use lookup() on the output side.
779 # pass rather than use lookup() on the output side.
754 if ellipses:
780 if ellipses:
755 filtered = []
781 filtered = []
756 adjustedparents = {}
782 adjustedparents = {}
757 linknodes = {}
783 linknodes = {}
758
784
759 for node in nodes:
785 for node in nodes:
760 rev = store.rev(node)
786 rev = store.rev(node)
761 linknode = lookup(node)
787 linknode = lookup(node)
762 linkrev = cl.rev(linknode)
788 linkrev = cl.rev(linknode)
763 clrevtolocalrev[linkrev] = rev
789 clrevtolocalrev[linkrev] = rev
764
790
765 # If linknode is in fullclnodes, it means the corresponding
791 # If linknode is in fullclnodes, it means the corresponding
766 # changeset was a full changeset and is being sent unaltered.
792 # changeset was a full changeset and is being sent unaltered.
767 if linknode in fullclnodes:
793 if linknode in fullclnodes:
768 linknodes[node] = linknode
794 linknodes[node] = linknode
769
795
770 # If the corresponding changeset wasn't in the set computed
796 # If the corresponding changeset wasn't in the set computed
771 # as relevant to us, it should be dropped outright.
797 # as relevant to us, it should be dropped outright.
772 elif linkrev not in precomputedellipsis:
798 elif linkrev not in precomputedellipsis:
773 continue
799 continue
774
800
775 else:
801 else:
776 # We could probably do this later and avoid the dict
802 # We could probably do this later and avoid the dict
777 # holding state. But it likely doesn't matter.
803 # holding state. But it likely doesn't matter.
778 p1node, p2node, linknode = _resolvenarrowrevisioninfo(
804 p1node, p2node, linknode = _resolvenarrowrevisioninfo(
779 cl,
805 cl,
780 store,
806 store,
781 ischangelog,
807 ischangelog,
782 rev,
808 rev,
783 linkrev,
809 linkrev,
784 linknode,
810 linknode,
785 clrevtolocalrev,
811 clrevtolocalrev,
786 fullclnodes,
812 fullclnodes,
787 precomputedellipsis,
813 precomputedellipsis,
788 )
814 )
789
815
790 adjustedparents[node] = (p1node, p2node)
816 adjustedparents[node] = (p1node, p2node)
791 linknodes[node] = linknode
817 linknodes[node] = linknode
792
818
793 filtered.append(node)
819 filtered.append(node)
794
820
795 nodes = filtered
821 nodes = filtered
796
822
797 # We expect the first pass to be fast, so we only engage the progress
823 # We expect the first pass to be fast, so we only engage the progress
798 # meter for constructing the revision deltas.
824 # meter for constructing the revision deltas.
799 progress = None
825 progress = None
800 if topic is not None:
826 if topic is not None:
801 progress = repo.ui.makeprogress(
827 progress = repo.ui.makeprogress(
802 topic, unit=_(b'chunks'), total=len(nodes)
828 topic, unit=_(b'chunks'), total=len(nodes)
803 )
829 )
804
830
805 configtarget = repo.ui.config(b'devel', b'bundle.delta')
831 configtarget = repo.ui.config(b'devel', b'bundle.delta')
806 if configtarget not in (b'', b'p1', b'full'):
832 if configtarget not in (b'', b'p1', b'full'):
807 msg = _("""config "devel.bundle.delta" has unknown value: %s""")
833 msg = _("""config "devel.bundle.delta" has unknown value: %s""")
808 repo.ui.warn(msg % configtarget)
834 repo.ui.warn(msg % configtarget)
809
835
810 deltamode = repository.CG_DELTAMODE_STD
836 deltamode = repository.CG_DELTAMODE_STD
811 if forcedeltaparentprev:
837 if forcedeltaparentprev:
812 deltamode = repository.CG_DELTAMODE_PREV
838 deltamode = repository.CG_DELTAMODE_PREV
813 elif configtarget == b'p1':
839 elif configtarget == b'p1':
814 deltamode = repository.CG_DELTAMODE_P1
840 deltamode = repository.CG_DELTAMODE_P1
815 elif configtarget == b'full':
841 elif configtarget == b'full':
816 deltamode = repository.CG_DELTAMODE_FULL
842 deltamode = repository.CG_DELTAMODE_FULL
817
843
818 revisions = store.emitrevisions(
844 revisions = store.emitrevisions(
819 nodes,
845 nodes,
820 nodesorder=nodesorder,
846 nodesorder=nodesorder,
821 revisiondata=True,
847 revisiondata=True,
822 assumehaveparentrevisions=not ellipses,
848 assumehaveparentrevisions=not ellipses,
823 deltamode=deltamode,
849 deltamode=deltamode,
824 )
850 )
825
851
826 for i, revision in enumerate(revisions):
852 for i, revision in enumerate(revisions):
827 if progress:
853 if progress:
828 progress.update(i + 1)
854 progress.update(i + 1)
829
855
830 if ellipses:
856 if ellipses:
831 linknode = linknodes[revision.node]
857 linknode = linknodes[revision.node]
832
858
833 if revision.node in adjustedparents:
859 if revision.node in adjustedparents:
834 p1node, p2node = adjustedparents[revision.node]
860 p1node, p2node = adjustedparents[revision.node]
835 revision.p1node = p1node
861 revision.p1node = p1node
836 revision.p2node = p2node
862 revision.p2node = p2node
837 revision.flags |= repository.REVISION_FLAG_ELLIPSIS
863 revision.flags |= repository.REVISION_FLAG_ELLIPSIS
838
864
839 else:
865 else:
840 linknode = lookup(revision.node)
866 linknode = lookup(revision.node)
841
867
842 revision.linknode = linknode
868 revision.linknode = linknode
843 yield revision
869 yield revision
844
870
845 if progress:
871 if progress:
846 progress.complete()
872 progress.complete()
847
873
848
874
849 class cgpacker(object):
875 class cgpacker(object):
850 def __init__(
876 def __init__(
851 self,
877 self,
852 repo,
878 repo,
853 oldmatcher,
879 oldmatcher,
854 matcher,
880 matcher,
855 version,
881 version,
856 builddeltaheader,
882 builddeltaheader,
857 manifestsend,
883 manifestsend,
858 forcedeltaparentprev=False,
884 forcedeltaparentprev=False,
859 bundlecaps=None,
885 bundlecaps=None,
860 ellipses=False,
886 ellipses=False,
861 shallow=False,
887 shallow=False,
862 ellipsisroots=None,
888 ellipsisroots=None,
863 fullnodes=None,
889 fullnodes=None,
890 remote_sidedata=None,
864 ):
891 ):
865 """Given a source repo, construct a bundler.
892 """Given a source repo, construct a bundler.
866
893
867 oldmatcher is a matcher that matches on files the client already has.
894 oldmatcher is a matcher that matches on files the client already has.
868 These will not be included in the changegroup.
895 These will not be included in the changegroup.
869
896
870 matcher is a matcher that matches on files to include in the
897 matcher is a matcher that matches on files to include in the
871 changegroup. Used to facilitate sparse changegroups.
898 changegroup. Used to facilitate sparse changegroups.
872
899
873 forcedeltaparentprev indicates whether delta parents must be against
900 forcedeltaparentprev indicates whether delta parents must be against
874 the previous revision in a delta group. This should only be used for
901 the previous revision in a delta group. This should only be used for
875 compatibility with changegroup version 1.
902 compatibility with changegroup version 1.
876
903
877 builddeltaheader is a callable that constructs the header for a group
904 builddeltaheader is a callable that constructs the header for a group
878 delta.
905 delta.
879
906
880 manifestsend is a chunk to send after manifests have been fully emitted.
907 manifestsend is a chunk to send after manifests have been fully emitted.
881
908
882 ellipses indicates whether ellipsis serving mode is enabled.
909 ellipses indicates whether ellipsis serving mode is enabled.
883
910
884 bundlecaps is optional and can be used to specify the set of
911 bundlecaps is optional and can be used to specify the set of
885 capabilities which can be used to build the bundle. While bundlecaps is
912 capabilities which can be used to build the bundle. While bundlecaps is
886 unused in core Mercurial, extensions rely on this feature to communicate
913 unused in core Mercurial, extensions rely on this feature to communicate
887 capabilities to customize the changegroup packer.
914 capabilities to customize the changegroup packer.
888
915
889 shallow indicates whether shallow data might be sent. The packer may
916 shallow indicates whether shallow data might be sent. The packer may
890 need to pack file contents not introduced by the changes being packed.
917 need to pack file contents not introduced by the changes being packed.
891
918
892 fullnodes is the set of changelog nodes which should not be ellipsis
919 fullnodes is the set of changelog nodes which should not be ellipsis
893 nodes. We store this rather than the set of nodes that should be
920 nodes. We store this rather than the set of nodes that should be
894 ellipsis because for very large histories we expect this to be
921 ellipsis because for very large histories we expect this to be
895 significantly smaller.
922 significantly smaller.
923
924 remote_sidedata is the set of sidedata categories wanted by the remote.
896 """
925 """
897 assert oldmatcher
926 assert oldmatcher
898 assert matcher
927 assert matcher
899 self._oldmatcher = oldmatcher
928 self._oldmatcher = oldmatcher
900 self._matcher = matcher
929 self._matcher = matcher
901
930
902 self.version = version
931 self.version = version
903 self._forcedeltaparentprev = forcedeltaparentprev
932 self._forcedeltaparentprev = forcedeltaparentprev
904 self._builddeltaheader = builddeltaheader
933 self._builddeltaheader = builddeltaheader
905 self._manifestsend = manifestsend
934 self._manifestsend = manifestsend
906 self._ellipses = ellipses
935 self._ellipses = ellipses
907
936
908 # Set of capabilities we can use to build the bundle.
937 # Set of capabilities we can use to build the bundle.
909 if bundlecaps is None:
938 if bundlecaps is None:
910 bundlecaps = set()
939 bundlecaps = set()
911 self._bundlecaps = bundlecaps
940 self._bundlecaps = bundlecaps
912 self._isshallow = shallow
941 self._isshallow = shallow
913 self._fullclnodes = fullnodes
942 self._fullclnodes = fullnodes
914
943
915 # Maps ellipsis revs to their roots at the changelog level.
944 # Maps ellipsis revs to their roots at the changelog level.
916 self._precomputedellipsis = ellipsisroots
945 self._precomputedellipsis = ellipsisroots
917
946
918 self._repo = repo
947 self._repo = repo
919
948
920 if self._repo.ui.verbose and not self._repo.ui.debugflag:
949 if self._repo.ui.verbose and not self._repo.ui.debugflag:
921 self._verbosenote = self._repo.ui.note
950 self._verbosenote = self._repo.ui.note
922 else:
951 else:
923 self._verbosenote = lambda s: None
952 self._verbosenote = lambda s: None
924
953
925 def generate(
954 def generate(
926 self, commonrevs, clnodes, fastpathlinkrev, source, changelog=True
955 self, commonrevs, clnodes, fastpathlinkrev, source, changelog=True
927 ):
956 ):
928 """Yield a sequence of changegroup byte chunks.
957 """Yield a sequence of changegroup byte chunks.
929 If changelog is False, changelog data won't be added to changegroup
958 If changelog is False, changelog data won't be added to changegroup
930 """
959 """
931
960
932 repo = self._repo
961 repo = self._repo
933 cl = repo.changelog
962 cl = repo.changelog
934
963
935 self._verbosenote(_(b'uncompressed size of bundle content:\n'))
964 self._verbosenote(_(b'uncompressed size of bundle content:\n'))
936 size = 0
965 size = 0
937
966
938 clstate, deltas = self._generatechangelog(
967 clstate, deltas = self._generatechangelog(
939 cl, clnodes, generate=changelog
968 cl, clnodes, generate=changelog
940 )
969 )
941 for delta in deltas:
970 for delta in deltas:
942 for chunk in _revisiondeltatochunks(delta, self._builddeltaheader):
971 for chunk in _revisiondeltatochunks(delta, self._builddeltaheader):
943 size += len(chunk)
972 size += len(chunk)
944 yield chunk
973 yield chunk
945
974
946 close = closechunk()
975 close = closechunk()
947 size += len(close)
976 size += len(close)
948 yield closechunk()
977 yield closechunk()
949
978
950 self._verbosenote(_(b'%8.i (changelog)\n') % size)
979 self._verbosenote(_(b'%8.i (changelog)\n') % size)
951
980
952 clrevorder = clstate[b'clrevorder']
981 clrevorder = clstate[b'clrevorder']
953 manifests = clstate[b'manifests']
982 manifests = clstate[b'manifests']
954 changedfiles = clstate[b'changedfiles']
983 changedfiles = clstate[b'changedfiles']
955
984
956 # We need to make sure that the linkrev in the changegroup refers to
985 # We need to make sure that the linkrev in the changegroup refers to
957 # the first changeset that introduced the manifest or file revision.
986 # the first changeset that introduced the manifest or file revision.
958 # The fastpath is usually safer than the slowpath, because the filelogs
987 # The fastpath is usually safer than the slowpath, because the filelogs
959 # are walked in revlog order.
988 # are walked in revlog order.
960 #
989 #
961 # When taking the slowpath when the manifest revlog uses generaldelta,
990 # When taking the slowpath when the manifest revlog uses generaldelta,
962 # the manifest may be walked in the "wrong" order. Without 'clrevorder',
991 # the manifest may be walked in the "wrong" order. Without 'clrevorder',
963 # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
992 # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
964 #
993 #
965 # When taking the fastpath, we are only vulnerable to reordering
994 # When taking the fastpath, we are only vulnerable to reordering
966 # of the changelog itself. The changelog never uses generaldelta and is
995 # of the changelog itself. The changelog never uses generaldelta and is
967 # never reordered. To handle this case, we simply take the slowpath,
996 # never reordered. To handle this case, we simply take the slowpath,
968 # which already has the 'clrevorder' logic. This was also fixed in
997 # which already has the 'clrevorder' logic. This was also fixed in
969 # cc0ff93d0c0c.
998 # cc0ff93d0c0c.
970
999
971 # Treemanifests don't work correctly with fastpathlinkrev
1000 # Treemanifests don't work correctly with fastpathlinkrev
972 # either, because we don't discover which directory nodes to
1001 # either, because we don't discover which directory nodes to
973 # send along with files. This could probably be fixed.
1002 # send along with files. This could probably be fixed.
974 fastpathlinkrev = fastpathlinkrev and not scmutil.istreemanifest(repo)
1003 fastpathlinkrev = fastpathlinkrev and not scmutil.istreemanifest(repo)
975
1004
976 fnodes = {} # needed file nodes
1005 fnodes = {} # needed file nodes
977
1006
978 size = 0
1007 size = 0
979 it = self.generatemanifests(
1008 it = self.generatemanifests(
980 commonrevs,
1009 commonrevs,
981 clrevorder,
1010 clrevorder,
982 fastpathlinkrev,
1011 fastpathlinkrev,
983 manifests,
1012 manifests,
984 fnodes,
1013 fnodes,
985 source,
1014 source,
986 clstate[b'clrevtomanifestrev'],
1015 clstate[b'clrevtomanifestrev'],
987 )
1016 )
988
1017
989 for tree, deltas in it:
1018 for tree, deltas in it:
990 if tree:
1019 if tree:
991 assert self.version == b'03'
1020 assert self.version in (b'03', b'04')
992 chunk = _fileheader(tree)
1021 chunk = _fileheader(tree)
993 size += len(chunk)
1022 size += len(chunk)
994 yield chunk
1023 yield chunk
995
1024
996 for delta in deltas:
1025 for delta in deltas:
997 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
1026 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
998 for chunk in chunks:
1027 for chunk in chunks:
999 size += len(chunk)
1028 size += len(chunk)
1000 yield chunk
1029 yield chunk
1001
1030
1002 close = closechunk()
1031 close = closechunk()
1003 size += len(close)
1032 size += len(close)
1004 yield close
1033 yield close
1005
1034
1006 self._verbosenote(_(b'%8.i (manifests)\n') % size)
1035 self._verbosenote(_(b'%8.i (manifests)\n') % size)
1007 yield self._manifestsend
1036 yield self._manifestsend
1008
1037
1009 mfdicts = None
1038 mfdicts = None
1010 if self._ellipses and self._isshallow:
1039 if self._ellipses and self._isshallow:
1011 mfdicts = [
1040 mfdicts = [
1012 (repo.manifestlog[n].read(), lr)
1041 (repo.manifestlog[n].read(), lr)
1013 for (n, lr) in pycompat.iteritems(manifests)
1042 for (n, lr) in pycompat.iteritems(manifests)
1014 ]
1043 ]
1015
1044
1016 manifests.clear()
1045 manifests.clear()
1017 clrevs = {cl.rev(x) for x in clnodes}
1046 clrevs = {cl.rev(x) for x in clnodes}
1018
1047
1019 it = self.generatefiles(
1048 it = self.generatefiles(
1020 changedfiles,
1049 changedfiles,
1021 commonrevs,
1050 commonrevs,
1022 source,
1051 source,
1023 mfdicts,
1052 mfdicts,
1024 fastpathlinkrev,
1053 fastpathlinkrev,
1025 fnodes,
1054 fnodes,
1026 clrevs,
1055 clrevs,
1027 )
1056 )
1028
1057
1029 for path, deltas in it:
1058 for path, deltas in it:
1030 h = _fileheader(path)
1059 h = _fileheader(path)
1031 size = len(h)
1060 size = len(h)
1032 yield h
1061 yield h
1033
1062
1034 for delta in deltas:
1063 for delta in deltas:
1035 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
1064 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
1036 for chunk in chunks:
1065 for chunk in chunks:
1037 size += len(chunk)
1066 size += len(chunk)
1038 yield chunk
1067 yield chunk
1039
1068
1040 close = closechunk()
1069 close = closechunk()
1041 size += len(close)
1070 size += len(close)
1042 yield close
1071 yield close
1043
1072
1044 self._verbosenote(_(b'%8.i %s\n') % (size, path))
1073 self._verbosenote(_(b'%8.i %s\n') % (size, path))
1045
1074
1046 yield closechunk()
1075 yield closechunk()
1047
1076
1048 if clnodes:
1077 if clnodes:
1049 repo.hook(b'outgoing', node=hex(clnodes[0]), source=source)
1078 repo.hook(b'outgoing', node=hex(clnodes[0]), source=source)
1050
1079
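Seen as a byte stream, generate() above emits the changelog group, an empty chunk, the manifest group (plus, for tree manifests, one named sub-group per directory and the manifestsend separator), then one named group per file, and a final empty chunk closing the file list. A toy sketch of that layout using the same length-prefix framing as the earlier example; the payloads are made up and not real changegroup data.

import io
import struct

def chunk(data):
    return struct.pack(">l", len(data) + 4) + data

CLOSE = struct.pack(">l", 0)

def toy_bundle(changelog, manifests, files):
    # changelog, manifests: lists of delta payloads; files: path -> payload list
    out = io.BytesIO()
    for delta in changelog:
        out.write(chunk(delta))
    out.write(CLOSE)                      # end of the changelog group
    for delta in manifests:
        out.write(chunk(delta))
    out.write(CLOSE)                      # end of the (root) manifest group
    for path, deltas in sorted(files.items()):
        out.write(chunk(path))            # file header opens a named group
        for delta in deltas:
            out.write(chunk(delta))
        out.write(CLOSE)                  # end of this file's group
    out.write(CLOSE)                      # end of the file list
    return out.getvalue()

data = toy_bundle([b"c1"], [b"m1"], {b"foo": [b"f1", b"f2"]})
assert data.endswith(CLOSE * 2)  # last file group close + file list close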
1051 def _generatechangelog(self, cl, nodes, generate=True):
1080 def _generatechangelog(self, cl, nodes, generate=True):
1052 """Generate data for changelog chunks.
1081 """Generate data for changelog chunks.
1053
1082
1054 Returns a 2-tuple of a dict containing state and an iterable of
1083 Returns a 2-tuple of a dict containing state and an iterable of
1055 revision deltas. The state will not be fully populated until the
1084 revision deltas. The state will not be fully populated until the
1056 delta stream has been fully consumed.
1085 delta stream has been fully consumed.
1057
1086
1058 If generate is False, the state will be fully populated and no delta
1087 If generate is False, the state will be fully populated and no delta
1059 stream will be yielded.
1088 stream will be yielded.
1060 """
1089 """
1061 clrevorder = {}
1090 clrevorder = {}
1062 manifests = {}
1091 manifests = {}
1063 mfl = self._repo.manifestlog
1092 mfl = self._repo.manifestlog
1064 changedfiles = set()
1093 changedfiles = set()
1065 clrevtomanifestrev = {}
1094 clrevtomanifestrev = {}
1066
1095
1067 state = {
1096 state = {
1068 b'clrevorder': clrevorder,
1097 b'clrevorder': clrevorder,
1069 b'manifests': manifests,
1098 b'manifests': manifests,
1070 b'changedfiles': changedfiles,
1099 b'changedfiles': changedfiles,
1071 b'clrevtomanifestrev': clrevtomanifestrev,
1100 b'clrevtomanifestrev': clrevtomanifestrev,
1072 }
1101 }
1073
1102
1074 if not (generate or self._ellipses):
1103 if not (generate or self._ellipses):
1075 # sort the nodes in storage order
1104 # sort the nodes in storage order
1076 nodes = sorted(nodes, key=cl.rev)
1105 nodes = sorted(nodes, key=cl.rev)
1077 for node in nodes:
1106 for node in nodes:
1078 c = cl.changelogrevision(node)
1107 c = cl.changelogrevision(node)
1079 clrevorder[node] = len(clrevorder)
1108 clrevorder[node] = len(clrevorder)
1080 # record the first changeset introducing this manifest version
1109 # record the first changeset introducing this manifest version
1081 manifests.setdefault(c.manifest, node)
1110 manifests.setdefault(c.manifest, node)
1082 # Record a complete list of potentially-changed files in
1111 # Record a complete list of potentially-changed files in
1083 # this manifest.
1112 # this manifest.
1084 changedfiles.update(c.files)
1113 changedfiles.update(c.files)
1085
1114
1086 return state, ()
1115 return state, ()
1087
1116
1088 # Callback for the changelog, used to collect changed files and
1117 # Callback for the changelog, used to collect changed files and
1089 # manifest nodes.
1118 # manifest nodes.
1090 # Returns the linkrev node (identity in the changelog case).
1119 # Returns the linkrev node (identity in the changelog case).
1091 def lookupcl(x):
1120 def lookupcl(x):
1092 c = cl.changelogrevision(x)
1121 c = cl.changelogrevision(x)
1093 clrevorder[x] = len(clrevorder)
1122 clrevorder[x] = len(clrevorder)
1094
1123
1095 if self._ellipses:
1124 if self._ellipses:
1096 # Only update manifests if x is going to be sent. Otherwise we
1125 # Only update manifests if x is going to be sent. Otherwise we
1097 # end up with bogus linkrevs specified for manifests and
1126 # end up with bogus linkrevs specified for manifests and
1098 # we skip some manifest nodes that we should otherwise
1127 # we skip some manifest nodes that we should otherwise
1099 # have sent.
1128 # have sent.
1100 if (
1129 if (
1101 x in self._fullclnodes
1130 x in self._fullclnodes
1102 or cl.rev(x) in self._precomputedellipsis
1131 or cl.rev(x) in self._precomputedellipsis
1103 ):
1132 ):
1104
1133
1105 manifestnode = c.manifest
1134 manifestnode = c.manifest
1106 # Record the first changeset introducing this manifest
1135 # Record the first changeset introducing this manifest
1107 # version.
1136 # version.
1108 manifests.setdefault(manifestnode, x)
1137 manifests.setdefault(manifestnode, x)
1109 # Set this narrow-specific dict so we have the lowest
1138 # Set this narrow-specific dict so we have the lowest
1110 # manifest revnum to look up for this cl revnum. (Part of
1139 # manifest revnum to look up for this cl revnum. (Part of
1111 # mapping changelog ellipsis parents to manifest ellipsis
1140 # mapping changelog ellipsis parents to manifest ellipsis
1112 # parents)
1141 # parents)
1113 clrevtomanifestrev.setdefault(
1142 clrevtomanifestrev.setdefault(
1114 cl.rev(x), mfl.rev(manifestnode)
1143 cl.rev(x), mfl.rev(manifestnode)
1115 )
1144 )
1116 # We can't trust the changed files list in the changeset if the
1145 # We can't trust the changed files list in the changeset if the
1117 # client requested a shallow clone.
1146 # client requested a shallow clone.
1118 if self._isshallow:
1147 if self._isshallow:
1119 changedfiles.update(mfl[c.manifest].read().keys())
1148 changedfiles.update(mfl[c.manifest].read().keys())
1120 else:
1149 else:
1121 changedfiles.update(c.files)
1150 changedfiles.update(c.files)
1122 else:
1151 else:
1123 # record the first changeset introducing this manifest version
1152 # record the first changeset introducing this manifest version
1124 manifests.setdefault(c.manifest, x)
1153 manifests.setdefault(c.manifest, x)
1125 # Record a complete list of potentially-changed files in
1154 # Record a complete list of potentially-changed files in
1126 # this manifest.
1155 # this manifest.
1127 changedfiles.update(c.files)
1156 changedfiles.update(c.files)
1128
1157
1129 return x
1158 return x
1130
1159
1131 gen = deltagroup(
1160 gen = deltagroup(
1132 self._repo,
1161 self._repo,
1133 cl,
1162 cl,
1134 nodes,
1163 nodes,
1135 True,
1164 True,
1136 lookupcl,
1165 lookupcl,
1137 self._forcedeltaparentprev,
1166 self._forcedeltaparentprev,
1138 ellipses=self._ellipses,
1167 ellipses=self._ellipses,
1139 topic=_(b'changesets'),
1168 topic=_(b'changesets'),
1140 clrevtolocalrev={},
1169 clrevtolocalrev={},
1141 fullclnodes=self._fullclnodes,
1170 fullclnodes=self._fullclnodes,
1142 precomputedellipsis=self._precomputedellipsis,
1171 precomputedellipsis=self._precomputedellipsis,
1143 )
1172 )
1144
1173
1145 return state, gen
1174 return state, gen
1146
1175
1147 def generatemanifests(
1176 def generatemanifests(
1148 self,
1177 self,
1149 commonrevs,
1178 commonrevs,
1150 clrevorder,
1179 clrevorder,
1151 fastpathlinkrev,
1180 fastpathlinkrev,
1152 manifests,
1181 manifests,
1153 fnodes,
1182 fnodes,
1154 source,
1183 source,
1155 clrevtolocalrev,
1184 clrevtolocalrev,
1156 ):
1185 ):
1157 """Returns an iterator of changegroup chunks containing manifests.
1186 """Returns an iterator of changegroup chunks containing manifests.
1158
1187
1159 `source` is unused here, but is used by extensions like remotefilelog to
1188 `source` is unused here, but is used by extensions like remotefilelog to
1160 change what is sent based on pulls vs pushes, etc.
1189 change what is sent based on pulls vs pushes, etc.
1161 """
1190 """
1162 repo = self._repo
1191 repo = self._repo
1163 mfl = repo.manifestlog
1192 mfl = repo.manifestlog
1164 tmfnodes = {b'': manifests}
1193 tmfnodes = {b'': manifests}
1165
1194
1166 # Callback for the manifest, used to collect linkrevs for filelog
1195 # Callback for the manifest, used to collect linkrevs for filelog
1167 # revisions.
1196 # revisions.
1168 # Returns the linkrev node (collected in lookupcl).
1197 # Returns the linkrev node (collected in lookupcl).
1169 def makelookupmflinknode(tree, nodes):
1198 def makelookupmflinknode(tree, nodes):
1170 if fastpathlinkrev:
1199 if fastpathlinkrev:
1171 assert not tree
1200 assert not tree
1172 return (
1201 return (
1173 manifests.__getitem__
1202 manifests.__getitem__
1174 ) # pytype: disable=unsupported-operands
1203 ) # pytype: disable=unsupported-operands
1175
1204
1176 def lookupmflinknode(x):
1205 def lookupmflinknode(x):
1177 """Callback for looking up the linknode for manifests.
1206 """Callback for looking up the linknode for manifests.
1178
1207
1179 Returns the linkrev node for the specified manifest.
1208 Returns the linkrev node for the specified manifest.
1180
1209
1181 SIDE EFFECT:
1210 SIDE EFFECT:
1182
1211
1183 1) fclnodes gets populated with the list of relevant
1212 1) fclnodes gets populated with the list of relevant
1184 file nodes if we're not using fastpathlinkrev
1213 file nodes if we're not using fastpathlinkrev
1185 2) When treemanifests are in use, collects treemanifest nodes
1214 2) When treemanifests are in use, collects treemanifest nodes
1186 to send
1215 to send
1187
1216
1188 Note that this means manifests must be completely sent to
1217 Note that this means manifests must be completely sent to
1189 the client before you can trust the list of files and
1218 the client before you can trust the list of files and
1190 treemanifests to send.
1219 treemanifests to send.
1191 """
1220 """
1192 clnode = nodes[x]
1221 clnode = nodes[x]
1193 mdata = mfl.get(tree, x).readfast(shallow=True)
1222 mdata = mfl.get(tree, x).readfast(shallow=True)
1194 for p, n, fl in mdata.iterentries():
1223 for p, n, fl in mdata.iterentries():
1195 if fl == b't': # subdirectory manifest
1224 if fl == b't': # subdirectory manifest
1196 subtree = tree + p + b'/'
1225 subtree = tree + p + b'/'
1197 tmfclnodes = tmfnodes.setdefault(subtree, {})
1226 tmfclnodes = tmfnodes.setdefault(subtree, {})
1198 tmfclnode = tmfclnodes.setdefault(n, clnode)
1227 tmfclnode = tmfclnodes.setdefault(n, clnode)
1199 if clrevorder[clnode] < clrevorder[tmfclnode]:
1228 if clrevorder[clnode] < clrevorder[tmfclnode]:
1200 tmfclnodes[n] = clnode
1229 tmfclnodes[n] = clnode
1201 else:
1230 else:
1202 f = tree + p
1231 f = tree + p
1203 fclnodes = fnodes.setdefault(f, {})
1232 fclnodes = fnodes.setdefault(f, {})
1204 fclnode = fclnodes.setdefault(n, clnode)
1233 fclnode = fclnodes.setdefault(n, clnode)
1205 if clrevorder[clnode] < clrevorder[fclnode]:
1234 if clrevorder[clnode] < clrevorder[fclnode]:
1206 fclnodes[n] = clnode
1235 fclnodes[n] = clnode
1207 return clnode
1236 return clnode
1208
1237
1209 return lookupmflinknode
1238 return lookupmflinknode
1210
1239
1211 while tmfnodes:
1240 while tmfnodes:
1212 tree, nodes = tmfnodes.popitem()
1241 tree, nodes = tmfnodes.popitem()
1213
1242
1214 should_visit = self._matcher.visitdir(tree[:-1])
1243 should_visit = self._matcher.visitdir(tree[:-1])
1215 if tree and not should_visit:
1244 if tree and not should_visit:
1216 continue
1245 continue
1217
1246
1218 store = mfl.getstorage(tree)
1247 store = mfl.getstorage(tree)
1219
1248
1220 if not should_visit:
1249 if not should_visit:
1221 # No nodes to send because this directory is out of
1250 # No nodes to send because this directory is out of
1222 # the client's view of the repository (probably
1251 # the client's view of the repository (probably
1223 # because of narrow clones). Do this even for the root
1252 # because of narrow clones). Do this even for the root
1224 # directory (tree=='')
1253 # directory (tree=='')
1225 prunednodes = []
1254 prunednodes = []
1226 else:
1255 else:
1227 # Avoid sending any manifest nodes we can prove the
1256 # Avoid sending any manifest nodes we can prove the
1228 # client already has by checking linkrevs. See the
1257 # client already has by checking linkrevs. See the
1229 # related comment in generatefiles().
1258 # related comment in generatefiles().
1230 prunednodes = self._prunemanifests(store, nodes, commonrevs)
1259 prunednodes = self._prunemanifests(store, nodes, commonrevs)
1231
1260
1232 if tree and not prunednodes:
1261 if tree and not prunednodes:
1233 continue
1262 continue
1234
1263
1235 lookupfn = makelookupmflinknode(tree, nodes)
1264 lookupfn = makelookupmflinknode(tree, nodes)
1236
1265
1237 deltas = deltagroup(
1266 deltas = deltagroup(
1238 self._repo,
1267 self._repo,
1239 store,
1268 store,
1240 prunednodes,
1269 prunednodes,
1241 False,
1270 False,
1242 lookupfn,
1271 lookupfn,
1243 self._forcedeltaparentprev,
1272 self._forcedeltaparentprev,
1244 ellipses=self._ellipses,
1273 ellipses=self._ellipses,
1245 topic=_(b'manifests'),
1274 topic=_(b'manifests'),
1246 clrevtolocalrev=clrevtolocalrev,
1275 clrevtolocalrev=clrevtolocalrev,
1247 fullclnodes=self._fullclnodes,
1276 fullclnodes=self._fullclnodes,
1248 precomputedellipsis=self._precomputedellipsis,
1277 precomputedellipsis=self._precomputedellipsis,
1249 )
1278 )
1250
1279
1251 if not self._oldmatcher.visitdir(store.tree[:-1]):
1280 if not self._oldmatcher.visitdir(store.tree[:-1]):
1252 yield tree, deltas
1281 yield tree, deltas
1253 else:
1282 else:
1254 # 'deltas' is a generator and we need to consume it even if
1283 # 'deltas' is a generator and we need to consume it even if
1255 # we are not going to send it because a side-effect is that
1284 # we are not going to send it because a side-effect is that
1256 # it updates tmfnodes (via lookupfn)
1285 # it updates tmfnodes (via lookupfn)
1257 for d in deltas:
1286 for d in deltas:
1258 pass
1287 pass
1259 if not tree:
1288 if not tree:
1260 yield tree, []
1289 yield tree, []
1261
1290
1262 def _prunemanifests(self, store, nodes, commonrevs):
1291 def _prunemanifests(self, store, nodes, commonrevs):
1263 if not self._ellipses:
1292 if not self._ellipses:
1264 # In the non-ellipses case and in large repositories, it is better to
1293 # In the non-ellipses case and in large repositories, it is better to
1265 # avoid calling store.rev and store.linkrev on a lot of
1294 # avoid calling store.rev and store.linkrev on a lot of
1266 # nodes than to send some extra data
1295 # nodes than to send some extra data
1267 return nodes.copy()
1296 return nodes.copy()
1268 # This is split out as a separate method to allow filtering
1297 # This is split out as a separate method to allow filtering
1269 # commonrevs in extension code.
1298 # commonrevs in extension code.
1270 #
1299 #
1271 # TODO(augie): this shouldn't be required, instead we should
1300 # TODO(augie): this shouldn't be required, instead we should
1272 # make filtering of revisions to send delegated to the store
1301 # make filtering of revisions to send delegated to the store
1273 # layer.
1302 # layer.
1274 frev, flr = store.rev, store.linkrev
1303 frev, flr = store.rev, store.linkrev
1275 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1304 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1276
1305
1277 # The 'source' parameter is useful for extensions
1306 # The 'source' parameter is useful for extensions
1278 def generatefiles(
1307 def generatefiles(
1279 self,
1308 self,
1280 changedfiles,
1309 changedfiles,
1281 commonrevs,
1310 commonrevs,
1282 source,
1311 source,
1283 mfdicts,
1312 mfdicts,
1284 fastpathlinkrev,
1313 fastpathlinkrev,
1285 fnodes,
1314 fnodes,
1286 clrevs,
1315 clrevs,
1287 ):
1316 ):
1288 changedfiles = [
1317 changedfiles = [
1289 f
1318 f
1290 for f in changedfiles
1319 for f in changedfiles
1291 if self._matcher(f) and not self._oldmatcher(f)
1320 if self._matcher(f) and not self._oldmatcher(f)
1292 ]
1321 ]
1293
1322
1294 if not fastpathlinkrev:
1323 if not fastpathlinkrev:
1295
1324
1296 def normallinknodes(unused, fname):
1325 def normallinknodes(unused, fname):
1297 return fnodes.get(fname, {})
1326 return fnodes.get(fname, {})
1298
1327
1299 else:
1328 else:
1300 cln = self._repo.changelog.node
1329 cln = self._repo.changelog.node
1301
1330
1302 def normallinknodes(store, fname):
1331 def normallinknodes(store, fname):
1303 flinkrev = store.linkrev
1332 flinkrev = store.linkrev
1304 fnode = store.node
1333 fnode = store.node
1305 revs = ((r, flinkrev(r)) for r in store)
1334 revs = ((r, flinkrev(r)) for r in store)
1306 return {fnode(r): cln(lr) for r, lr in revs if lr in clrevs}
1335 return {fnode(r): cln(lr) for r, lr in revs if lr in clrevs}
1307
1336
1308 clrevtolocalrev = {}
1337 clrevtolocalrev = {}
1309
1338
1310 if self._isshallow:
1339 if self._isshallow:
1311 # In a shallow clone, the linknodes callback needs to also include
1340 # In a shallow clone, the linknodes callback needs to also include
1312 # those file nodes that are in the manifests we sent but weren't
1341 # those file nodes that are in the manifests we sent but weren't
1313 # introduced by those manifests.
1342 # introduced by those manifests.
1314 commonctxs = [self._repo[c] for c in commonrevs]
1343 commonctxs = [self._repo[c] for c in commonrevs]
1315 clrev = self._repo.changelog.rev
1344 clrev = self._repo.changelog.rev
1316
1345
1317 def linknodes(flog, fname):
1346 def linknodes(flog, fname):
1318 for c in commonctxs:
1347 for c in commonctxs:
1319 try:
1348 try:
1320 fnode = c.filenode(fname)
1349 fnode = c.filenode(fname)
1321 clrevtolocalrev[c.rev()] = flog.rev(fnode)
1350 clrevtolocalrev[c.rev()] = flog.rev(fnode)
1322 except error.ManifestLookupError:
1351 except error.ManifestLookupError:
1323 pass
1352 pass
1324 links = normallinknodes(flog, fname)
1353 links = normallinknodes(flog, fname)
1325 if len(links) != len(mfdicts):
1354 if len(links) != len(mfdicts):
1326 for mf, lr in mfdicts:
1355 for mf, lr in mfdicts:
1327 fnode = mf.get(fname, None)
1356 fnode = mf.get(fname, None)
1328 if fnode in links:
1357 if fnode in links:
1329 links[fnode] = min(links[fnode], lr, key=clrev)
1358 links[fnode] = min(links[fnode], lr, key=clrev)
1330 elif fnode:
1359 elif fnode:
1331 links[fnode] = lr
1360 links[fnode] = lr
1332 return links
1361 return links
1333
1362
1334 else:
1363 else:
1335 linknodes = normallinknodes
1364 linknodes = normallinknodes
1336
1365
1337 repo = self._repo
1366 repo = self._repo
1338 progress = repo.ui.makeprogress(
1367 progress = repo.ui.makeprogress(
1339 _(b'files'), unit=_(b'files'), total=len(changedfiles)
1368 _(b'files'), unit=_(b'files'), total=len(changedfiles)
1340 )
1369 )
1341 for i, fname in enumerate(sorted(changedfiles)):
1370 for i, fname in enumerate(sorted(changedfiles)):
1342 filerevlog = repo.file(fname)
1371 filerevlog = repo.file(fname)
1343 if not filerevlog:
1372 if not filerevlog:
1344 raise error.Abort(
1373 raise error.Abort(
1345 _(b"empty or missing file data for %s") % fname
1374 _(b"empty or missing file data for %s") % fname
1346 )
1375 )
1347
1376
1348 clrevtolocalrev.clear()
1377 clrevtolocalrev.clear()
1349
1378
1350 linkrevnodes = linknodes(filerevlog, fname)
1379 linkrevnodes = linknodes(filerevlog, fname)
1351 # Lookup for filenodes; we collected the linkrev nodes above in the
1380 # Lookup for filenodes; we collected the linkrev nodes above in the
1352 # fastpath case and with lookupmf in the slowpath case.
1381 # fastpath case and with lookupmf in the slowpath case.
1353 def lookupfilelog(x):
1382 def lookupfilelog(x):
1354 return linkrevnodes[x]
1383 return linkrevnodes[x]
1355
1384
1356 frev, flr = filerevlog.rev, filerevlog.linkrev
1385 frev, flr = filerevlog.rev, filerevlog.linkrev
1357 # Skip sending any filenode we know the client already
1386 # Skip sending any filenode we know the client already
1358 # has. This avoids over-sending files relatively
1387 # has. This avoids over-sending files relatively
1359 # inexpensively, so it's not a problem if we under-filter
1388 # inexpensively, so it's not a problem if we under-filter
1360 # here.
1389 # here.
1361 filenodes = [
1390 filenodes = [
1362 n for n in linkrevnodes if flr(frev(n)) not in commonrevs
1391 n for n in linkrevnodes if flr(frev(n)) not in commonrevs
1363 ]
1392 ]
1364
1393
1365 if not filenodes:
1394 if not filenodes:
1366 continue
1395 continue
1367
1396
1368 progress.update(i + 1, item=fname)
1397 progress.update(i + 1, item=fname)
1369
1398
1370 deltas = deltagroup(
1399 deltas = deltagroup(
1371 self._repo,
1400 self._repo,
1372 filerevlog,
1401 filerevlog,
1373 filenodes,
1402 filenodes,
1374 False,
1403 False,
1375 lookupfilelog,
1404 lookupfilelog,
1376 self._forcedeltaparentprev,
1405 self._forcedeltaparentprev,
1377 ellipses=self._ellipses,
1406 ellipses=self._ellipses,
1378 clrevtolocalrev=clrevtolocalrev,
1407 clrevtolocalrev=clrevtolocalrev,
1379 fullclnodes=self._fullclnodes,
1408 fullclnodes=self._fullclnodes,
1380 precomputedellipsis=self._precomputedellipsis,
1409 precomputedellipsis=self._precomputedellipsis,
1381 )
1410 )
1382
1411
1383 yield fname, deltas
1412 yield fname, deltas
1384
1413
1385 progress.complete()
1414 progress.complete()
1386
1415
1387
1416
1388 def _makecg1packer(
1417 def _makecg1packer(
1389 repo,
1418 repo,
1390 oldmatcher,
1419 oldmatcher,
1391 matcher,
1420 matcher,
1392 bundlecaps,
1421 bundlecaps,
1393 ellipses=False,
1422 ellipses=False,
1394 shallow=False,
1423 shallow=False,
1395 ellipsisroots=None,
1424 ellipsisroots=None,
1396 fullnodes=None,
1425 fullnodes=None,
1426 remote_sidedata=None,
1397 ):
1427 ):
1398 builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
1428 builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
1399 d.node, d.p1node, d.p2node, d.linknode
1429 d.node, d.p1node, d.p2node, d.linknode
1400 )
1430 )
1401
1431
1402 return cgpacker(
1432 return cgpacker(
1403 repo,
1433 repo,
1404 oldmatcher,
1434 oldmatcher,
1405 matcher,
1435 matcher,
1406 b'01',
1436 b'01',
1407 builddeltaheader=builddeltaheader,
1437 builddeltaheader=builddeltaheader,
1408 manifestsend=b'',
1438 manifestsend=b'',
1409 forcedeltaparentprev=True,
1439 forcedeltaparentprev=True,
1410 bundlecaps=bundlecaps,
1440 bundlecaps=bundlecaps,
1411 ellipses=ellipses,
1441 ellipses=ellipses,
1412 shallow=shallow,
1442 shallow=shallow,
1413 ellipsisroots=ellipsisroots,
1443 ellipsisroots=ellipsisroots,
1414 fullnodes=fullnodes,
1444 fullnodes=fullnodes,
1415 )
1445 )
1416
1446
1417
1447
1418 def _makecg2packer(
1448 def _makecg2packer(
1419 repo,
1449 repo,
1420 oldmatcher,
1450 oldmatcher,
1421 matcher,
1451 matcher,
1422 bundlecaps,
1452 bundlecaps,
1423 ellipses=False,
1453 ellipses=False,
1424 shallow=False,
1454 shallow=False,
1425 ellipsisroots=None,
1455 ellipsisroots=None,
1426 fullnodes=None,
1456 fullnodes=None,
1457 remote_sidedata=None,
1427 ):
1458 ):
1428 builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
1459 builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
1429 d.node, d.p1node, d.p2node, d.basenode, d.linknode
1460 d.node, d.p1node, d.p2node, d.basenode, d.linknode
1430 )
1461 )
1431
1462
1432 return cgpacker(
1463 return cgpacker(
1433 repo,
1464 repo,
1434 oldmatcher,
1465 oldmatcher,
1435 matcher,
1466 matcher,
1436 b'02',
1467 b'02',
1437 builddeltaheader=builddeltaheader,
1468 builddeltaheader=builddeltaheader,
1438 manifestsend=b'',
1469 manifestsend=b'',
1439 bundlecaps=bundlecaps,
1470 bundlecaps=bundlecaps,
1440 ellipses=ellipses,
1471 ellipses=ellipses,
1441 shallow=shallow,
1472 shallow=shallow,
1442 ellipsisroots=ellipsisroots,
1473 ellipsisroots=ellipsisroots,
1443 fullnodes=fullnodes,
1474 fullnodes=fullnodes,
1444 )
1475 )
1445
1476
1446
1477
1447 def _makecg3packer(
1478 def _makecg3packer(
1448 repo,
1479 repo,
1449 oldmatcher,
1480 oldmatcher,
1450 matcher,
1481 matcher,
1451 bundlecaps,
1482 bundlecaps,
1452 ellipses=False,
1483 ellipses=False,
1453 shallow=False,
1484 shallow=False,
1454 ellipsisroots=None,
1485 ellipsisroots=None,
1455 fullnodes=None,
1486 fullnodes=None,
1487 remote_sidedata=None,
1456 ):
1488 ):
1457 builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
1489 builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
1458 d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
1490 d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
1459 )
1491 )
1460
1492
1461 return cgpacker(
1493 return cgpacker(
1462 repo,
1494 repo,
1463 oldmatcher,
1495 oldmatcher,
1464 matcher,
1496 matcher,
1465 b'03',
1497 b'03',
1466 builddeltaheader=builddeltaheader,
1498 builddeltaheader=builddeltaheader,
1467 manifestsend=closechunk(),
1499 manifestsend=closechunk(),
1468 bundlecaps=bundlecaps,
1500 bundlecaps=bundlecaps,
1469 ellipses=ellipses,
1501 ellipses=ellipses,
1470 shallow=shallow,
1502 shallow=shallow,
1471 ellipsisroots=ellipsisroots,
1503 ellipsisroots=ellipsisroots,
1472 fullnodes=fullnodes,
1504 fullnodes=fullnodes,
1473 )
1505 )
1474
1506
1475
1507
1508 def _makecg4packer(
1509 repo,
1510 oldmatcher,
1511 matcher,
1512 bundlecaps,
1513 ellipses=False,
1514 shallow=False,
1515 ellipsisroots=None,
1516 fullnodes=None,
1517 remote_sidedata=None,
1518 ):
1519 # Same header func as cg3. Sidedata is in a separate chunk from the delta to
1520 # differentiate "raw delta" and sidedata.
1521 builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
1522 d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
1523 )
1524
1525 return cgpacker(
1526 repo,
1527 oldmatcher,
1528 matcher,
1529 b'04',
1530 builddeltaheader=builddeltaheader,
1531 manifestsend=closechunk(),
1532 bundlecaps=bundlecaps,
1533 ellipses=ellipses,
1534 shallow=shallow,
1535 ellipsisroots=ellipsisroots,
1536 fullnodes=fullnodes,
1537 remote_sidedata=remote_sidedata,
1538 )
1539
1540
1476 _packermap = {
1541 _packermap = {
1477 b'01': (_makecg1packer, cg1unpacker),
1542 b'01': (_makecg1packer, cg1unpacker),
1478 # cg2 adds support for exchanging generaldelta
1543 # cg2 adds support for exchanging generaldelta
1479 b'02': (_makecg2packer, cg2unpacker),
1544 b'02': (_makecg2packer, cg2unpacker),
1480 # cg3 adds support for exchanging revlog flags and treemanifests
1545 # cg3 adds support for exchanging revlog flags and treemanifests
1481 b'03': (_makecg3packer, cg3unpacker),
1546 b'03': (_makecg3packer, cg3unpacker),
1547 # cg4 adds support for exchanging sidedata
1548 b'04': (_makecg4packer, cg4unpacker),
1482 }
1549 }
1483
1550
1484
1551
1485 def allsupportedversions(repo):
1552 def allsupportedversions(repo):
1486 versions = set(_packermap.keys())
1553 versions = set(_packermap.keys())
1487 needv03 = False
1554 needv03 = False
1488 if (
1555 if (
1489 repo.ui.configbool(b'experimental', b'changegroup3')
1556 repo.ui.configbool(b'experimental', b'changegroup3')
1490 or repo.ui.configbool(b'experimental', b'treemanifest')
1557 or repo.ui.configbool(b'experimental', b'treemanifest')
1491 or scmutil.istreemanifest(repo)
1558 or scmutil.istreemanifest(repo)
1492 ):
1559 ):
1493 # we keep version 03 because we need to exchange treemanifest data
1560 # we keep version 03 because we need to exchange treemanifest data
1494 #
1561 #
1495 # we also keep versions 01 and 02, because it is possible for a repo to
1562 # we also keep versions 01 and 02, because it is possible for a repo to
1496 # contain both normal and tree manifests at the same time, so using an
1563 # contain both normal and tree manifests at the same time, so using an
1497 # older version to pull data is viable
1564 # older version to pull data is viable
1498 #
1565 #
1499 # (or even to push a subset of history)
1566 # (or even to push a subset of history)
1500 needv03 = True
1567 needv03 = True
1501 if b'exp-sidedata-flag' in repo.requirements:
1568 has_revlogv2 = requirements.REVLOGV2_REQUIREMENT in repo.requirements
1502 needv03 = True
1569 if not has_revlogv2:
1503 # don't attempt to use 01/02 until we do sidedata cleaning
1570 versions.discard(b'04')
1504 versions.discard(b'01')
1505 versions.discard(b'02')
1506 if not needv03:
1571 if not needv03:
1507 versions.discard(b'03')
1572 versions.discard(b'03')
1508 return versions
1573 return versions
1509
1574
1510
1575
1511 # Changegroup versions that can be applied to the repo
1576 # Changegroup versions that can be applied to the repo
1512 def supportedincomingversions(repo):
1577 def supportedincomingversions(repo):
1513 return allsupportedversions(repo)
1578 return allsupportedversions(repo)
1514
1579
1515
1580
1516 # Changegroup versions that can be created from the repo
1581 # Changegroup versions that can be created from the repo
1517 def supportedoutgoingversions(repo):
1582 def supportedoutgoingversions(repo):
1518 versions = allsupportedversions(repo)
1583 versions = allsupportedversions(repo)
1519 if scmutil.istreemanifest(repo):
1584 if scmutil.istreemanifest(repo):
1520 # Versions 01 and 02 support only flat manifests and it's just too
1585 # Versions 01 and 02 support only flat manifests and it's just too
1521 # expensive to convert between the flat manifest and tree manifest on
1586 # expensive to convert between the flat manifest and tree manifest on
1522 # the fly. Since tree manifests are hashed differently, all of history
1587 # the fly. Since tree manifests are hashed differently, all of history
1523 # would have to be converted. Instead, we simply don't even pretend to
1588 # would have to be converted. Instead, we simply don't even pretend to
1524 # support versions 01 and 02.
1589 # support versions 01 and 02.
1525 versions.discard(b'01')
1590 versions.discard(b'01')
1526 versions.discard(b'02')
1591 versions.discard(b'02')
1527 if requirements.NARROW_REQUIREMENT in repo.requirements:
1592 if requirements.NARROW_REQUIREMENT in repo.requirements:
1528 # Versions 01 and 02 don't support revlog flags, and we need to
1593 # Versions 01 and 02 don't support revlog flags, and we need to
1529 # support that for stripping and unbundling to work.
1594 # support that for stripping and unbundling to work.
1530 versions.discard(b'01')
1595 versions.discard(b'01')
1531 versions.discard(b'02')
1596 versions.discard(b'02')
1532 if LFS_REQUIREMENT in repo.requirements:
1597 if LFS_REQUIREMENT in repo.requirements:
1533 # Versions 01 and 02 don't support revlog flags, and we need to
1598 # Versions 01 and 02 don't support revlog flags, and we need to
1534 # mark LFS entries with REVIDX_EXTSTORED.
1599 # mark LFS entries with REVIDX_EXTSTORED.
1535 versions.discard(b'01')
1600 versions.discard(b'01')
1536 versions.discard(b'02')
1601 versions.discard(b'02')
1537
1602
1538 return versions
1603 return versions
1539
1604
1540
1605
1541 def localversion(repo):
1606 def localversion(repo):
1542 # Finds the best version to use for bundles that are meant to be used
1607 # Finds the best version to use for bundles that are meant to be used
1543 # locally, such as those from strip and shelve, and temporary bundles.
1608 # locally, such as those from strip and shelve, and temporary bundles.
1544 return max(supportedoutgoingversions(repo))
1609 return max(supportedoutgoingversions(repo))
1545
1610
1546
1611
1547 def safeversion(repo):
1612 def safeversion(repo):
1548 # Finds the smallest version that it's safe to assume clients of the repo
1613 # Finds the smallest version that it's safe to assume clients of the repo
1549 # will support. For example, all hg versions that support generaldelta also
1614 # will support. For example, all hg versions that support generaldelta also
1550 # support changegroup 02.
1615 # support changegroup 02.
1551 versions = supportedoutgoingversions(repo)
1616 versions = supportedoutgoingversions(repo)
1552 if requirements.GENERALDELTA_REQUIREMENT in repo.requirements:
1617 if requirements.GENERALDELTA_REQUIREMENT in repo.requirements:
1553 versions.discard(b'01')
1618 versions.discard(b'01')
1554 assert versions
1619 assert versions
1555 return min(versions)
1620 return min(versions)
1556
1621
1557
1622
1558 def getbundler(
1623 def getbundler(
1559 version,
1624 version,
1560 repo,
1625 repo,
1561 bundlecaps=None,
1626 bundlecaps=None,
1562 oldmatcher=None,
1627 oldmatcher=None,
1563 matcher=None,
1628 matcher=None,
1564 ellipses=False,
1629 ellipses=False,
1565 shallow=False,
1630 shallow=False,
1566 ellipsisroots=None,
1631 ellipsisroots=None,
1567 fullnodes=None,
1632 fullnodes=None,
1633 remote_sidedata=None,
1568 ):
1634 ):
1569 assert version in supportedoutgoingversions(repo)
1635 assert version in supportedoutgoingversions(repo)
1570
1636
1571 if matcher is None:
1637 if matcher is None:
1572 matcher = matchmod.always()
1638 matcher = matchmod.always()
1573 if oldmatcher is None:
1639 if oldmatcher is None:
1574 oldmatcher = matchmod.never()
1640 oldmatcher = matchmod.never()
1575
1641
1576 if version == b'01' and not matcher.always():
1642 if version == b'01' and not matcher.always():
1577 raise error.ProgrammingError(
1643 raise error.ProgrammingError(
1578 b'version 01 changegroups do not support sparse file matchers'
1644 b'version 01 changegroups do not support sparse file matchers'
1579 )
1645 )
1580
1646
1581 if ellipses and version in (b'01', b'02'):
1647 if ellipses and version in (b'01', b'02'):
1582 raise error.Abort(
1648 raise error.Abort(
1583 _(
1649 _(
1584 b'ellipsis nodes require at least cg3 on client and server, '
1650 b'ellipsis nodes require at least cg3 on client and server, '
1585 b'but negotiated version %s'
1651 b'but negotiated version %s'
1586 )
1652 )
1587 % version
1653 % version
1588 )
1654 )
1589
1655
1590 # Requested files could include files not in the local store. So
1656 # Requested files could include files not in the local store. So
1591 # filter those out.
1657 # filter those out.
1592 matcher = repo.narrowmatch(matcher)
1658 matcher = repo.narrowmatch(matcher)
1593
1659
1594 fn = _packermap[version][0]
1660 fn = _packermap[version][0]
1595 return fn(
1661 return fn(
1596 repo,
1662 repo,
1597 oldmatcher,
1663 oldmatcher,
1598 matcher,
1664 matcher,
1599 bundlecaps,
1665 bundlecaps,
1600 ellipses=ellipses,
1666 ellipses=ellipses,
1601 shallow=shallow,
1667 shallow=shallow,
1602 ellipsisroots=ellipsisroots,
1668 ellipsisroots=ellipsisroots,
1603 fullnodes=fullnodes,
1669 fullnodes=fullnodes,
1670 remote_sidedata=remote_sidedata,
1604 )
1671 )
1605
1672
1606
1673
1607 def getunbundler(version, fh, alg, extras=None):
1674 def getunbundler(version, fh, alg, extras=None):
1608 return _packermap[version][1](fh, alg, extras=extras)
1675 return _packermap[version][1](fh, alg, extras=extras)
1609
1676
1610
1677
1611 def _changegroupinfo(repo, nodes, source):
1678 def _changegroupinfo(repo, nodes, source):
1612 if repo.ui.verbose or source == b'bundle':
1679 if repo.ui.verbose or source == b'bundle':
1613 repo.ui.status(_(b"%d changesets found\n") % len(nodes))
1680 repo.ui.status(_(b"%d changesets found\n") % len(nodes))
1614 if repo.ui.debugflag:
1681 if repo.ui.debugflag:
1615 repo.ui.debug(b"list of changesets:\n")
1682 repo.ui.debug(b"list of changesets:\n")
1616 for node in nodes:
1683 for node in nodes:
1617 repo.ui.debug(b"%s\n" % hex(node))
1684 repo.ui.debug(b"%s\n" % hex(node))
1618
1685
1619
1686
1620 def makechangegroup(
1687 def makechangegroup(
1621 repo, outgoing, version, source, fastpath=False, bundlecaps=None
1688 repo, outgoing, version, source, fastpath=False, bundlecaps=None
1622 ):
1689 ):
1623 cgstream = makestream(
1690 cgstream = makestream(
1624 repo,
1691 repo,
1625 outgoing,
1692 outgoing,
1626 version,
1693 version,
1627 source,
1694 source,
1628 fastpath=fastpath,
1695 fastpath=fastpath,
1629 bundlecaps=bundlecaps,
1696 bundlecaps=bundlecaps,
1630 )
1697 )
1631 return getunbundler(
1698 return getunbundler(
1632 version,
1699 version,
1633 util.chunkbuffer(cgstream),
1700 util.chunkbuffer(cgstream),
1634 None,
1701 None,
1635 {b'clcount': len(outgoing.missing)},
1702 {b'clcount': len(outgoing.missing)},
1636 )
1703 )
1637
1704
1638
1705
1639 def makestream(
1706 def makestream(
1640 repo,
1707 repo,
1641 outgoing,
1708 outgoing,
1642 version,
1709 version,
1643 source,
1710 source,
1644 fastpath=False,
1711 fastpath=False,
1645 bundlecaps=None,
1712 bundlecaps=None,
1646 matcher=None,
1713 matcher=None,
1714 remote_sidedata=None,
1647 ):
1715 ):
1648 bundler = getbundler(version, repo, bundlecaps=bundlecaps, matcher=matcher)
1716 bundler = getbundler(
1717 version,
1718 repo,
1719 bundlecaps=bundlecaps,
1720 matcher=matcher,
1721 remote_sidedata=remote_sidedata,
1722 )
1649
1723
1650 repo = repo.unfiltered()
1724 repo = repo.unfiltered()
1651 commonrevs = outgoing.common
1725 commonrevs = outgoing.common
1652 csets = outgoing.missing
1726 csets = outgoing.missing
1653 heads = outgoing.ancestorsof
1727 heads = outgoing.ancestorsof
1654 # We go through the fast path if we get told to, or if all (unfiltered)
1728 # We go through the fast path if we get told to, or if all (unfiltered)
1655 # heads have been requested (since we then know all linkrevs will
1729 # heads have been requested (since we then know all linkrevs will
1656 # be pulled by the client).
1730 # be pulled by the client).
1657 heads.sort()
1731 heads.sort()
1658 fastpathlinkrev = fastpath or (
1732 fastpathlinkrev = fastpath or (
1659 repo.filtername is None and heads == sorted(repo.heads())
1733 repo.filtername is None and heads == sorted(repo.heads())
1660 )
1734 )
1661
1735
1662 repo.hook(b'preoutgoing', throw=True, source=source)
1736 repo.hook(b'preoutgoing', throw=True, source=source)
1663 _changegroupinfo(repo, csets, source)
1737 _changegroupinfo(repo, csets, source)
1664 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
1738 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
1665
1739
1666
1740
1667 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
1741 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
1668 revisions = 0
1742 revisions = 0
1669 files = 0
1743 files = 0
1670 progress = repo.ui.makeprogress(
1744 progress = repo.ui.makeprogress(
1671 _(b'files'), unit=_(b'files'), total=expectedfiles
1745 _(b'files'), unit=_(b'files'), total=expectedfiles
1672 )
1746 )
1673 for chunkdata in iter(source.filelogheader, {}):
1747 for chunkdata in iter(source.filelogheader, {}):
1674 files += 1
1748 files += 1
1675 f = chunkdata[b"filename"]
1749 f = chunkdata[b"filename"]
1676 repo.ui.debug(b"adding %s revisions\n" % f)
1750 repo.ui.debug(b"adding %s revisions\n" % f)
1677 progress.increment()
1751 progress.increment()
1678 fl = repo.file(f)
1752 fl = repo.file(f)
1679 o = len(fl)
1753 o = len(fl)
1680 try:
1754 try:
1681 deltas = source.deltaiter()
1755 deltas = source.deltaiter()
1682 if not fl.addgroup(deltas, revmap, trp):
1756 if not fl.addgroup(deltas, revmap, trp):
1683 raise error.Abort(_(b"received file revlog group is empty"))
1757 raise error.Abort(_(b"received file revlog group is empty"))
1684 except error.CensoredBaseError as e:
1758 except error.CensoredBaseError as e:
1685 raise error.Abort(_(b"received delta base is censored: %s") % e)
1759 raise error.Abort(_(b"received delta base is censored: %s") % e)
1686 revisions += len(fl) - o
1760 revisions += len(fl) - o
1687 if f in needfiles:
1761 if f in needfiles:
1688 needs = needfiles[f]
1762 needs = needfiles[f]
1689 for new in pycompat.xrange(o, len(fl)):
1763 for new in pycompat.xrange(o, len(fl)):
1690 n = fl.node(new)
1764 n = fl.node(new)
1691 if n in needs:
1765 if n in needs:
1692 needs.remove(n)
1766 needs.remove(n)
1693 else:
1767 else:
1694 raise error.Abort(_(b"received spurious file revlog entry"))
1768 raise error.Abort(_(b"received spurious file revlog entry"))
1695 if not needs:
1769 if not needs:
1696 del needfiles[f]
1770 del needfiles[f]
1697 progress.complete()
1771 progress.complete()
1698
1772
1699 for f, needs in pycompat.iteritems(needfiles):
1773 for f, needs in pycompat.iteritems(needfiles):
1700 fl = repo.file(f)
1774 fl = repo.file(f)
1701 for n in needs:
1775 for n in needs:
1702 try:
1776 try:
1703 fl.rev(n)
1777 fl.rev(n)
1704 except error.LookupError:
1778 except error.LookupError:
1705 raise error.Abort(
1779 raise error.Abort(
1706 _(b'missing file data for %s:%s - run hg verify')
1780 _(b'missing file data for %s:%s - run hg verify')
1707 % (f, hex(n))
1781 % (f, hex(n))
1708 )
1782 )
1709
1783
1710 return revisions, files
1784 return revisions, files
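To tie the pieces above together: the '04' changegroup is only offered when the repository carries the revlog-v2 requirement (allsupportedversions discards b'04' otherwise), and the sidedata categories requested by the remote are threaded from makestream() through getbundler() into the cg4 packer. A minimal sketch of how a caller would end up with a version-04 stream, assuming `repo`, `outgoing` and `remote_sidedata` were computed elsewhere (for example by the exchange layer); these callers and the b'push' source are illustrative, not part of this change:

    # Pick the highest version this repo can produce; on a revlog-v2
    # repository that is b'04', which carries sidedata in separate chunks.
    version = max(supportedoutgoingversions(repo))
    cgstream = makestream(
        repo,
        outgoing,
        version,
        b'push',
        remote_sidedata=remote_sidedata,
    )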
@@ -1,4773 +1,4773 b''
1 # debugcommands.py - command processing for debug* commands
1 # debugcommands.py - command processing for debug* commands
2 #
2 #
3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import codecs
10 import codecs
11 import collections
11 import collections
12 import difflib
12 import difflib
13 import errno
13 import errno
14 import glob
14 import glob
15 import operator
15 import operator
16 import os
16 import os
17 import platform
17 import platform
18 import random
18 import random
19 import re
19 import re
20 import socket
20 import socket
21 import ssl
21 import ssl
22 import stat
22 import stat
23 import string
23 import string
24 import subprocess
24 import subprocess
25 import sys
25 import sys
26 import time
26 import time
27
27
28 from .i18n import _
28 from .i18n import _
29 from .node import (
29 from .node import (
30 bin,
30 bin,
31 hex,
31 hex,
32 nullid,
32 nullid,
33 nullrev,
33 nullrev,
34 short,
34 short,
35 )
35 )
36 from .pycompat import (
36 from .pycompat import (
37 getattr,
37 getattr,
38 open,
38 open,
39 )
39 )
40 from . import (
40 from . import (
41 bundle2,
41 bundle2,
42 bundlerepo,
42 bundlerepo,
43 changegroup,
43 changegroup,
44 cmdutil,
44 cmdutil,
45 color,
45 color,
46 context,
46 context,
47 copies,
47 copies,
48 dagparser,
48 dagparser,
49 encoding,
49 encoding,
50 error,
50 error,
51 exchange,
51 exchange,
52 extensions,
52 extensions,
53 filemerge,
53 filemerge,
54 filesetlang,
54 filesetlang,
55 formatter,
55 formatter,
56 hg,
56 hg,
57 httppeer,
57 httppeer,
58 localrepo,
58 localrepo,
59 lock as lockmod,
59 lock as lockmod,
60 logcmdutil,
60 logcmdutil,
61 mergestate as mergestatemod,
61 mergestate as mergestatemod,
62 metadata,
62 metadata,
63 obsolete,
63 obsolete,
64 obsutil,
64 obsutil,
65 pathutil,
65 pathutil,
66 phases,
66 phases,
67 policy,
67 policy,
68 pvec,
68 pvec,
69 pycompat,
69 pycompat,
70 registrar,
70 registrar,
71 repair,
71 repair,
72 repoview,
72 repoview,
73 revlog,
73 revlog,
74 revset,
74 revset,
75 revsetlang,
75 revsetlang,
76 scmutil,
76 scmutil,
77 setdiscovery,
77 setdiscovery,
78 simplemerge,
78 simplemerge,
79 sshpeer,
79 sshpeer,
80 sslutil,
80 sslutil,
81 streamclone,
81 streamclone,
82 strip,
82 strip,
83 tags as tagsmod,
83 tags as tagsmod,
84 templater,
84 templater,
85 treediscovery,
85 treediscovery,
86 upgrade,
86 upgrade,
87 url as urlmod,
87 url as urlmod,
88 util,
88 util,
89 vfs as vfsmod,
89 vfs as vfsmod,
90 wireprotoframing,
90 wireprotoframing,
91 wireprotoserver,
91 wireprotoserver,
92 wireprotov2peer,
92 wireprotov2peer,
93 )
93 )
94 from .utils import (
94 from .utils import (
95 cborutil,
95 cborutil,
96 compression,
96 compression,
97 dateutil,
97 dateutil,
98 procutil,
98 procutil,
99 stringutil,
99 stringutil,
100 )
100 )
101
101
102 from .revlogutils import (
102 from .revlogutils import (
103 deltas as deltautil,
103 deltas as deltautil,
104 nodemap,
104 nodemap,
105 sidedata,
105 sidedata,
106 )
106 )
107
107
108 release = lockmod.release
108 release = lockmod.release
109
109
110 table = {}
110 table = {}
111 table.update(strip.command._table)
111 table.update(strip.command._table)
112 command = registrar.command(table)
112 command = registrar.command(table)
113
113
114
114
115 @command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
115 @command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
116 def debugancestor(ui, repo, *args):
116 def debugancestor(ui, repo, *args):
117 """find the ancestor revision of two revisions in a given index"""
117 """find the ancestor revision of two revisions in a given index"""
118 if len(args) == 3:
118 if len(args) == 3:
119 index, rev1, rev2 = args
119 index, rev1, rev2 = args
120 r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
120 r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
121 lookup = r.lookup
121 lookup = r.lookup
122 elif len(args) == 2:
122 elif len(args) == 2:
123 if not repo:
123 if not repo:
124 raise error.Abort(
124 raise error.Abort(
125 _(b'there is no Mercurial repository here (.hg not found)')
125 _(b'there is no Mercurial repository here (.hg not found)')
126 )
126 )
127 rev1, rev2 = args
127 rev1, rev2 = args
128 r = repo.changelog
128 r = repo.changelog
129 lookup = repo.lookup
129 lookup = repo.lookup
130 else:
130 else:
131 raise error.Abort(_(b'either two or three arguments required'))
131 raise error.Abort(_(b'either two or three arguments required'))
132 a = r.ancestor(lookup(rev1), lookup(rev2))
132 a = r.ancestor(lookup(rev1), lookup(rev2))
133 ui.write(b'%d:%s\n' % (r.rev(a), hex(a)))
133 ui.write(b'%d:%s\n' % (r.rev(a), hex(a)))
134
134
135
135
136 @command(b'debugantivirusrunning', [])
136 @command(b'debugantivirusrunning', [])
137 def debugantivirusrunning(ui, repo):
137 def debugantivirusrunning(ui, repo):
138 """attempt to trigger an antivirus scanner to see if one is active"""
138 """attempt to trigger an antivirus scanner to see if one is active"""
139 with repo.cachevfs.open('eicar-test-file.com', b'wb') as f:
139 with repo.cachevfs.open('eicar-test-file.com', b'wb') as f:
140 f.write(
140 f.write(
141 util.b85decode(
141 util.b85decode(
142 # This is a base85-armored version of the EICAR test file. See
142 # This is a base85-armored version of the EICAR test file. See
143 # https://en.wikipedia.org/wiki/EICAR_test_file for details.
143 # https://en.wikipedia.org/wiki/EICAR_test_file for details.
144 b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P'
144 b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P'
145 b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx'
145 b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx'
146 )
146 )
147 )
147 )
148 # Give an AV engine time to scan the file.
148 # Give an AV engine time to scan the file.
149 time.sleep(2)
149 time.sleep(2)
150 util.unlink(repo.cachevfs.join('eicar-test-file.com'))
150 util.unlink(repo.cachevfs.join('eicar-test-file.com'))
151
151
152
152
153 @command(b'debugapplystreamclonebundle', [], b'FILE')
153 @command(b'debugapplystreamclonebundle', [], b'FILE')
154 def debugapplystreamclonebundle(ui, repo, fname):
154 def debugapplystreamclonebundle(ui, repo, fname):
155 """apply a stream clone bundle file"""
155 """apply a stream clone bundle file"""
156 f = hg.openpath(ui, fname)
156 f = hg.openpath(ui, fname)
157 gen = exchange.readbundle(ui, f, fname)
157 gen = exchange.readbundle(ui, f, fname)
158 gen.apply(repo)
158 gen.apply(repo)
159
159
160
160
161 @command(
161 @command(
162 b'debugbuilddag',
162 b'debugbuilddag',
163 [
163 [
164 (
164 (
165 b'm',
165 b'm',
166 b'mergeable-file',
166 b'mergeable-file',
167 None,
167 None,
168 _(b'add single file mergeable changes'),
168 _(b'add single file mergeable changes'),
169 ),
169 ),
170 (
170 (
171 b'o',
171 b'o',
172 b'overwritten-file',
172 b'overwritten-file',
173 None,
173 None,
174 _(b'add single file all revs overwrite'),
174 _(b'add single file all revs overwrite'),
175 ),
175 ),
176 (b'n', b'new-file', None, _(b'add new file at each rev')),
176 (b'n', b'new-file', None, _(b'add new file at each rev')),
177 ],
177 ],
178 _(b'[OPTION]... [TEXT]'),
178 _(b'[OPTION]... [TEXT]'),
179 )
179 )
180 def debugbuilddag(
180 def debugbuilddag(
181 ui,
181 ui,
182 repo,
182 repo,
183 text=None,
183 text=None,
184 mergeable_file=False,
184 mergeable_file=False,
185 overwritten_file=False,
185 overwritten_file=False,
186 new_file=False,
186 new_file=False,
187 ):
187 ):
188 """builds a repo with a given DAG from scratch in the current empty repo
188 """builds a repo with a given DAG from scratch in the current empty repo
189
189
190 The description of the DAG is read from stdin if not given on the
190 The description of the DAG is read from stdin if not given on the
191 command line.
191 command line.
192
192
193 Elements:
193 Elements:
194
194
195 - "+n" is a linear run of n nodes based on the current default parent
195 - "+n" is a linear run of n nodes based on the current default parent
196 - "." is a single node based on the current default parent
196 - "." is a single node based on the current default parent
197 - "$" resets the default parent to null (implied at the start);
197 - "$" resets the default parent to null (implied at the start);
198 otherwise the default parent is always the last node created
198 otherwise the default parent is always the last node created
199 - "<p" sets the default parent to the backref p
199 - "<p" sets the default parent to the backref p
200 - "*p" is a fork at parent p, which is a backref
200 - "*p" is a fork at parent p, which is a backref
201 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
201 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
202 - "/p2" is a merge of the preceding node and p2
202 - "/p2" is a merge of the preceding node and p2
203 - ":tag" defines a local tag for the preceding node
203 - ":tag" defines a local tag for the preceding node
204 - "@branch" sets the named branch for subsequent nodes
204 - "@branch" sets the named branch for subsequent nodes
205 - "#...\\n" is a comment up to the end of the line
205 - "#...\\n" is a comment up to the end of the line
206
206
207 Whitespace between the above elements is ignored.
207 Whitespace between the above elements is ignored.
208
208
209 A backref is either
209 A backref is either
210
210
211 - a number n, which references the node curr-n, where curr is the current
211 - a number n, which references the node curr-n, where curr is the current
212 node, or
212 node, or
213 - the name of a local tag you placed earlier using ":tag", or
213 - the name of a local tag you placed earlier using ":tag", or
214 - empty to denote the default parent.
214 - empty to denote the default parent.
215
215
216 All string-valued elements are either strictly alphanumeric, or must
216 All string-valued elements are either strictly alphanumeric, or must
217 be enclosed in double quotes ("..."), with "\\" as escape character.
217 be enclosed in double quotes ("..."), with "\\" as escape character.
218 """
218 """
219
219
220 if text is None:
220 if text is None:
221 ui.status(_(b"reading DAG from stdin\n"))
221 ui.status(_(b"reading DAG from stdin\n"))
222 text = ui.fin.read()
222 text = ui.fin.read()
223
223
224 cl = repo.changelog
224 cl = repo.changelog
225 if len(cl) > 0:
225 if len(cl) > 0:
226 raise error.Abort(_(b'repository is not empty'))
226 raise error.Abort(_(b'repository is not empty'))
227
227
228 # determine number of revs in DAG
228 # determine number of revs in DAG
229 total = 0
229 total = 0
230 for type, data in dagparser.parsedag(text):
230 for type, data in dagparser.parsedag(text):
231 if type == b'n':
231 if type == b'n':
232 total += 1
232 total += 1
233
233
234 if mergeable_file:
234 if mergeable_file:
235 linesperrev = 2
235 linesperrev = 2
236 # make a file with k lines per rev
236 # make a file with k lines per rev
237 initialmergedlines = [
237 initialmergedlines = [
238 b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
238 b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
239 ]
239 ]
240 initialmergedlines.append(b"")
240 initialmergedlines.append(b"")
241
241
242 tags = []
242 tags = []
243 progress = ui.makeprogress(
243 progress = ui.makeprogress(
244 _(b'building'), unit=_(b'revisions'), total=total
244 _(b'building'), unit=_(b'revisions'), total=total
245 )
245 )
246 with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
246 with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
247 at = -1
247 at = -1
248 atbranch = b'default'
248 atbranch = b'default'
249 nodeids = []
249 nodeids = []
250 id = 0
250 id = 0
251 progress.update(id)
251 progress.update(id)
252 for type, data in dagparser.parsedag(text):
252 for type, data in dagparser.parsedag(text):
253 if type == b'n':
253 if type == b'n':
254 ui.note((b'node %s\n' % pycompat.bytestr(data)))
254 ui.note((b'node %s\n' % pycompat.bytestr(data)))
255 id, ps = data
255 id, ps = data
256
256
257 files = []
257 files = []
258 filecontent = {}
258 filecontent = {}
259
259
260 p2 = None
260 p2 = None
261 if mergeable_file:
261 if mergeable_file:
262 fn = b"mf"
262 fn = b"mf"
263 p1 = repo[ps[0]]
263 p1 = repo[ps[0]]
264 if len(ps) > 1:
264 if len(ps) > 1:
265 p2 = repo[ps[1]]
265 p2 = repo[ps[1]]
266 pa = p1.ancestor(p2)
266 pa = p1.ancestor(p2)
267 base, local, other = [
267 base, local, other = [
268 x[fn].data() for x in (pa, p1, p2)
268 x[fn].data() for x in (pa, p1, p2)
269 ]
269 ]
270 m3 = simplemerge.Merge3Text(base, local, other)
270 m3 = simplemerge.Merge3Text(base, local, other)
271 ml = [l.strip() for l in m3.merge_lines()]
271 ml = [l.strip() for l in m3.merge_lines()]
272 ml.append(b"")
272 ml.append(b"")
273 elif at > 0:
273 elif at > 0:
274 ml = p1[fn].data().split(b"\n")
274 ml = p1[fn].data().split(b"\n")
275 else:
275 else:
276 ml = initialmergedlines
276 ml = initialmergedlines
277 ml[id * linesperrev] += b" r%i" % id
277 ml[id * linesperrev] += b" r%i" % id
278 mergedtext = b"\n".join(ml)
278 mergedtext = b"\n".join(ml)
279 files.append(fn)
279 files.append(fn)
280 filecontent[fn] = mergedtext
280 filecontent[fn] = mergedtext
281
281
282 if overwritten_file:
282 if overwritten_file:
283 fn = b"of"
283 fn = b"of"
284 files.append(fn)
284 files.append(fn)
285 filecontent[fn] = b"r%i\n" % id
285 filecontent[fn] = b"r%i\n" % id
286
286
287 if new_file:
287 if new_file:
288 fn = b"nf%i" % id
288 fn = b"nf%i" % id
289 files.append(fn)
289 files.append(fn)
290 filecontent[fn] = b"r%i\n" % id
290 filecontent[fn] = b"r%i\n" % id
291 if len(ps) > 1:
291 if len(ps) > 1:
292 if not p2:
292 if not p2:
293 p2 = repo[ps[1]]
293 p2 = repo[ps[1]]
294 for fn in p2:
294 for fn in p2:
295 if fn.startswith(b"nf"):
295 if fn.startswith(b"nf"):
296 files.append(fn)
296 files.append(fn)
297 filecontent[fn] = p2[fn].data()
297 filecontent[fn] = p2[fn].data()
298
298
299 def fctxfn(repo, cx, path):
299 def fctxfn(repo, cx, path):
300 if path in filecontent:
300 if path in filecontent:
301 return context.memfilectx(
301 return context.memfilectx(
302 repo, cx, path, filecontent[path]
302 repo, cx, path, filecontent[path]
303 )
303 )
304 return None
304 return None
305
305
306 if len(ps) == 0 or ps[0] < 0:
306 if len(ps) == 0 or ps[0] < 0:
307 pars = [None, None]
307 pars = [None, None]
308 elif len(ps) == 1:
308 elif len(ps) == 1:
309 pars = [nodeids[ps[0]], None]
309 pars = [nodeids[ps[0]], None]
310 else:
310 else:
311 pars = [nodeids[p] for p in ps]
311 pars = [nodeids[p] for p in ps]
312 cx = context.memctx(
312 cx = context.memctx(
313 repo,
313 repo,
314 pars,
314 pars,
315 b"r%i" % id,
315 b"r%i" % id,
316 files,
316 files,
317 fctxfn,
317 fctxfn,
318 date=(id, 0),
318 date=(id, 0),
319 user=b"debugbuilddag",
319 user=b"debugbuilddag",
320 extra={b'branch': atbranch},
320 extra={b'branch': atbranch},
321 )
321 )
322 nodeid = repo.commitctx(cx)
322 nodeid = repo.commitctx(cx)
323 nodeids.append(nodeid)
323 nodeids.append(nodeid)
324 at = id
324 at = id
325 elif type == b'l':
325 elif type == b'l':
326 id, name = data
326 id, name = data
327 ui.note((b'tag %s\n' % name))
327 ui.note((b'tag %s\n' % name))
328 tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
328 tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
329 elif type == b'a':
329 elif type == b'a':
330 ui.note((b'branch %s\n' % data))
330 ui.note((b'branch %s\n' % data))
331 atbranch = data
331 atbranch = data
332 progress.update(id)
332 progress.update(id)
333
333
334 if tags:
334 if tags:
335 repo.vfs.write(b"localtags", b"".join(tags))
335 repo.vfs.write(b"localtags", b"".join(tags))
336
336
337
337
338 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
338 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
339 indent_string = b' ' * indent
339 indent_string = b' ' * indent
340 if all:
340 if all:
341 ui.writenoi18n(
341 ui.writenoi18n(
342 b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
342 b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
343 % indent_string
343 % indent_string
344 )
344 )
345
345
346 def showchunks(named):
346 def showchunks(named):
347 ui.write(b"\n%s%s\n" % (indent_string, named))
347 ui.write(b"\n%s%s\n" % (indent_string, named))
348 for deltadata in gen.deltaiter():
348 for deltadata in gen.deltaiter():
349 node, p1, p2, cs, deltabase, delta, flags = deltadata
349 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
350 ui.write(
350 ui.write(
351 b"%s%s %s %s %s %s %d\n"
351 b"%s%s %s %s %s %s %d\n"
352 % (
352 % (
353 indent_string,
353 indent_string,
354 hex(node),
354 hex(node),
355 hex(p1),
355 hex(p1),
356 hex(p2),
356 hex(p2),
357 hex(cs),
357 hex(cs),
358 hex(deltabase),
358 hex(deltabase),
359 len(delta),
359 len(delta),
360 )
360 )
361 )
361 )
362
362
363 gen.changelogheader()
363 gen.changelogheader()
364 showchunks(b"changelog")
364 showchunks(b"changelog")
365 gen.manifestheader()
365 gen.manifestheader()
366 showchunks(b"manifest")
366 showchunks(b"manifest")
367 for chunkdata in iter(gen.filelogheader, {}):
367 for chunkdata in iter(gen.filelogheader, {}):
368 fname = chunkdata[b'filename']
368 fname = chunkdata[b'filename']
369 showchunks(fname)
369 showchunks(fname)
370 else:
370 else:
371 if isinstance(gen, bundle2.unbundle20):
371 if isinstance(gen, bundle2.unbundle20):
372 raise error.Abort(_(b'use debugbundle2 for this file'))
372 raise error.Abort(_(b'use debugbundle2 for this file'))
373 gen.changelogheader()
373 gen.changelogheader()
374 for deltadata in gen.deltaiter():
374 for deltadata in gen.deltaiter():
375 node, p1, p2, cs, deltabase, delta, flags = deltadata
375 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
376 ui.write(b"%s%s\n" % (indent_string, hex(node)))
376 ui.write(b"%s%s\n" % (indent_string, hex(node)))
377
377
378
378
379 def _debugobsmarkers(ui, part, indent=0, **opts):
379 def _debugobsmarkers(ui, part, indent=0, **opts):
380 """display version and markers contained in 'data'"""
380 """display version and markers contained in 'data'"""
381 opts = pycompat.byteskwargs(opts)
381 opts = pycompat.byteskwargs(opts)
382 data = part.read()
382 data = part.read()
383 indent_string = b' ' * indent
383 indent_string = b' ' * indent
384 try:
384 try:
385 version, markers = obsolete._readmarkers(data)
385 version, markers = obsolete._readmarkers(data)
386 except error.UnknownVersion as exc:
386 except error.UnknownVersion as exc:
387 msg = b"%sunsupported version: %s (%d bytes)\n"
387 msg = b"%sunsupported version: %s (%d bytes)\n"
388 msg %= indent_string, exc.version, len(data)
388 msg %= indent_string, exc.version, len(data)
389 ui.write(msg)
389 ui.write(msg)
390 else:
390 else:
391 msg = b"%sversion: %d (%d bytes)\n"
391 msg = b"%sversion: %d (%d bytes)\n"
392 msg %= indent_string, version, len(data)
392 msg %= indent_string, version, len(data)
393 ui.write(msg)
393 ui.write(msg)
394 fm = ui.formatter(b'debugobsolete', opts)
394 fm = ui.formatter(b'debugobsolete', opts)
395 for rawmarker in sorted(markers):
395 for rawmarker in sorted(markers):
396 m = obsutil.marker(None, rawmarker)
396 m = obsutil.marker(None, rawmarker)
397 fm.startitem()
397 fm.startitem()
398 fm.plain(indent_string)
398 fm.plain(indent_string)
399 cmdutil.showmarker(fm, m)
399 cmdutil.showmarker(fm, m)
400 fm.end()
400 fm.end()
401
401
402
402
403 def _debugphaseheads(ui, data, indent=0):
403 def _debugphaseheads(ui, data, indent=0):
404 """display the phase heads contained in 'data'"""
404 """display the phase heads contained in 'data'"""
405 indent_string = b' ' * indent
405 indent_string = b' ' * indent
406 headsbyphase = phases.binarydecode(data)
406 headsbyphase = phases.binarydecode(data)
407 for phase in phases.allphases:
407 for phase in phases.allphases:
408 for head in headsbyphase[phase]:
408 for head in headsbyphase[phase]:
409 ui.write(indent_string)
409 ui.write(indent_string)
410 ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))
410 ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))
411
411
412
412
413 def _quasirepr(thing):
413 def _quasirepr(thing):
414 if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
414 if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
415 return b'{%s}' % (
415 return b'{%s}' % (
416 b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))
416 b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))
417 )
417 )
418 return pycompat.bytestr(repr(thing))
418 return pycompat.bytestr(repr(thing))
419
419
420
420
421 def _debugbundle2(ui, gen, all=None, **opts):
421 def _debugbundle2(ui, gen, all=None, **opts):
422 """lists the contents of a bundle2"""
422 """lists the contents of a bundle2"""
423 if not isinstance(gen, bundle2.unbundle20):
423 if not isinstance(gen, bundle2.unbundle20):
424 raise error.Abort(_(b'not a bundle2 file'))
424 raise error.Abort(_(b'not a bundle2 file'))
425 ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
425 ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
426 parttypes = opts.get('part_type', [])
426 parttypes = opts.get('part_type', [])
427 for part in gen.iterparts():
427 for part in gen.iterparts():
428 if parttypes and part.type not in parttypes:
428 if parttypes and part.type not in parttypes:
429 continue
429 continue
430 msg = b'%s -- %s (mandatory: %r)\n'
430 msg = b'%s -- %s (mandatory: %r)\n'
431 ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
431 ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
432 if part.type == b'changegroup':
432 if part.type == b'changegroup':
433 version = part.params.get(b'version', b'01')
433 version = part.params.get(b'version', b'01')
434 cg = changegroup.getunbundler(version, part, b'UN')
434 cg = changegroup.getunbundler(version, part, b'UN')
435 if not ui.quiet:
435 if not ui.quiet:
436 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
436 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
437 if part.type == b'obsmarkers':
437 if part.type == b'obsmarkers':
438 if not ui.quiet:
438 if not ui.quiet:
439 _debugobsmarkers(ui, part, indent=4, **opts)
439 _debugobsmarkers(ui, part, indent=4, **opts)
440 if part.type == b'phase-heads':
440 if part.type == b'phase-heads':
441 if not ui.quiet:
441 if not ui.quiet:
442 _debugphaseheads(ui, part, indent=4)
442 _debugphaseheads(ui, part, indent=4)
443
443
444
444
445 @command(
445 @command(
446 b'debugbundle',
446 b'debugbundle',
447 [
447 [
448 (b'a', b'all', None, _(b'show all details')),
448 (b'a', b'all', None, _(b'show all details')),
449 (b'', b'part-type', [], _(b'show only the named part type')),
449 (b'', b'part-type', [], _(b'show only the named part type')),
450 (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
450 (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
451 ],
451 ],
452 _(b'FILE'),
452 _(b'FILE'),
453 norepo=True,
453 norepo=True,
454 )
454 )
455 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
455 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
456 """lists the contents of a bundle"""
456 """lists the contents of a bundle"""
457 with hg.openpath(ui, bundlepath) as f:
457 with hg.openpath(ui, bundlepath) as f:
458 if spec:
458 if spec:
459 spec = exchange.getbundlespec(ui, f)
459 spec = exchange.getbundlespec(ui, f)
460 ui.write(b'%s\n' % spec)
460 ui.write(b'%s\n' % spec)
461 return
461 return
462
462
463 gen = exchange.readbundle(ui, f, bundlepath)
463 gen = exchange.readbundle(ui, f, bundlepath)
464 if isinstance(gen, bundle2.unbundle20):
464 if isinstance(gen, bundle2.unbundle20):
465 return _debugbundle2(ui, gen, all=all, **opts)
465 return _debugbundle2(ui, gen, all=all, **opts)
466 _debugchangegroup(ui, gen, all=all, **opts)
466 _debugchangegroup(ui, gen, all=all, **opts)
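# Illustrative usage (hypothetical file name; output values are examples only):
#   $ hg debugbundle --spec bundle.hg    # prints the bundlespec, e.g. "gzip-v2"
#   $ hg debugbundle --all bundle.hg     # dumps per-revision delta details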
467
467
468
468
469 @command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
469 @command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
470 def debugcapabilities(ui, path, **opts):
470 def debugcapabilities(ui, path, **opts):
471 """lists the capabilities of a remote peer"""
471 """lists the capabilities of a remote peer"""
472 opts = pycompat.byteskwargs(opts)
472 opts = pycompat.byteskwargs(opts)
473 peer = hg.peer(ui, opts, path)
473 peer = hg.peer(ui, opts, path)
474 try:
474 try:
475 caps = peer.capabilities()
475 caps = peer.capabilities()
476 ui.writenoi18n(b'Main capabilities:\n')
476 ui.writenoi18n(b'Main capabilities:\n')
477 for c in sorted(caps):
477 for c in sorted(caps):
478 ui.write(b' %s\n' % c)
478 ui.write(b' %s\n' % c)
479 b2caps = bundle2.bundle2caps(peer)
479 b2caps = bundle2.bundle2caps(peer)
480 if b2caps:
480 if b2caps:
481 ui.writenoi18n(b'Bundle2 capabilities:\n')
481 ui.writenoi18n(b'Bundle2 capabilities:\n')
482 for key, values in sorted(pycompat.iteritems(b2caps)):
482 for key, values in sorted(pycompat.iteritems(b2caps)):
483 ui.write(b' %s\n' % key)
483 ui.write(b' %s\n' % key)
484 for v in values:
484 for v in values:
485 ui.write(b' %s\n' % v)
485 ui.write(b' %s\n' % v)
486 finally:
486 finally:
487 peer.close()
487 peer.close()
488
488
489
489
490 @command(
490 @command(
491 b'debugchangedfiles',
491 b'debugchangedfiles',
492 [
492 [
493 (
493 (
494 b'',
494 b'',
495 b'compute',
495 b'compute',
496 False,
496 False,
497 b"compute information instead of reading it from storage",
497 b"compute information instead of reading it from storage",
498 ),
498 ),
499 ],
499 ],
500 b'REV',
500 b'REV',
501 )
501 )
502 def debugchangedfiles(ui, repo, rev, **opts):
502 def debugchangedfiles(ui, repo, rev, **opts):
503 """list the stored files changes for a revision"""
503 """list the stored files changes for a revision"""
504 ctx = scmutil.revsingle(repo, rev, None)
504 ctx = scmutil.revsingle(repo, rev, None)
505 files = None
505 files = None
506
506
507 if opts['compute']:
507 if opts['compute']:
508 files = metadata.compute_all_files_changes(ctx)
508 files = metadata.compute_all_files_changes(ctx)
509 else:
509 else:
510 sd = repo.changelog.sidedata(ctx.rev())
510 sd = repo.changelog.sidedata(ctx.rev())
511 files_block = sd.get(sidedata.SD_FILES)
511 files_block = sd.get(sidedata.SD_FILES)
512 if files_block is not None:
512 if files_block is not None:
513 files = metadata.decode_files_sidedata(sd)
513 files = metadata.decode_files_sidedata(sd)
514 if files is not None:
514 if files is not None:
515 for f in sorted(files.touched):
515 for f in sorted(files.touched):
516 if f in files.added:
516 if f in files.added:
517 action = b"added"
517 action = b"added"
518 elif f in files.removed:
518 elif f in files.removed:
519 action = b"removed"
519 action = b"removed"
520 elif f in files.merged:
520 elif f in files.merged:
521 action = b"merged"
521 action = b"merged"
522 elif f in files.salvaged:
522 elif f in files.salvaged:
523 action = b"salvaged"
523 action = b"salvaged"
524 else:
524 else:
525 action = b"touched"
525 action = b"touched"
526
526
527 copy_parent = b""
527 copy_parent = b""
528 copy_source = b""
528 copy_source = b""
529 if f in files.copied_from_p1:
529 if f in files.copied_from_p1:
530 copy_parent = b"p1"
530 copy_parent = b"p1"
531 copy_source = files.copied_from_p1[f]
531 copy_source = files.copied_from_p1[f]
532 elif f in files.copied_from_p2:
532 elif f in files.copied_from_p2:
533 copy_parent = b"p2"
533 copy_parent = b"p2"
534 copy_source = files.copied_from_p2[f]
534 copy_source = files.copied_from_p2[f]
535
535
536 data = (action, copy_parent, f, copy_source)
536 data = (action, copy_parent, f, copy_source)
537 template = b"%-8s %2s: %s, %s;\n"
537 template = b"%-8s %2s: %s, %s;\n"
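# e.g. the line written below might look like (illustrative values):
#   "added     p1: renamed.txt, original.txt;"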
538 ui.write(template % data)
538 ui.write(template % data)
539
539
540
540
541 @command(b'debugcheckstate', [], b'')
541 @command(b'debugcheckstate', [], b'')
542 def debugcheckstate(ui, repo):
542 def debugcheckstate(ui, repo):
543 """validate the correctness of the current dirstate"""
543 """validate the correctness of the current dirstate"""
544 parent1, parent2 = repo.dirstate.parents()
544 parent1, parent2 = repo.dirstate.parents()
545 m1 = repo[parent1].manifest()
545 m1 = repo[parent1].manifest()
546 m2 = repo[parent2].manifest()
546 m2 = repo[parent2].manifest()
547 errors = 0
547 errors = 0
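# dirstate state characters checked below: 'n' = normal, 'a' = added,
# 'r' = removed, 'm' = merged (from the first parent during a merge)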
548 for f in repo.dirstate:
548 for f in repo.dirstate:
549 state = repo.dirstate[f]
549 state = repo.dirstate[f]
550 if state in b"nr" and f not in m1:
550 if state in b"nr" and f not in m1:
551 ui.warn(_(b"%s in state %s, but not in manifest1\n") % (f, state))
551 ui.warn(_(b"%s in state %s, but not in manifest1\n") % (f, state))
552 errors += 1
552 errors += 1
553 if state in b"a" and f in m1:
553 if state in b"a" and f in m1:
554 ui.warn(_(b"%s in state %s, but also in manifest1\n") % (f, state))
554 ui.warn(_(b"%s in state %s, but also in manifest1\n") % (f, state))
555 errors += 1
555 errors += 1
556 if state in b"m" and f not in m1 and f not in m2:
556 if state in b"m" and f not in m1 and f not in m2:
557 ui.warn(
557 ui.warn(
558 _(b"%s in state %s, but not in either manifest\n") % (f, state)
558 _(b"%s in state %s, but not in either manifest\n") % (f, state)
559 )
559 )
560 errors += 1
560 errors += 1
561 for f in m1:
561 for f in m1:
562 state = repo.dirstate[f]
562 state = repo.dirstate[f]
563 if state not in b"nrm":
563 if state not in b"nrm":
564 ui.warn(_(b"%s in manifest1, but listed as state %s\n") % (f, state))
564 ui.warn(_(b"%s in manifest1, but listed as state %s\n") % (f, state))
565 errors += 1
565 errors += 1
566 if errors:
566 if errors:
567 errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
567 errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
568 raise error.Abort(errstr)
568 raise error.Abort(errstr)
569
569
570
570
571 @command(
571 @command(
572 b'debugcolor',
572 b'debugcolor',
573 [(b'', b'style', None, _(b'show all configured styles'))],
573 [(b'', b'style', None, _(b'show all configured styles'))],
574 b'hg debugcolor',
574 b'hg debugcolor',
575 )
575 )
576 def debugcolor(ui, repo, **opts):
576 def debugcolor(ui, repo, **opts):
577 """show available color, effects or style"""
577 """show available color, effects or style"""
578 ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
578 ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
579 if opts.get('style'):
579 if opts.get('style'):
580 return _debugdisplaystyle(ui)
580 return _debugdisplaystyle(ui)
581 else:
581 else:
582 return _debugdisplaycolor(ui)
582 return _debugdisplaycolor(ui)
583
583
584
584
585 def _debugdisplaycolor(ui):
585 def _debugdisplaycolor(ui):
586 ui = ui.copy()
586 ui = ui.copy()
587 ui._styles.clear()
587 ui._styles.clear()
588 for effect in color._activeeffects(ui).keys():
588 for effect in color._activeeffects(ui).keys():
589 ui._styles[effect] = effect
589 ui._styles[effect] = effect
590 if ui._terminfoparams:
590 if ui._terminfoparams:
591 for k, v in ui.configitems(b'color'):
591 for k, v in ui.configitems(b'color'):
592 if k.startswith(b'color.'):
592 if k.startswith(b'color.'):
593 ui._styles[k] = k[6:]
593 ui._styles[k] = k[6:]
594 elif k.startswith(b'terminfo.'):
594 elif k.startswith(b'terminfo.'):
595 ui._styles[k] = k[9:]
595 ui._styles[k] = k[9:]
596 ui.write(_(b'available colors:\n'))
596 ui.write(_(b'available colors:\n'))
597 # sort labels with a '_' after the others to group the '_background' entries.
597 # sort labels with a '_' after the others to group the '_background' entries.
598 items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
598 items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
599 for colorname, label in items:
599 for colorname, label in items:
600 ui.write(b'%s\n' % colorname, label=label)
600 ui.write(b'%s\n' % colorname, label=label)
601
601
602
602
603 def _debugdisplaystyle(ui):
603 def _debugdisplaystyle(ui):
604 ui.write(_(b'available style:\n'))
604 ui.write(_(b'available style:\n'))
605 if not ui._styles:
605 if not ui._styles:
606 return
606 return
607 width = max(len(s) for s in ui._styles)
607 width = max(len(s) for s in ui._styles)
608 for label, effects in sorted(ui._styles.items()):
608 for label, effects in sorted(ui._styles.items()):
609 ui.write(b'%s' % label, label=label)
609 ui.write(b'%s' % label, label=label)
610 if effects:
610 if effects:
611 # 50
611 # 50
612 ui.write(b': ')
612 ui.write(b': ')
613 ui.write(b' ' * (max(0, width - len(label))))
613 ui.write(b' ' * (max(0, width - len(label))))
614 ui.write(b', '.join(ui.label(e, e) for e in effects.split()))
614 ui.write(b', '.join(ui.label(e, e) for e in effects.split()))
615 ui.write(b'\n')
615 ui.write(b'\n')
616
616
617
617
618 @command(b'debugcreatestreamclonebundle', [], b'FILE')
618 @command(b'debugcreatestreamclonebundle', [], b'FILE')
619 def debugcreatestreamclonebundle(ui, repo, fname):
619 def debugcreatestreamclonebundle(ui, repo, fname):
620 """create a stream clone bundle file
620 """create a stream clone bundle file
621
621
622 Stream bundles are special bundles that are essentially archives of
622 Stream bundles are special bundles that are essentially archives of
623 revlog files. They are commonly used for cloning very quickly.
623 revlog files. They are commonly used for cloning very quickly.
624 """
624 """
625 # TODO we may want to turn this into an abort when this functionality
625 # TODO we may want to turn this into an abort when this functionality
626 # is moved into `hg bundle`.
626 # is moved into `hg bundle`.
627 if phases.hassecret(repo):
627 if phases.hassecret(repo):
628 ui.warn(
628 ui.warn(
629 _(
629 _(
630 b'(warning: stream clone bundle will contain secret '
630 b'(warning: stream clone bundle will contain secret '
631 b'revisions)\n'
631 b'revisions)\n'
632 )
632 )
633 )
633 )
634
634
635 requirements, gen = streamclone.generatebundlev1(repo)
635 requirements, gen = streamclone.generatebundlev1(repo)
636 changegroup.writechunks(ui, gen, fname)
636 changegroup.writechunks(ui, gen, fname)
637
637
638 ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))
638 ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))
639
639
640
640
641 @command(
641 @command(
642 b'debugdag',
642 b'debugdag',
643 [
643 [
644 (b't', b'tags', None, _(b'use tags as labels')),
644 (b't', b'tags', None, _(b'use tags as labels')),
645 (b'b', b'branches', None, _(b'annotate with branch names')),
645 (b'b', b'branches', None, _(b'annotate with branch names')),
646 (b'', b'dots', None, _(b'use dots for runs')),
646 (b'', b'dots', None, _(b'use dots for runs')),
647 (b's', b'spaces', None, _(b'separate elements by spaces')),
647 (b's', b'spaces', None, _(b'separate elements by spaces')),
648 ],
648 ],
649 _(b'[OPTION]... [FILE [REV]...]'),
649 _(b'[OPTION]... [FILE [REV]...]'),
650 optionalrepo=True,
650 optionalrepo=True,
651 )
651 )
652 def debugdag(ui, repo, file_=None, *revs, **opts):
652 def debugdag(ui, repo, file_=None, *revs, **opts):
653 """format the changelog or an index DAG as a concise textual description
653 """format the changelog or an index DAG as a concise textual description
654
654
655 If you pass a revlog index, the revlog's DAG is emitted. If you list
655 If you pass a revlog index, the revlog's DAG is emitted. If you list
656 revision numbers, they get labeled in the output as rN.
656 revision numbers, they get labeled in the output as rN.
657
657
658 Otherwise, the changelog DAG of the current repo is emitted.
658 Otherwise, the changelog DAG of the current repo is emitted.
659 """
659 """
660 spaces = opts.get('spaces')
660 spaces = opts.get('spaces')
661 dots = opts.get('dots')
661 dots = opts.get('dots')
662 if file_:
662 if file_:
663 rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
663 rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
664 revs = {int(r) for r in revs}
664 revs = {int(r) for r in revs}
665
665
666 def events():
666 def events():
667 for r in rlog:
667 for r in rlog:
668 yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
668 yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
669 if r in revs:
669 if r in revs:
670 yield b'l', (r, b"r%i" % r)
670 yield b'l', (r, b"r%i" % r)
671
671
672 elif repo:
672 elif repo:
673 cl = repo.changelog
673 cl = repo.changelog
674 tags = opts.get('tags')
674 tags = opts.get('tags')
675 branches = opts.get('branches')
675 branches = opts.get('branches')
676 if tags:
676 if tags:
677 labels = {}
677 labels = {}
678 for l, n in repo.tags().items():
678 for l, n in repo.tags().items():
679 labels.setdefault(cl.rev(n), []).append(l)
679 labels.setdefault(cl.rev(n), []).append(l)
680
680
681 def events():
681 def events():
682 b = b"default"
682 b = b"default"
683 for r in cl:
683 for r in cl:
684 if branches:
684 if branches:
685 newb = cl.read(cl.node(r))[5][b'branch']
685 newb = cl.read(cl.node(r))[5][b'branch']
686 if newb != b:
686 if newb != b:
687 yield b'a', newb
687 yield b'a', newb
688 b = newb
688 b = newb
689 yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
689 yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
690 if tags:
690 if tags:
691 ls = labels.get(r)
691 ls = labels.get(r)
692 if ls:
692 if ls:
693 for l in ls:
693 for l in ls:
694 yield b'l', (r, l)
694 yield b'l', (r, l)
695
695
696 else:
696 else:
697 raise error.Abort(_(b'need repo for changelog dag'))
697 raise error.Abort(_(b'need repo for changelog dag'))
698
698
699 for line in dagparser.dagtextlines(
699 for line in dagparser.dagtextlines(
700 events(),
700 events(),
701 addspaces=spaces,
701 addspaces=spaces,
702 wraplabels=True,
702 wraplabels=True,
703 wrapannotations=True,
703 wrapannotations=True,
704 wrapnonlinear=dots,
704 wrapnonlinear=dots,
705 usedots=dots,
705 usedots=dots,
706 maxlinewidth=70,
706 maxlinewidth=70,
707 ):
707 ):
708 ui.write(line)
708 ui.write(line)
709 ui.write(b"\n")
709 ui.write(b"\n")
710
710
711
711
712 @command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
712 @command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
713 def debugdata(ui, repo, file_, rev=None, **opts):
713 def debugdata(ui, repo, file_, rev=None, **opts):
714 """dump the contents of a data file revision"""
714 """dump the contents of a data file revision"""
715 opts = pycompat.byteskwargs(opts)
715 opts = pycompat.byteskwargs(opts)
716 if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
716 if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
717 if rev is not None:
717 if rev is not None:
718 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
718 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
719 file_, rev = None, file_
719 file_, rev = None, file_
720 elif rev is None:
720 elif rev is None:
721 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
721 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
722 r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
722 r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
723 try:
723 try:
724 ui.write(r.rawdata(r.lookup(rev)))
724 ui.write(r.rawdata(r.lookup(rev)))
725 except KeyError:
725 except KeyError:
726 raise error.Abort(_(b'invalid revision identifier %s') % rev)
726 raise error.Abort(_(b'invalid revision identifier %s') % rev)
727
727
728
728
729 @command(
729 @command(
730 b'debugdate',
730 b'debugdate',
731 [(b'e', b'extended', None, _(b'try extended date formats'))],
731 [(b'e', b'extended', None, _(b'try extended date formats'))],
732 _(b'[-e] DATE [RANGE]'),
732 _(b'[-e] DATE [RANGE]'),
733 norepo=True,
733 norepo=True,
734 optionalrepo=True,
734 optionalrepo=True,
735 )
735 )
736 def debugdate(ui, date, range=None, **opts):
736 def debugdate(ui, date, range=None, **opts):
737 """parse and display a date"""
737 """parse and display a date"""
738 if opts["extended"]:
738 if opts["extended"]:
739 d = dateutil.parsedate(date, dateutil.extendeddateformats)
739 d = dateutil.parsedate(date, dateutil.extendeddateformats)
740 else:
740 else:
741 d = dateutil.parsedate(date)
741 d = dateutil.parsedate(date)
742 ui.writenoi18n(b"internal: %d %d\n" % d)
742 ui.writenoi18n(b"internal: %d %d\n" % d)
743 ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(d))
743 ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(d))
744 if range:
744 if range:
745 m = dateutil.matchdate(range)
745 m = dateutil.matchdate(range)
746 ui.writenoi18n(b"match: %s\n" % m(d[0]))
746 ui.writenoi18n(b"match: %s\n" % m(d[0]))
747
747
748
748
749 @command(
749 @command(
750 b'debugdeltachain',
750 b'debugdeltachain',
751 cmdutil.debugrevlogopts + cmdutil.formatteropts,
751 cmdutil.debugrevlogopts + cmdutil.formatteropts,
752 _(b'-c|-m|FILE'),
752 _(b'-c|-m|FILE'),
753 optionalrepo=True,
753 optionalrepo=True,
754 )
754 )
755 def debugdeltachain(ui, repo, file_=None, **opts):
755 def debugdeltachain(ui, repo, file_=None, **opts):
756 """dump information about delta chains in a revlog
756 """dump information about delta chains in a revlog
757
757
758 Output can be templatized. Available template keywords are:
758 Output can be templatized. Available template keywords are:
759
759
760 :``rev``: revision number
760 :``rev``: revision number
761 :``chainid``: delta chain identifier (numbered by unique base)
761 :``chainid``: delta chain identifier (numbered by unique base)
762 :``chainlen``: delta chain length to this revision
762 :``chainlen``: delta chain length to this revision
763 :``prevrev``: previous revision in delta chain
763 :``prevrev``: previous revision in delta chain
764 :``deltatype``: role of delta / how it was computed
764 :``deltatype``: role of delta / how it was computed
765 :``compsize``: compressed size of revision
765 :``compsize``: compressed size of revision
766 :``uncompsize``: uncompressed size of revision
766 :``uncompsize``: uncompressed size of revision
767 :``chainsize``: total size of compressed revisions in chain
767 :``chainsize``: total size of compressed revisions in chain
768 :``chainratio``: total chain size divided by uncompressed revision size
768 :``chainratio``: total chain size divided by uncompressed revision size
769 (new delta chains typically start at ratio 2.00)
769 (new delta chains typically start at ratio 2.00)
770 :``lindist``: linear distance from base revision in delta chain to end
770 :``lindist``: linear distance from base revision in delta chain to end
771 of this revision
771 of this revision
772 :``extradist``: total size of revisions not part of this delta chain from
772 :``extradist``: total size of revisions not part of this delta chain from
773 base of delta chain to end of this revision; a measurement
773 base of delta chain to end of this revision; a measurement
774 of how much extra data we need to read/seek across to read
774 of how much extra data we need to read/seek across to read
775 the delta chain for this revision
775 the delta chain for this revision
776 :``extraratio``: extradist divided by chainsize; another representation of
776 :``extraratio``: extradist divided by chainsize; another representation of
777 how much unrelated data is needed to load this delta chain
777 how much unrelated data is needed to load this delta chain
778
778
779 If the repository is configured to use the sparse read, additional keywords
779 If the repository is configured to use the sparse read, additional keywords
780 are available:
780 are available:
781
781
782 :``readsize``: total size of data read from the disk for a revision
782 :``readsize``: total size of data read from the disk for a revision
783 (sum of the sizes of all the blocks)
783 (sum of the sizes of all the blocks)
784 :``largestblock``: size of the largest block of data read from the disk
784 :``largestblock``: size of the largest block of data read from the disk
785 :``readdensity``: density of useful bytes in the data read from the disk
785 :``readdensity``: density of useful bytes in the data read from the disk
786 :``srchunks``: in how many data hunks the whole revision would be read
786 :``srchunks``: in how many data hunks the whole revision would be read
787
787
788 The sparse read can be enabled with experimental.sparse-read = True
788 The sparse read can be enabled with experimental.sparse-read = True
789 """
789 """
790 opts = pycompat.byteskwargs(opts)
790 opts = pycompat.byteskwargs(opts)
791 r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
791 r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
792 index = r.index
792 index = r.index
793 start = r.start
793 start = r.start
794 length = r.length
794 length = r.length
795 generaldelta = r.version & revlog.FLAG_GENERALDELTA
795 generaldelta = r.version & revlog.FLAG_GENERALDELTA
796 withsparseread = getattr(r, '_withsparseread', False)
796 withsparseread = getattr(r, '_withsparseread', False)
797
797
798 def revinfo(rev):
798 def revinfo(rev):
799 e = index[rev]
799 e = index[rev]
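# revlog index entry layout as used below: e[1] = compressed size,
# e[2] = uncompressed size, e[3] = delta base revision,
# e[5]/e[6] = first/second parent revisions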
800 compsize = e[1]
800 compsize = e[1]
801 uncompsize = e[2]
801 uncompsize = e[2]
802 chainsize = 0
802 chainsize = 0
803
803
804 if generaldelta:
804 if generaldelta:
805 if e[3] == e[5]:
805 if e[3] == e[5]:
806 deltatype = b'p1'
806 deltatype = b'p1'
807 elif e[3] == e[6]:
807 elif e[3] == e[6]:
808 deltatype = b'p2'
808 deltatype = b'p2'
809 elif e[3] == rev - 1:
809 elif e[3] == rev - 1:
810 deltatype = b'prev'
810 deltatype = b'prev'
811 elif e[3] == rev:
811 elif e[3] == rev:
812 deltatype = b'base'
812 deltatype = b'base'
813 else:
813 else:
814 deltatype = b'other'
814 deltatype = b'other'
815 else:
815 else:
816 if e[3] == rev:
816 if e[3] == rev:
817 deltatype = b'base'
817 deltatype = b'base'
818 else:
818 else:
819 deltatype = b'prev'
819 deltatype = b'prev'
820
820
821 chain = r._deltachain(rev)[0]
821 chain = r._deltachain(rev)[0]
822 for iterrev in chain:
822 for iterrev in chain:
823 e = index[iterrev]
823 e = index[iterrev]
824 chainsize += e[1]
824 chainsize += e[1]
825
825
826 return compsize, uncompsize, deltatype, chain, chainsize
826 return compsize, uncompsize, deltatype, chain, chainsize
827
827
828 fm = ui.formatter(b'debugdeltachain', opts)
828 fm = ui.formatter(b'debugdeltachain', opts)
829
829
830 fm.plain(
830 fm.plain(
831 b' rev chain# chainlen prev delta '
831 b' rev chain# chainlen prev delta '
832 b'size rawsize chainsize ratio lindist extradist '
832 b'size rawsize chainsize ratio lindist extradist '
833 b'extraratio'
833 b'extraratio'
834 )
834 )
835 if withsparseread:
835 if withsparseread:
836 fm.plain(b' readsize largestblk rddensity srchunks')
836 fm.plain(b' readsize largestblk rddensity srchunks')
837 fm.plain(b'\n')
837 fm.plain(b'\n')
838
838
839 chainbases = {}
839 chainbases = {}
840 for rev in r:
840 for rev in r:
841 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
841 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
842 chainbase = chain[0]
842 chainbase = chain[0]
843 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
843 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
844 basestart = start(chainbase)
844 basestart = start(chainbase)
845 revstart = start(rev)
845 revstart = start(rev)
846 lineardist = revstart + comp - basestart
846 lineardist = revstart + comp - basestart
847 extradist = lineardist - chainsize
847 extradist = lineardist - chainsize
848 try:
848 try:
849 prevrev = chain[-2]
849 prevrev = chain[-2]
850 except IndexError:
850 except IndexError:
851 prevrev = -1
851 prevrev = -1
852
852
853 if uncomp != 0:
853 if uncomp != 0:
854 chainratio = float(chainsize) / float(uncomp)
854 chainratio = float(chainsize) / float(uncomp)
855 else:
855 else:
856 chainratio = chainsize
856 chainratio = chainsize
857
857
858 if chainsize != 0:
858 if chainsize != 0:
859 extraratio = float(extradist) / float(chainsize)
859 extraratio = float(extradist) / float(chainsize)
860 else:
860 else:
861 extraratio = extradist
861 extraratio = extradist
862
862
863 fm.startitem()
863 fm.startitem()
864 fm.write(
864 fm.write(
865 b'rev chainid chainlen prevrev deltatype compsize '
865 b'rev chainid chainlen prevrev deltatype compsize '
866 b'uncompsize chainsize chainratio lindist extradist '
866 b'uncompsize chainsize chainratio lindist extradist '
867 b'extraratio',
867 b'extraratio',
868 b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
868 b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
869 rev,
869 rev,
870 chainid,
870 chainid,
871 len(chain),
871 len(chain),
872 prevrev,
872 prevrev,
873 deltatype,
873 deltatype,
874 comp,
874 comp,
875 uncomp,
875 uncomp,
876 chainsize,
876 chainsize,
877 chainratio,
877 chainratio,
878 lineardist,
878 lineardist,
879 extradist,
879 extradist,
880 extraratio,
880 extraratio,
881 rev=rev,
881 rev=rev,
882 chainid=chainid,
882 chainid=chainid,
883 chainlen=len(chain),
883 chainlen=len(chain),
884 prevrev=prevrev,
884 prevrev=prevrev,
885 deltatype=deltatype,
885 deltatype=deltatype,
886 compsize=comp,
886 compsize=comp,
887 uncompsize=uncomp,
887 uncompsize=uncomp,
888 chainsize=chainsize,
888 chainsize=chainsize,
889 chainratio=chainratio,
889 chainratio=chainratio,
890 lindist=lineardist,
890 lindist=lineardist,
891 extradist=extradist,
891 extradist=extradist,
892 extraratio=extraratio,
892 extraratio=extraratio,
893 )
893 )
894 if withsparseread:
894 if withsparseread:
895 readsize = 0
895 readsize = 0
896 largestblock = 0
896 largestblock = 0
897 srchunks = 0
897 srchunks = 0
898
898
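# slicechunk groups the delta chain into runs of revisions that would be
# read from disk together under sparse-read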
899 for revschunk in deltautil.slicechunk(r, chain):
899 for revschunk in deltautil.slicechunk(r, chain):
900 srchunks += 1
900 srchunks += 1
901 blkend = start(revschunk[-1]) + length(revschunk[-1])
901 blkend = start(revschunk[-1]) + length(revschunk[-1])
902 blksize = blkend - start(revschunk[0])
902 blksize = blkend - start(revschunk[0])
903
903
904 readsize += blksize
904 readsize += blksize
905 if largestblock < blksize:
905 if largestblock < blksize:
906 largestblock = blksize
906 largestblock = blksize
907
907
908 if readsize:
908 if readsize:
909 readdensity = float(chainsize) / float(readsize)
909 readdensity = float(chainsize) / float(readsize)
910 else:
910 else:
911 readdensity = 1
911 readdensity = 1
912
912
913 fm.write(
913 fm.write(
914 b'readsize largestblock readdensity srchunks',
914 b'readsize largestblock readdensity srchunks',
915 b' %10d %10d %9.5f %8d',
915 b' %10d %10d %9.5f %8d',
916 readsize,
916 readsize,
917 largestblock,
917 largestblock,
918 readdensity,
918 readdensity,
919 srchunks,
919 srchunks,
920 readsize=readsize,
920 readsize=readsize,
921 largestblock=largestblock,
921 largestblock=largestblock,
922 readdensity=readdensity,
922 readdensity=readdensity,
923 srchunks=srchunks,
923 srchunks=srchunks,
924 )
924 )
925
925
926 fm.plain(b'\n')
926 fm.plain(b'\n')
927
927
928 fm.end()
928 fm.end()
929
929
930
930
931 @command(
931 @command(
932 b'debugdirstate|debugstate',
932 b'debugdirstate|debugstate',
933 [
933 [
934 (
934 (
935 b'',
935 b'',
936 b'nodates',
936 b'nodates',
937 None,
937 None,
938 _(b'do not display the saved mtime (DEPRECATED)'),
938 _(b'do not display the saved mtime (DEPRECATED)'),
939 ),
939 ),
940 (b'', b'dates', True, _(b'display the saved mtime')),
940 (b'', b'dates', True, _(b'display the saved mtime')),
941 (b'', b'datesort', None, _(b'sort by saved mtime')),
941 (b'', b'datesort', None, _(b'sort by saved mtime')),
942 ],
942 ],
943 _(b'[OPTION]...'),
943 _(b'[OPTION]...'),
944 )
944 )
945 def debugstate(ui, repo, **opts):
945 def debugstate(ui, repo, **opts):
946 """show the contents of the current dirstate"""
946 """show the contents of the current dirstate"""
947
947
948 nodates = not opts['dates']
948 nodates = not opts['dates']
949 if opts.get('nodates') is not None:
949 if opts.get('nodates') is not None:
950 nodates = True
950 nodates = True
951 datesort = opts.get('datesort')
951 datesort = opts.get('datesort')
952
952
953 if datesort:
953 if datesort:
954 keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
954 keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
955 else:
955 else:
956 keyfunc = None # sort by filename
956 keyfunc = None # sort by filename
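# each dirstate entry is a (state, mode, size, mtime) tuple, accessed below
# as ent[0]..ent[3]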
957 for file_, ent in sorted(pycompat.iteritems(repo.dirstate), key=keyfunc):
957 for file_, ent in sorted(pycompat.iteritems(repo.dirstate), key=keyfunc):
958 if ent[3] == -1:
958 if ent[3] == -1:
959 timestr = b'unset '
959 timestr = b'unset '
960 elif nodates:
960 elif nodates:
961 timestr = b'set '
961 timestr = b'set '
962 else:
962 else:
963 timestr = time.strftime(
963 timestr = time.strftime(
964 "%Y-%m-%d %H:%M:%S ", time.localtime(ent[3])
964 "%Y-%m-%d %H:%M:%S ", time.localtime(ent[3])
965 )
965 )
966 timestr = encoding.strtolocal(timestr)
966 timestr = encoding.strtolocal(timestr)
967 if ent[1] & 0o20000:
967 if ent[1] & 0o20000:
968 mode = b'lnk'
968 mode = b'lnk'
969 else:
969 else:
970 mode = b'%3o' % (ent[1] & 0o777 & ~util.umask)
970 mode = b'%3o' % (ent[1] & 0o777 & ~util.umask)
971 ui.write(b"%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
971 ui.write(b"%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
972 for f in repo.dirstate.copies():
972 for f in repo.dirstate.copies():
973 ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
973 ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
974
974
975
975
976 @command(
976 @command(
977 b'debugdiscovery',
977 b'debugdiscovery',
978 [
978 [
979 (b'', b'old', None, _(b'use old-style discovery')),
979 (b'', b'old', None, _(b'use old-style discovery')),
980 (
980 (
981 b'',
981 b'',
982 b'nonheads',
982 b'nonheads',
983 None,
983 None,
984 _(b'use old-style discovery with non-heads included'),
984 _(b'use old-style discovery with non-heads included'),
985 ),
985 ),
986 (b'', b'rev', [], b'restrict discovery to this set of revs'),
986 (b'', b'rev', [], b'restrict discovery to this set of revs'),
987 (b'', b'seed', b'12323', b'specify the random seed used for discovery'),
987 (b'', b'seed', b'12323', b'specify the random seed used for discovery'),
988 (
988 (
989 b'',
989 b'',
990 b'local-as-revs',
990 b'local-as-revs',
991 "",
991 "",
992 'treat local as having these revisions only',
992 'treat local as having these revisions only',
993 ),
993 ),
994 (
994 (
995 b'',
995 b'',
996 b'remote-as-revs',
996 b'remote-as-revs',
997 "",
997 "",
998 'use local as remote, with only these revisions',
998 'use local as remote, with only these revisions',
999 ),
999 ),
1000 ]
1000 ]
1001 + cmdutil.remoteopts,
1001 + cmdutil.remoteopts,
1002 _(b'[--rev REV] [OTHER]'),
1002 _(b'[--rev REV] [OTHER]'),
1003 )
1003 )
1004 def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
1004 def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
1005 """runs the changeset discovery protocol in isolation
1005 """runs the changeset discovery protocol in isolation
1006
1006
1007 The local peer can be "replaced" by a subset of the local repository by
1007 The local peer can be "replaced" by a subset of the local repository by
1008 using the `--local-as-revs` flag. In the same way, the usual `remote` peer can
1008 using the `--local-as-revs` flag. In the same way, the usual `remote` peer can
1009 be "replaced" by a subset of the local repository using the
1009 be "replaced" by a subset of the local repository using the
1010 `--remote-as-revs` flag. This is useful to efficiently debug pathological
1010 `--remote-as-revs` flag. This is useful to efficiently debug pathological
1011 discovery situations.
1011 discovery situations.
1012 """
1012 """
1013 opts = pycompat.byteskwargs(opts)
1013 opts = pycompat.byteskwargs(opts)
1014 unfi = repo.unfiltered()
1014 unfi = repo.unfiltered()
1015
1015
1016 # setup potential extra filtering
1016 # setup potential extra filtering
1017 local_revs = opts[b"local_as_revs"]
1017 local_revs = opts[b"local_as_revs"]
1018 remote_revs = opts[b"remote_as_revs"]
1018 remote_revs = opts[b"remote_as_revs"]
1019
1019
1020 # make sure tests are repeatable
1020 # make sure tests are repeatable
1021 random.seed(int(opts[b'seed']))
1021 random.seed(int(opts[b'seed']))
1022
1022
1023 if not remote_revs:
1023 if not remote_revs:
1024
1024
1025 remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
1025 remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
1026 remote = hg.peer(repo, opts, remoteurl)
1026 remote = hg.peer(repo, opts, remoteurl)
1027 ui.status(_(b'comparing with %s\n') % util.hidepassword(remoteurl))
1027 ui.status(_(b'comparing with %s\n') % util.hidepassword(remoteurl))
1028 else:
1028 else:
1029 branches = (None, [])
1029 branches = (None, [])
1030 remote_filtered_revs = scmutil.revrange(
1030 remote_filtered_revs = scmutil.revrange(
1031 unfi, [b"not (::(%s))" % remote_revs]
1031 unfi, [b"not (::(%s))" % remote_revs]
1032 )
1032 )
1033 remote_filtered_revs = frozenset(remote_filtered_revs)
1033 remote_filtered_revs = frozenset(remote_filtered_revs)
1034
1034
1035 def remote_func(x):
1035 def remote_func(x):
1036 return remote_filtered_revs
1036 return remote_filtered_revs
1037
1037
1038 repoview.filtertable[b'debug-discovery-remote-filter'] = remote_func
1038 repoview.filtertable[b'debug-discovery-remote-filter'] = remote_func
1039
1039
1040 remote = repo.peer()
1040 remote = repo.peer()
1041 remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')
1041 remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')
1042
1042
1043 if local_revs:
1043 if local_revs:
1044 local_filtered_revs = scmutil.revrange(
1044 local_filtered_revs = scmutil.revrange(
1045 unfi, [b"not (::(%s))" % local_revs]
1045 unfi, [b"not (::(%s))" % local_revs]
1046 )
1046 )
1047 local_filtered_revs = frozenset(local_filtered_revs)
1047 local_filtered_revs = frozenset(local_filtered_revs)
1048
1048
1049 def local_func(x):
1049 def local_func(x):
1050 return local_filtered_revs
1050 return local_filtered_revs
1051
1051
1052 repoview.filtertable[b'debug-discovery-local-filter'] = local_func
1052 repoview.filtertable[b'debug-discovery-local-filter'] = local_func
1053 repo = repo.filtered(b'debug-discovery-local-filter')
1053 repo = repo.filtered(b'debug-discovery-local-filter')
1054
1054
1055 data = {}
1055 data = {}
1056 if opts.get(b'old'):
1056 if opts.get(b'old'):
1057
1057
1058 def doit(pushedrevs, remoteheads, remote=remote):
1058 def doit(pushedrevs, remoteheads, remote=remote):
1059 if not util.safehasattr(remote, b'branches'):
1059 if not util.safehasattr(remote, b'branches'):
1060 # enable in-client legacy support
1060 # enable in-client legacy support
1061 remote = localrepo.locallegacypeer(remote.local())
1061 remote = localrepo.locallegacypeer(remote.local())
1062 common, _in, hds = treediscovery.findcommonincoming(
1062 common, _in, hds = treediscovery.findcommonincoming(
1063 repo, remote, force=True, audit=data
1063 repo, remote, force=True, audit=data
1064 )
1064 )
1065 common = set(common)
1065 common = set(common)
1066 if not opts.get(b'nonheads'):
1066 if not opts.get(b'nonheads'):
1067 ui.writenoi18n(
1067 ui.writenoi18n(
1068 b"unpruned common: %s\n"
1068 b"unpruned common: %s\n"
1069 % b" ".join(sorted(short(n) for n in common))
1069 % b" ".join(sorted(short(n) for n in common))
1070 )
1070 )
1071
1071
1072 clnode = repo.changelog.node
1072 clnode = repo.changelog.node
1073 common = repo.revs(b'heads(::%ln)', common)
1073 common = repo.revs(b'heads(::%ln)', common)
1074 common = {clnode(r) for r in common}
1074 common = {clnode(r) for r in common}
1075 return common, hds
1075 return common, hds
1076
1076
1077 else:
1077 else:
1078
1078
1079 def doit(pushedrevs, remoteheads, remote=remote):
1079 def doit(pushedrevs, remoteheads, remote=remote):
1080 nodes = None
1080 nodes = None
1081 if pushedrevs:
1081 if pushedrevs:
1082 revs = scmutil.revrange(repo, pushedrevs)
1082 revs = scmutil.revrange(repo, pushedrevs)
1083 nodes = [repo[r].node() for r in revs]
1083 nodes = [repo[r].node() for r in revs]
1084 common, any, hds = setdiscovery.findcommonheads(
1084 common, any, hds = setdiscovery.findcommonheads(
1085 ui, repo, remote, ancestorsof=nodes, audit=data
1085 ui, repo, remote, ancestorsof=nodes, audit=data
1086 )
1086 )
1087 return common, hds
1087 return common, hds
1088
1088
1089 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
1089 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
1090 localrevs = opts[b'rev']
1090 localrevs = opts[b'rev']
1091 with util.timedcm('debug-discovery') as t:
1091 with util.timedcm('debug-discovery') as t:
1092 common, hds = doit(localrevs, remoterevs)
1092 common, hds = doit(localrevs, remoterevs)
1093
1093
1094 # compute all statistics
1094 # compute all statistics
1095 heads_common = set(common)
1095 heads_common = set(common)
1096 heads_remote = set(hds)
1096 heads_remote = set(hds)
1097 heads_local = set(repo.heads())
1097 heads_local = set(repo.heads())
1098 # note: there cannot be a local or remote head that is in common and not
1098 # note: there cannot be a local or remote head that is in common and not
1099 # itself a head of common.
1099 # itself a head of common.
1100 heads_common_local = heads_common & heads_local
1100 heads_common_local = heads_common & heads_local
1101 heads_common_remote = heads_common & heads_remote
1101 heads_common_remote = heads_common & heads_remote
1102 heads_common_both = heads_common & heads_remote & heads_local
1102 heads_common_both = heads_common & heads_remote & heads_local
1103
1103
1104 all = repo.revs(b'all()')
1104 all = repo.revs(b'all()')
1105 common = repo.revs(b'::%ln', common)
1105 common = repo.revs(b'::%ln', common)
1106 roots_common = repo.revs(b'roots(::%ld)', common)
1106 roots_common = repo.revs(b'roots(::%ld)', common)
1107 missing = repo.revs(b'not ::%ld', common)
1107 missing = repo.revs(b'not ::%ld', common)
1108 heads_missing = repo.revs(b'heads(%ld)', missing)
1108 heads_missing = repo.revs(b'heads(%ld)', missing)
1109 roots_missing = repo.revs(b'roots(%ld)', missing)
1109 roots_missing = repo.revs(b'roots(%ld)', missing)
1110 assert len(common) + len(missing) == len(all)
1110 assert len(common) + len(missing) == len(all)
1111
1111
1112 initial_undecided = repo.revs(
1112 initial_undecided = repo.revs(
1113 b'not (::%ln or %ln::)', heads_common_remote, heads_common_local
1113 b'not (::%ln or %ln::)', heads_common_remote, heads_common_local
1114 )
1114 )
1115 heads_initial_undecided = repo.revs(b'heads(%ld)', initial_undecided)
1115 heads_initial_undecided = repo.revs(b'heads(%ld)', initial_undecided)
1116 roots_initial_undecided = repo.revs(b'roots(%ld)', initial_undecided)
1116 roots_initial_undecided = repo.revs(b'roots(%ld)', initial_undecided)
1117 common_initial_undecided = initial_undecided & common
1117 common_initial_undecided = initial_undecided & common
1118 missing_initial_undecided = initial_undecided & missing
1118 missing_initial_undecided = initial_undecided & missing
1119
1119
1120 data[b'elapsed'] = t.elapsed
1120 data[b'elapsed'] = t.elapsed
1121 data[b'nb-common-heads'] = len(heads_common)
1121 data[b'nb-common-heads'] = len(heads_common)
1122 data[b'nb-common-heads-local'] = len(heads_common_local)
1122 data[b'nb-common-heads-local'] = len(heads_common_local)
1123 data[b'nb-common-heads-remote'] = len(heads_common_remote)
1123 data[b'nb-common-heads-remote'] = len(heads_common_remote)
1124 data[b'nb-common-heads-both'] = len(heads_common_both)
1124 data[b'nb-common-heads-both'] = len(heads_common_both)
1125 data[b'nb-common-roots'] = len(roots_common)
1125 data[b'nb-common-roots'] = len(roots_common)
1126 data[b'nb-head-local'] = len(heads_local)
1126 data[b'nb-head-local'] = len(heads_local)
1127 data[b'nb-head-local-missing'] = len(heads_local) - len(heads_common_local)
1127 data[b'nb-head-local-missing'] = len(heads_local) - len(heads_common_local)
1128 data[b'nb-head-remote'] = len(heads_remote)
1128 data[b'nb-head-remote'] = len(heads_remote)
1129 data[b'nb-head-remote-unknown'] = len(heads_remote) - len(
1129 data[b'nb-head-remote-unknown'] = len(heads_remote) - len(
1130 heads_common_remote
1130 heads_common_remote
1131 )
1131 )
1132 data[b'nb-revs'] = len(all)
1132 data[b'nb-revs'] = len(all)
1133 data[b'nb-revs-common'] = len(common)
1133 data[b'nb-revs-common'] = len(common)
1134 data[b'nb-revs-missing'] = len(missing)
1134 data[b'nb-revs-missing'] = len(missing)
1135 data[b'nb-missing-heads'] = len(heads_missing)
1135 data[b'nb-missing-heads'] = len(heads_missing)
1136 data[b'nb-missing-roots'] = len(roots_missing)
1136 data[b'nb-missing-roots'] = len(roots_missing)
1137 data[b'nb-ini_und'] = len(initial_undecided)
1137 data[b'nb-ini_und'] = len(initial_undecided)
1138 data[b'nb-ini_und-heads'] = len(heads_initial_undecided)
1138 data[b'nb-ini_und-heads'] = len(heads_initial_undecided)
1139 data[b'nb-ini_und-roots'] = len(roots_initial_undecided)
1139 data[b'nb-ini_und-roots'] = len(roots_initial_undecided)
1140 data[b'nb-ini_und-common'] = len(common_initial_undecided)
1140 data[b'nb-ini_und-common'] = len(common_initial_undecided)
1141 data[b'nb-ini_und-missing'] = len(missing_initial_undecided)
1141 data[b'nb-ini_und-missing'] = len(missing_initial_undecided)
1142
1142
1143 # display discovery summary
1143 # display discovery summary
1144 ui.writenoi18n(b"elapsed time: %(elapsed)f seconds\n" % data)
1144 ui.writenoi18n(b"elapsed time: %(elapsed)f seconds\n" % data)
1145 ui.writenoi18n(b"round-trips: %(total-roundtrips)9d\n" % data)
1145 ui.writenoi18n(b"round-trips: %(total-roundtrips)9d\n" % data)
1146 ui.writenoi18n(b"heads summary:\n")
1146 ui.writenoi18n(b"heads summary:\n")
1147 ui.writenoi18n(b" total common heads: %(nb-common-heads)9d\n" % data)
1147 ui.writenoi18n(b" total common heads: %(nb-common-heads)9d\n" % data)
1148 ui.writenoi18n(
1148 ui.writenoi18n(
1149 b" also local heads: %(nb-common-heads-local)9d\n" % data
1149 b" also local heads: %(nb-common-heads-local)9d\n" % data
1150 )
1150 )
1151 ui.writenoi18n(
1151 ui.writenoi18n(
1152 b" also remote heads: %(nb-common-heads-remote)9d\n" % data
1152 b" also remote heads: %(nb-common-heads-remote)9d\n" % data
1153 )
1153 )
1154 ui.writenoi18n(b" both: %(nb-common-heads-both)9d\n" % data)
1154 ui.writenoi18n(b" both: %(nb-common-heads-both)9d\n" % data)
1155 ui.writenoi18n(b" local heads: %(nb-head-local)9d\n" % data)
1155 ui.writenoi18n(b" local heads: %(nb-head-local)9d\n" % data)
1156 ui.writenoi18n(
1156 ui.writenoi18n(
1157 b" common: %(nb-common-heads-local)9d\n" % data
1157 b" common: %(nb-common-heads-local)9d\n" % data
1158 )
1158 )
1159 ui.writenoi18n(
1159 ui.writenoi18n(
1160 b" missing: %(nb-head-local-missing)9d\n" % data
1160 b" missing: %(nb-head-local-missing)9d\n" % data
1161 )
1161 )
1162 ui.writenoi18n(b" remote heads: %(nb-head-remote)9d\n" % data)
1162 ui.writenoi18n(b" remote heads: %(nb-head-remote)9d\n" % data)
1163 ui.writenoi18n(
1163 ui.writenoi18n(
1164 b" common: %(nb-common-heads-remote)9d\n" % data
1164 b" common: %(nb-common-heads-remote)9d\n" % data
1165 )
1165 )
1166 ui.writenoi18n(
1166 ui.writenoi18n(
1167 b" unknown: %(nb-head-remote-unknown)9d\n" % data
1167 b" unknown: %(nb-head-remote-unknown)9d\n" % data
1168 )
1168 )
1169 ui.writenoi18n(b"local changesets: %(nb-revs)9d\n" % data)
1169 ui.writenoi18n(b"local changesets: %(nb-revs)9d\n" % data)
1170 ui.writenoi18n(b" common: %(nb-revs-common)9d\n" % data)
1170 ui.writenoi18n(b" common: %(nb-revs-common)9d\n" % data)
1171 ui.writenoi18n(b" heads: %(nb-common-heads)9d\n" % data)
1171 ui.writenoi18n(b" heads: %(nb-common-heads)9d\n" % data)
1172 ui.writenoi18n(b" roots: %(nb-common-roots)9d\n" % data)
1172 ui.writenoi18n(b" roots: %(nb-common-roots)9d\n" % data)
1173 ui.writenoi18n(b" missing: %(nb-revs-missing)9d\n" % data)
1173 ui.writenoi18n(b" missing: %(nb-revs-missing)9d\n" % data)
1174 ui.writenoi18n(b" heads: %(nb-missing-heads)9d\n" % data)
1174 ui.writenoi18n(b" heads: %(nb-missing-heads)9d\n" % data)
1175 ui.writenoi18n(b" roots: %(nb-missing-roots)9d\n" % data)
1175 ui.writenoi18n(b" roots: %(nb-missing-roots)9d\n" % data)
1176 ui.writenoi18n(b" first undecided set: %(nb-ini_und)9d\n" % data)
1176 ui.writenoi18n(b" first undecided set: %(nb-ini_und)9d\n" % data)
1177 ui.writenoi18n(b" heads: %(nb-ini_und-heads)9d\n" % data)
1177 ui.writenoi18n(b" heads: %(nb-ini_und-heads)9d\n" % data)
1178 ui.writenoi18n(b" roots: %(nb-ini_und-roots)9d\n" % data)
1178 ui.writenoi18n(b" roots: %(nb-ini_und-roots)9d\n" % data)
1179 ui.writenoi18n(b" common: %(nb-ini_und-common)9d\n" % data)
1179 ui.writenoi18n(b" common: %(nb-ini_und-common)9d\n" % data)
1180 ui.writenoi18n(b" missing: %(nb-ini_und-missing)9d\n" % data)
1180 ui.writenoi18n(b" missing: %(nb-ini_und-missing)9d\n" % data)
1181
1181
1182 if ui.verbose:
1182 if ui.verbose:
1183 ui.writenoi18n(
1183 ui.writenoi18n(
1184 b"common heads: %s\n"
1184 b"common heads: %s\n"
1185 % b" ".join(sorted(short(n) for n in heads_common))
1185 % b" ".join(sorted(short(n) for n in heads_common))
1186 )
1186 )
1187
1187
1188
1188
1189 _chunksize = 4 << 10
1189 _chunksize = 4 << 10
1190
1190
1191
1191
1192 @command(
1192 @command(
1193 b'debugdownload',
1193 b'debugdownload',
1194 [
1194 [
1195 (b'o', b'output', b'', _(b'path')),
1195 (b'o', b'output', b'', _(b'path')),
1196 ],
1196 ],
1197 optionalrepo=True,
1197 optionalrepo=True,
1198 )
1198 )
1199 def debugdownload(ui, repo, url, output=None, **opts):
1199 def debugdownload(ui, repo, url, output=None, **opts):
1200 """download a resource using Mercurial logic and config"""
1200 """download a resource using Mercurial logic and config"""
1201 fh = urlmod.open(ui, url, output)
1201 fh = urlmod.open(ui, url, output)
1202
1202
1203 dest = ui
1203 dest = ui
1204 if output:
1204 if output:
1205 dest = open(output, b"wb", _chunksize)
1205 dest = open(output, b"wb", _chunksize)
1206 try:
1206 try:
1207 data = fh.read(_chunksize)
1207 data = fh.read(_chunksize)
1208 while data:
1208 while data:
1209 dest.write(data)
1209 dest.write(data)
1210 data = fh.read(_chunksize)
1210 data = fh.read(_chunksize)
1211 finally:
1211 finally:
1212 if output:
1212 if output:
1213 dest.close()
1213 dest.close()
1214
1214
1215
1215
1216 @command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
1216 @command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
1217 def debugextensions(ui, repo, **opts):
1217 def debugextensions(ui, repo, **opts):
1218 '''show information about active extensions'''
1218 '''show information about active extensions'''
1219 opts = pycompat.byteskwargs(opts)
1219 opts = pycompat.byteskwargs(opts)
1220 exts = extensions.extensions(ui)
1220 exts = extensions.extensions(ui)
1221 hgver = util.version()
1221 hgver = util.version()
1222 fm = ui.formatter(b'debugextensions', opts)
1222 fm = ui.formatter(b'debugextensions', opts)
1223 for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
1223 for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
1224 isinternal = extensions.ismoduleinternal(extmod)
1224 isinternal = extensions.ismoduleinternal(extmod)
1225 extsource = None
1225 extsource = None
1226
1226
1227 if util.safehasattr(extmod, '__file__'):
1227 if util.safehasattr(extmod, '__file__'):
1228 extsource = pycompat.fsencode(extmod.__file__)
1228 extsource = pycompat.fsencode(extmod.__file__)
1229 elif getattr(sys, 'oxidized', False):
1229 elif getattr(sys, 'oxidized', False):
1230 extsource = pycompat.sysexecutable
1230 extsource = pycompat.sysexecutable
1231 if isinternal:
1231 if isinternal:
1232 exttestedwith = [] # never expose magic string to users
1232 exttestedwith = [] # never expose magic string to users
1233 else:
1233 else:
1234 exttestedwith = getattr(extmod, 'testedwith', b'').split()
1234 exttestedwith = getattr(extmod, 'testedwith', b'').split()
1235 extbuglink = getattr(extmod, 'buglink', None)
1235 extbuglink = getattr(extmod, 'buglink', None)
1236
1236
1237 fm.startitem()
1237 fm.startitem()
1238
1238
1239 if ui.quiet or ui.verbose:
1239 if ui.quiet or ui.verbose:
1240 fm.write(b'name', b'%s\n', extname)
1240 fm.write(b'name', b'%s\n', extname)
1241 else:
1241 else:
1242 fm.write(b'name', b'%s', extname)
1242 fm.write(b'name', b'%s', extname)
1243 if isinternal or hgver in exttestedwith:
1243 if isinternal or hgver in exttestedwith:
1244 fm.plain(b'\n')
1244 fm.plain(b'\n')
1245 elif not exttestedwith:
1245 elif not exttestedwith:
1246 fm.plain(_(b' (untested!)\n'))
1246 fm.plain(_(b' (untested!)\n'))
1247 else:
1247 else:
1248 lasttestedversion = exttestedwith[-1]
1248 lasttestedversion = exttestedwith[-1]
1249 fm.plain(b' (%s!)\n' % lasttestedversion)
1249 fm.plain(b' (%s!)\n' % lasttestedversion)
1250
1250
1251 fm.condwrite(
1251 fm.condwrite(
1252 ui.verbose and extsource,
1252 ui.verbose and extsource,
1253 b'source',
1253 b'source',
1254 _(b' location: %s\n'),
1254 _(b' location: %s\n'),
1255 extsource or b"",
1255 extsource or b"",
1256 )
1256 )
1257
1257
1258 if ui.verbose:
1258 if ui.verbose:
1259 fm.plain(_(b' bundled: %s\n') % [b'no', b'yes'][isinternal])
1259 fm.plain(_(b' bundled: %s\n') % [b'no', b'yes'][isinternal])
1260 fm.data(bundled=isinternal)
1260 fm.data(bundled=isinternal)
1261
1261
1262 fm.condwrite(
1262 fm.condwrite(
1263 ui.verbose and exttestedwith,
1263 ui.verbose and exttestedwith,
1264 b'testedwith',
1264 b'testedwith',
1265 _(b' tested with: %s\n'),
1265 _(b' tested with: %s\n'),
1266 fm.formatlist(exttestedwith, name=b'ver'),
1266 fm.formatlist(exttestedwith, name=b'ver'),
1267 )
1267 )
1268
1268
1269 fm.condwrite(
1269 fm.condwrite(
1270 ui.verbose and extbuglink,
1270 ui.verbose and extbuglink,
1271 b'buglink',
1271 b'buglink',
1272 _(b' bug reporting: %s\n'),
1272 _(b' bug reporting: %s\n'),
1273 extbuglink or b"",
1273 extbuglink or b"",
1274 )
1274 )
1275
1275
1276 fm.end()
1276 fm.end()
1277
1277
1278
1278
1279 @command(
1279 @command(
1280 b'debugfileset',
1280 b'debugfileset',
1281 [
1281 [
1282 (
1282 (
1283 b'r',
1283 b'r',
1284 b'rev',
1284 b'rev',
1285 b'',
1285 b'',
1286 _(b'apply the filespec on this revision'),
1286 _(b'apply the filespec on this revision'),
1287 _(b'REV'),
1287 _(b'REV'),
1288 ),
1288 ),
1289 (
1289 (
1290 b'',
1290 b'',
1291 b'all-files',
1291 b'all-files',
1292 False,
1292 False,
1293 _(b'test files from all revisions and working directory'),
1293 _(b'test files from all revisions and working directory'),
1294 ),
1294 ),
1295 (
1295 (
1296 b's',
1296 b's',
1297 b'show-matcher',
1297 b'show-matcher',
1298 None,
1298 None,
1299 _(b'print internal representation of matcher'),
1299 _(b'print internal representation of matcher'),
1300 ),
1300 ),
1301 (
1301 (
1302 b'p',
1302 b'p',
1303 b'show-stage',
1303 b'show-stage',
1304 [],
1304 [],
1305 _(b'print parsed tree at the given stage'),
1305 _(b'print parsed tree at the given stage'),
1306 _(b'NAME'),
1306 _(b'NAME'),
1307 ),
1307 ),
1308 ],
1308 ],
1309 _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
1309 _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
1310 )
1310 )
1311 def debugfileset(ui, repo, expr, **opts):
1311 def debugfileset(ui, repo, expr, **opts):
1312 '''parse and apply a fileset specification'''
1312 '''parse and apply a fileset specification'''
1313 from . import fileset
1313 from . import fileset
1314
1314
1315 fileset.symbols # force import of fileset so we have predicates to optimize
1315 fileset.symbols # force import of fileset so we have predicates to optimize
1316 opts = pycompat.byteskwargs(opts)
1316 opts = pycompat.byteskwargs(opts)
1317 ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)
1317 ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)
1318
1318
1319 stages = [
1319 stages = [
1320 (b'parsed', pycompat.identity),
1320 (b'parsed', pycompat.identity),
1321 (b'analyzed', filesetlang.analyze),
1321 (b'analyzed', filesetlang.analyze),
1322 (b'optimized', filesetlang.optimize),
1322 (b'optimized', filesetlang.optimize),
1323 ]
1323 ]
1324 stagenames = {n for n, f in stages}
1324 stagenames = {n for n, f in stages}
1325
1325
1326 showalways = set()
1326 showalways = set()
1327 if ui.verbose and not opts[b'show_stage']:
1327 if ui.verbose and not opts[b'show_stage']:
1328 # show parsed tree by --verbose (deprecated)
1328 # show parsed tree by --verbose (deprecated)
1329 showalways.add(b'parsed')
1329 showalways.add(b'parsed')
1330 if opts[b'show_stage'] == [b'all']:
1330 if opts[b'show_stage'] == [b'all']:
1331 showalways.update(stagenames)
1331 showalways.update(stagenames)
1332 else:
1332 else:
1333 for n in opts[b'show_stage']:
1333 for n in opts[b'show_stage']:
1334 if n not in stagenames:
1334 if n not in stagenames:
1335 raise error.Abort(_(b'invalid stage name: %s') % n)
1335 raise error.Abort(_(b'invalid stage name: %s') % n)
1336 showalways.update(opts[b'show_stage'])
1336 showalways.update(opts[b'show_stage'])
1337
1337
1338 tree = filesetlang.parse(expr)
1338 tree = filesetlang.parse(expr)
1339 for n, f in stages:
1339 for n, f in stages:
1340 tree = f(tree)
1340 tree = f(tree)
1341 if n in showalways:
1341 if n in showalways:
1342 if opts[b'show_stage'] or n != b'parsed':
1342 if opts[b'show_stage'] or n != b'parsed':
1343 ui.write(b"* %s:\n" % n)
1343 ui.write(b"* %s:\n" % n)
1344 ui.write(filesetlang.prettyformat(tree), b"\n")
1344 ui.write(filesetlang.prettyformat(tree), b"\n")
1345
1345
1346 files = set()
1346 files = set()
1347 if opts[b'all_files']:
1347 if opts[b'all_files']:
1348 for r in repo:
1348 for r in repo:
1349 c = repo[r]
1349 c = repo[r]
1350 files.update(c.files())
1350 files.update(c.files())
1351 files.update(c.substate)
1351 files.update(c.substate)
1352 if opts[b'all_files'] or ctx.rev() is None:
1352 if opts[b'all_files'] or ctx.rev() is None:
1353 wctx = repo[None]
1353 wctx = repo[None]
1354 files.update(
1354 files.update(
1355 repo.dirstate.walk(
1355 repo.dirstate.walk(
1356 scmutil.matchall(repo),
1356 scmutil.matchall(repo),
1357 subrepos=list(wctx.substate),
1357 subrepos=list(wctx.substate),
1358 unknown=True,
1358 unknown=True,
1359 ignored=True,
1359 ignored=True,
1360 )
1360 )
1361 )
1361 )
1362 files.update(wctx.substate)
1362 files.update(wctx.substate)
1363 else:
1363 else:
1364 files.update(ctx.files())
1364 files.update(ctx.files())
1365 files.update(ctx.substate)
1365 files.update(ctx.substate)
1366
1366
1367 m = ctx.matchfileset(repo.getcwd(), expr)
1367 m = ctx.matchfileset(repo.getcwd(), expr)
1368 if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
1368 if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
1369 ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
1369 ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
1370 for f in sorted(files):
1370 for f in sorted(files):
1371 if not m(f):
1371 if not m(f):
1372 continue
1372 continue
1373 ui.write(b"%s\n" % f)
1373 ui.write(b"%s\n" % f)
1374
1374
1375
1375
@command(b'debugformat', [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about the current config value and
    the Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len(b'format-variant'), maxvariantlength)

    def makeformatname(name):
        return b'%s:' + (b' ' * (maxvariantlength - len(name)))
1388
1388
1389 fm = ui.formatter(b'debugformat', opts)
1389 fm = ui.formatter(b'debugformat', opts)
1390 if fm.isplain():
1390 if fm.isplain():
1391
1391
1392 def formatvalue(value):
1392 def formatvalue(value):
1393 if util.safehasattr(value, b'startswith'):
1393 if util.safehasattr(value, b'startswith'):
1394 return value
1394 return value
1395 if value:
1395 if value:
1396 return b'yes'
1396 return b'yes'
1397 else:
1397 else:
1398 return b'no'
1398 return b'no'
1399
1399
1400 else:
1400 else:
1401 formatvalue = pycompat.identity
1401 formatvalue = pycompat.identity
1402
1402
1403 fm.plain(b'format-variant')
1403 fm.plain(b'format-variant')
1404 fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
1404 fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
1405 fm.plain(b' repo')
1405 fm.plain(b' repo')
1406 if ui.verbose:
1406 if ui.verbose:
1407 fm.plain(b' config default')
1407 fm.plain(b' config default')
1408 fm.plain(b'\n')
1408 fm.plain(b'\n')
1409 for fv in upgrade.allformatvariant:
1409 for fv in upgrade.allformatvariant:
1410 fm.startitem()
1410 fm.startitem()
1411 repovalue = fv.fromrepo(repo)
1411 repovalue = fv.fromrepo(repo)
1412 configvalue = fv.fromconfig(repo)
1412 configvalue = fv.fromconfig(repo)
1413
1413
1414 if repovalue != configvalue:
1414 if repovalue != configvalue:
1415 namelabel = b'formatvariant.name.mismatchconfig'
1415 namelabel = b'formatvariant.name.mismatchconfig'
1416 repolabel = b'formatvariant.repo.mismatchconfig'
1416 repolabel = b'formatvariant.repo.mismatchconfig'
1417 elif repovalue != fv.default:
1417 elif repovalue != fv.default:
1418 namelabel = b'formatvariant.name.mismatchdefault'
1418 namelabel = b'formatvariant.name.mismatchdefault'
1419 repolabel = b'formatvariant.repo.mismatchdefault'
1419 repolabel = b'formatvariant.repo.mismatchdefault'
1420 else:
1420 else:
1421 namelabel = b'formatvariant.name.uptodate'
1421 namelabel = b'formatvariant.name.uptodate'
1422 repolabel = b'formatvariant.repo.uptodate'
1422 repolabel = b'formatvariant.repo.uptodate'
1423
1423
1424 fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
1424 fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
1425 fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
1425 fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
1426 if fv.default != configvalue:
1426 if fv.default != configvalue:
1427 configlabel = b'formatvariant.config.special'
1427 configlabel = b'formatvariant.config.special'
1428 else:
1428 else:
1429 configlabel = b'formatvariant.config.default'
1429 configlabel = b'formatvariant.config.default'
1430 fm.condwrite(
1430 fm.condwrite(
1431 ui.verbose,
1431 ui.verbose,
1432 b'config',
1432 b'config',
1433 b' %6s',
1433 b' %6s',
1434 formatvalue(configvalue),
1434 formatvalue(configvalue),
1435 label=configlabel,
1435 label=configlabel,
1436 )
1436 )
1437 fm.condwrite(
1437 fm.condwrite(
1438 ui.verbose,
1438 ui.verbose,
1439 b'default',
1439 b'default',
1440 b' %7s',
1440 b' %7s',
1441 formatvalue(fv.default),
1441 formatvalue(fv.default),
1442 label=b'formatvariant.default',
1442 label=b'formatvariant.default',
1443 )
1443 )
1444 fm.plain(b'\n')
1444 fm.plain(b'\n')
1445 fm.end()
1445 fm.end()
1446
1446
1447
1447
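# The format-variant table produced by debugformat can also be consumed
# programmatically through the formatter; a sketch, assuming the standard
# -T/--template support from cmdutil.formatteropts (the template keywords
# mirror the fm.write() field names used above):
#
#   $ hg debugformat --verbose
#   $ hg debugformat -T '{name}: {repo}\n'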
@command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
def debugfsinfo(ui, path=b"."):
    """show information detected about current filesystem"""
    ui.writenoi18n(b'path: %s\n' % path)
    ui.writenoi18n(
        b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
    )
    ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no'))
    ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
    ui.writenoi18n(
        b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no')
    )
    ui.writenoi18n(
        b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no')
    )
    casesensitive = b'(unknown)'
    try:
        with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
            casesensitive = util.fscasesensitive(f.name) and b'yes' or b'no'
    except OSError:
        pass
    ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)
1470
1470
1471
1471
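# Sample invocation of debugfsinfo; the path is arbitrary and the reported
# fields follow the ui.writenoi18n() calls above (exec, fstype, symlink,
# hardlink, case-sensitive):
#
#   $ hg debugfsinfo /path/to/some/directory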
1472 @command(
1472 @command(
1473 b'debuggetbundle',
1473 b'debuggetbundle',
1474 [
1474 [
1475 (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
1475 (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
1476 (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
1476 (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
1477 (
1477 (
1478 b't',
1478 b't',
1479 b'type',
1479 b'type',
1480 b'bzip2',
1480 b'bzip2',
1481 _(b'bundle compression type to use'),
1481 _(b'bundle compression type to use'),
1482 _(b'TYPE'),
1482 _(b'TYPE'),
1483 ),
1483 ),
1484 ],
1484 ],
1485 _(b'REPO FILE [-H|-C ID]...'),
1485 _(b'REPO FILE [-H|-C ID]...'),
1486 norepo=True,
1486 norepo=True,
1487 )
1487 )
1488 def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
1488 def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
1489 """retrieves a bundle from a repo
1489 """retrieves a bundle from a repo
1490
1490
1491 Every ID must be a full-length hex node id string. Saves the bundle to the
1491 Every ID must be a full-length hex node id string. Saves the bundle to the
1492 given file.
1492 given file.
1493 """
1493 """
1494 opts = pycompat.byteskwargs(opts)
1494 opts = pycompat.byteskwargs(opts)
1495 repo = hg.peer(ui, opts, repopath)
1495 repo = hg.peer(ui, opts, repopath)
1496 if not repo.capable(b'getbundle'):
1496 if not repo.capable(b'getbundle'):
1497 raise error.Abort(b"getbundle() not supported by target repository")
1497 raise error.Abort(b"getbundle() not supported by target repository")
1498 args = {}
1498 args = {}
1499 if common:
1499 if common:
1500 args['common'] = [bin(s) for s in common]
1500 args['common'] = [bin(s) for s in common]
1501 if head:
1501 if head:
1502 args['heads'] = [bin(s) for s in head]
1502 args['heads'] = [bin(s) for s in head]
1503 # TODO: get desired bundlecaps from command line.
1503 # TODO: get desired bundlecaps from command line.
1504 args['bundlecaps'] = None
1504 args['bundlecaps'] = None
1505 bundle = repo.getbundle(b'debug', **args)
1505 bundle = repo.getbundle(b'debug', **args)
1506
1506
1507 bundletype = opts.get(b'type', b'bzip2').lower()
1507 bundletype = opts.get(b'type', b'bzip2').lower()
1508 btypes = {
1508 btypes = {
1509 b'none': b'HG10UN',
1509 b'none': b'HG10UN',
1510 b'bzip2': b'HG10BZ',
1510 b'bzip2': b'HG10BZ',
1511 b'gzip': b'HG10GZ',
1511 b'gzip': b'HG10GZ',
1512 b'bundle2': b'HG20',
1512 b'bundle2': b'HG20',
1513 }
1513 }
1514 bundletype = btypes.get(bundletype)
1514 bundletype = btypes.get(bundletype)
1515 if bundletype not in bundle2.bundletypes:
1515 if bundletype not in bundle2.bundletypes:
1516 raise error.Abort(_(b'unknown bundle type specified with --type'))
1516 raise error.Abort(_(b'unknown bundle type specified with --type'))
1517 bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1517 bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1518
1518
1519
1519
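# Hypothetical example of fetching a bundle for offline inspection; the URL
# and node id are placeholders, and --type must map to one of the btypes
# keys defined above (none, bzip2, gzip, bundle2):
#
#   $ hg debuggetbundle https://example.org/repo out.hg \
#       -H 0123456789012345678901234567890123456789 -t gzip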
@command(b'debugignore', [], b'[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument, display the combined ignore pattern.

    Given space-separated file names, show whether each file is ignored and,
    if so, the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
1530 if not files:
1530 if not files:
1531 # Show all the patterns
1531 # Show all the patterns
1532 ui.write(b"%s\n" % pycompat.byterepr(ignore))
1532 ui.write(b"%s\n" % pycompat.byterepr(ignore))
1533 else:
1533 else:
1534 m = scmutil.match(repo[None], pats=files)
1534 m = scmutil.match(repo[None], pats=files)
1535 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1535 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1536 for f in m.files():
1536 for f in m.files():
1537 nf = util.normpath(f)
1537 nf = util.normpath(f)
1538 ignored = None
1538 ignored = None
1539 ignoredata = None
1539 ignoredata = None
1540 if nf != b'.':
1540 if nf != b'.':
1541 if ignore(nf):
1541 if ignore(nf):
1542 ignored = nf
1542 ignored = nf
1543 ignoredata = repo.dirstate._ignorefileandline(nf)
1543 ignoredata = repo.dirstate._ignorefileandline(nf)
1544 else:
1544 else:
1545 for p in pathutil.finddirs(nf):
1545 for p in pathutil.finddirs(nf):
1546 if ignore(p):
1546 if ignore(p):
1547 ignored = p
1547 ignored = p
1548 ignoredata = repo.dirstate._ignorefileandline(p)
1548 ignoredata = repo.dirstate._ignorefileandline(p)
1549 break
1549 break
1550 if ignored:
1550 if ignored:
1551 if ignored == nf:
1551 if ignored == nf:
1552 ui.write(_(b"%s is ignored\n") % uipathfn(f))
1552 ui.write(_(b"%s is ignored\n") % uipathfn(f))
1553 else:
1553 else:
1554 ui.write(
1554 ui.write(
1555 _(
1555 _(
1556 b"%s is ignored because of "
1556 b"%s is ignored because of "
1557 b"containing directory %s\n"
1557 b"containing directory %s\n"
1558 )
1558 )
1559 % (uipathfn(f), ignored)
1559 % (uipathfn(f), ignored)
1560 )
1560 )
1561 ignorefile, lineno, line = ignoredata
1561 ignorefile, lineno, line = ignoredata
1562 ui.write(
1562 ui.write(
1563 _(b"(ignore rule in %s, line %d: '%s')\n")
1563 _(b"(ignore rule in %s, line %d: '%s')\n")
1564 % (ignorefile, lineno, line)
1564 % (ignorefile, lineno, line)
1565 )
1565 )
1566 else:
1566 else:
1567 ui.write(_(b"%s is not ignored\n") % uipathfn(f))
1567 ui.write(_(b"%s is not ignored\n") % uipathfn(f))
1568
1568
1569
1569
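# Typical uses of debugignore; the file names are illustrative:
#
#   $ hg debugignore                 # dump the combined ignore matcher
#   $ hg debugignore build/ foo.pyc  # explain why these paths are ignored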
1570 @command(
1570 @command(
1571 b'debugindex',
1571 b'debugindex',
1572 cmdutil.debugrevlogopts + cmdutil.formatteropts,
1572 cmdutil.debugrevlogopts + cmdutil.formatteropts,
1573 _(b'-c|-m|FILE'),
1573 _(b'-c|-m|FILE'),
1574 )
1574 )
1575 def debugindex(ui, repo, file_=None, **opts):
1575 def debugindex(ui, repo, file_=None, **opts):
1576 """dump index data for a storage primitive"""
1576 """dump index data for a storage primitive"""
1577 opts = pycompat.byteskwargs(opts)
1577 opts = pycompat.byteskwargs(opts)
1578 store = cmdutil.openstorage(repo, b'debugindex', file_, opts)
1578 store = cmdutil.openstorage(repo, b'debugindex', file_, opts)
1579
1579
1580 if ui.debugflag:
1580 if ui.debugflag:
1581 shortfn = hex
1581 shortfn = hex
1582 else:
1582 else:
1583 shortfn = short
1583 shortfn = short
1584
1584
1585 idlen = 12
1585 idlen = 12
1586 for i in store:
1586 for i in store:
1587 idlen = len(shortfn(store.node(i)))
1587 idlen = len(shortfn(store.node(i)))
1588 break
1588 break
1589
1589
1590 fm = ui.formatter(b'debugindex', opts)
1590 fm = ui.formatter(b'debugindex', opts)
1591 fm.plain(
1591 fm.plain(
1592 b' rev linkrev %s %s p2\n'
1592 b' rev linkrev %s %s p2\n'
1593 % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
1593 % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
1594 )
1594 )
1595
1595
1596 for rev in store:
1596 for rev in store:
1597 node = store.node(rev)
1597 node = store.node(rev)
1598 parents = store.parents(node)
1598 parents = store.parents(node)
1599
1599
1600 fm.startitem()
1600 fm.startitem()
1601 fm.write(b'rev', b'%6d ', rev)
1601 fm.write(b'rev', b'%6d ', rev)
1602 fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
1602 fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
1603 fm.write(b'node', b'%s ', shortfn(node))
1603 fm.write(b'node', b'%s ', shortfn(node))
1604 fm.write(b'p1', b'%s ', shortfn(parents[0]))
1604 fm.write(b'p1', b'%s ', shortfn(parents[0]))
1605 fm.write(b'p2', b'%s', shortfn(parents[1]))
1605 fm.write(b'p2', b'%s', shortfn(parents[1]))
1606 fm.plain(b'\n')
1606 fm.plain(b'\n')
1607
1607
1608 fm.end()
1608 fm.end()
1609
1609
1610
1610
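# debugindex accepts the usual revlog selectors (-c for the changelog,
# -m for the manifest, or a tracked file name); a sketch:
#
#   $ hg debugindex -m
#   $ hg debugindex path/to/file.txt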
1611 @command(
1611 @command(
1612 b'debugindexdot',
1612 b'debugindexdot',
1613 cmdutil.debugrevlogopts,
1613 cmdutil.debugrevlogopts,
1614 _(b'-c|-m|FILE'),
1614 _(b'-c|-m|FILE'),
1615 optionalrepo=True,
1615 optionalrepo=True,
1616 )
1616 )
1617 def debugindexdot(ui, repo, file_=None, **opts):
1617 def debugindexdot(ui, repo, file_=None, **opts):
1618 """dump an index DAG as a graphviz dot file"""
1618 """dump an index DAG as a graphviz dot file"""
1619 opts = pycompat.byteskwargs(opts)
1619 opts = pycompat.byteskwargs(opts)
1620 r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
1620 r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
1621 ui.writenoi18n(b"digraph G {\n")
1621 ui.writenoi18n(b"digraph G {\n")
1622 for i in r:
1622 for i in r:
1623 node = r.node(i)
1623 node = r.node(i)
1624 pp = r.parents(node)
1624 pp = r.parents(node)
1625 ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
1625 ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
1626 if pp[1] != nullid:
1626 if pp[1] != nullid:
1627 ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
1627 ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
1628 ui.write(b"}\n")
1628 ui.write(b"}\n")
1629
1629
1630
1630
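# The dot output can be rendered with Graphviz; an illustrative pipeline,
# assuming the external `dot` tool is installed:
#
#   $ hg debugindexdot -c | dot -Tpng > dag.png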
1631 @command(b'debugindexstats', [])
1631 @command(b'debugindexstats', [])
1632 def debugindexstats(ui, repo):
1632 def debugindexstats(ui, repo):
1633 """show stats related to the changelog index"""
1633 """show stats related to the changelog index"""
1634 repo.changelog.shortest(nullid, 1)
1634 repo.changelog.shortest(nullid, 1)
1635 index = repo.changelog.index
1635 index = repo.changelog.index
1636 if not util.safehasattr(index, b'stats'):
1636 if not util.safehasattr(index, b'stats'):
1637 raise error.Abort(_(b'debugindexstats only works with native code'))
1637 raise error.Abort(_(b'debugindexstats only works with native code'))
1638 for k, v in sorted(index.stats().items()):
1638 for k, v in sorted(index.stats().items()):
1639 ui.write(b'%s: %d\n' % (k, v))
1639 ui.write(b'%s: %d\n' % (k, v))
1640
1640
1641
1641
1642 @command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
1642 @command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
1643 def debuginstall(ui, **opts):
1643 def debuginstall(ui, **opts):
1644 """test Mercurial installation
1644 """test Mercurial installation
1645
1645
1646 Returns 0 on success.
1646 Returns 0 on success.
1647 """
1647 """
1648 opts = pycompat.byteskwargs(opts)
1648 opts = pycompat.byteskwargs(opts)
1649
1649
1650 problems = 0
1650 problems = 0
1651
1651
1652 fm = ui.formatter(b'debuginstall', opts)
1652 fm = ui.formatter(b'debuginstall', opts)
1653 fm.startitem()
1653 fm.startitem()
1654
1654
1655 # encoding might be unknown or wrong. don't translate these messages.
1655 # encoding might be unknown or wrong. don't translate these messages.
1656 fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
1656 fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
1657 err = None
1657 err = None
1658 try:
1658 try:
1659 codecs.lookup(pycompat.sysstr(encoding.encoding))
1659 codecs.lookup(pycompat.sysstr(encoding.encoding))
1660 except LookupError as inst:
1660 except LookupError as inst:
1661 err = stringutil.forcebytestr(inst)
1661 err = stringutil.forcebytestr(inst)
1662 problems += 1
1662 problems += 1
1663 fm.condwrite(
1663 fm.condwrite(
1664 err,
1664 err,
1665 b'encodingerror',
1665 b'encodingerror',
1666 b" %s\n (check that your locale is properly set)\n",
1666 b" %s\n (check that your locale is properly set)\n",
1667 err,
1667 err,
1668 )
1668 )
1669
1669
1670 # Python
1670 # Python
1671 pythonlib = None
1671 pythonlib = None
1672 if util.safehasattr(os, '__file__'):
1672 if util.safehasattr(os, '__file__'):
1673 pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
1673 pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
1674 elif getattr(sys, 'oxidized', False):
1674 elif getattr(sys, 'oxidized', False):
1675 pythonlib = pycompat.sysexecutable
1675 pythonlib = pycompat.sysexecutable
1676
1676
1677 fm.write(
1677 fm.write(
1678 b'pythonexe',
1678 b'pythonexe',
1679 _(b"checking Python executable (%s)\n"),
1679 _(b"checking Python executable (%s)\n"),
1680 pycompat.sysexecutable or _(b"unknown"),
1680 pycompat.sysexecutable or _(b"unknown"),
1681 )
1681 )
1682 fm.write(
1682 fm.write(
1683 b'pythonimplementation',
1683 b'pythonimplementation',
1684 _(b"checking Python implementation (%s)\n"),
1684 _(b"checking Python implementation (%s)\n"),
1685 pycompat.sysbytes(platform.python_implementation()),
1685 pycompat.sysbytes(platform.python_implementation()),
1686 )
1686 )
1687 fm.write(
1687 fm.write(
1688 b'pythonver',
1688 b'pythonver',
1689 _(b"checking Python version (%s)\n"),
1689 _(b"checking Python version (%s)\n"),
1690 (b"%d.%d.%d" % sys.version_info[:3]),
1690 (b"%d.%d.%d" % sys.version_info[:3]),
1691 )
1691 )
1692 fm.write(
1692 fm.write(
1693 b'pythonlib',
1693 b'pythonlib',
1694 _(b"checking Python lib (%s)...\n"),
1694 _(b"checking Python lib (%s)...\n"),
1695 pythonlib or _(b"unknown"),
1695 pythonlib or _(b"unknown"),
1696 )
1696 )
1697
1697
1698 try:
1698 try:
1699 from . import rustext
1699 from . import rustext
1700
1700
1701 rustext.__doc__ # trigger lazy import
1701 rustext.__doc__ # trigger lazy import
1702 except ImportError:
1702 except ImportError:
1703 rustext = None
1703 rustext = None
1704
1704
1705 security = set(sslutil.supportedprotocols)
1705 security = set(sslutil.supportedprotocols)
1706 if sslutil.hassni:
1706 if sslutil.hassni:
1707 security.add(b'sni')
1707 security.add(b'sni')
1708
1708
1709 fm.write(
1709 fm.write(
1710 b'pythonsecurity',
1710 b'pythonsecurity',
1711 _(b"checking Python security support (%s)\n"),
1711 _(b"checking Python security support (%s)\n"),
1712 fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
1712 fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
1713 )
1713 )
1714
1714
1715 # These are warnings, not errors. So don't increment problem count. This
1715 # These are warnings, not errors. So don't increment problem count. This
1716 # may change in the future.
1716 # may change in the future.
1717 if b'tls1.2' not in security:
1717 if b'tls1.2' not in security:
1718 fm.plain(
1718 fm.plain(
1719 _(
1719 _(
1720 b' TLS 1.2 not supported by Python install; '
1720 b' TLS 1.2 not supported by Python install; '
1721 b'network connections lack modern security\n'
1721 b'network connections lack modern security\n'
1722 )
1722 )
1723 )
1723 )
1724 if b'sni' not in security:
1724 if b'sni' not in security:
1725 fm.plain(
1725 fm.plain(
1726 _(
1726 _(
1727 b' SNI not supported by Python install; may have '
1727 b' SNI not supported by Python install; may have '
1728 b'connectivity issues with some servers\n'
1728 b'connectivity issues with some servers\n'
1729 )
1729 )
1730 )
1730 )
1731
1731
1732 fm.plain(
1732 fm.plain(
1733 _(
1733 _(
1734 b"checking Rust extensions (%s)\n"
1734 b"checking Rust extensions (%s)\n"
1735 % (b'missing' if rustext is None else b'installed')
1735 % (b'missing' if rustext is None else b'installed')
1736 ),
1736 ),
1737 )
1737 )
1738
1738
1739 # TODO print CA cert info
1739 # TODO print CA cert info
1740
1740
1741 # hg version
1741 # hg version
1742 hgver = util.version()
1742 hgver = util.version()
1743 fm.write(
1743 fm.write(
1744 b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
1744 b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
1745 )
1745 )
1746 fm.write(
1746 fm.write(
1747 b'hgverextra',
1747 b'hgverextra',
1748 _(b"checking Mercurial custom build (%s)\n"),
1748 _(b"checking Mercurial custom build (%s)\n"),
1749 b'+'.join(hgver.split(b'+')[1:]),
1749 b'+'.join(hgver.split(b'+')[1:]),
1750 )
1750 )
1751
1751
1752 # compiled modules
1752 # compiled modules
1753 hgmodules = None
1753 hgmodules = None
1754 if util.safehasattr(sys.modules[__name__], '__file__'):
1754 if util.safehasattr(sys.modules[__name__], '__file__'):
1755 hgmodules = os.path.dirname(pycompat.fsencode(__file__))
1755 hgmodules = os.path.dirname(pycompat.fsencode(__file__))
1756 elif getattr(sys, 'oxidized', False):
1756 elif getattr(sys, 'oxidized', False):
1757 hgmodules = pycompat.sysexecutable
1757 hgmodules = pycompat.sysexecutable
1758
1758
1759 fm.write(
1759 fm.write(
1760 b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
1760 b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
1761 )
1761 )
1762 fm.write(
1762 fm.write(
1763 b'hgmodules',
1763 b'hgmodules',
1764 _(b"checking installed modules (%s)...\n"),
1764 _(b"checking installed modules (%s)...\n"),
1765 hgmodules or _(b"unknown"),
1765 hgmodules or _(b"unknown"),
1766 )
1766 )
1767
1767
1768 rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
1768 rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
1769 rustext = rustandc # for now, that's the only case
1769 rustext = rustandc # for now, that's the only case
1770 cext = policy.policy in (b'c', b'allow') or rustandc
1770 cext = policy.policy in (b'c', b'allow') or rustandc
1771 nopure = cext or rustext
1771 nopure = cext or rustext
1772 if nopure:
1772 if nopure:
1773 err = None
1773 err = None
1774 try:
1774 try:
1775 if cext:
1775 if cext:
1776 from .cext import ( # pytype: disable=import-error
1776 from .cext import ( # pytype: disable=import-error
1777 base85,
1777 base85,
1778 bdiff,
1778 bdiff,
1779 mpatch,
1779 mpatch,
1780 osutil,
1780 osutil,
1781 )
1781 )
1782
1782
1783 # quiet pyflakes
1783 # quiet pyflakes
1784 dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
1784 dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
1785 if rustext:
1785 if rustext:
1786 from .rustext import ( # pytype: disable=import-error
1786 from .rustext import ( # pytype: disable=import-error
1787 ancestor,
1787 ancestor,
1788 dirstate,
1788 dirstate,
1789 )
1789 )
1790
1790
1791 dir(ancestor), dir(dirstate) # quiet pyflakes
1791 dir(ancestor), dir(dirstate) # quiet pyflakes
1792 except Exception as inst:
1792 except Exception as inst:
1793 err = stringutil.forcebytestr(inst)
1793 err = stringutil.forcebytestr(inst)
1794 problems += 1
1794 problems += 1
1795 fm.condwrite(err, b'extensionserror', b" %s\n", err)
1795 fm.condwrite(err, b'extensionserror', b" %s\n", err)
1796
1796
1797 compengines = util.compengines._engines.values()
1797 compengines = util.compengines._engines.values()
1798 fm.write(
1798 fm.write(
1799 b'compengines',
1799 b'compengines',
1800 _(b'checking registered compression engines (%s)\n'),
1800 _(b'checking registered compression engines (%s)\n'),
1801 fm.formatlist(
1801 fm.formatlist(
1802 sorted(e.name() for e in compengines),
1802 sorted(e.name() for e in compengines),
1803 name=b'compengine',
1803 name=b'compengine',
1804 fmt=b'%s',
1804 fmt=b'%s',
1805 sep=b', ',
1805 sep=b', ',
1806 ),
1806 ),
1807 )
1807 )
1808 fm.write(
1808 fm.write(
1809 b'compenginesavail',
1809 b'compenginesavail',
1810 _(b'checking available compression engines (%s)\n'),
1810 _(b'checking available compression engines (%s)\n'),
1811 fm.formatlist(
1811 fm.formatlist(
1812 sorted(e.name() for e in compengines if e.available()),
1812 sorted(e.name() for e in compengines if e.available()),
1813 name=b'compengine',
1813 name=b'compengine',
1814 fmt=b'%s',
1814 fmt=b'%s',
1815 sep=b', ',
1815 sep=b', ',
1816 ),
1816 ),
1817 )
1817 )
1818 wirecompengines = compression.compengines.supportedwireengines(
1818 wirecompengines = compression.compengines.supportedwireengines(
1819 compression.SERVERROLE
1819 compression.SERVERROLE
1820 )
1820 )
1821 fm.write(
1821 fm.write(
1822 b'compenginesserver',
1822 b'compenginesserver',
1823 _(
1823 _(
1824 b'checking available compression engines '
1824 b'checking available compression engines '
1825 b'for wire protocol (%s)\n'
1825 b'for wire protocol (%s)\n'
1826 ),
1826 ),
1827 fm.formatlist(
1827 fm.formatlist(
1828 [e.name() for e in wirecompengines if e.wireprotosupport()],
1828 [e.name() for e in wirecompengines if e.wireprotosupport()],
1829 name=b'compengine',
1829 name=b'compengine',
1830 fmt=b'%s',
1830 fmt=b'%s',
1831 sep=b', ',
1831 sep=b', ',
1832 ),
1832 ),
1833 )
1833 )
1834 re2 = b'missing'
1834 re2 = b'missing'
1835 if util._re2:
1835 if util._re2:
1836 re2 = b'available'
1836 re2 = b'available'
1837 fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
1837 fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
1838 fm.data(re2=bool(util._re2))
1838 fm.data(re2=bool(util._re2))
1839
1839
1840 # templates
1840 # templates
1841 p = templater.templatedir()
1841 p = templater.templatedir()
1842 fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
1842 fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
1843 fm.condwrite(not p, b'', _(b" no template directories found\n"))
1843 fm.condwrite(not p, b'', _(b" no template directories found\n"))
1844 if p:
1844 if p:
1845 (m, fp) = templater.try_open_template(b"map-cmdline.default")
1845 (m, fp) = templater.try_open_template(b"map-cmdline.default")
1846 if m:
1846 if m:
1847 # template found, check if it is working
1847 # template found, check if it is working
1848 err = None
1848 err = None
1849 try:
1849 try:
1850 templater.templater.frommapfile(m)
1850 templater.templater.frommapfile(m)
1851 except Exception as inst:
1851 except Exception as inst:
1852 err = stringutil.forcebytestr(inst)
1852 err = stringutil.forcebytestr(inst)
1853 p = None
1853 p = None
1854 fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
1854 fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
1855 else:
1855 else:
1856 p = None
1856 p = None
1857 fm.condwrite(
1857 fm.condwrite(
1858 p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
1858 p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
1859 )
1859 )
1860 fm.condwrite(
1860 fm.condwrite(
1861 not m,
1861 not m,
1862 b'defaulttemplatenotfound',
1862 b'defaulttemplatenotfound',
1863 _(b" template '%s' not found\n"),
1863 _(b" template '%s' not found\n"),
1864 b"default",
1864 b"default",
1865 )
1865 )
1866 if not p:
1866 if not p:
1867 problems += 1
1867 problems += 1
1868 fm.condwrite(
1868 fm.condwrite(
1869 not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
1869 not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
1870 )
1870 )
1871
1871
1872 # editor
1872 # editor
1873 editor = ui.geteditor()
1873 editor = ui.geteditor()
1874 editor = util.expandpath(editor)
1874 editor = util.expandpath(editor)
1875 editorbin = procutil.shellsplit(editor)[0]
1875 editorbin = procutil.shellsplit(editor)[0]
1876 fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
1876 fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
1877 cmdpath = procutil.findexe(editorbin)
1877 cmdpath = procutil.findexe(editorbin)
1878 fm.condwrite(
1878 fm.condwrite(
1879 not cmdpath and editor == b'vi',
1879 not cmdpath and editor == b'vi',
1880 b'vinotfound',
1880 b'vinotfound',
1881 _(
1881 _(
1882 b" No commit editor set and can't find %s in PATH\n"
1882 b" No commit editor set and can't find %s in PATH\n"
1883 b" (specify a commit editor in your configuration"
1883 b" (specify a commit editor in your configuration"
1884 b" file)\n"
1884 b" file)\n"
1885 ),
1885 ),
1886 not cmdpath and editor == b'vi' and editorbin,
1886 not cmdpath and editor == b'vi' and editorbin,
1887 )
1887 )
1888 fm.condwrite(
1888 fm.condwrite(
1889 not cmdpath and editor != b'vi',
1889 not cmdpath and editor != b'vi',
1890 b'editornotfound',
1890 b'editornotfound',
1891 _(
1891 _(
1892 b" Can't find editor '%s' in PATH\n"
1892 b" Can't find editor '%s' in PATH\n"
1893 b" (specify a commit editor in your configuration"
1893 b" (specify a commit editor in your configuration"
1894 b" file)\n"
1894 b" file)\n"
1895 ),
1895 ),
1896 not cmdpath and editorbin,
1896 not cmdpath and editorbin,
1897 )
1897 )
1898 if not cmdpath and editor != b'vi':
1898 if not cmdpath and editor != b'vi':
1899 problems += 1
1899 problems += 1
1900
1900
1901 # check username
1901 # check username
1902 username = None
1902 username = None
1903 err = None
1903 err = None
1904 try:
1904 try:
1905 username = ui.username()
1905 username = ui.username()
1906 except error.Abort as e:
1906 except error.Abort as e:
1907 err = e.message
1907 err = e.message
1908 problems += 1
1908 problems += 1
1909
1909
1910 fm.condwrite(
1910 fm.condwrite(
1911 username, b'username', _(b"checking username (%s)\n"), username
1911 username, b'username', _(b"checking username (%s)\n"), username
1912 )
1912 )
1913 fm.condwrite(
1913 fm.condwrite(
1914 err,
1914 err,
1915 b'usernameerror',
1915 b'usernameerror',
1916 _(
1916 _(
1917 b"checking username...\n %s\n"
1917 b"checking username...\n %s\n"
1918 b" (specify a username in your configuration file)\n"
1918 b" (specify a username in your configuration file)\n"
1919 ),
1919 ),
1920 err,
1920 err,
1921 )
1921 )
1922
1922
1923 for name, mod in extensions.extensions():
1923 for name, mod in extensions.extensions():
1924 handler = getattr(mod, 'debuginstall', None)
1924 handler = getattr(mod, 'debuginstall', None)
1925 if handler is not None:
1925 if handler is not None:
1926 problems += handler(ui, fm)
1926 problems += handler(ui, fm)
1927
1927
1928 fm.condwrite(not problems, b'', _(b"no problems detected\n"))
1928 fm.condwrite(not problems, b'', _(b"no problems detected\n"))
1929 if not problems:
1929 if not problems:
1930 fm.data(problems=problems)
1930 fm.data(problems=problems)
1931 fm.condwrite(
1931 fm.condwrite(
1932 problems,
1932 problems,
1933 b'problems',
1933 b'problems',
1934 _(b"%d problems detected, please check your install!\n"),
1934 _(b"%d problems detected, please check your install!\n"),
1935 problems,
1935 problems,
1936 )
1936 )
1937 fm.end()
1937 fm.end()
1938
1938
1939 return problems
1939 return problems
1940
1940
1941
1941
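# debuginstall returns the number of problems found (0 on success, per the
# docstring above), which makes it usable in setup checks; a sketch:
#
#   $ hg debuginstall || echo "installation problems detected"
#   $ hg debuginstall -T json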
1942 @command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
1942 @command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
1943 def debugknown(ui, repopath, *ids, **opts):
1943 def debugknown(ui, repopath, *ids, **opts):
1944 """test whether node ids are known to a repo
1944 """test whether node ids are known to a repo
1945
1945
1946 Every ID must be a full-length hex node id string. Returns a list of 0s
1946 Every ID must be a full-length hex node id string. Returns a list of 0s
1947 and 1s indicating unknown/known.
1947 and 1s indicating unknown/known.
1948 """
1948 """
1949 opts = pycompat.byteskwargs(opts)
1949 opts = pycompat.byteskwargs(opts)
1950 repo = hg.peer(ui, opts, repopath)
1950 repo = hg.peer(ui, opts, repopath)
1951 if not repo.capable(b'known'):
1951 if not repo.capable(b'known'):
1952 raise error.Abort(b"known() not supported by target repository")
1952 raise error.Abort(b"known() not supported by target repository")
1953 flags = repo.known([bin(s) for s in ids])
1953 flags = repo.known([bin(s) for s in ids])
1954 ui.write(b"%s\n" % (b"".join([f and b"1" or b"0" for f in flags])))
1954 ui.write(b"%s\n" % (b"".join([f and b"1" or b"0" for f in flags])))
1955
1955
1956
1956
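# Example of querying a peer for known nodes; the URL and the 40-character
# hex id are placeholders.  The output is a string of 0s and 1s, one per id:
#
#   $ hg debugknown https://example.org/repo \
#       0123456789012345678901234567890123456789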
1957 @command(b'debuglabelcomplete', [], _(b'LABEL...'))
1957 @command(b'debuglabelcomplete', [], _(b'LABEL...'))
1958 def debuglabelcomplete(ui, repo, *args):
1958 def debuglabelcomplete(ui, repo, *args):
1959 '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
1959 '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
1960 debugnamecomplete(ui, repo, *args)
1960 debugnamecomplete(ui, repo, *args)
1961
1961
1962
1962
1963 @command(
1963 @command(
1964 b'debuglocks',
1964 b'debuglocks',
1965 [
1965 [
1966 (b'L', b'force-free-lock', None, _(b'free the store lock (DANGEROUS)')),
1966 (b'L', b'force-free-lock', None, _(b'free the store lock (DANGEROUS)')),
1967 (
1967 (
1968 b'W',
1968 b'W',
1969 b'force-free-wlock',
1969 b'force-free-wlock',
1970 None,
1970 None,
1971 _(b'free the working state lock (DANGEROUS)'),
1971 _(b'free the working state lock (DANGEROUS)'),
1972 ),
1972 ),
1973 (b's', b'set-lock', None, _(b'set the store lock until stopped')),
1973 (b's', b'set-lock', None, _(b'set the store lock until stopped')),
1974 (
1974 (
1975 b'S',
1975 b'S',
1976 b'set-wlock',
1976 b'set-wlock',
1977 None,
1977 None,
1978 _(b'set the working state lock until stopped'),
1978 _(b'set the working state lock until stopped'),
1979 ),
1979 ),
1980 ],
1980 ],
1981 _(b'[OPTION]...'),
1981 _(b'[OPTION]...'),
1982 )
1982 )
1983 def debuglocks(ui, repo, **opts):
1983 def debuglocks(ui, repo, **opts):
1984 """show or modify state of locks
1984 """show or modify state of locks
1985
1985
1986 By default, this command will show which locks are held. This
1986 By default, this command will show which locks are held. This
1987 includes the user and process holding the lock, the amount of time
1987 includes the user and process holding the lock, the amount of time
1988 the lock has been held, and the machine name where the process is
1988 the lock has been held, and the machine name where the process is
1989 running if it's not local.
1989 running if it's not local.
1990
1990
1991 Locks protect the integrity of Mercurial's data, so should be
1991 Locks protect the integrity of Mercurial's data, so should be
1992 treated with care. System crashes or other interruptions may cause
1992 treated with care. System crashes or other interruptions may cause
1993 locks to not be properly released, though Mercurial will usually
1993 locks to not be properly released, though Mercurial will usually
1994 detect and remove such stale locks automatically.
1994 detect and remove such stale locks automatically.
1995
1995
1996 However, detecting stale locks may not always be possible (for
1996 However, detecting stale locks may not always be possible (for
1997 instance, on a shared filesystem). Removing locks may also be
1997 instance, on a shared filesystem). Removing locks may also be
1998 blocked by filesystem permissions.
1998 blocked by filesystem permissions.
1999
1999
2000 Setting a lock will prevent other commands from changing the data.
2000 Setting a lock will prevent other commands from changing the data.
2001 The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
2001 The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
2002 The set locks are removed when the command exits.
2002 The set locks are removed when the command exits.
2003
2003
2004 Returns 0 if no locks are held.
2004 Returns 0 if no locks are held.
2005
2005
2006 """
2006 """
2007
2007
2008 if opts.get('force_free_lock'):
2008 if opts.get('force_free_lock'):
2009 repo.svfs.unlink(b'lock')
2009 repo.svfs.unlink(b'lock')
2010 if opts.get('force_free_wlock'):
2010 if opts.get('force_free_wlock'):
2011 repo.vfs.unlink(b'wlock')
2011 repo.vfs.unlink(b'wlock')
2012 if opts.get('force_free_lock') or opts.get('force_free_wlock'):
2012 if opts.get('force_free_lock') or opts.get('force_free_wlock'):
2013 return 0
2013 return 0
2014
2014
2015 locks = []
2015 locks = []
2016 try:
2016 try:
2017 if opts.get('set_wlock'):
2017 if opts.get('set_wlock'):
2018 try:
2018 try:
2019 locks.append(repo.wlock(False))
2019 locks.append(repo.wlock(False))
2020 except error.LockHeld:
2020 except error.LockHeld:
2021 raise error.Abort(_(b'wlock is already held'))
2021 raise error.Abort(_(b'wlock is already held'))
2022 if opts.get('set_lock'):
2022 if opts.get('set_lock'):
2023 try:
2023 try:
2024 locks.append(repo.lock(False))
2024 locks.append(repo.lock(False))
2025 except error.LockHeld:
2025 except error.LockHeld:
2026 raise error.Abort(_(b'lock is already held'))
2026 raise error.Abort(_(b'lock is already held'))
2027 if len(locks):
2027 if len(locks):
2028 ui.promptchoice(_(b"ready to release the lock (y)? $$ &Yes"))
2028 ui.promptchoice(_(b"ready to release the lock (y)? $$ &Yes"))
2029 return 0
2029 return 0
2030 finally:
2030 finally:
2031 release(*locks)
2031 release(*locks)
2032
2032
2033 now = time.time()
2033 now = time.time()
2034 held = 0
2034 held = 0
2035
2035
2036 def report(vfs, name, method):
2036 def report(vfs, name, method):
2037 # this causes stale locks to get reaped for more accurate reporting
2037 # this causes stale locks to get reaped for more accurate reporting
2038 try:
2038 try:
2039 l = method(False)
2039 l = method(False)
2040 except error.LockHeld:
2040 except error.LockHeld:
2041 l = None
2041 l = None
2042
2042
2043 if l:
2043 if l:
2044 l.release()
2044 l.release()
2045 else:
2045 else:
2046 try:
2046 try:
2047 st = vfs.lstat(name)
2047 st = vfs.lstat(name)
2048 age = now - st[stat.ST_MTIME]
2048 age = now - st[stat.ST_MTIME]
2049 user = util.username(st.st_uid)
2049 user = util.username(st.st_uid)
2050 locker = vfs.readlock(name)
2050 locker = vfs.readlock(name)
2051 if b":" in locker:
2051 if b":" in locker:
2052 host, pid = locker.split(b':')
2052 host, pid = locker.split(b':')
2053 if host == socket.gethostname():
2053 if host == socket.gethostname():
2054 locker = b'user %s, process %s' % (user or b'None', pid)
2054 locker = b'user %s, process %s' % (user or b'None', pid)
2055 else:
2055 else:
2056 locker = b'user %s, process %s, host %s' % (
2056 locker = b'user %s, process %s, host %s' % (
2057 user or b'None',
2057 user or b'None',
2058 pid,
2058 pid,
2059 host,
2059 host,
2060 )
2060 )
2061 ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
2061 ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
2062 return 1
2062 return 1
2063 except OSError as e:
2063 except OSError as e:
2064 if e.errno != errno.ENOENT:
2064 if e.errno != errno.ENOENT:
2065 raise
2065 raise
2066
2066
2067 ui.writenoi18n(b"%-6s free\n" % (name + b":"))
2067 ui.writenoi18n(b"%-6s free\n" % (name + b":"))
2068 return 0
2068 return 0
2069
2069
2070 held += report(repo.svfs, b"lock", repo.lock)
2070 held += report(repo.svfs, b"lock", repo.lock)
2071 held += report(repo.vfs, b"wlock", repo.wlock)
2071 held += report(repo.vfs, b"wlock", repo.wlock)
2072
2072
2073 return held
2073 return held
2074
2074
2075
2075
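# Common debuglocks invocations (all illustrative); the force-free options
# are dangerous and only meant for cleaning up stale locks by hand:
#
#   $ hg debuglocks            # report lock/wlock state
#   $ hg debuglocks --set-lock # hold the store lock until interrupted
#   $ hg debuglocks -L         # forcibly free the store lock (DANGEROUS)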
2076 @command(
2076 @command(
2077 b'debugmanifestfulltextcache',
2077 b'debugmanifestfulltextcache',
2078 [
2078 [
2079 (b'', b'clear', False, _(b'clear the cache')),
2079 (b'', b'clear', False, _(b'clear the cache')),
2080 (
2080 (
2081 b'a',
2081 b'a',
2082 b'add',
2082 b'add',
2083 [],
2083 [],
2084 _(b'add the given manifest nodes to the cache'),
2084 _(b'add the given manifest nodes to the cache'),
2085 _(b'NODE'),
2085 _(b'NODE'),
2086 ),
2086 ),
2087 ],
2087 ],
2088 b'',
2088 b'',
2089 )
2089 )
2090 def debugmanifestfulltextcache(ui, repo, add=(), **opts):
2090 def debugmanifestfulltextcache(ui, repo, add=(), **opts):
2091 """show, clear or amend the contents of the manifest fulltext cache"""
2091 """show, clear or amend the contents of the manifest fulltext cache"""
2092
2092
2093 def getcache():
2093 def getcache():
2094 r = repo.manifestlog.getstorage(b'')
2094 r = repo.manifestlog.getstorage(b'')
2095 try:
2095 try:
2096 return r._fulltextcache
2096 return r._fulltextcache
2097 except AttributeError:
2097 except AttributeError:
2098 msg = _(
2098 msg = _(
2099 b"Current revlog implementation doesn't appear to have a "
2099 b"Current revlog implementation doesn't appear to have a "
2100 b"manifest fulltext cache\n"
2100 b"manifest fulltext cache\n"
2101 )
2101 )
2102 raise error.Abort(msg)
2102 raise error.Abort(msg)
2103
2103
2104 if opts.get('clear'):
2104 if opts.get('clear'):
2105 with repo.wlock():
2105 with repo.wlock():
2106 cache = getcache()
2106 cache = getcache()
2107 cache.clear(clear_persisted_data=True)
2107 cache.clear(clear_persisted_data=True)
2108 return
2108 return
2109
2109
    if add:
        with repo.wlock():
            m = repo.manifestlog
            store = m.getstorage(b'')
            for n in add:
                try:
                    manifest = m[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(e, hint=b"Check your manifest node id")
                manifest.read()  # stores revision in cache too
        return
2121
2121
2122 cache = getcache()
2122 cache = getcache()
2123 if not len(cache):
2123 if not len(cache):
2124 ui.write(_(b'cache empty\n'))
2124 ui.write(_(b'cache empty\n'))
2125 else:
2125 else:
2126 ui.write(
2126 ui.write(
2127 _(
2127 _(
2128 b'cache contains %d manifest entries, in order of most to '
2128 b'cache contains %d manifest entries, in order of most to '
2129 b'least recent:\n'
2129 b'least recent:\n'
2130 )
2130 )
2131 % (len(cache),)
2131 % (len(cache),)
2132 )
2132 )
2133 totalsize = 0
2133 totalsize = 0
        for nodeid in cache:
            # Use cache.peek so the LRU order is not updated
            data = cache.peek(nodeid)
2137 size = len(data)
2137 size = len(data)
2138 totalsize += size + 24 # 20 bytes nodeid, 4 bytes size
2138 totalsize += size + 24 # 20 bytes nodeid, 4 bytes size
2139 ui.write(
2139 ui.write(
2140 _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
2140 _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
2141 )
2141 )
2142 ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
2142 ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
2143 ui.write(
2143 ui.write(
2144 _(b'total cache data size %s, on-disk %s\n')
2144 _(b'total cache data size %s, on-disk %s\n')
2145 % (util.bytecount(totalsize), util.bytecount(ondisk))
2145 % (util.bytecount(totalsize), util.bytecount(ondisk))
2146 )
2146 )
2147
2147
2148
2148
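# Sketch of managing the manifest fulltext cache; the node id is a
# placeholder for a full manifest node:
#
#   $ hg debugmanifestfulltextcache          # show cache contents
#   $ hg debugmanifestfulltextcache --clear
#   $ hg debugmanifestfulltextcache --add 0123456789012345678901234567890123456789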
2149 @command(b'debugmergestate', [] + cmdutil.templateopts, b'')
2149 @command(b'debugmergestate', [] + cmdutil.templateopts, b'')
2150 def debugmergestate(ui, repo, *args, **opts):
2150 def debugmergestate(ui, repo, *args, **opts):
2151 """print merge state
2151 """print merge state
2152
2152
2153 Use --verbose to print out information about whether v1 or v2 merge state
2153 Use --verbose to print out information about whether v1 or v2 merge state
2154 was chosen."""
2154 was chosen."""
2155
2155
2156 if ui.verbose:
2156 if ui.verbose:
2157 ms = mergestatemod.mergestate(repo)
2157 ms = mergestatemod.mergestate(repo)
2158
2158
2159 # sort so that reasonable information is on top
2159 # sort so that reasonable information is on top
2160 v1records = ms._readrecordsv1()
2160 v1records = ms._readrecordsv1()
2161 v2records = ms._readrecordsv2()
2161 v2records = ms._readrecordsv2()
2162
2162
2163 if not v1records and not v2records:
2163 if not v1records and not v2records:
2164 pass
2164 pass
2165 elif not v2records:
2165 elif not v2records:
2166 ui.writenoi18n(b'no version 2 merge state\n')
2166 ui.writenoi18n(b'no version 2 merge state\n')
2167 elif ms._v1v2match(v1records, v2records):
2167 elif ms._v1v2match(v1records, v2records):
2168 ui.writenoi18n(b'v1 and v2 states match: using v2\n')
2168 ui.writenoi18n(b'v1 and v2 states match: using v2\n')
2169 else:
2169 else:
2170 ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n')
2170 ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n')
2171
2171
2172 opts = pycompat.byteskwargs(opts)
2172 opts = pycompat.byteskwargs(opts)
2173 if not opts[b'template']:
2173 if not opts[b'template']:
2174 opts[b'template'] = (
2174 opts[b'template'] = (
2175 b'{if(commits, "", "no merge state found\n")}'
2175 b'{if(commits, "", "no merge state found\n")}'
2176 b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}'
2176 b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}'
2177 b'{files % "file: {path} (state \\"{state}\\")\n'
2177 b'{files % "file: {path} (state \\"{state}\\")\n'
2178 b'{if(local_path, "'
2178 b'{if(local_path, "'
2179 b' local path: {local_path} (hash {local_key}, flags \\"{local_flags}\\")\n'
2179 b' local path: {local_path} (hash {local_key}, flags \\"{local_flags}\\")\n'
2180 b' ancestor path: {ancestor_path} (node {ancestor_node})\n'
2180 b' ancestor path: {ancestor_path} (node {ancestor_node})\n'
2181 b' other path: {other_path} (node {other_node})\n'
2181 b' other path: {other_path} (node {other_node})\n'
2182 b'")}'
2182 b'")}'
2183 b'{if(rename_side, "'
2183 b'{if(rename_side, "'
2184 b' rename side: {rename_side}\n'
2184 b' rename side: {rename_side}\n'
2185 b' renamed path: {renamed_path}\n'
2185 b' renamed path: {renamed_path}\n'
2186 b'")}'
2186 b'")}'
2187 b'{extras % " extra: {key} = {value}\n"}'
2187 b'{extras % " extra: {key} = {value}\n"}'
2188 b'"}'
2188 b'"}'
2189 b'{extras % "extra: {file} ({key} = {value})\n"}'
2189 b'{extras % "extra: {file} ({key} = {value})\n"}'
2190 )
2190 )
2191
2191
2192 ms = mergestatemod.mergestate.read(repo)
2192 ms = mergestatemod.mergestate.read(repo)
2193
2193
2194 fm = ui.formatter(b'debugmergestate', opts)
2194 fm = ui.formatter(b'debugmergestate', opts)
2195 fm.startitem()
2195 fm.startitem()
2196
2196
2197 fm_commits = fm.nested(b'commits')
2197 fm_commits = fm.nested(b'commits')
2198 if ms.active():
2198 if ms.active():
2199 for name, node, label_index in (
2199 for name, node, label_index in (
2200 (b'local', ms.local, 0),
2200 (b'local', ms.local, 0),
2201 (b'other', ms.other, 1),
2201 (b'other', ms.other, 1),
2202 ):
2202 ):
2203 fm_commits.startitem()
2203 fm_commits.startitem()
2204 fm_commits.data(name=name)
2204 fm_commits.data(name=name)
2205 fm_commits.data(node=hex(node))
2205 fm_commits.data(node=hex(node))
2206 if ms._labels and len(ms._labels) > label_index:
2206 if ms._labels and len(ms._labels) > label_index:
2207 fm_commits.data(label=ms._labels[label_index])
2207 fm_commits.data(label=ms._labels[label_index])
2208 fm_commits.end()
2208 fm_commits.end()
2209
2209
2210 fm_files = fm.nested(b'files')
2210 fm_files = fm.nested(b'files')
2211 if ms.active():
2211 if ms.active():
2212 for f in ms:
2212 for f in ms:
2213 fm_files.startitem()
2213 fm_files.startitem()
2214 fm_files.data(path=f)
2214 fm_files.data(path=f)
2215 state = ms._state[f]
2215 state = ms._state[f]
2216 fm_files.data(state=state[0])
2216 fm_files.data(state=state[0])
2217 if state[0] in (
2217 if state[0] in (
2218 mergestatemod.MERGE_RECORD_UNRESOLVED,
2218 mergestatemod.MERGE_RECORD_UNRESOLVED,
2219 mergestatemod.MERGE_RECORD_RESOLVED,
2219 mergestatemod.MERGE_RECORD_RESOLVED,
2220 ):
2220 ):
2221 fm_files.data(local_key=state[1])
2221 fm_files.data(local_key=state[1])
2222 fm_files.data(local_path=state[2])
2222 fm_files.data(local_path=state[2])
2223 fm_files.data(ancestor_path=state[3])
2223 fm_files.data(ancestor_path=state[3])
2224 fm_files.data(ancestor_node=state[4])
2224 fm_files.data(ancestor_node=state[4])
2225 fm_files.data(other_path=state[5])
2225 fm_files.data(other_path=state[5])
2226 fm_files.data(other_node=state[6])
2226 fm_files.data(other_node=state[6])
2227 fm_files.data(local_flags=state[7])
2227 fm_files.data(local_flags=state[7])
2228 elif state[0] in (
2228 elif state[0] in (
2229 mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
2229 mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
2230 mergestatemod.MERGE_RECORD_RESOLVED_PATH,
2230 mergestatemod.MERGE_RECORD_RESOLVED_PATH,
2231 ):
2231 ):
2232 fm_files.data(renamed_path=state[1])
2232 fm_files.data(renamed_path=state[1])
2233 fm_files.data(rename_side=state[2])
2233 fm_files.data(rename_side=state[2])
2234 fm_extras = fm_files.nested(b'extras')
2234 fm_extras = fm_files.nested(b'extras')
2235 for k, v in sorted(ms.extras(f).items()):
2235 for k, v in sorted(ms.extras(f).items()):
2236 fm_extras.startitem()
2236 fm_extras.startitem()
2237 fm_extras.data(key=k)
2237 fm_extras.data(key=k)
2238 fm_extras.data(value=v)
2238 fm_extras.data(value=v)
2239 fm_extras.end()
2239 fm_extras.end()
2240
2240
2241 fm_files.end()
2241 fm_files.end()
2242
2242
2243 fm_extras = fm.nested(b'extras')
2243 fm_extras = fm.nested(b'extras')
    for f, d in sorted(pycompat.iteritems(ms.allextras())):
        if f in ms:
            # If file is in mergestate, we have already processed its extras
            continue
2248 for k, v in pycompat.iteritems(d):
2248 for k, v in pycompat.iteritems(d):
2249 fm_extras.startitem()
2249 fm_extras.startitem()
2250 fm_extras.data(file=f)
2250 fm_extras.data(file=f)
2251 fm_extras.data(key=k)
2251 fm_extras.data(key=k)
2252 fm_extras.data(value=v)
2252 fm_extras.data(value=v)
2253 fm_extras.end()
2253 fm_extras.end()
2254
2254
2255 fm.end()
2255 fm.end()
2256
2256
2257
2257
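# debugmergestate honours the generic templater, so the merge state can be
# exported for tooling; a sketch:
#
#   $ hg debugmergestate            # human-readable default template
#   $ hg debugmergestate -T json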
2258 @command(b'debugnamecomplete', [], _(b'NAME...'))
2258 @command(b'debugnamecomplete', [], _(b'NAME...'))
2259 def debugnamecomplete(ui, repo, *args):
2259 def debugnamecomplete(ui, repo, *args):
2260 '''complete "names" - tags, open branch names, bookmark names'''
2260 '''complete "names" - tags, open branch names, bookmark names'''
2261
2261
2262 names = set()
2262 names = set()
2263 # since we previously only listed open branches, we will handle that
2263 # since we previously only listed open branches, we will handle that
2264 # specially (after this for loop)
2264 # specially (after this for loop)
2265 for name, ns in pycompat.iteritems(repo.names):
2265 for name, ns in pycompat.iteritems(repo.names):
2266 if name != b'branches':
2266 if name != b'branches':
2267 names.update(ns.listnames(repo))
2267 names.update(ns.listnames(repo))
2268 names.update(
2268 names.update(
2269 tag
2269 tag
2270 for (tag, heads, tip, closed) in repo.branchmap().iterbranches()
2270 for (tag, heads, tip, closed) in repo.branchmap().iterbranches()
2271 if not closed
2271 if not closed
2272 )
2272 )
2273 completions = set()
2273 completions = set()
2274 if not args:
2274 if not args:
2275 args = [b'']
2275 args = [b'']
2276 for a in args:
2276 for a in args:
2277 completions.update(n for n in names if n.startswith(a))
2277 completions.update(n for n in names if n.startswith(a))
2278 ui.write(b'\n'.join(sorted(completions)))
2278 ui.write(b'\n'.join(sorted(completions)))
2279 ui.write(b'\n')
2279 ui.write(b'\n')
2280
2280
2281
2281
2282 @command(
2282 @command(
2283 b'debugnodemap',
2283 b'debugnodemap',
2284 [
2284 [
2285 (
2285 (
2286 b'',
2286 b'',
2287 b'dump-new',
2287 b'dump-new',
2288 False,
2288 False,
2289 _(b'write a (new) persistent binary nodemap on stdout'),
2289 _(b'write a (new) persistent binary nodemap on stdout'),
2290 ),
2290 ),
2291 (b'', b'dump-disk', False, _(b'dump on-disk data on stdout')),
2291 (b'', b'dump-disk', False, _(b'dump on-disk data on stdout')),
        (
            b'',
            b'check',
            False,
            _(b'check that the data on disk are correct.'),
        ),
        (
            b'',
            b'metadata',
            False,
            _(b'display the on-disk metadata for the nodemap'),
        ),
2304 ],
2304 ],
2305 )
2305 )
2306 def debugnodemap(ui, repo, **opts):
2306 def debugnodemap(ui, repo, **opts):
2307 """write and inspect on disk nodemap"""
2307 """write and inspect on disk nodemap"""
2308 if opts['dump_new']:
2308 if opts['dump_new']:
2309 unfi = repo.unfiltered()
2309 unfi = repo.unfiltered()
2310 cl = unfi.changelog
2310 cl = unfi.changelog
2311 if util.safehasattr(cl.index, "nodemap_data_all"):
2311 if util.safehasattr(cl.index, "nodemap_data_all"):
2312 data = cl.index.nodemap_data_all()
2312 data = cl.index.nodemap_data_all()
2313 else:
2313 else:
2314 data = nodemap.persistent_data(cl.index)
2314 data = nodemap.persistent_data(cl.index)
2315 ui.write(data)
2315 ui.write(data)
2316 elif opts['dump_disk']:
2316 elif opts['dump_disk']:
2317 unfi = repo.unfiltered()
2317 unfi = repo.unfiltered()
2318 cl = unfi.changelog
2318 cl = unfi.changelog
2319 nm_data = nodemap.persisted_data(cl)
2319 nm_data = nodemap.persisted_data(cl)
2320 if nm_data is not None:
2320 if nm_data is not None:
2321 docket, data = nm_data
2321 docket, data = nm_data
2322 ui.write(data[:])
2322 ui.write(data[:])
2323 elif opts['check']:
2323 elif opts['check']:
2324 unfi = repo.unfiltered()
2324 unfi = repo.unfiltered()
2325 cl = unfi.changelog
2325 cl = unfi.changelog
2326 nm_data = nodemap.persisted_data(cl)
2326 nm_data = nodemap.persisted_data(cl)
2327 if nm_data is not None:
2327 if nm_data is not None:
2328 docket, data = nm_data
2328 docket, data = nm_data
2329 return nodemap.check_data(ui, cl.index, data)
2329 return nodemap.check_data(ui, cl.index, data)
2330 elif opts['metadata']:
2330 elif opts['metadata']:
2331 unfi = repo.unfiltered()
2331 unfi = repo.unfiltered()
2332 cl = unfi.changelog
2332 cl = unfi.changelog
2333 nm_data = nodemap.persisted_data(cl)
2333 nm_data = nodemap.persisted_data(cl)
2334 if nm_data is not None:
2334 if nm_data is not None:
2335 docket, data = nm_data
2335 docket, data = nm_data
2336 ui.write((b"uid: %s\n") % docket.uid)
2336 ui.write((b"uid: %s\n") % docket.uid)
2337 ui.write((b"tip-rev: %d\n") % docket.tip_rev)
2337 ui.write((b"tip-rev: %d\n") % docket.tip_rev)
2338 ui.write((b"tip-node: %s\n") % hex(docket.tip_node))
2338 ui.write((b"tip-node: %s\n") % hex(docket.tip_node))
2339 ui.write((b"data-length: %d\n") % docket.data_length)
2339 ui.write((b"data-length: %d\n") % docket.data_length)
2340 ui.write((b"data-unused: %d\n") % docket.data_unused)
2340 ui.write((b"data-unused: %d\n") % docket.data_unused)
2341 unused_perc = docket.data_unused * 100.0 / docket.data_length
2341 unused_perc = docket.data_unused * 100.0 / docket.data_length
2342 ui.write((b"data-unused: %2.3f%%\n") % unused_perc)
2342 ui.write((b"data-unused: %2.3f%%\n") % unused_perc)
2343
2343
2344
2344
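# Illustrative ways to inspect the persistent nodemap written for revlogs:
#
#   $ hg debugnodemap --metadata   # docket fields (uid, tip-rev, ...)
#   $ hg debugnodemap --check      # verify on-disk data against the index
#   $ hg debugnodemap --dump-disk > nodemap.bin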
2345 @command(
2345 @command(
2346 b'debugobsolete',
2346 b'debugobsolete',
2347 [
2347 [
2348 (b'', b'flags', 0, _(b'markers flag')),
2348 (b'', b'flags', 0, _(b'markers flag')),
2349 (
2349 (
2350 b'',
2350 b'',
2351 b'record-parents',
2351 b'record-parents',
2352 False,
2352 False,
2353 _(b'record parent information for the precursor'),
2353 _(b'record parent information for the precursor'),
2354 ),
2354 ),
2355 (b'r', b'rev', [], _(b'display markers relevant to REV')),
2355 (b'r', b'rev', [], _(b'display markers relevant to REV')),
2356 (
2356 (
2357 b'',
2357 b'',
2358 b'exclusive',
2358 b'exclusive',
2359 False,
2359 False,
2360 _(b'restrict display to markers only relevant to REV'),
2360 _(b'restrict display to markers only relevant to REV'),
2361 ),
2361 ),
2362 (b'', b'index', False, _(b'display index of the marker')),
2362 (b'', b'index', False, _(b'display index of the marker')),
2363 (b'', b'delete', [], _(b'delete markers specified by indices')),
2363 (b'', b'delete', [], _(b'delete markers specified by indices')),
2364 ]
2364 ]
2365 + cmdutil.commitopts2
2365 + cmdutil.commitopts2
2366 + cmdutil.formatteropts,
2366 + cmdutil.formatteropts,
2367 _(b'[OBSOLETED [REPLACEMENT ...]]'),
2367 _(b'[OBSOLETED [REPLACEMENT ...]]'),
2368 )
2368 )
2369 def debugobsolete(ui, repo, precursor=None, *successors, **opts):
2369 def debugobsolete(ui, repo, precursor=None, *successors, **opts):
2370     """create an arbitrary obsolescence marker
2370     """create an arbitrary obsolescence marker
2371
2371
2372 With no arguments, displays the list of obsolescence markers."""
2372 With no arguments, displays the list of obsolescence markers."""
2373
2373
2374 opts = pycompat.byteskwargs(opts)
2374 opts = pycompat.byteskwargs(opts)
2375
2375
2376 def parsenodeid(s):
2376 def parsenodeid(s):
2377 try:
2377 try:
2378 # We do not use revsingle/revrange functions here to accept
2378 # We do not use revsingle/revrange functions here to accept
2379 # arbitrary node identifiers, possibly not present in the
2379 # arbitrary node identifiers, possibly not present in the
2380 # local repository.
2380 # local repository.
2381 n = bin(s)
2381 n = bin(s)
2382 if len(n) != len(nullid):
2382 if len(n) != len(nullid):
2383 raise TypeError()
2383 raise TypeError()
2384 return n
2384 return n
2385 except TypeError:
2385 except TypeError:
2386 raise error.InputError(
2386 raise error.InputError(
2387 b'changeset references must be full hexadecimal '
2387 b'changeset references must be full hexadecimal '
2388 b'node identifiers'
2388 b'node identifiers'
2389 )
2389 )
2390
2390
2391 if opts.get(b'delete'):
2391 if opts.get(b'delete'):
2392 indices = []
2392 indices = []
2393 for v in opts.get(b'delete'):
2393 for v in opts.get(b'delete'):
2394 try:
2394 try:
2395 indices.append(int(v))
2395 indices.append(int(v))
2396 except ValueError:
2396 except ValueError:
2397 raise error.InputError(
2397 raise error.InputError(
2398 _(b'invalid index value: %r') % v,
2398 _(b'invalid index value: %r') % v,
2399 hint=_(b'use integers for indices'),
2399 hint=_(b'use integers for indices'),
2400 )
2400 )
2401
2401
2402 if repo.currenttransaction():
2402 if repo.currenttransaction():
2403 raise error.Abort(
2403 raise error.Abort(
2404                 _(b'cannot delete obsmarkers in the middle of a transaction.')
2404                 _(b'cannot delete obsmarkers in the middle of a transaction.')
2405 )
2405 )
2406
2406
2407 with repo.lock():
2407 with repo.lock():
2408 n = repair.deleteobsmarkers(repo.obsstore, indices)
2408 n = repair.deleteobsmarkers(repo.obsstore, indices)
2409 ui.write(_(b'deleted %i obsolescence markers\n') % n)
2409 ui.write(_(b'deleted %i obsolescence markers\n') % n)
2410
2410
2411 return
2411 return
2412
2412
2413 if precursor is not None:
2413 if precursor is not None:
2414 if opts[b'rev']:
2414 if opts[b'rev']:
2415 raise error.InputError(
2415 raise error.InputError(
2416 b'cannot select revision when creating marker'
2416 b'cannot select revision when creating marker'
2417 )
2417 )
2418 metadata = {}
2418 metadata = {}
2419 metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
2419 metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
2420 succs = tuple(parsenodeid(succ) for succ in successors)
2420 succs = tuple(parsenodeid(succ) for succ in successors)
2421 l = repo.lock()
2421 l = repo.lock()
2422 try:
2422 try:
2423 tr = repo.transaction(b'debugobsolete')
2423 tr = repo.transaction(b'debugobsolete')
2424 try:
2424 try:
2425 date = opts.get(b'date')
2425 date = opts.get(b'date')
2426 if date:
2426 if date:
2427 date = dateutil.parsedate(date)
2427 date = dateutil.parsedate(date)
2428 else:
2428 else:
2429 date = None
2429 date = None
2430 prec = parsenodeid(precursor)
2430 prec = parsenodeid(precursor)
2431 parents = None
2431 parents = None
2432 if opts[b'record_parents']:
2432 if opts[b'record_parents']:
2433 if prec not in repo.unfiltered():
2433 if prec not in repo.unfiltered():
2434 raise error.Abort(
2434 raise error.Abort(
2435                             b'cannot use --record-parents on '
2435                             b'cannot use --record-parents on '
2436 b'unknown changesets'
2436 b'unknown changesets'
2437 )
2437 )
2438 parents = repo.unfiltered()[prec].parents()
2438 parents = repo.unfiltered()[prec].parents()
2439 parents = tuple(p.node() for p in parents)
2439 parents = tuple(p.node() for p in parents)
2440 repo.obsstore.create(
2440 repo.obsstore.create(
2441 tr,
2441 tr,
2442 prec,
2442 prec,
2443 succs,
2443 succs,
2444 opts[b'flags'],
2444 opts[b'flags'],
2445 parents=parents,
2445 parents=parents,
2446 date=date,
2446 date=date,
2447 metadata=metadata,
2447 metadata=metadata,
2448 ui=ui,
2448 ui=ui,
2449 )
2449 )
2450 tr.close()
2450 tr.close()
2451 except ValueError as exc:
2451 except ValueError as exc:
2452 raise error.Abort(
2452 raise error.Abort(
2453 _(b'bad obsmarker input: %s') % pycompat.bytestr(exc)
2453 _(b'bad obsmarker input: %s') % pycompat.bytestr(exc)
2454 )
2454 )
2455 finally:
2455 finally:
2456 tr.release()
2456 tr.release()
2457 finally:
2457 finally:
2458 l.release()
2458 l.release()
2459 else:
2459 else:
2460 if opts[b'rev']:
2460 if opts[b'rev']:
2461 revs = scmutil.revrange(repo, opts[b'rev'])
2461 revs = scmutil.revrange(repo, opts[b'rev'])
2462 nodes = [repo[r].node() for r in revs]
2462 nodes = [repo[r].node() for r in revs]
2463 markers = list(
2463 markers = list(
2464 obsutil.getmarkers(
2464 obsutil.getmarkers(
2465 repo, nodes=nodes, exclusive=opts[b'exclusive']
2465 repo, nodes=nodes, exclusive=opts[b'exclusive']
2466 )
2466 )
2467 )
2467 )
2468 markers.sort(key=lambda x: x._data)
2468 markers.sort(key=lambda x: x._data)
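            # sort on the raw marker data so the output order is stable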
2469 else:
2469 else:
2470 markers = obsutil.getmarkers(repo)
2470 markers = obsutil.getmarkers(repo)
2471
2471
2472 markerstoiter = markers
2472 markerstoiter = markers
2473 isrelevant = lambda m: True
2473 isrelevant = lambda m: True
2474 if opts.get(b'rev') and opts.get(b'index'):
2474 if opts.get(b'rev') and opts.get(b'index'):
2475 markerstoiter = obsutil.getmarkers(repo)
2475 markerstoiter = obsutil.getmarkers(repo)
2476 markerset = set(markers)
2476 markerset = set(markers)
2477 isrelevant = lambda m: m in markerset
2477 isrelevant = lambda m: m in markerset
2478
2478
2479 fm = ui.formatter(b'debugobsolete', opts)
2479 fm = ui.formatter(b'debugobsolete', opts)
2480 for i, m in enumerate(markerstoiter):
2480 for i, m in enumerate(markerstoiter):
2481 if not isrelevant(m):
2481 if not isrelevant(m):
2482 # marker can be irrelevant when we're iterating over a set
2482 # marker can be irrelevant when we're iterating over a set
2483 # of markers (markerstoiter) which is bigger than the set
2483 # of markers (markerstoiter) which is bigger than the set
2484 # of markers we want to display (markers)
2484 # of markers we want to display (markers)
2485 # this can happen if both --index and --rev options are
2485 # this can happen if both --index and --rev options are
2486 # provided and thus we need to iterate over all of the markers
2486 # provided and thus we need to iterate over all of the markers
2487 # to get the correct indices, but only display the ones that
2487 # to get the correct indices, but only display the ones that
2488 # are relevant to --rev value
2488 # are relevant to --rev value
2489 continue
2489 continue
2490 fm.startitem()
2490 fm.startitem()
2491 ind = i if opts.get(b'index') else None
2491 ind = i if opts.get(b'index') else None
2492 cmdutil.showmarker(fm, m, index=ind)
2492 cmdutil.showmarker(fm, m, index=ind)
2493 fm.end()
2493 fm.end()
2494
2494
2495
2495
2496 @command(
2496 @command(
2497 b'debugp1copies',
2497 b'debugp1copies',
2498 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
2498 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
2499 _(b'[-r REV]'),
2499 _(b'[-r REV]'),
2500 )
2500 )
2501 def debugp1copies(ui, repo, **opts):
2501 def debugp1copies(ui, repo, **opts):
2502 """dump copy information compared to p1"""
2502 """dump copy information compared to p1"""
2503
2503
2504 opts = pycompat.byteskwargs(opts)
2504 opts = pycompat.byteskwargs(opts)
2505 ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
2505 ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
2506 for dst, src in ctx.p1copies().items():
2506 for dst, src in ctx.p1copies().items():
2507 ui.write(b'%s -> %s\n' % (src, dst))
2507 ui.write(b'%s -> %s\n' % (src, dst))
2508
2508
2509
2509
2510 @command(
2510 @command(
2511 b'debugp2copies',
2511 b'debugp2copies',
2512 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
2512 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
2513 _(b'[-r REV]'),
2513 _(b'[-r REV]'),
2514 )
2514 )
2515 def debugp2copies(ui, repo, **opts):
2515 def debugp2copies(ui, repo, **opts):
2516 """dump copy information compared to p2"""
2516 """dump copy information compared to p2"""
2517
2517
2518 opts = pycompat.byteskwargs(opts)
2518 opts = pycompat.byteskwargs(opts)
2519 ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
2519 ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
2520 for dst, src in ctx.p2copies().items():
2520 for dst, src in ctx.p2copies().items():
2521 ui.write(b'%s -> %s\n' % (src, dst))
2521 ui.write(b'%s -> %s\n' % (src, dst))
2522
2522
2523
2523
2524 @command(
2524 @command(
2525 b'debugpathcomplete',
2525 b'debugpathcomplete',
2526 [
2526 [
2527 (b'f', b'full', None, _(b'complete an entire path')),
2527 (b'f', b'full', None, _(b'complete an entire path')),
2528 (b'n', b'normal', None, _(b'show only normal files')),
2528 (b'n', b'normal', None, _(b'show only normal files')),
2529 (b'a', b'added', None, _(b'show only added files')),
2529 (b'a', b'added', None, _(b'show only added files')),
2530 (b'r', b'removed', None, _(b'show only removed files')),
2530 (b'r', b'removed', None, _(b'show only removed files')),
2531 ],
2531 ],
2532 _(b'FILESPEC...'),
2532 _(b'FILESPEC...'),
2533 )
2533 )
2534 def debugpathcomplete(ui, repo, *specs, **opts):
2534 def debugpathcomplete(ui, repo, *specs, **opts):
2535 """complete part or all of a tracked path
2535 """complete part or all of a tracked path
2536
2536
2537 This command supports shells that offer path name completion. It
2537 This command supports shells that offer path name completion. It
2538 currently completes only files already known to the dirstate.
2538 currently completes only files already known to the dirstate.
2539
2539
2540 Completion extends only to the next path segment unless
2540 Completion extends only to the next path segment unless
2541 --full is specified, in which case entire paths are used."""
2541 --full is specified, in which case entire paths are used."""
2542
2542
2543 def complete(path, acceptable):
2543 def complete(path, acceptable):
2544 dirstate = repo.dirstate
2544 dirstate = repo.dirstate
2545 spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
2545 spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
2546 rootdir = repo.root + pycompat.ossep
2546 rootdir = repo.root + pycompat.ossep
2547 if spec != repo.root and not spec.startswith(rootdir):
2547 if spec != repo.root and not spec.startswith(rootdir):
2548 return [], []
2548 return [], []
2549 if os.path.isdir(spec):
2549 if os.path.isdir(spec):
2550 spec += b'/'
2550 spec += b'/'
2551 spec = spec[len(rootdir) :]
2551 spec = spec[len(rootdir) :]
2552 fixpaths = pycompat.ossep != b'/'
2552 fixpaths = pycompat.ossep != b'/'
2553 if fixpaths:
2553 if fixpaths:
2554 spec = spec.replace(pycompat.ossep, b'/')
2554 spec = spec.replace(pycompat.ossep, b'/')
2555 speclen = len(spec)
2555 speclen = len(spec)
2556 fullpaths = opts['full']
2556 fullpaths = opts['full']
2557 files, dirs = set(), set()
2557 files, dirs = set(), set()
2558 adddir, addfile = dirs.add, files.add
2558 adddir, addfile = dirs.add, files.add
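        # each dirstate entry maps a filename to a tuple whose first element
        # is a one-character state ('n' normal, 'a' added, 'r' removed,
        # 'm' merged); that state is matched against `acceptable` below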
2559 for f, st in pycompat.iteritems(dirstate):
2559 for f, st in pycompat.iteritems(dirstate):
2560 if f.startswith(spec) and st[0] in acceptable:
2560 if f.startswith(spec) and st[0] in acceptable:
2561 if fixpaths:
2561 if fixpaths:
2562 f = f.replace(b'/', pycompat.ossep)
2562 f = f.replace(b'/', pycompat.ossep)
2563 if fullpaths:
2563 if fullpaths:
2564 addfile(f)
2564 addfile(f)
2565 continue
2565 continue
2566 s = f.find(pycompat.ossep, speclen)
2566 s = f.find(pycompat.ossep, speclen)
2567 if s >= 0:
2567 if s >= 0:
2568 adddir(f[:s])
2568 adddir(f[:s])
2569 else:
2569 else:
2570 addfile(f)
2570 addfile(f)
2571 return files, dirs
2571 return files, dirs
2572
2572
2573 acceptable = b''
2573 acceptable = b''
2574 if opts['normal']:
2574 if opts['normal']:
2575 acceptable += b'nm'
2575 acceptable += b'nm'
2576 if opts['added']:
2576 if opts['added']:
2577 acceptable += b'a'
2577 acceptable += b'a'
2578 if opts['removed']:
2578 if opts['removed']:
2579 acceptable += b'r'
2579 acceptable += b'r'
2580 cwd = repo.getcwd()
2580 cwd = repo.getcwd()
2581 if not specs:
2581 if not specs:
2582 specs = [b'.']
2582 specs = [b'.']
2583
2583
2584 files, dirs = set(), set()
2584 files, dirs = set(), set()
2585 for spec in specs:
2585 for spec in specs:
2586 f, d = complete(spec, acceptable or b'nmar')
2586 f, d = complete(spec, acceptable or b'nmar')
2587 files.update(f)
2587 files.update(f)
2588 dirs.update(d)
2588 dirs.update(d)
2589 files.update(dirs)
2589 files.update(dirs)
2590 ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
2590 ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
2591 ui.write(b'\n')
2591 ui.write(b'\n')
2592
2592
2593
2593
2594 @command(
2594 @command(
2595 b'debugpathcopies',
2595 b'debugpathcopies',
2596 cmdutil.walkopts,
2596 cmdutil.walkopts,
2597 b'hg debugpathcopies REV1 REV2 [FILE]',
2597 b'hg debugpathcopies REV1 REV2 [FILE]',
2598 inferrepo=True,
2598 inferrepo=True,
2599 )
2599 )
2600 def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
2600 def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
2601 """show copies between two revisions"""
2601 """show copies between two revisions"""
2602 ctx1 = scmutil.revsingle(repo, rev1)
2602 ctx1 = scmutil.revsingle(repo, rev1)
2603 ctx2 = scmutil.revsingle(repo, rev2)
2603 ctx2 = scmutil.revsingle(repo, rev2)
2604 m = scmutil.match(ctx1, pats, opts)
2604 m = scmutil.match(ctx1, pats, opts)
2605 for dst, src in sorted(copies.pathcopies(ctx1, ctx2, m).items()):
2605 for dst, src in sorted(copies.pathcopies(ctx1, ctx2, m).items()):
2606 ui.write(b'%s -> %s\n' % (src, dst))
2606 ui.write(b'%s -> %s\n' % (src, dst))
2607
2607
2608
2608
2609 @command(b'debugpeer', [], _(b'PATH'), norepo=True)
2609 @command(b'debugpeer', [], _(b'PATH'), norepo=True)
2610 def debugpeer(ui, path):
2610 def debugpeer(ui, path):
2611 """establish a connection to a peer repository"""
2611 """establish a connection to a peer repository"""
2612     # Always enable peer request logging. It is only displayed with
2612     # Always enable peer request logging. It is only displayed with
2613     # --debug, though.
2613     # --debug, though.
2614 overrides = {
2614 overrides = {
2615 (b'devel', b'debug.peer-request'): True,
2615 (b'devel', b'debug.peer-request'): True,
2616 }
2616 }
2617
2617
2618 with ui.configoverride(overrides):
2618 with ui.configoverride(overrides):
2619 peer = hg.peer(ui, {}, path)
2619 peer = hg.peer(ui, {}, path)
2620
2620
2621 try:
2621 try:
2622 local = peer.local() is not None
2622 local = peer.local() is not None
2623 canpush = peer.canpush()
2623 canpush = peer.canpush()
2624
2624
2625 ui.write(_(b'url: %s\n') % peer.url())
2625 ui.write(_(b'url: %s\n') % peer.url())
2626 ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
2626 ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
2627 ui.write(
2627 ui.write(
2628 _(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no'))
2628 _(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no'))
2629 )
2629 )
2630 finally:
2630 finally:
2631 peer.close()
2631 peer.close()
2632
2632
2633
2633
2634 @command(
2634 @command(
2635 b'debugpickmergetool',
2635 b'debugpickmergetool',
2636 [
2636 [
2637 (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
2637 (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
2638 (b'', b'changedelete', None, _(b'emulate merging change and delete')),
2638 (b'', b'changedelete', None, _(b'emulate merging change and delete')),
2639 ]
2639 ]
2640 + cmdutil.walkopts
2640 + cmdutil.walkopts
2641 + cmdutil.mergetoolopts,
2641 + cmdutil.mergetoolopts,
2642 _(b'[PATTERN]...'),
2642 _(b'[PATTERN]...'),
2643 inferrepo=True,
2643 inferrepo=True,
2644 )
2644 )
2645 def debugpickmergetool(ui, repo, *pats, **opts):
2645 def debugpickmergetool(ui, repo, *pats, **opts):
2646     """examine which merge tool is chosen for the specified file
2646     """examine which merge tool is chosen for the specified file
2647
2647
2648 As described in :hg:`help merge-tools`, Mercurial examines
2648 As described in :hg:`help merge-tools`, Mercurial examines
2649 configurations below in this order to decide which merge tool is
2649 configurations below in this order to decide which merge tool is
2650     chosen for the specified file.
2650     chosen for the specified file.
2651
2651
2652 1. ``--tool`` option
2652 1. ``--tool`` option
2653 2. ``HGMERGE`` environment variable
2653 2. ``HGMERGE`` environment variable
2654 3. configurations in ``merge-patterns`` section
2654 3. configurations in ``merge-patterns`` section
2655 4. configuration of ``ui.merge``
2655 4. configuration of ``ui.merge``
2656 5. configurations in ``merge-tools`` section
2656 5. configurations in ``merge-tools`` section
2657     6. ``hgmerge`` tool (for historical reasons only)
2657     6. ``hgmerge`` tool (for historical reasons only)
2658 7. default tool for fallback (``:merge`` or ``:prompt``)
2658 7. default tool for fallback (``:merge`` or ``:prompt``)
2659
2659
2660     This command writes out the examination result in the style below::
2660     This command writes out the examination result in the style below::
2661
2661
2662 FILE = MERGETOOL
2662 FILE = MERGETOOL
2663
2663
2664 By default, all files known in the first parent context of the
2664 By default, all files known in the first parent context of the
2665 working directory are examined. Use file patterns and/or -I/-X
2665 working directory are examined. Use file patterns and/or -I/-X
2666 options to limit target files. -r/--rev is also useful to examine
2666 options to limit target files. -r/--rev is also useful to examine
2667     files in another context without actually updating to it.
2667     files in another context without actually updating to it.
2668
2668
2669 With --debug, this command shows warning messages while matching
2669 With --debug, this command shows warning messages while matching
2670 against ``merge-patterns`` and so on, too. It is recommended to
2670 against ``merge-patterns`` and so on, too. It is recommended to
2671 use this option with explicit file patterns and/or -I/-X options,
2671 use this option with explicit file patterns and/or -I/-X options,
2672     because this option increases the amount of output per file according
2672     because this option increases the amount of output per file according
2673 to configurations in hgrc.
2673 to configurations in hgrc.
2674
2674
2675 With -v/--verbose, this command shows configurations below at
2675 With -v/--verbose, this command shows configurations below at
2676 first (only if specified).
2676 first (only if specified).
2677
2677
2678 - ``--tool`` option
2678 - ``--tool`` option
2679 - ``HGMERGE`` environment variable
2679 - ``HGMERGE`` environment variable
2680 - configuration of ``ui.merge``
2680 - configuration of ``ui.merge``
2681
2681
2682     If a merge tool is chosen before matching against
2682     If a merge tool is chosen before matching against
2683     ``merge-patterns``, this command can't show any helpful
2683     ``merge-patterns``, this command can't show any helpful
2684     information, even with --debug. In such a case, the information
2684     information, even with --debug. In such a case, the information
2685     above is useful for understanding why a merge tool was chosen.
2685     above is useful for understanding why a merge tool was chosen.
2686 """
2686 """
2687 opts = pycompat.byteskwargs(opts)
2687 opts = pycompat.byteskwargs(opts)
2688 overrides = {}
2688 overrides = {}
2689 if opts[b'tool']:
2689 if opts[b'tool']:
2690 overrides[(b'ui', b'forcemerge')] = opts[b'tool']
2690 overrides[(b'ui', b'forcemerge')] = opts[b'tool']
2691 ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))
2691 ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))
2692
2692
2693 with ui.configoverride(overrides, b'debugmergepatterns'):
2693 with ui.configoverride(overrides, b'debugmergepatterns'):
2694 hgmerge = encoding.environ.get(b"HGMERGE")
2694 hgmerge = encoding.environ.get(b"HGMERGE")
2695 if hgmerge is not None:
2695 if hgmerge is not None:
2696 ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
2696 ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
2697 uimerge = ui.config(b"ui", b"merge")
2697 uimerge = ui.config(b"ui", b"merge")
2698 if uimerge:
2698 if uimerge:
2699 ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))
2699 ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))
2700
2700
2701 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
2701 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
2702 m = scmutil.match(ctx, pats, opts)
2702 m = scmutil.match(ctx, pats, opts)
2703 changedelete = opts[b'changedelete']
2703 changedelete = opts[b'changedelete']
2704 for path in ctx.walk(m):
2704 for path in ctx.walk(m):
2705 fctx = ctx[path]
2705 fctx = ctx[path]
2706 try:
2706 try:
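                # swallow the tool-selection chatter unless --debug is set;
                # the buffer is dropped again in the finally block below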
2707 if not ui.debugflag:
2707 if not ui.debugflag:
2708 ui.pushbuffer(error=True)
2708 ui.pushbuffer(error=True)
2709 tool, toolpath = filemerge._picktool(
2709 tool, toolpath = filemerge._picktool(
2710 repo,
2710 repo,
2711 ui,
2711 ui,
2712 path,
2712 path,
2713 fctx.isbinary(),
2713 fctx.isbinary(),
2714 b'l' in fctx.flags(),
2714 b'l' in fctx.flags(),
2715 changedelete,
2715 changedelete,
2716 )
2716 )
2717 finally:
2717 finally:
2718 if not ui.debugflag:
2718 if not ui.debugflag:
2719 ui.popbuffer()
2719 ui.popbuffer()
2720 ui.write(b'%s = %s\n' % (path, tool))
2720 ui.write(b'%s = %s\n' % (path, tool))
2721
2721
2722
2722
2723 @command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
2723 @command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
2724 def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
2724 def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
2725 """access the pushkey key/value protocol
2725 """access the pushkey key/value protocol
2726
2726
2727 With two args, list the keys in the given namespace.
2727 With two args, list the keys in the given namespace.
2728
2728
2729 With five args, set a key to new if it currently is set to old.
2729 With five args, set a key to new if it currently is set to old.
2730 Reports success or failure.
2730 Reports success or failure.
2731 """
2731 """
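    # e.g. (illustrative only) `hg debugpushkey REPO bookmarks` lists the
    # bookmarks, and `hg debugpushkey REPO bookmarks NAME '' NODE` would try
    # to create bookmark NAME pointing at NODE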
2732
2732
2733 target = hg.peer(ui, {}, repopath)
2733 target = hg.peer(ui, {}, repopath)
2734 try:
2734 try:
2735 if keyinfo:
2735 if keyinfo:
2736 key, old, new = keyinfo
2736 key, old, new = keyinfo
2737 with target.commandexecutor() as e:
2737 with target.commandexecutor() as e:
2738 r = e.callcommand(
2738 r = e.callcommand(
2739 b'pushkey',
2739 b'pushkey',
2740 {
2740 {
2741 b'namespace': namespace,
2741 b'namespace': namespace,
2742 b'key': key,
2742 b'key': key,
2743 b'old': old,
2743 b'old': old,
2744 b'new': new,
2744 b'new': new,
2745 },
2745 },
2746 ).result()
2746 ).result()
2747
2747
2748 ui.status(pycompat.bytestr(r) + b'\n')
2748 ui.status(pycompat.bytestr(r) + b'\n')
2749 return not r
2749 return not r
2750 else:
2750 else:
2751 for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
2751 for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
2752 ui.write(
2752 ui.write(
2753 b"%s\t%s\n"
2753 b"%s\t%s\n"
2754 % (stringutil.escapestr(k), stringutil.escapestr(v))
2754 % (stringutil.escapestr(k), stringutil.escapestr(v))
2755 )
2755 )
2756 finally:
2756 finally:
2757 target.close()
2757 target.close()
2758
2758
2759
2759
2760 @command(b'debugpvec', [], _(b'A B'))
2760 @command(b'debugpvec', [], _(b'A B'))
2761 def debugpvec(ui, repo, a, b=None):
2761 def debugpvec(ui, repo, a, b=None):
2762 ca = scmutil.revsingle(repo, a)
2762 ca = scmutil.revsingle(repo, a)
2763 cb = scmutil.revsingle(repo, b)
2763 cb = scmutil.revsingle(repo, b)
2764 pa = pvec.ctxpvec(ca)
2764 pa = pvec.ctxpvec(ca)
2765 pb = pvec.ctxpvec(cb)
2765 pb = pvec.ctxpvec(cb)
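    # relation between the two parent vectors: '=' equal, '>' / '<' one side
    # subsumes the other, '|' neither does (divergent history)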
2766 if pa == pb:
2766 if pa == pb:
2767 rel = b"="
2767 rel = b"="
2768 elif pa > pb:
2768 elif pa > pb:
2769 rel = b">"
2769 rel = b">"
2770 elif pa < pb:
2770 elif pa < pb:
2771 rel = b"<"
2771 rel = b"<"
2772 elif pa | pb:
2772 elif pa | pb:
2773 rel = b"|"
2773 rel = b"|"
2774 ui.write(_(b"a: %s\n") % pa)
2774 ui.write(_(b"a: %s\n") % pa)
2775 ui.write(_(b"b: %s\n") % pb)
2775 ui.write(_(b"b: %s\n") % pb)
2776 ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
2776 ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
2777 ui.write(
2777 ui.write(
2778 _(b"delta: %d hdist: %d distance: %d relation: %s\n")
2778 _(b"delta: %d hdist: %d distance: %d relation: %s\n")
2779 % (
2779 % (
2780 abs(pa._depth - pb._depth),
2780 abs(pa._depth - pb._depth),
2781 pvec._hamming(pa._vec, pb._vec),
2781 pvec._hamming(pa._vec, pb._vec),
2782 pa.distance(pb),
2782 pa.distance(pb),
2783 rel,
2783 rel,
2784 )
2784 )
2785 )
2785 )
2786
2786
2787
2787
2788 @command(
2788 @command(
2789 b'debugrebuilddirstate|debugrebuildstate',
2789 b'debugrebuilddirstate|debugrebuildstate',
2790 [
2790 [
2791 (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
2791 (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
2792 (
2792 (
2793 b'',
2793 b'',
2794 b'minimal',
2794 b'minimal',
2795 None,
2795 None,
2796 _(
2796 _(
2797 b'only rebuild files that are inconsistent with '
2797 b'only rebuild files that are inconsistent with '
2798 b'the working copy parent'
2798 b'the working copy parent'
2799 ),
2799 ),
2800 ),
2800 ),
2801 ],
2801 ],
2802 _(b'[-r REV]'),
2802 _(b'[-r REV]'),
2803 )
2803 )
2804 def debugrebuilddirstate(ui, repo, rev, **opts):
2804 def debugrebuilddirstate(ui, repo, rev, **opts):
2805     """rebuild the dirstate as it would look for the given revision
2805     """rebuild the dirstate as it would look for the given revision
2806
2806
2807 If no revision is specified the first current parent will be used.
2807 If no revision is specified the first current parent will be used.
2808
2808
2809 The dirstate will be set to the files of the given revision.
2809 The dirstate will be set to the files of the given revision.
2810 The actual working directory content or existing dirstate
2810 The actual working directory content or existing dirstate
2811 information such as adds or removes is not considered.
2811 information such as adds or removes is not considered.
2812
2812
2813 ``minimal`` will only rebuild the dirstate status for files that claim to be
2813 ``minimal`` will only rebuild the dirstate status for files that claim to be
2814 tracked but are not in the parent manifest, or that exist in the parent
2814 tracked but are not in the parent manifest, or that exist in the parent
2815 manifest but are not in the dirstate. It will not change adds, removes, or
2815 manifest but are not in the dirstate. It will not change adds, removes, or
2816 modified files that are in the working copy parent.
2816 modified files that are in the working copy parent.
2817
2817
2818 One use of this command is to make the next :hg:`status` invocation
2818 One use of this command is to make the next :hg:`status` invocation
2819 check the actual file content.
2819 check the actual file content.
2820 """
2820 """
2821 ctx = scmutil.revsingle(repo, rev)
2821 ctx = scmutil.revsingle(repo, rev)
2822 with repo.wlock():
2822 with repo.wlock():
2823 dirstate = repo.dirstate
2823 dirstate = repo.dirstate
2824 changedfiles = None
2824 changedfiles = None
2825 # See command doc for what minimal does.
2825 # See command doc for what minimal does.
2826 if opts.get('minimal'):
2826 if opts.get('minimal'):
2827 manifestfiles = set(ctx.manifest().keys())
2827 manifestfiles = set(ctx.manifest().keys())
2828 dirstatefiles = set(dirstate)
2828 dirstatefiles = set(dirstate)
2829 manifestonly = manifestfiles - dirstatefiles
2829 manifestonly = manifestfiles - dirstatefiles
2830 dsonly = dirstatefiles - manifestfiles
2830 dsonly = dirstatefiles - manifestfiles
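            # of the dirstate-only files, keep those not marked as added:
            # they claim to be tracked but are missing from the parent
            # manifest, so their status needs to be rebuilt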
2831 dsnotadded = {f for f in dsonly if dirstate[f] != b'a'}
2831 dsnotadded = {f for f in dsonly if dirstate[f] != b'a'}
2832 changedfiles = manifestonly | dsnotadded
2832 changedfiles = manifestonly | dsnotadded
2833
2833
2834 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
2834 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
2835
2835
2836
2836
2837 @command(b'debugrebuildfncache', [], b'')
2837 @command(b'debugrebuildfncache', [], b'')
2838 def debugrebuildfncache(ui, repo):
2838 def debugrebuildfncache(ui, repo):
2839 """rebuild the fncache file"""
2839 """rebuild the fncache file"""
2840 repair.rebuildfncache(ui, repo)
2840 repair.rebuildfncache(ui, repo)
2841
2841
2842
2842
2843 @command(
2843 @command(
2844 b'debugrename',
2844 b'debugrename',
2845 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
2845 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
2846 _(b'[-r REV] [FILE]...'),
2846 _(b'[-r REV] [FILE]...'),
2847 )
2847 )
2848 def debugrename(ui, repo, *pats, **opts):
2848 def debugrename(ui, repo, *pats, **opts):
2849 """dump rename information"""
2849 """dump rename information"""
2850
2850
2851 opts = pycompat.byteskwargs(opts)
2851 opts = pycompat.byteskwargs(opts)
2852 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
2852 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
2853 m = scmutil.match(ctx, pats, opts)
2853 m = scmutil.match(ctx, pats, opts)
2854 for abs in ctx.walk(m):
2854 for abs in ctx.walk(m):
2855 fctx = ctx[abs]
2855 fctx = ctx[abs]
2856 o = fctx.filelog().renamed(fctx.filenode())
2856 o = fctx.filelog().renamed(fctx.filenode())
2857 rel = repo.pathto(abs)
2857 rel = repo.pathto(abs)
2858 if o:
2858 if o:
2859 ui.write(_(b"%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
2859 ui.write(_(b"%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
2860 else:
2860 else:
2861 ui.write(_(b"%s not renamed\n") % rel)
2861 ui.write(_(b"%s not renamed\n") % rel)
2862
2862
2863
2863
2864 @command(b'debugrequires|debugrequirements', [], b'')
2864 @command(b'debugrequires|debugrequirements', [], b'')
2865 def debugrequirements(ui, repo):
2865 def debugrequirements(ui, repo):
2866     """print the current repo requirements"""
2866     """print the current repo requirements"""
2867 for r in sorted(repo.requirements):
2867 for r in sorted(repo.requirements):
2868 ui.write(b"%s\n" % r)
2868 ui.write(b"%s\n" % r)
2869
2869
2870
2870
2871 @command(
2871 @command(
2872 b'debugrevlog',
2872 b'debugrevlog',
2873 cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
2873 cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
2874 _(b'-c|-m|FILE'),
2874 _(b'-c|-m|FILE'),
2875 optionalrepo=True,
2875 optionalrepo=True,
2876 )
2876 )
2877 def debugrevlog(ui, repo, file_=None, **opts):
2877 def debugrevlog(ui, repo, file_=None, **opts):
2878 """show data and statistics about a revlog"""
2878 """show data and statistics about a revlog"""
2879 opts = pycompat.byteskwargs(opts)
2879 opts = pycompat.byteskwargs(opts)
2880 r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)
2880 r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)
2881
2881
2882 if opts.get(b"dump"):
2882 if opts.get(b"dump"):
2883 numrevs = len(r)
2883 numrevs = len(r)
2884 ui.write(
2884 ui.write(
2885 (
2885 (
2886 b"# rev p1rev p2rev start end deltastart base p1 p2"
2886 b"# rev p1rev p2rev start end deltastart base p1 p2"
2887 b" rawsize totalsize compression heads chainlen\n"
2887 b" rawsize totalsize compression heads chainlen\n"
2888 )
2888 )
2889 )
2889 )
2890 ts = 0
2890 ts = 0
2891 heads = set()
2891 heads = set()
2892
2892
2893 for rev in pycompat.xrange(numrevs):
2893 for rev in pycompat.xrange(numrevs):
2894 dbase = r.deltaparent(rev)
2894 dbase = r.deltaparent(rev)
2895 if dbase == -1:
2895 if dbase == -1:
2896 dbase = rev
2896 dbase = rev
2897 cbase = r.chainbase(rev)
2897 cbase = r.chainbase(rev)
2898 clen = r.chainlen(rev)
2898 clen = r.chainlen(rev)
2899 p1, p2 = r.parentrevs(rev)
2899 p1, p2 = r.parentrevs(rev)
2900 rs = r.rawsize(rev)
2900 rs = r.rawsize(rev)
2901 ts = ts + rs
2901 ts = ts + rs
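            # a revision counts as a head until it is later seen as a parent
            # of some newer revision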
2902 heads -= set(r.parentrevs(rev))
2902 heads -= set(r.parentrevs(rev))
2903 heads.add(rev)
2903 heads.add(rev)
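            # running compression ratio: cumulative uncompressed size so far
            # over the end offset of this revision's data in the revlog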
2904 try:
2904 try:
2905 compression = ts / r.end(rev)
2905 compression = ts / r.end(rev)
2906 except ZeroDivisionError:
2906 except ZeroDivisionError:
2907 compression = 0
2907 compression = 0
2908 ui.write(
2908 ui.write(
2909 b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
2909 b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
2910 b"%11d %5d %8d\n"
2910 b"%11d %5d %8d\n"
2911 % (
2911 % (
2912 rev,
2912 rev,
2913 p1,
2913 p1,
2914 p2,
2914 p2,
2915 r.start(rev),
2915 r.start(rev),
2916 r.end(rev),
2916 r.end(rev),
2917 r.start(dbase),
2917 r.start(dbase),
2918 r.start(cbase),
2918 r.start(cbase),
2919 r.start(p1),
2919 r.start(p1),
2920 r.start(p2),
2920 r.start(p2),
2921 rs,
2921 rs,
2922 ts,
2922 ts,
2923 compression,
2923 compression,
2924 len(heads),
2924 len(heads),
2925 clen,
2925 clen,
2926 )
2926 )
2927 )
2927 )
2928 return 0
2928 return 0
2929
2929
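    # the revlog version field packs the format number into the low 16 bits
    # and feature flags (inline data, generaldelta, ...) into the higher bits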
2930 v = r.version
2930 v = r.version
2931 format = v & 0xFFFF
2931 format = v & 0xFFFF
2932 flags = []
2932 flags = []
2933 gdelta = False
2933 gdelta = False
2934 if v & revlog.FLAG_INLINE_DATA:
2934 if v & revlog.FLAG_INLINE_DATA:
2935 flags.append(b'inline')
2935 flags.append(b'inline')
2936 if v & revlog.FLAG_GENERALDELTA:
2936 if v & revlog.FLAG_GENERALDELTA:
2937 gdelta = True
2937 gdelta = True
2938 flags.append(b'generaldelta')
2938 flags.append(b'generaldelta')
2939 if not flags:
2939 if not flags:
2940 flags = [b'(none)']
2940 flags = [b'(none)']
2941
2941
2942 ### tracks merge vs single parent
2942 ### tracks merge vs single parent
2943 nummerges = 0
2943 nummerges = 0
2944
2944
2945     ### tracks the ways the deltas are built
2945     ### tracks the ways the deltas are built
2946 # nodelta
2946 # nodelta
2947 numempty = 0
2947 numempty = 0
2948 numemptytext = 0
2948 numemptytext = 0
2949 numemptydelta = 0
2949 numemptydelta = 0
2950 # full file content
2950 # full file content
2951 numfull = 0
2951 numfull = 0
2952 # intermediate snapshot against a prior snapshot
2952 # intermediate snapshot against a prior snapshot
2953 numsemi = 0
2953 numsemi = 0
2954 # snapshot count per depth
2954 # snapshot count per depth
2955 numsnapdepth = collections.defaultdict(lambda: 0)
2955 numsnapdepth = collections.defaultdict(lambda: 0)
2956 # delta against previous revision
2956 # delta against previous revision
2957 numprev = 0
2957 numprev = 0
2958 # delta against first or second parent (not prev)
2958 # delta against first or second parent (not prev)
2959 nump1 = 0
2959 nump1 = 0
2960 nump2 = 0
2960 nump2 = 0
2961 # delta against neither prev nor parents
2961 # delta against neither prev nor parents
2962 numother = 0
2962 numother = 0
2963 # delta against prev that are also first or second parent
2963 # delta against prev that are also first or second parent
2964 # (details of `numprev`)
2964 # (details of `numprev`)
2965 nump1prev = 0
2965 nump1prev = 0
2966 nump2prev = 0
2966 nump2prev = 0
2967
2967
2968 # data about delta chain of each revs
2968 # data about delta chain of each revs
2969 chainlengths = []
2969 chainlengths = []
2970 chainbases = []
2970 chainbases = []
2971 chainspans = []
2971 chainspans = []
2972
2972
2973 # data about each revision
2973 # data about each revision
2974 datasize = [None, 0, 0]
2974 datasize = [None, 0, 0]
2975 fullsize = [None, 0, 0]
2975 fullsize = [None, 0, 0]
2976 semisize = [None, 0, 0]
2976 semisize = [None, 0, 0]
2977 # snapshot count per depth
2977 # snapshot count per depth
2978 snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
2978 snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
2979 deltasize = [None, 0, 0]
2979 deltasize = [None, 0, 0]
2980 chunktypecounts = {}
2980 chunktypecounts = {}
2981 chunktypesizes = {}
2981 chunktypesizes = {}
2982
2982
2983 def addsize(size, l):
2983 def addsize(size, l):
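        # l is a [min, max, total] accumulator updated in place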
2984 if l[0] is None or size < l[0]:
2984 if l[0] is None or size < l[0]:
2985 l[0] = size
2985 l[0] = size
2986 if size > l[1]:
2986 if size > l[1]:
2987 l[1] = size
2987 l[1] = size
2988 l[2] += size
2988 l[2] += size
2989
2989
2990 numrevs = len(r)
2990 numrevs = len(r)
2991 for rev in pycompat.xrange(numrevs):
2991 for rev in pycompat.xrange(numrevs):
2992 p1, p2 = r.parentrevs(rev)
2992 p1, p2 = r.parentrevs(rev)
2993 delta = r.deltaparent(rev)
2993 delta = r.deltaparent(rev)
2994 if format > 0:
2994 if format > 0:
2995 addsize(r.rawsize(rev), datasize)
2995 addsize(r.rawsize(rev), datasize)
2996 if p2 != nullrev:
2996 if p2 != nullrev:
2997 nummerges += 1
2997 nummerges += 1
2998 size = r.length(rev)
2998 size = r.length(rev)
2999 if delta == nullrev:
2999 if delta == nullrev:
3000 chainlengths.append(0)
3000 chainlengths.append(0)
3001 chainbases.append(r.start(rev))
3001 chainbases.append(r.start(rev))
3002 chainspans.append(size)
3002 chainspans.append(size)
3003 if size == 0:
3003 if size == 0:
3004 numempty += 1
3004 numempty += 1
3005 numemptytext += 1
3005 numemptytext += 1
3006 else:
3006 else:
3007 numfull += 1
3007 numfull += 1
3008 numsnapdepth[0] += 1
3008 numsnapdepth[0] += 1
3009 addsize(size, fullsize)
3009 addsize(size, fullsize)
3010 addsize(size, snapsizedepth[0])
3010 addsize(size, snapsizedepth[0])
3011 else:
3011 else:
3012 chainlengths.append(chainlengths[delta] + 1)
3012 chainlengths.append(chainlengths[delta] + 1)
3013 baseaddr = chainbases[delta]
3013 baseaddr = chainbases[delta]
3014 revaddr = r.start(rev)
3014 revaddr = r.start(rev)
3015 chainbases.append(baseaddr)
3015 chainbases.append(baseaddr)
3016 chainspans.append((revaddr - baseaddr) + size)
3016 chainspans.append((revaddr - baseaddr) + size)
3017 if size == 0:
3017 if size == 0:
3018 numempty += 1
3018 numempty += 1
3019 numemptydelta += 1
3019 numemptydelta += 1
3020 elif r.issnapshot(rev):
3020 elif r.issnapshot(rev):
3021 addsize(size, semisize)
3021 addsize(size, semisize)
3022 numsemi += 1
3022 numsemi += 1
3023 depth = r.snapshotdepth(rev)
3023 depth = r.snapshotdepth(rev)
3024 numsnapdepth[depth] += 1
3024 numsnapdepth[depth] += 1
3025 addsize(size, snapsizedepth[depth])
3025 addsize(size, snapsizedepth[depth])
3026 else:
3026 else:
3027 addsize(size, deltasize)
3027 addsize(size, deltasize)
3028 if delta == rev - 1:
3028 if delta == rev - 1:
3029 numprev += 1
3029 numprev += 1
3030 if delta == p1:
3030 if delta == p1:
3031 nump1prev += 1
3031 nump1prev += 1
3032 elif delta == p2:
3032 elif delta == p2:
3033 nump2prev += 1
3033 nump2prev += 1
3034 elif delta == p1:
3034 elif delta == p1:
3035 nump1 += 1
3035 nump1 += 1
3036 elif delta == p2:
3036 elif delta == p2:
3037 nump2 += 1
3037 nump2 += 1
3038 elif delta != nullrev:
3038 elif delta != nullrev:
3039 numother += 1
3039 numother += 1
3040
3040
3041 # Obtain data on the raw chunks in the revlog.
3041 # Obtain data on the raw chunks in the revlog.
3042 if util.safehasattr(r, b'_getsegmentforrevs'):
3042 if util.safehasattr(r, b'_getsegmentforrevs'):
3043 segment = r._getsegmentforrevs(rev, rev)[1]
3043 segment = r._getsegmentforrevs(rev, rev)[1]
3044 else:
3044 else:
3045 segment = r._revlog._getsegmentforrevs(rev, rev)[1]
3045 segment = r._revlog._getsegmentforrevs(rev, rev)[1]
3046 if segment:
3046 if segment:
3047 chunktype = bytes(segment[0:1])
3047 chunktype = bytes(segment[0:1])
3048 else:
3048 else:
3049 chunktype = b'empty'
3049 chunktype = b'empty'
3050
3050
3051 if chunktype not in chunktypecounts:
3051 if chunktype not in chunktypecounts:
3052 chunktypecounts[chunktype] = 0
3052 chunktypecounts[chunktype] = 0
3053 chunktypesizes[chunktype] = 0
3053 chunktypesizes[chunktype] = 0
3054
3054
3055 chunktypecounts[chunktype] += 1
3055 chunktypecounts[chunktype] += 1
3056 chunktypesizes[chunktype] += size
3056 chunktypesizes[chunktype] += size
3057
3057
3058 # Adjust size min value for empty cases
3058 # Adjust size min value for empty cases
3059 for size in (datasize, fullsize, semisize, deltasize):
3059 for size in (datasize, fullsize, semisize, deltasize):
3060 if size[0] is None:
3060 if size[0] is None:
3061 size[0] = 0
3061 size[0] = 0
3062
3062
3063 numdeltas = numrevs - numfull - numempty - numsemi
3063 numdeltas = numrevs - numfull - numempty - numsemi
3064 numoprev = numprev - nump1prev - nump2prev
3064 numoprev = numprev - nump1prev - nump2prev
3065 totalrawsize = datasize[2]
3065 totalrawsize = datasize[2]
3066 datasize[2] /= numrevs
3066 datasize[2] /= numrevs
3067 fulltotal = fullsize[2]
3067 fulltotal = fullsize[2]
3068 if numfull == 0:
3068 if numfull == 0:
3069 fullsize[2] = 0
3069 fullsize[2] = 0
3070 else:
3070 else:
3071 fullsize[2] /= numfull
3071 fullsize[2] /= numfull
3072 semitotal = semisize[2]
3072 semitotal = semisize[2]
3073 snaptotal = {}
3073 snaptotal = {}
3074 if numsemi > 0:
3074 if numsemi > 0:
3075 semisize[2] /= numsemi
3075 semisize[2] /= numsemi
3076 for depth in snapsizedepth:
3076 for depth in snapsizedepth:
3077 snaptotal[depth] = snapsizedepth[depth][2]
3077 snaptotal[depth] = snapsizedepth[depth][2]
3078 snapsizedepth[depth][2] /= numsnapdepth[depth]
3078 snapsizedepth[depth][2] /= numsnapdepth[depth]
3079
3079
3080 deltatotal = deltasize[2]
3080 deltatotal = deltasize[2]
3081 if numdeltas > 0:
3081 if numdeltas > 0:
3082 deltasize[2] /= numdeltas
3082 deltasize[2] /= numdeltas
3083 totalsize = fulltotal + semitotal + deltatotal
3083 totalsize = fulltotal + semitotal + deltatotal
3084 avgchainlen = sum(chainlengths) / numrevs
3084 avgchainlen = sum(chainlengths) / numrevs
3085 maxchainlen = max(chainlengths)
3085 maxchainlen = max(chainlengths)
3086 maxchainspan = max(chainspans)
3086 maxchainspan = max(chainspans)
3087 compratio = 1
3087 compratio = 1
3088 if totalsize:
3088 if totalsize:
3089 compratio = totalrawsize / totalsize
3089 compratio = totalrawsize / totalsize
3090
3090
3091 basedfmtstr = b'%%%dd\n'
3091 basedfmtstr = b'%%%dd\n'
3092 basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'
3092 basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'
3093
3093
3094 def dfmtstr(max):
3094 def dfmtstr(max):
3095 return basedfmtstr % len(str(max))
3095 return basedfmtstr % len(str(max))
3096
3096
3097 def pcfmtstr(max, padding=0):
3097 def pcfmtstr(max, padding=0):
3098 return basepcfmtstr % (len(str(max)), b' ' * padding)
3098 return basepcfmtstr % (len(str(max)), b' ' * padding)
3099
3099
3100 def pcfmt(value, total):
3100 def pcfmt(value, total):
3101 if total:
3101 if total:
3102 return (value, 100 * float(value) / total)
3102 return (value, 100 * float(value) / total)
3103 else:
3103 else:
3104 return value, 100.0
3104 return value, 100.0
3105
3105
3106 ui.writenoi18n(b'format : %d\n' % format)
3106 ui.writenoi18n(b'format : %d\n' % format)
3107 ui.writenoi18n(b'flags : %s\n' % b', '.join(flags))
3107 ui.writenoi18n(b'flags : %s\n' % b', '.join(flags))
3108
3108
3109 ui.write(b'\n')
3109 ui.write(b'\n')
3110 fmt = pcfmtstr(totalsize)
3110 fmt = pcfmtstr(totalsize)
3111 fmt2 = dfmtstr(totalsize)
3111 fmt2 = dfmtstr(totalsize)
3112 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
3112 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
3113 ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs))
3113 ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs))
3114 ui.writenoi18n(
3114 ui.writenoi18n(
3115 b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
3115 b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
3116 )
3116 )
3117 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
3117 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
3118 ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs))
3118 ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs))
3119 ui.writenoi18n(
3119 ui.writenoi18n(
3120 b' text : '
3120 b' text : '
3121 + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
3121 + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
3122 )
3122 )
3123 ui.writenoi18n(
3123 ui.writenoi18n(
3124 b' delta : '
3124 b' delta : '
3125 + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
3125 + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
3126 )
3126 )
3127 ui.writenoi18n(
3127 ui.writenoi18n(
3128 b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs)
3128 b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs)
3129 )
3129 )
3130 for depth in sorted(numsnapdepth):
3130 for depth in sorted(numsnapdepth):
3131 ui.write(
3131 ui.write(
3132 (b' lvl-%-3d : ' % depth)
3132 (b' lvl-%-3d : ' % depth)
3133 + fmt % pcfmt(numsnapdepth[depth], numrevs)
3133 + fmt % pcfmt(numsnapdepth[depth], numrevs)
3134 )
3134 )
3135 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
3135 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
3136 ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
3136 ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
3137 ui.writenoi18n(
3137 ui.writenoi18n(
3138 b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
3138 b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
3139 )
3139 )
3140 for depth in sorted(numsnapdepth):
3140 for depth in sorted(numsnapdepth):
3141 ui.write(
3141 ui.write(
3142 (b' lvl-%-3d : ' % depth)
3142 (b' lvl-%-3d : ' % depth)
3143 + fmt % pcfmt(snaptotal[depth], totalsize)
3143 + fmt % pcfmt(snaptotal[depth], totalsize)
3144 )
3144 )
3145 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize))
3145 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize))
3146
3146
3147 def fmtchunktype(chunktype):
3147 def fmtchunktype(chunktype):
3148 if chunktype == b'empty':
3148 if chunktype == b'empty':
3149 return b' %s : ' % chunktype
3149 return b' %s : ' % chunktype
3150 elif chunktype in pycompat.bytestr(string.ascii_letters):
3150 elif chunktype in pycompat.bytestr(string.ascii_letters):
3151 return b' 0x%s (%s) : ' % (hex(chunktype), chunktype)
3151 return b' 0x%s (%s) : ' % (hex(chunktype), chunktype)
3152 else:
3152 else:
3153 return b' 0x%s : ' % hex(chunktype)
3153 return b' 0x%s : ' % hex(chunktype)
3154
3154
3155 ui.write(b'\n')
3155 ui.write(b'\n')
3156 ui.writenoi18n(b'chunks : ' + fmt2 % numrevs)
3156 ui.writenoi18n(b'chunks : ' + fmt2 % numrevs)
3157 for chunktype in sorted(chunktypecounts):
3157 for chunktype in sorted(chunktypecounts):
3158 ui.write(fmtchunktype(chunktype))
3158 ui.write(fmtchunktype(chunktype))
3159 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
3159 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
3160 ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize)
3160 ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize)
3161 for chunktype in sorted(chunktypecounts):
3161 for chunktype in sorted(chunktypecounts):
3162 ui.write(fmtchunktype(chunktype))
3162 ui.write(fmtchunktype(chunktype))
3163 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
3163 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
3164
3164
3165 ui.write(b'\n')
3165 ui.write(b'\n')
3166 fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
3166 fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
3167 ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen)
3167 ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen)
3168 ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen)
3168 ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen)
3169 ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan)
3169 ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan)
3170 ui.writenoi18n(b'compression ratio : ' + fmt % compratio)
3170 ui.writenoi18n(b'compression ratio : ' + fmt % compratio)
3171
3171
3172 if format > 0:
3172 if format > 0:
3173 ui.write(b'\n')
3173 ui.write(b'\n')
3174 ui.writenoi18n(
3174 ui.writenoi18n(
3175 b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
3175 b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
3176 % tuple(datasize)
3176 % tuple(datasize)
3177 )
3177 )
3178 ui.writenoi18n(
3178 ui.writenoi18n(
3179 b'full revision size (min/max/avg) : %d / %d / %d\n'
3179 b'full revision size (min/max/avg) : %d / %d / %d\n'
3180 % tuple(fullsize)
3180 % tuple(fullsize)
3181 )
3181 )
3182 ui.writenoi18n(
3182 ui.writenoi18n(
3183 b'inter-snapshot size (min/max/avg) : %d / %d / %d\n'
3183 b'inter-snapshot size (min/max/avg) : %d / %d / %d\n'
3184 % tuple(semisize)
3184 % tuple(semisize)
3185 )
3185 )
3186 for depth in sorted(snapsizedepth):
3186 for depth in sorted(snapsizedepth):
3187 if depth == 0:
3187 if depth == 0:
3188 continue
3188 continue
3189 ui.writenoi18n(
3189 ui.writenoi18n(
3190 b' level-%-3d (min/max/avg) : %d / %d / %d\n'
3190 b' level-%-3d (min/max/avg) : %d / %d / %d\n'
3191 % ((depth,) + tuple(snapsizedepth[depth]))
3191 % ((depth,) + tuple(snapsizedepth[depth]))
3192 )
3192 )
3193 ui.writenoi18n(
3193 ui.writenoi18n(
3194 b'delta size (min/max/avg) : %d / %d / %d\n'
3194 b'delta size (min/max/avg) : %d / %d / %d\n'
3195 % tuple(deltasize)
3195 % tuple(deltasize)
3196 )
3196 )
3197
3197
3198 if numdeltas > 0:
3198 if numdeltas > 0:
3199 ui.write(b'\n')
3199 ui.write(b'\n')
3200 fmt = pcfmtstr(numdeltas)
3200 fmt = pcfmtstr(numdeltas)
3201 fmt2 = pcfmtstr(numdeltas, 4)
3201 fmt2 = pcfmtstr(numdeltas, 4)
3202 ui.writenoi18n(
3202 ui.writenoi18n(
3203 b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)
3203 b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)
3204 )
3204 )
3205 if numprev > 0:
3205 if numprev > 0:
3206 ui.writenoi18n(
3206 ui.writenoi18n(
3207 b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev)
3207 b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev)
3208 )
3208 )
3209 ui.writenoi18n(
3209 ui.writenoi18n(
3210 b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev)
3210 b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev)
3211 )
3211 )
3212 ui.writenoi18n(
3212 ui.writenoi18n(
3213 b' other : ' + fmt2 % pcfmt(numoprev, numprev)
3213 b' other : ' + fmt2 % pcfmt(numoprev, numprev)
3214 )
3214 )
3215 if gdelta:
3215 if gdelta:
3216 ui.writenoi18n(
3216 ui.writenoi18n(
3217 b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)
3217 b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)
3218 )
3218 )
3219 ui.writenoi18n(
3219 ui.writenoi18n(
3220 b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)
3220 b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)
3221 )
3221 )
3222 ui.writenoi18n(
3222 ui.writenoi18n(
3223 b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
3223 b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
3224 )
3224 )
3225
3225
3226
3226
3227 @command(
3227 @command(
3228 b'debugrevlogindex',
3228 b'debugrevlogindex',
3229 cmdutil.debugrevlogopts
3229 cmdutil.debugrevlogopts
3230 + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
3230 + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
3231 _(b'[-f FORMAT] -c|-m|FILE'),
3231 _(b'[-f FORMAT] -c|-m|FILE'),
3232 optionalrepo=True,
3232 optionalrepo=True,
3233 )
3233 )
3234 def debugrevlogindex(ui, repo, file_=None, **opts):
3234 def debugrevlogindex(ui, repo, file_=None, **opts):
3235 """dump the contents of a revlog index"""
3235 """dump the contents of a revlog index"""
3236 opts = pycompat.byteskwargs(opts)
3236 opts = pycompat.byteskwargs(opts)
3237 r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
3237 r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
3238 format = opts.get(b'format', 0)
3238 format = opts.get(b'format', 0)
3239 if format not in (0, 1):
3239 if format not in (0, 1):
3240 raise error.Abort(_(b"unknown format %d") % format)
3240 raise error.Abort(_(b"unknown format %d") % format)
3241
3241
3242 if ui.debugflag:
3242 if ui.debugflag:
3243 shortfn = hex
3243 shortfn = hex
3244 else:
3244 else:
3245 shortfn = short
3245 shortfn = short
3246
3246
3247 # There might not be anything in r, so have a sane default
3247 # There might not be anything in r, so have a sane default
3248 idlen = 12
3248 idlen = 12
3249 for i in r:
3249 for i in r:
3250 idlen = len(shortfn(r.node(i)))
3250 idlen = len(shortfn(r.node(i)))
3251 break
3251 break
3252
3252
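    # format 0 is the terse historical listing (offset/length/linkrev and
    # nodes); format 1 additionally shows per-revision flags and raw sizes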
3253 if format == 0:
3253 if format == 0:
3254 if ui.verbose:
3254 if ui.verbose:
3255 ui.writenoi18n(
3255 ui.writenoi18n(
3256 b" rev offset length linkrev %s %s p2\n"
3256 b" rev offset length linkrev %s %s p2\n"
3257 % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
3257 % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
3258 )
3258 )
3259 else:
3259 else:
3260 ui.writenoi18n(
3260 ui.writenoi18n(
3261 b" rev linkrev %s %s p2\n"
3261 b" rev linkrev %s %s p2\n"
3262 % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
3262 % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
3263 )
3263 )
3264 elif format == 1:
3264 elif format == 1:
3265 if ui.verbose:
3265 if ui.verbose:
3266 ui.writenoi18n(
3266 ui.writenoi18n(
3267 (
3267 (
3268 b" rev flag offset length size link p1"
3268 b" rev flag offset length size link p1"
3269 b" p2 %s\n"
3269 b" p2 %s\n"
3270 )
3270 )
3271 % b"nodeid".rjust(idlen)
3271 % b"nodeid".rjust(idlen)
3272 )
3272 )
3273 else:
3273 else:
3274 ui.writenoi18n(
3274 ui.writenoi18n(
3275 b" rev flag size link p1 p2 %s\n"
3275 b" rev flag size link p1 p2 %s\n"
3276 % b"nodeid".rjust(idlen)
3276 % b"nodeid".rjust(idlen)
3277 )
3277 )
3278
3278
3279 for i in r:
3279 for i in r:
3280 node = r.node(i)
3280 node = r.node(i)
3281 if format == 0:
3281 if format == 0:
3282 try:
3282 try:
3283 pp = r.parents(node)
3283 pp = r.parents(node)
3284 except Exception:
3284 except Exception:
3285 pp = [nullid, nullid]
3285 pp = [nullid, nullid]
3286 if ui.verbose:
3286 if ui.verbose:
3287 ui.write(
3287 ui.write(
3288 b"% 6d % 9d % 7d % 7d %s %s %s\n"
3288 b"% 6d % 9d % 7d % 7d %s %s %s\n"
3289 % (
3289 % (
3290 i,
3290 i,
3291 r.start(i),
3291 r.start(i),
3292 r.length(i),
3292 r.length(i),
3293 r.linkrev(i),
3293 r.linkrev(i),
3294 shortfn(node),
3294 shortfn(node),
3295 shortfn(pp[0]),
3295 shortfn(pp[0]),
3296 shortfn(pp[1]),
3296 shortfn(pp[1]),
3297 )
3297 )
3298 )
3298 )
3299 else:
3299 else:
3300 ui.write(
3300 ui.write(
3301 b"% 6d % 7d %s %s %s\n"
3301 b"% 6d % 7d %s %s %s\n"
3302 % (
3302 % (
3303 i,
3303 i,
3304 r.linkrev(i),
3304 r.linkrev(i),
3305 shortfn(node),
3305 shortfn(node),
3306 shortfn(pp[0]),
3306 shortfn(pp[0]),
3307 shortfn(pp[1]),
3307 shortfn(pp[1]),
3308 )
3308 )
3309 )
3309 )
3310 elif format == 1:
3310 elif format == 1:
3311 pr = r.parentrevs(i)
3311 pr = r.parentrevs(i)
3312 if ui.verbose:
3312 if ui.verbose:
3313 ui.write(
3313 ui.write(
3314 b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
3314 b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
3315 % (
3315 % (
3316 i,
3316 i,
3317 r.flags(i),
3317 r.flags(i),
3318 r.start(i),
3318 r.start(i),
3319 r.length(i),
3319 r.length(i),
3320 r.rawsize(i),
3320 r.rawsize(i),
3321 r.linkrev(i),
3321 r.linkrev(i),
3322 pr[0],
3322 pr[0],
3323 pr[1],
3323 pr[1],
3324 shortfn(node),
3324 shortfn(node),
3325 )
3325 )
3326 )
3326 )
3327 else:
3327 else:
3328 ui.write(
3328 ui.write(
3329 b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
3329 b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
3330 % (
3330 % (
3331 i,
3331 i,
3332 r.flags(i),
3332 r.flags(i),
3333 r.rawsize(i),
3333 r.rawsize(i),
3334 r.linkrev(i),
3334 r.linkrev(i),
3335 pr[0],
3335 pr[0],
3336 pr[1],
3336 pr[1],
3337 shortfn(node),
3337 shortfn(node),
3338 )
3338 )
3339 )
3339 )
3340
3340
3341
3341
3342 @command(
3342 @command(
3343 b'debugrevspec',
3343 b'debugrevspec',
3344 [
3344 [
3345 (
3345 (
3346 b'',
3346 b'',
3347 b'optimize',
3347 b'optimize',
3348 None,
3348 None,
3349 _(b'print parsed tree after optimizing (DEPRECATED)'),
3349 _(b'print parsed tree after optimizing (DEPRECATED)'),
3350 ),
3350 ),
3351 (
3351 (
3352 b'',
3352 b'',
3353 b'show-revs',
3353 b'show-revs',
3354 True,
3354 True,
3355 _(b'print list of result revisions (default)'),
3355 _(b'print list of result revisions (default)'),
3356 ),
3356 ),
3357 (
3357 (
3358 b's',
3358 b's',
3359 b'show-set',
3359 b'show-set',
3360 None,
3360 None,
3361 _(b'print internal representation of result set'),
3361 _(b'print internal representation of result set'),
3362 ),
3362 ),
3363 (
3363 (
3364 b'p',
3364 b'p',
3365 b'show-stage',
3365 b'show-stage',
3366 [],
3366 [],
3367 _(b'print parsed tree at the given stage'),
3367 _(b'print parsed tree at the given stage'),
3368 _(b'NAME'),
3368 _(b'NAME'),
3369 ),
3369 ),
3370 (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
3370 (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
3371 (b'', b'verify-optimized', False, _(b'verify optimized result')),
3371 (b'', b'verify-optimized', False, _(b'verify optimized result')),
3372 ],
3372 ],
3373 b'REVSPEC',
3373 b'REVSPEC',
3374 )
3374 )
3375 def debugrevspec(ui, repo, expr, **opts):
3375 def debugrevspec(ui, repo, expr, **opts):
3376 """parse and apply a revision specification
3376 """parse and apply a revision specification
3377
3377
3378 Use -p/--show-stage option to print the parsed tree at the given stages.
3378 Use -p/--show-stage option to print the parsed tree at the given stages.
3379 Use -p all to print tree at every stage.
3379 Use -p all to print tree at every stage.
3380
3380
3381 Use --no-show-revs option with -s or -p to print only the set
3381 Use --no-show-revs option with -s or -p to print only the set
3382 representation or the parsed tree respectively.
3382 representation or the parsed tree respectively.
3383
3383
3384 Use --verify-optimized to compare the optimized result with the unoptimized
3384 Use --verify-optimized to compare the optimized result with the unoptimized
3385 one. Returns 1 if the optimized result differs.
3385 one. Returns 1 if the optimized result differs.
3386 """
3386 """
3387 opts = pycompat.byteskwargs(opts)
3387 opts = pycompat.byteskwargs(opts)
3388 aliases = ui.configitems(b'revsetalias')
3388 aliases = ui.configitems(b'revsetalias')
3389 stages = [
3389 stages = [
3390 (b'parsed', lambda tree: tree),
3390 (b'parsed', lambda tree: tree),
3391 (
3391 (
3392 b'expanded',
3392 b'expanded',
3393 lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
3393 lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
3394 ),
3394 ),
3395 (b'concatenated', revsetlang.foldconcat),
3395 (b'concatenated', revsetlang.foldconcat),
3396 (b'analyzed', revsetlang.analyze),
3396 (b'analyzed', revsetlang.analyze),
3397 (b'optimized', revsetlang.optimize),
3397 (b'optimized', revsetlang.optimize),
3398 ]
3398 ]
3399 if opts[b'no_optimized']:
3399 if opts[b'no_optimized']:
3400 stages = stages[:-1]
3400 stages = stages[:-1]
3401 if opts[b'verify_optimized'] and opts[b'no_optimized']:
3401 if opts[b'verify_optimized'] and opts[b'no_optimized']:
3402 raise error.Abort(
3402 raise error.Abort(
3403 _(b'cannot use --verify-optimized with --no-optimized')
3403 _(b'cannot use --verify-optimized with --no-optimized')
3404 )
3404 )
3405 stagenames = {n for n, f in stages}
3405 stagenames = {n for n, f in stages}
3406
3406
3407 showalways = set()
3407 showalways = set()
3408 showchanged = set()
3408 showchanged = set()
3409 if ui.verbose and not opts[b'show_stage']:
3409 if ui.verbose and not opts[b'show_stage']:
3410 # show parsed tree by --verbose (deprecated)
3410 # show parsed tree by --verbose (deprecated)
3411 showalways.add(b'parsed')
3411 showalways.add(b'parsed')
3412 showchanged.update([b'expanded', b'concatenated'])
3412 showchanged.update([b'expanded', b'concatenated'])
3413 if opts[b'optimize']:
3413 if opts[b'optimize']:
3414 showalways.add(b'optimized')
3414 showalways.add(b'optimized')
3415 if opts[b'show_stage'] and opts[b'optimize']:
3415 if opts[b'show_stage'] and opts[b'optimize']:
3416 raise error.Abort(_(b'cannot use --optimize with --show-stage'))
3416 raise error.Abort(_(b'cannot use --optimize with --show-stage'))
3417 if opts[b'show_stage'] == [b'all']:
3417 if opts[b'show_stage'] == [b'all']:
3418 showalways.update(stagenames)
3418 showalways.update(stagenames)
3419 else:
3419 else:
3420 for n in opts[b'show_stage']:
3420 for n in opts[b'show_stage']:
3421 if n not in stagenames:
3421 if n not in stagenames:
3422 raise error.Abort(_(b'invalid stage name: %s') % n)
3422 raise error.Abort(_(b'invalid stage name: %s') % n)
3423 showalways.update(opts[b'show_stage'])
3423 showalways.update(opts[b'show_stage'])
3424
3424
3425 treebystage = {}
3425 treebystage = {}
3426 printedtree = None
3426 printedtree = None
3427 tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
3427 tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
3428 for n, f in stages:
3428 for n, f in stages:
3429 treebystage[n] = tree = f(tree)
3429 treebystage[n] = tree = f(tree)
3430 if n in showalways or (n in showchanged and tree != printedtree):
3430 if n in showalways or (n in showchanged and tree != printedtree):
3431 if opts[b'show_stage'] or n != b'parsed':
3431 if opts[b'show_stage'] or n != b'parsed':
3432 ui.write(b"* %s:\n" % n)
3432 ui.write(b"* %s:\n" % n)
3433 ui.write(revsetlang.prettyformat(tree), b"\n")
3433 ui.write(revsetlang.prettyformat(tree), b"\n")
3434 printedtree = tree
3434 printedtree = tree
3435
3435
3436 if opts[b'verify_optimized']:
3436 if opts[b'verify_optimized']:
3437 arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
3437 arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
3438 brevs = revset.makematcher(treebystage[b'optimized'])(repo)
3438 brevs = revset.makematcher(treebystage[b'optimized'])(repo)
3439 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3439 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3440 ui.writenoi18n(
3440 ui.writenoi18n(
3441 b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
3441 b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
3442 )
3442 )
3443 ui.writenoi18n(
3443 ui.writenoi18n(
3444 b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
3444 b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
3445 )
3445 )
3446 arevs = list(arevs)
3446 arevs = list(arevs)
3447 brevs = list(brevs)
3447 brevs = list(brevs)
3448 if arevs == brevs:
3448 if arevs == brevs:
3449 return 0
3449 return 0
3450 ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
3450 ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
3451 ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
3451 ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
3452 sm = difflib.SequenceMatcher(None, arevs, brevs)
3452 sm = difflib.SequenceMatcher(None, arevs, brevs)
3453 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
3453 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
3454 if tag in ('delete', 'replace'):
3454 if tag in ('delete', 'replace'):
3455 for c in arevs[alo:ahi]:
3455 for c in arevs[alo:ahi]:
3456 ui.write(b'-%d\n' % c, label=b'diff.deleted')
3456 ui.write(b'-%d\n' % c, label=b'diff.deleted')
3457 if tag in ('insert', 'replace'):
3457 if tag in ('insert', 'replace'):
3458 for c in brevs[blo:bhi]:
3458 for c in brevs[blo:bhi]:
3459 ui.write(b'+%d\n' % c, label=b'diff.inserted')
3459 ui.write(b'+%d\n' % c, label=b'diff.inserted')
3460 if tag == 'equal':
3460 if tag == 'equal':
3461 for c in arevs[alo:ahi]:
3461 for c in arevs[alo:ahi]:
3462 ui.write(b' %d\n' % c)
3462 ui.write(b' %d\n' % c)
3463 return 1
3463 return 1
3464
3464
3465 func = revset.makematcher(tree)
3465 func = revset.makematcher(tree)
3466 revs = func(repo)
3466 revs = func(repo)
3467 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3467 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3468 ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
3468 ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
3469 if not opts[b'show_revs']:
3469 if not opts[b'show_revs']:
3470 return
3470 return
3471 for c in revs:
3471 for c in revs:
3472 ui.write(b"%d\n" % c)
3472 ui.write(b"%d\n" % c)
3473
3473
3474
3474
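The stage table in debugrevspec above threads a single parsed tree through successive transforms, keeping a copy per stage name. The following is a minimal editorial sketch (not part of the changeset) of that pipeline in isolation; the helper name parse_through_stages is hypothetical, while the revsetlang calls are the ones already used above.

from mercurial import revsetlang

def parse_through_stages(expr, aliases, warn):
    # hypothetical helper mirroring the loop in debugrevspec: each stage
    # consumes the tree produced by the previous one and is recorded by name
    tree = revsetlang.parse(expr)
    bystage = {b'parsed': tree}
    for name, fn in [
        (b'expanded', lambda t: revsetlang.expandaliases(t, aliases, warn)),
        (b'concatenated', revsetlang.foldconcat),
        (b'analyzed', revsetlang.analyze),
        (b'optimized', revsetlang.optimize),
    ]:
        tree = fn(tree)
        bystage[name] = tree
    return bystage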
3475 @command(
3475 @command(
3476 b'debugserve',
3476 b'debugserve',
3477 [
3477 [
3478 (
3478 (
3479 b'',
3479 b'',
3480 b'sshstdio',
3480 b'sshstdio',
3481 False,
3481 False,
3482 _(b'run an SSH server bound to process handles'),
3482 _(b'run an SSH server bound to process handles'),
3483 ),
3483 ),
3484 (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
3484 (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
3485 (b'', b'logiofile', b'', _(b'file to log server I/O to')),
3485 (b'', b'logiofile', b'', _(b'file to log server I/O to')),
3486 ],
3486 ],
3487 b'',
3487 b'',
3488 )
3488 )
3489 def debugserve(ui, repo, **opts):
3489 def debugserve(ui, repo, **opts):
3490 """run a server with advanced settings
3490 """run a server with advanced settings
3491
3491
3492 This command is similar to :hg:`serve`. It exists partially as a
3492 This command is similar to :hg:`serve`. It exists partially as a
3493 workaround to the fact that ``hg serve --stdio`` must have specific
3493 workaround to the fact that ``hg serve --stdio`` must have specific
3494 arguments for security reasons.
3494 arguments for security reasons.
3495 """
3495 """
3496 opts = pycompat.byteskwargs(opts)
3496 opts = pycompat.byteskwargs(opts)
3497
3497
3498 if not opts[b'sshstdio']:
3498 if not opts[b'sshstdio']:
3499 raise error.Abort(_(b'only --sshstdio is currently supported'))
3499 raise error.Abort(_(b'only --sshstdio is currently supported'))
3500
3500
3501 logfh = None
3501 logfh = None
3502
3502
3503 if opts[b'logiofd'] and opts[b'logiofile']:
3503 if opts[b'logiofd'] and opts[b'logiofile']:
3504 raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))
3504 raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))
3505
3505
3506 if opts[b'logiofd']:
3506 if opts[b'logiofd']:
3507 # Ideally we would be line buffered. But line buffering in binary
3507 # Ideally we would be line buffered. But line buffering in binary
3508 # mode isn't supported and emits a warning in Python 3.8+. Disabling
3508 # mode isn't supported and emits a warning in Python 3.8+. Disabling
3509 # buffering could have performance impacts. But since this isn't
3509 # buffering could have performance impacts. But since this isn't
3510 # performance critical code, it should be fine.
3510 # performance critical code, it should be fine.
3511 try:
3511 try:
3512 logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 0)
3512 logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 0)
3513 except OSError as e:
3513 except OSError as e:
3514 if e.errno != errno.ESPIPE:
3514 if e.errno != errno.ESPIPE:
3515 raise
3515 raise
3516 # can't seek a pipe, so `ab` mode fails on py3
3516 # can't seek a pipe, so `ab` mode fails on py3
3517 logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 0)
3517 logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 0)
3518 elif opts[b'logiofile']:
3518 elif opts[b'logiofile']:
3519 logfh = open(opts[b'logiofile'], b'ab', 0)
3519 logfh = open(opts[b'logiofile'], b'ab', 0)
3520
3520
3521 s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
3521 s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
3522 s.serve_forever()
3522 s.serve_forever()
3523
3523
3524
3524
3525 @command(b'debugsetparents', [], _(b'REV1 [REV2]'))
3525 @command(b'debugsetparents', [], _(b'REV1 [REV2]'))
3526 def debugsetparents(ui, repo, rev1, rev2=None):
3526 def debugsetparents(ui, repo, rev1, rev2=None):
3527 """manually set the parents of the current working directory (DANGEROUS)
3527 """manually set the parents of the current working directory (DANGEROUS)
3528
3528
3529 This command is not what you are looking for and should not be used. Using
3529 This command is not what you are looking for and should not be used. Using
3530 this command will most certainly result in slight corruption of the file
3530 this command will most certainly result in slight corruption of the file
3531 level histories within your repository. DO NOT USE THIS COMMAND.
3531 level histories within your repository. DO NOT USE THIS COMMAND.
3532
3532
3533 The command updates the p1 and p2 fields in the dirstate, and does not touch
3533 The command updates the p1 and p2 fields in the dirstate, and does not touch
3534 anything else. This is useful for writing repository conversion tools, but
3534 anything else. This is useful for writing repository conversion tools, but
3535 should be used with extreme care. For example, neither the working
3535 should be used with extreme care. For example, neither the working
3536 directory nor the rest of the dirstate is updated, so file status may be
3536 directory nor the rest of the dirstate is updated, so file status may be
3537 incorrect after running this command. Only use it if you are one of the few
3537 incorrect after running this command. Only use it if you are one of the few
3538 people who deeply understand both conversion tools and file level
3538 people who deeply understand both conversion tools and file level
3539 histories. If you are reading this help, you are not one of these people
3539 histories. If you are reading this help, you are not one of these people
3540 (most of them sailed west from Mithlond anyway).
3540 (most of them sailed west from Mithlond anyway).
3541
3541
3542 So one last time DO NOT USE THIS COMMAND.
3542 So one last time DO NOT USE THIS COMMAND.
3543
3543
3544 Returns 0 on success.
3544 Returns 0 on success.
3545 """
3545 """
3546
3546
3547 node1 = scmutil.revsingle(repo, rev1).node()
3547 node1 = scmutil.revsingle(repo, rev1).node()
3548 node2 = scmutil.revsingle(repo, rev2, b'null').node()
3548 node2 = scmutil.revsingle(repo, rev2, b'null').node()
3549
3549
3550 with repo.wlock():
3550 with repo.wlock():
3551 repo.setparents(node1, node2)
3551 repo.setparents(node1, node2)
3552
3552
3553
3553
3554 @command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
3554 @command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
3555 def debugsidedata(ui, repo, file_, rev=None, **opts):
3555 def debugsidedata(ui, repo, file_, rev=None, **opts):
3556 """dump the side data for a cl/manifest/file revision
3556 """dump the side data for a cl/manifest/file revision
3557
3557
3558 Use --verbose to dump the sidedata content."""
3558 Use --verbose to dump the sidedata content."""
3559 opts = pycompat.byteskwargs(opts)
3559 opts = pycompat.byteskwargs(opts)
3560 if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
3560 if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
3561 if rev is not None:
3561 if rev is not None:
3562 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
3562 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
3563 file_, rev = None, file_
3563 file_, rev = None, file_
3564 elif rev is None:
3564 elif rev is None:
3565 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
3565 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
3566 r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
3566 r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
3567 r = getattr(r, '_revlog', r)
3567 r = getattr(r, '_revlog', r)
3568 try:
3568 try:
3569 sidedata = r.sidedata(r.lookup(rev))
3569 sidedata = r.sidedata(r.lookup(rev))
3570 except KeyError:
3570 except KeyError:
3571 raise error.Abort(_(b'invalid revision identifier %s') % rev)
3571 raise error.Abort(_(b'invalid revision identifier %s') % rev)
3572 if sidedata:
3572 if sidedata:
3573 sidedata = list(sidedata.items())
3573 sidedata = list(sidedata.items())
3574 sidedata.sort()
3574 sidedata.sort()
3575 ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
3575 ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
3576 for key, value in sidedata:
3576 for key, value in sidedata:
3577 ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
3577 ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
3578 if ui.verbose:
3578 if ui.verbose:
3579 ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
3579 ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
3580
3580
3581
3581
3582 @command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
3582 @command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
3583 def debugssl(ui, repo, source=None, **opts):
3583 def debugssl(ui, repo, source=None, **opts):
3584 """test a secure connection to a server
3584 """test a secure connection to a server
3585
3585
3586 This builds the certificate chain for the server on Windows, installing the
3586 This builds the certificate chain for the server on Windows, installing the
3587 missing intermediates and trusted root via Windows Update if necessary. It
3587 missing intermediates and trusted root via Windows Update if necessary. It
3588 does nothing on other platforms.
3588 does nothing on other platforms.
3589
3589
3590 If SOURCE is omitted, the 'default' path will be used. If a URL is given,
3590 If SOURCE is omitted, the 'default' path will be used. If a URL is given,
3591 that server is used. See :hg:`help urls` for more information.
3591 that server is used. See :hg:`help urls` for more information.
3592
3592
3593 If the update succeeds, retry the original operation. Otherwise, the cause
3593 If the update succeeds, retry the original operation. Otherwise, the cause
3594 of the SSL error is likely another issue.
3594 of the SSL error is likely another issue.
3595 """
3595 """
3596 if not pycompat.iswindows:
3596 if not pycompat.iswindows:
3597 raise error.Abort(
3597 raise error.Abort(
3598 _(b'certificate chain building is only possible on Windows')
3598 _(b'certificate chain building is only possible on Windows')
3599 )
3599 )
3600
3600
3601 if not source:
3601 if not source:
3602 if not repo:
3602 if not repo:
3603 raise error.Abort(
3603 raise error.Abort(
3604 _(
3604 _(
3605 b"there is no Mercurial repository here, and no "
3605 b"there is no Mercurial repository here, and no "
3606 b"server specified"
3606 b"server specified"
3607 )
3607 )
3608 )
3608 )
3609 source = b"default"
3609 source = b"default"
3610
3610
3611 source, branches = hg.parseurl(ui.expandpath(source))
3611 source, branches = hg.parseurl(ui.expandpath(source))
3612 url = util.url(source)
3612 url = util.url(source)
3613
3613
3614 defaultport = {b'https': 443, b'ssh': 22}
3614 defaultport = {b'https': 443, b'ssh': 22}
3615 if url.scheme in defaultport:
3615 if url.scheme in defaultport:
3616 try:
3616 try:
3617 addr = (url.host, int(url.port or defaultport[url.scheme]))
3617 addr = (url.host, int(url.port or defaultport[url.scheme]))
3618 except ValueError:
3618 except ValueError:
3619 raise error.Abort(_(b"malformed port number in URL"))
3619 raise error.Abort(_(b"malformed port number in URL"))
3620 else:
3620 else:
3621 raise error.Abort(_(b"only https and ssh connections are supported"))
3621 raise error.Abort(_(b"only https and ssh connections are supported"))
3622
3622
3623 from . import win32
3623 from . import win32
3624
3624
3625 s = ssl.wrap_socket(
3625 s = ssl.wrap_socket(
3626 socket.socket(),
3626 socket.socket(),
3627 ssl_version=ssl.PROTOCOL_TLS,
3627 ssl_version=ssl.PROTOCOL_TLS,
3628 cert_reqs=ssl.CERT_NONE,
3628 cert_reqs=ssl.CERT_NONE,
3629 ca_certs=None,
3629 ca_certs=None,
3630 )
3630 )
3631
3631
3632 try:
3632 try:
3633 s.connect(addr)
3633 s.connect(addr)
3634 cert = s.getpeercert(True)
3634 cert = s.getpeercert(True)
3635
3635
3636 ui.status(_(b'checking the certificate chain for %s\n') % url.host)
3636 ui.status(_(b'checking the certificate chain for %s\n') % url.host)
3637
3637
3638 complete = win32.checkcertificatechain(cert, build=False)
3638 complete = win32.checkcertificatechain(cert, build=False)
3639
3639
3640 if not complete:
3640 if not complete:
3641 ui.status(_(b'certificate chain is incomplete, updating... '))
3641 ui.status(_(b'certificate chain is incomplete, updating... '))
3642
3642
3643 if not win32.checkcertificatechain(cert):
3643 if not win32.checkcertificatechain(cert):
3644 ui.status(_(b'failed.\n'))
3644 ui.status(_(b'failed.\n'))
3645 else:
3645 else:
3646 ui.status(_(b'done.\n'))
3646 ui.status(_(b'done.\n'))
3647 else:
3647 else:
3648 ui.status(_(b'full certificate chain is available\n'))
3648 ui.status(_(b'full certificate chain is available\n'))
3649 finally:
3649 finally:
3650 s.close()
3650 s.close()
3651
3651
3652
3652
3653 @command(
3653 @command(
3654 b"debugbackupbundle",
3654 b"debugbackupbundle",
3655 [
3655 [
3656 (
3656 (
3657 b"",
3657 b"",
3658 b"recover",
3658 b"recover",
3659 b"",
3659 b"",
3660 b"brings the specified changeset back into the repository",
3660 b"brings the specified changeset back into the repository",
3661 )
3661 )
3662 ]
3662 ]
3663 + cmdutil.logopts,
3663 + cmdutil.logopts,
3664 _(b"hg debugbackupbundle [--recover HASH]"),
3664 _(b"hg debugbackupbundle [--recover HASH]"),
3665 )
3665 )
3666 def debugbackupbundle(ui, repo, *pats, **opts):
3666 def debugbackupbundle(ui, repo, *pats, **opts):
3667 """lists the changesets available in backup bundles
3667 """lists the changesets available in backup bundles
3668
3668
3669 Without any arguments, this command prints a list of the changesets in each
3669 Without any arguments, this command prints a list of the changesets in each
3670 backup bundle.
3670 backup bundle.
3671
3671
3672 --recover takes a changeset hash and unbundles the first bundle that
3672 --recover takes a changeset hash and unbundles the first bundle that
3673 contains that hash, which puts that changeset back in your repository.
3673 contains that hash, which puts that changeset back in your repository.
3674
3674
3675 --verbose will print the entire commit message and the bundle path for that
3675 --verbose will print the entire commit message and the bundle path for that
3676 backup.
3676 backup.
3677 """
3677 """
3678 backups = list(
3678 backups = list(
3679 filter(
3679 filter(
3680 os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg")
3680 os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg")
3681 )
3681 )
3682 )
3682 )
3683 backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)
3683 backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)
3684
3684
3685 opts = pycompat.byteskwargs(opts)
3685 opts = pycompat.byteskwargs(opts)
3686 opts[b"bundle"] = b""
3686 opts[b"bundle"] = b""
3687 opts[b"force"] = None
3687 opts[b"force"] = None
3688 limit = logcmdutil.getlimit(opts)
3688 limit = logcmdutil.getlimit(opts)
3689
3689
3690 def display(other, chlist, displayer):
3690 def display(other, chlist, displayer):
3691 if opts.get(b"newest_first"):
3691 if opts.get(b"newest_first"):
3692 chlist.reverse()
3692 chlist.reverse()
3693 count = 0
3693 count = 0
3694 for n in chlist:
3694 for n in chlist:
3695 if limit is not None and count >= limit:
3695 if limit is not None and count >= limit:
3696 break
3696 break
3697 parents = [True for p in other.changelog.parents(n) if p != nullid]
3697 parents = [True for p in other.changelog.parents(n) if p != nullid]
3698 if opts.get(b"no_merges") and len(parents) == 2:
3698 if opts.get(b"no_merges") and len(parents) == 2:
3699 continue
3699 continue
3700 count += 1
3700 count += 1
3701 displayer.show(other[n])
3701 displayer.show(other[n])
3702
3702
3703 recovernode = opts.get(b"recover")
3703 recovernode = opts.get(b"recover")
3704 if recovernode:
3704 if recovernode:
3705 if scmutil.isrevsymbol(repo, recovernode):
3705 if scmutil.isrevsymbol(repo, recovernode):
3706 ui.warn(_(b"%s already exists in the repo\n") % recovernode)
3706 ui.warn(_(b"%s already exists in the repo\n") % recovernode)
3707 return
3707 return
3708 elif backups:
3708 elif backups:
3709 msg = _(
3709 msg = _(
3710 b"Recover changesets using: hg debugbackupbundle --recover "
3710 b"Recover changesets using: hg debugbackupbundle --recover "
3711 b"<changeset hash>\n\nAvailable backup changesets:"
3711 b"<changeset hash>\n\nAvailable backup changesets:"
3712 )
3712 )
3713 ui.status(msg, label=b"status.removed")
3713 ui.status(msg, label=b"status.removed")
3714 else:
3714 else:
3715 ui.status(_(b"no backup changesets found\n"))
3715 ui.status(_(b"no backup changesets found\n"))
3716 return
3716 return
3717
3717
3718 for backup in backups:
3718 for backup in backups:
3719 # Much of this is copied from the hg incoming logic
3719 # Much of this is copied from the hg incoming logic
3720 source = ui.expandpath(os.path.relpath(backup, encoding.getcwd()))
3720 source = ui.expandpath(os.path.relpath(backup, encoding.getcwd()))
3721 source, branches = hg.parseurl(source, opts.get(b"branch"))
3721 source, branches = hg.parseurl(source, opts.get(b"branch"))
3722 try:
3722 try:
3723 other = hg.peer(repo, opts, source)
3723 other = hg.peer(repo, opts, source)
3724 except error.LookupError as ex:
3724 except error.LookupError as ex:
3725 msg = _(b"\nwarning: unable to open bundle %s") % source
3725 msg = _(b"\nwarning: unable to open bundle %s") % source
3726 hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
3726 hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
3727 ui.warn(msg, hint=hint)
3727 ui.warn(msg, hint=hint)
3728 continue
3728 continue
3729 revs, checkout = hg.addbranchrevs(
3729 revs, checkout = hg.addbranchrevs(
3730 repo, other, branches, opts.get(b"rev")
3730 repo, other, branches, opts.get(b"rev")
3731 )
3731 )
3732
3732
3733 if revs:
3733 if revs:
3734 revs = [other.lookup(rev) for rev in revs]
3734 revs = [other.lookup(rev) for rev in revs]
3735
3735
3736 quiet = ui.quiet
3736 quiet = ui.quiet
3737 try:
3737 try:
3738 ui.quiet = True
3738 ui.quiet = True
3739 other, chlist, cleanupfn = bundlerepo.getremotechanges(
3739 other, chlist, cleanupfn = bundlerepo.getremotechanges(
3740 ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
3740 ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
3741 )
3741 )
3742 except error.LookupError:
3742 except error.LookupError:
3743 continue
3743 continue
3744 finally:
3744 finally:
3745 ui.quiet = quiet
3745 ui.quiet = quiet
3746
3746
3747 try:
3747 try:
3748 if not chlist:
3748 if not chlist:
3749 continue
3749 continue
3750 if recovernode:
3750 if recovernode:
3751 with repo.lock(), repo.transaction(b"unbundle") as tr:
3751 with repo.lock(), repo.transaction(b"unbundle") as tr:
3752 if scmutil.isrevsymbol(other, recovernode):
3752 if scmutil.isrevsymbol(other, recovernode):
3753 ui.status(_(b"Unbundling %s\n") % (recovernode))
3753 ui.status(_(b"Unbundling %s\n") % (recovernode))
3754 f = hg.openpath(ui, source)
3754 f = hg.openpath(ui, source)
3755 gen = exchange.readbundle(ui, f, source)
3755 gen = exchange.readbundle(ui, f, source)
3756 if isinstance(gen, bundle2.unbundle20):
3756 if isinstance(gen, bundle2.unbundle20):
3757 bundle2.applybundle(
3757 bundle2.applybundle(
3758 repo,
3758 repo,
3759 gen,
3759 gen,
3760 tr,
3760 tr,
3761 source=b"unbundle",
3761 source=b"unbundle",
3762 url=b"bundle:" + source,
3762 url=b"bundle:" + source,
3763 )
3763 )
3764 else:
3764 else:
3765 gen.apply(repo, b"unbundle", b"bundle:" + source)
3765 gen.apply(repo, b"unbundle", b"bundle:" + source)
3766 break
3766 break
3767 else:
3767 else:
3768 backupdate = encoding.strtolocal(
3768 backupdate = encoding.strtolocal(
3769 time.strftime(
3769 time.strftime(
3770 "%a %H:%M, %Y-%m-%d",
3770 "%a %H:%M, %Y-%m-%d",
3771 time.localtime(os.path.getmtime(source)),
3771 time.localtime(os.path.getmtime(source)),
3772 )
3772 )
3773 )
3773 )
3774 ui.status(b"\n%s\n" % (backupdate.ljust(50)))
3774 ui.status(b"\n%s\n" % (backupdate.ljust(50)))
3775 if ui.verbose:
3775 if ui.verbose:
3776 ui.status(b"%s%s\n" % (b"bundle:".ljust(13), source))
3776 ui.status(b"%s%s\n" % (b"bundle:".ljust(13), source))
3777 else:
3777 else:
3778 opts[
3778 opts[
3779 b"template"
3779 b"template"
3780 ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
3780 ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
3781 displayer = logcmdutil.changesetdisplayer(
3781 displayer = logcmdutil.changesetdisplayer(
3782 ui, other, opts, False
3782 ui, other, opts, False
3783 )
3783 )
3784 display(other, chlist, displayer)
3784 display(other, chlist, displayer)
3785 displayer.close()
3785 displayer.close()
3786 finally:
3786 finally:
3787 cleanupfn()
3787 cleanupfn()
3788
3788
3789
3789
3790 @command(
3790 @command(
3791 b'debugsub',
3791 b'debugsub',
3792 [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
3792 [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
3793 _(b'[-r REV] [REV]'),
3793 _(b'[-r REV] [REV]'),
3794 )
3794 )
3795 def debugsub(ui, repo, rev=None):
3795 def debugsub(ui, repo, rev=None):
3796 ctx = scmutil.revsingle(repo, rev, None)
3796 ctx = scmutil.revsingle(repo, rev, None)
3797 for k, v in sorted(ctx.substate.items()):
3797 for k, v in sorted(ctx.substate.items()):
3798 ui.writenoi18n(b'path %s\n' % k)
3798 ui.writenoi18n(b'path %s\n' % k)
3799 ui.writenoi18n(b' source %s\n' % v[0])
3799 ui.writenoi18n(b' source %s\n' % v[0])
3800 ui.writenoi18n(b' revision %s\n' % v[1])
3800 ui.writenoi18n(b' revision %s\n' % v[1])
3801
3801
3802
3802
3803 @command(b'debugshell', optionalrepo=True)
3803 @command(b'debugshell', optionalrepo=True)
3804 def debugshell(ui, repo):
3804 def debugshell(ui, repo):
3805 """run an interactive Python interpreter
3805 """run an interactive Python interpreter
3806
3806
3807 The local namespace is provided with a reference to the ui and
3807 The local namespace is provided with a reference to the ui and
3808 the repo instance (if available).
3808 the repo instance (if available).
3809 """
3809 """
3810 import code
3810 import code
3811
3811
3812 imported_objects = {
3812 imported_objects = {
3813 'ui': ui,
3813 'ui': ui,
3814 'repo': repo,
3814 'repo': repo,
3815 }
3815 }
3816
3816
3817 code.interact(local=imported_objects)
3817 code.interact(local=imported_objects)
3818
3818
3819
3819
3820 @command(
3820 @command(
3821 b'debugsuccessorssets',
3821 b'debugsuccessorssets',
3822 [(b'', b'closest', False, _(b'return closest successors sets only'))],
3822 [(b'', b'closest', False, _(b'return closest successors sets only'))],
3823 _(b'[REV]'),
3823 _(b'[REV]'),
3824 )
3824 )
3825 def debugsuccessorssets(ui, repo, *revs, **opts):
3825 def debugsuccessorssets(ui, repo, *revs, **opts):
3826 """show set of successors for revision
3826 """show set of successors for revision
3827
3827
3828 A successors set of changeset A is a consistent group of revisions that
3828 A successors set of changeset A is a consistent group of revisions that
3829 succeed A. It contains non-obsolete changesets only unless the closest
3829 succeed A. It contains non-obsolete changesets only unless the closest
3830 successors sets option (--closest) is set.
3830 successors sets option (--closest) is set.
3831
3831
3832 In most cases a changeset A has a single successors set containing a single
3832 In most cases a changeset A has a single successors set containing a single
3833 successor (changeset A replaced by A').
3833 successor (changeset A replaced by A').
3834
3834
3835 A changeset that is made obsolete with no successors is called "pruned".
3835 A changeset that is made obsolete with no successors is called "pruned".
3836 Such changesets have no successors sets at all.
3836 Such changesets have no successors sets at all.
3837
3837
3838 A changeset that has been "split" will have a successors set containing
3838 A changeset that has been "split" will have a successors set containing
3839 more than one successor.
3839 more than one successor.
3840
3840
3841 A changeset that has been rewritten in multiple different ways is called
3841 A changeset that has been rewritten in multiple different ways is called
3842 "divergent". Such changesets have multiple successor sets (each of which
3842 "divergent". Such changesets have multiple successor sets (each of which
3843 may also be split, i.e. have multiple successors).
3843 may also be split, i.e. have multiple successors).
3844
3844
3845 Results are displayed as follows::
3845 Results are displayed as follows::
3846
3846
3847 <rev1>
3847 <rev1>
3848 <successors-1A>
3848 <successors-1A>
3849 <rev2>
3849 <rev2>
3850 <successors-2A>
3850 <successors-2A>
3851 <successors-2B1> <successors-2B2> <successors-2B3>
3851 <successors-2B1> <successors-2B2> <successors-2B3>
3852
3852
3853 Here rev2 has two possible (i.e. divergent) successors sets. The first
3853 Here rev2 has two possible (i.e. divergent) successors sets. The first
3854 holds one element, whereas the second holds three (i.e. the changeset has
3854 holds one element, whereas the second holds three (i.e. the changeset has
3855 been split).
3855 been split).
3856 """
3856 """
3857 # passed to successorssets caching computation from one call to another
3857 # passed to successorssets caching computation from one call to another
3858 cache = {}
3858 cache = {}
3859 ctx2str = bytes
3859 ctx2str = bytes
3860 node2str = short
3860 node2str = short
3861 for rev in scmutil.revrange(repo, revs):
3861 for rev in scmutil.revrange(repo, revs):
3862 ctx = repo[rev]
3862 ctx = repo[rev]
3863 ui.write(b'%s\n' % ctx2str(ctx))
3863 ui.write(b'%s\n' % ctx2str(ctx))
3864 for succsset in obsutil.successorssets(
3864 for succsset in obsutil.successorssets(
3865 repo, ctx.node(), closest=opts['closest'], cache=cache
3865 repo, ctx.node(), closest=opts['closest'], cache=cache
3866 ):
3866 ):
3867 if succsset:
3867 if succsset:
3868 ui.write(b' ')
3868 ui.write(b' ')
3869 ui.write(node2str(succsset[0]))
3869 ui.write(node2str(succsset[0]))
3870 for node in succsset[1:]:
3870 for node in succsset[1:]:
3871 ui.write(b' ')
3871 ui.write(b' ')
3872 ui.write(node2str(node))
3872 ui.write(node2str(node))
3873 ui.write(b'\n')
3873 ui.write(b'\n')
3874
3874
3875
3875
3876 @command(b'debugtagscache', [])
3876 @command(b'debugtagscache', [])
3877 def debugtagscache(ui, repo):
3877 def debugtagscache(ui, repo):
3878 """display the contents of .hg/cache/hgtagsfnodes1"""
3878 """display the contents of .hg/cache/hgtagsfnodes1"""
3879 cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
3879 cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
3880 flog = repo.file(b'.hgtags')
3880 flog = repo.file(b'.hgtags')
3881 for r in repo:
3881 for r in repo:
3882 node = repo[r].node()
3882 node = repo[r].node()
3883 tagsnode = cache.getfnode(node, computemissing=False)
3883 tagsnode = cache.getfnode(node, computemissing=False)
3884 if tagsnode:
3884 if tagsnode:
3885 tagsnodedisplay = hex(tagsnode)
3885 tagsnodedisplay = hex(tagsnode)
3886 if not flog.hasnode(tagsnode):
3886 if not flog.hasnode(tagsnode):
3887 tagsnodedisplay += b' (unknown node)'
3887 tagsnodedisplay += b' (unknown node)'
3888 elif tagsnode is None:
3888 elif tagsnode is None:
3889 tagsnodedisplay = b'missing'
3889 tagsnodedisplay = b'missing'
3890 else:
3890 else:
3891 tagsnodedisplay = b'invalid'
3891 tagsnodedisplay = b'invalid'
3892
3892
3893 ui.write(b'%d %s %s\n' % (r, hex(node), tagsnodedisplay))
3893 ui.write(b'%d %s %s\n' % (r, hex(node), tagsnodedisplay))
3894
3894
3895
3895
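debugtagscache above walks every revision and asks the fnodes cache for a precomputed .hgtags filenode without triggering recomputation. A short editorial sketch of reusing that same read-only lookup to count entries still missing from the cache; the helper name count_missing_fnodes is hypothetical, the tagsmod and getfnode calls are the ones shown above.

def count_missing_fnodes(repo):
    # hypothetical helper: same cache read as debugtagscache, but only
    # tallying revisions whose .hgtags filenode has not been cached yet
    cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
    return sum(
        1
        for r in repo
        if cache.getfnode(repo[r].node(), computemissing=False) is None
    )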
3896 @command(
3896 @command(
3897 b'debugtemplate',
3897 b'debugtemplate',
3898 [
3898 [
3899 (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
3899 (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
3900 (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
3900 (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
3901 ],
3901 ],
3902 _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
3902 _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
3903 optionalrepo=True,
3903 optionalrepo=True,
3904 )
3904 )
3905 def debugtemplate(ui, repo, tmpl, **opts):
3905 def debugtemplate(ui, repo, tmpl, **opts):
3906 """parse and apply a template
3906 """parse and apply a template
3907
3907
3908 If -r/--rev is given, the template is processed as a log template and
3908 If -r/--rev is given, the template is processed as a log template and
3909 applied to the given changesets. Otherwise, it is processed as a generic
3909 applied to the given changesets. Otherwise, it is processed as a generic
3910 template.
3910 template.
3911
3911
3912 Use --verbose to print the parsed tree.
3912 Use --verbose to print the parsed tree.
3913 """
3913 """
3914 revs = None
3914 revs = None
3915 if opts['rev']:
3915 if opts['rev']:
3916 if repo is None:
3916 if repo is None:
3917 raise error.RepoError(
3917 raise error.RepoError(
3918 _(b'there is no Mercurial repository here (.hg not found)')
3918 _(b'there is no Mercurial repository here (.hg not found)')
3919 )
3919 )
3920 revs = scmutil.revrange(repo, opts['rev'])
3920 revs = scmutil.revrange(repo, opts['rev'])
3921
3921
3922 props = {}
3922 props = {}
3923 for d in opts['define']:
3923 for d in opts['define']:
3924 try:
3924 try:
3925 k, v = (e.strip() for e in d.split(b'=', 1))
3925 k, v = (e.strip() for e in d.split(b'=', 1))
3926 if not k or k == b'ui':
3926 if not k or k == b'ui':
3927 raise ValueError
3927 raise ValueError
3928 props[k] = v
3928 props[k] = v
3929 except ValueError:
3929 except ValueError:
3930 raise error.Abort(_(b'malformed keyword definition: %s') % d)
3930 raise error.Abort(_(b'malformed keyword definition: %s') % d)
3931
3931
3932 if ui.verbose:
3932 if ui.verbose:
3933 aliases = ui.configitems(b'templatealias')
3933 aliases = ui.configitems(b'templatealias')
3934 tree = templater.parse(tmpl)
3934 tree = templater.parse(tmpl)
3935 ui.note(templater.prettyformat(tree), b'\n')
3935 ui.note(templater.prettyformat(tree), b'\n')
3936 newtree = templater.expandaliases(tree, aliases)
3936 newtree = templater.expandaliases(tree, aliases)
3937 if newtree != tree:
3937 if newtree != tree:
3938 ui.notenoi18n(
3938 ui.notenoi18n(
3939 b"* expanded:\n", templater.prettyformat(newtree), b'\n'
3939 b"* expanded:\n", templater.prettyformat(newtree), b'\n'
3940 )
3940 )
3941
3941
3942 if revs is None:
3942 if revs is None:
3943 tres = formatter.templateresources(ui, repo)
3943 tres = formatter.templateresources(ui, repo)
3944 t = formatter.maketemplater(ui, tmpl, resources=tres)
3944 t = formatter.maketemplater(ui, tmpl, resources=tres)
3945 if ui.verbose:
3945 if ui.verbose:
3946 kwds, funcs = t.symbolsuseddefault()
3946 kwds, funcs = t.symbolsuseddefault()
3947 ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
3947 ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
3948 ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
3948 ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
3949 ui.write(t.renderdefault(props))
3949 ui.write(t.renderdefault(props))
3950 else:
3950 else:
3951 displayer = logcmdutil.maketemplater(ui, repo, tmpl)
3951 displayer = logcmdutil.maketemplater(ui, repo, tmpl)
3952 if ui.verbose:
3952 if ui.verbose:
3953 kwds, funcs = displayer.t.symbolsuseddefault()
3953 kwds, funcs = displayer.t.symbolsuseddefault()
3954 ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
3954 ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
3955 ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
3955 ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
3956 for r in revs:
3956 for r in revs:
3957 displayer.show(repo[r], **pycompat.strkwargs(props))
3957 displayer.show(repo[r], **pycompat.strkwargs(props))
3958 displayer.close()
3958 displayer.close()
3959
3959
3960
3960
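When no revision is given, debugtemplate above compiles the template with formatter.maketemplater and renders it against the -D properties. A hedged, standalone sketch of that path follows; the function name render_generic and the sample template are illustrative only, while the formatter calls are the ones used above.

def render_generic(ui, repo, tmpl, props):
    # same call sequence as the revs-is-None branch of debugtemplate
    tres = formatter.templateresources(ui, repo)
    t = formatter.maketemplater(ui, tmpl, resources=tres)
    return t.renderdefault(props)

# e.g. render_generic(ui, repo, b'{foo} and {bar}\n', {b'foo': b'1', b'bar': b'2'})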
3961 @command(
3961 @command(
3962 b'debuguigetpass',
3962 b'debuguigetpass',
3963 [
3963 [
3964 (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
3964 (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
3965 ],
3965 ],
3966 _(b'[-p TEXT]'),
3966 _(b'[-p TEXT]'),
3967 norepo=True,
3967 norepo=True,
3968 )
3968 )
3969 def debuguigetpass(ui, prompt=b''):
3969 def debuguigetpass(ui, prompt=b''):
3970 """show prompt to type password"""
3970 """show prompt to type password"""
3971 r = ui.getpass(prompt)
3971 r = ui.getpass(prompt)
3972 if r is None:
3972 if r is None:
3973 r = b"<default response>"
3973 r = b"<default response>"
3974 ui.writenoi18n(b'response: %s\n' % r)
3974 ui.writenoi18n(b'response: %s\n' % r)
3975
3975
3976
3976
3977 @command(
3977 @command(
3978 b'debuguiprompt',
3978 b'debuguiprompt',
3979 [
3979 [
3980 (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
3980 (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
3981 ],
3981 ],
3982 _(b'[-p TEXT]'),
3982 _(b'[-p TEXT]'),
3983 norepo=True,
3983 norepo=True,
3984 )
3984 )
3985 def debuguiprompt(ui, prompt=b''):
3985 def debuguiprompt(ui, prompt=b''):
3986 """show plain prompt"""
3986 """show plain prompt"""
3987 r = ui.prompt(prompt)
3987 r = ui.prompt(prompt)
3988 ui.writenoi18n(b'response: %s\n' % r)
3988 ui.writenoi18n(b'response: %s\n' % r)
3989
3989
3990
3990
3991 @command(b'debugupdatecaches', [])
3991 @command(b'debugupdatecaches', [])
3992 def debugupdatecaches(ui, repo, *pats, **opts):
3992 def debugupdatecaches(ui, repo, *pats, **opts):
3993 """warm all known caches in the repository"""
3993 """warm all known caches in the repository"""
3994 with repo.wlock(), repo.lock():
3994 with repo.wlock(), repo.lock():
3995 repo.updatecaches(full=True)
3995 repo.updatecaches(full=True)
3996
3996
3997
3997
3998 @command(
3998 @command(
3999 b'debugupgraderepo',
3999 b'debugupgraderepo',
4000 [
4000 [
4001 (
4001 (
4002 b'o',
4002 b'o',
4003 b'optimize',
4003 b'optimize',
4004 [],
4004 [],
4005 _(b'extra optimization to perform'),
4005 _(b'extra optimization to perform'),
4006 _(b'NAME'),
4006 _(b'NAME'),
4007 ),
4007 ),
4008 (b'', b'run', False, _(b'performs an upgrade')),
4008 (b'', b'run', False, _(b'performs an upgrade')),
4009 (b'', b'backup', True, _(b'keep the old repository content around')),
4009 (b'', b'backup', True, _(b'keep the old repository content around')),
4010 (b'', b'changelog', None, _(b'select the changelog for upgrade')),
4010 (b'', b'changelog', None, _(b'select the changelog for upgrade')),
4011 (b'', b'manifest', None, _(b'select the manifest for upgrade')),
4011 (b'', b'manifest', None, _(b'select the manifest for upgrade')),
4012 (b'', b'filelogs', None, _(b'select all filelogs for upgrade')),
4012 (b'', b'filelogs', None, _(b'select all filelogs for upgrade')),
4013 ],
4013 ],
4014 )
4014 )
4015 def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
4015 def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
4016 """upgrade a repository to use different features
4016 """upgrade a repository to use different features
4017
4017
4018 If no arguments are specified, the repository is evaluated for upgrade
4018 If no arguments are specified, the repository is evaluated for upgrade
4019 and a list of problems and potential optimizations is printed.
4019 and a list of problems and potential optimizations is printed.
4020
4020
4021 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
4021 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
4022 can be influenced via additional arguments. More details will be provided
4022 can be influenced via additional arguments. More details will be provided
4023 by the command output when run without ``--run``.
4023 by the command output when run without ``--run``.
4024
4024
4025 During the upgrade, the repository will be locked and no writes will be
4025 During the upgrade, the repository will be locked and no writes will be
4026 allowed.
4026 allowed.
4027
4027
4028 At the end of the upgrade, the repository may not be readable while new
4028 At the end of the upgrade, the repository may not be readable while new
4029 repository data is swapped in. This window will be as long as it takes to
4029 repository data is swapped in. This window will be as long as it takes to
4030 rename some directories inside the ``.hg`` directory. On most machines, this
4030 rename some directories inside the ``.hg`` directory. On most machines, this
4031 should complete almost instantaneously and the chances of a consumer being
4031 should complete almost instantaneously and the chances of a consumer being
4032 unable to access the repository should be low.
4032 unable to access the repository should be low.
4033
4033
4034 By default, all revlogs will be upgraded. You can restrict this using flags
4034 By default, all revlogs will be upgraded. You can restrict this using flags
4035 such as `--manifest`:
4035 such as `--manifest`:
4036
4036
4037 * `--manifest`: only optimize the manifest
4037 * `--manifest`: only optimize the manifest
4038 * `--no-manifest`: optimize all revlogs but the manifest
4038 * `--no-manifest`: optimize all revlogs but the manifest
4039 * `--changelog`: optimize the changelog only
4039 * `--changelog`: optimize the changelog only
4040 * `--no-changelog --no-manifest`: optimize filelogs only
4040 * `--no-changelog --no-manifest`: optimize filelogs only
4041 * `--filelogs`: optimize the filelogs only
4041 * `--filelogs`: optimize the filelogs only
4042 * `--no-changelog --no-manifest --no-filelogs`: skip all revlog optimizations
4042 * `--no-changelog --no-manifest --no-filelogs`: skip all revlog optimizations
4043 """
4043 """
4044 return upgrade.upgraderepo(
4044 return upgrade.upgraderepo(
4045 ui, repo, run=run, optimize=set(optimize), backup=backup, **opts
4045 ui, repo, run=run, optimize=set(optimize), backup=backup, **opts
4046 )
4046 )
4047
4047
4048
4048
4049 @command(
4049 @command(
4050 b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
4050 b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
4051 )
4051 )
4052 def debugwalk(ui, repo, *pats, **opts):
4052 def debugwalk(ui, repo, *pats, **opts):
4053 """show how files match on given patterns"""
4053 """show how files match on given patterns"""
4054 opts = pycompat.byteskwargs(opts)
4054 opts = pycompat.byteskwargs(opts)
4055 m = scmutil.match(repo[None], pats, opts)
4055 m = scmutil.match(repo[None], pats, opts)
4056 if ui.verbose:
4056 if ui.verbose:
4057 ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
4057 ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
4058 items = list(repo[None].walk(m))
4058 items = list(repo[None].walk(m))
4059 if not items:
4059 if not items:
4060 return
4060 return
4061 f = lambda fn: fn
4061 f = lambda fn: fn
4062 if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
4062 if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
4063 f = lambda fn: util.normpath(fn)
4063 f = lambda fn: util.normpath(fn)
4064 fmt = b'f %%-%ds %%-%ds %%s' % (
4064 fmt = b'f %%-%ds %%-%ds %%s' % (
4065 max([len(abs) for abs in items]),
4065 max([len(abs) for abs in items]),
4066 max([len(repo.pathto(abs)) for abs in items]),
4066 max([len(repo.pathto(abs)) for abs in items]),
4067 )
4067 )
4068 for abs in items:
4068 for abs in items:
4069 line = fmt % (
4069 line = fmt % (
4070 abs,
4070 abs,
4071 f(repo.pathto(abs)),
4071 f(repo.pathto(abs)),
4072 m.exact(abs) and b'exact' or b'',
4072 m.exact(abs) and b'exact' or b'',
4073 )
4073 )
4074 ui.write(b"%s\n" % line.rstrip())
4074 ui.write(b"%s\n" % line.rstrip())
4075
4075
4076
4076
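debugwalk above builds its output format by first measuring the widest path in each column and then baking those widths into a bytes format string. A tiny editorial illustration of that trick with made-up file names:

items = [b'a.txt', b'dir/with/a/long-name.txt']
# b'%%-%ds' becomes e.g. b'%-24s' once the measured width is substituted in
fmt = b'f %%-%ds %%s' % max(len(fn) for fn in items)
for fn in items:
    print((fmt % (fn, b'exact')).decode())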
4077 @command(b'debugwhyunstable', [], _(b'REV'))
4077 @command(b'debugwhyunstable', [], _(b'REV'))
4078 def debugwhyunstable(ui, repo, rev):
4078 def debugwhyunstable(ui, repo, rev):
4079 """explain instabilities of a changeset"""
4079 """explain instabilities of a changeset"""
4080 for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
4080 for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
4081 dnodes = b''
4081 dnodes = b''
4082 if entry.get(b'divergentnodes'):
4082 if entry.get(b'divergentnodes'):
4083 dnodes = (
4083 dnodes = (
4084 b' '.join(
4084 b' '.join(
4085 b'%s (%s)' % (ctx.hex(), ctx.phasestr())
4085 b'%s (%s)' % (ctx.hex(), ctx.phasestr())
4086 for ctx in entry[b'divergentnodes']
4086 for ctx in entry[b'divergentnodes']
4087 )
4087 )
4088 + b' '
4088 + b' '
4089 )
4089 )
4090 ui.write(
4090 ui.write(
4091 b'%s: %s%s %s\n'
4091 b'%s: %s%s %s\n'
4092 % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
4092 % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
4093 )
4093 )
4094
4094
4095
4095
4096 @command(
4096 @command(
4097 b'debugwireargs',
4097 b'debugwireargs',
4098 [
4098 [
4099 (b'', b'three', b'', b'three'),
4099 (b'', b'three', b'', b'three'),
4100 (b'', b'four', b'', b'four'),
4100 (b'', b'four', b'', b'four'),
4101 (b'', b'five', b'', b'five'),
4101 (b'', b'five', b'', b'five'),
4102 ]
4102 ]
4103 + cmdutil.remoteopts,
4103 + cmdutil.remoteopts,
4104 _(b'REPO [OPTIONS]... [ONE [TWO]]'),
4104 _(b'REPO [OPTIONS]... [ONE [TWO]]'),
4105 norepo=True,
4105 norepo=True,
4106 )
4106 )
4107 def debugwireargs(ui, repopath, *vals, **opts):
4107 def debugwireargs(ui, repopath, *vals, **opts):
4108 opts = pycompat.byteskwargs(opts)
4108 opts = pycompat.byteskwargs(opts)
4109 repo = hg.peer(ui, opts, repopath)
4109 repo = hg.peer(ui, opts, repopath)
4110 try:
4110 try:
4111 for opt in cmdutil.remoteopts:
4111 for opt in cmdutil.remoteopts:
4112 del opts[opt[1]]
4112 del opts[opt[1]]
4113 args = {}
4113 args = {}
4114 for k, v in pycompat.iteritems(opts):
4114 for k, v in pycompat.iteritems(opts):
4115 if v:
4115 if v:
4116 args[k] = v
4116 args[k] = v
4117 args = pycompat.strkwargs(args)
4117 args = pycompat.strkwargs(args)
4118 # run twice to check that we don't mess up the stream for the next command
4118 # run twice to check that we don't mess up the stream for the next command
4119 res1 = repo.debugwireargs(*vals, **args)
4119 res1 = repo.debugwireargs(*vals, **args)
4120 res2 = repo.debugwireargs(*vals, **args)
4120 res2 = repo.debugwireargs(*vals, **args)
4121 ui.write(b"%s\n" % res1)
4121 ui.write(b"%s\n" % res1)
4122 if res1 != res2:
4122 if res1 != res2:
4123 ui.warn(b"%s\n" % res2)
4123 ui.warn(b"%s\n" % res2)
4124 finally:
4124 finally:
4125 repo.close()
4125 repo.close()
4126
4126
4127
4127
4128 def _parsewirelangblocks(fh):
4128 def _parsewirelangblocks(fh):
4129 activeaction = None
4129 activeaction = None
4130 blocklines = []
4130 blocklines = []
4131 lastindent = 0
4131 lastindent = 0
4132
4132
4133 for line in fh:
4133 for line in fh:
4134 line = line.rstrip()
4134 line = line.rstrip()
4135 if not line:
4135 if not line:
4136 continue
4136 continue
4137
4137
4138 if line.startswith(b'#'):
4138 if line.startswith(b'#'):
4139 continue
4139 continue
4140
4140
4141 if not line.startswith(b' '):
4141 if not line.startswith(b' '):
4142 # New block. Flush previous one.
4142 # New block. Flush previous one.
4143 if activeaction:
4143 if activeaction:
4144 yield activeaction, blocklines
4144 yield activeaction, blocklines
4145
4145
4146 activeaction = line
4146 activeaction = line
4147 blocklines = []
4147 blocklines = []
4148 lastindent = 0
4148 lastindent = 0
4149 continue
4149 continue
4150
4150
4151 # Else we start with an indent.
4151 # Else we start with an indent.
4152
4152
4153 if not activeaction:
4153 if not activeaction:
4154 raise error.Abort(_(b'indented line outside of block'))
4154 raise error.Abort(_(b'indented line outside of block'))
4155
4155
4156 indent = len(line) - len(line.lstrip())
4156 indent = len(line) - len(line.lstrip())
4157
4157
4158 # If this line is indented more than the last line, concatenate it.
4158 # If this line is indented more than the last line, concatenate it.
4159 if indent > lastindent and blocklines:
4159 if indent > lastindent and blocklines:
4160 blocklines[-1] += line.lstrip()
4160 blocklines[-1] += line.lstrip()
4161 else:
4161 else:
4162 blocklines.append(line)
4162 blocklines.append(line)
4163 lastindent = indent
4163 lastindent = indent
4164
4164
4165 # Flush last block.
4165 # Flush last block.
4166 if activeaction:
4166 if activeaction:
4167 yield activeaction, blocklines
4167 yield activeaction, blocklines
4168
4168
4169
4169
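_parsewirelangblocks above turns the debugwireproto mini language into (action, payload-lines) pairs. A small editorial sketch of feeding it an in-memory script; the sample script is made up, and the yielded values follow from the parsing rules in the function itself.

import io

script = io.BytesIO(
    b"# comments are skipped\n"
    b"command listkeys\n"
    b"    namespace bookmarks\n"
    b"raw\n"
    b"    0000\n"
)
for action, lines in _parsewirelangblocks(script):
    print(action, lines)
# roughly: b'command listkeys' [b'    namespace bookmarks']
#          b'raw'              [b'    0000']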
4170 @command(
4170 @command(
4171 b'debugwireproto',
4171 b'debugwireproto',
4172 [
4172 [
4173 (b'', b'localssh', False, _(b'start an SSH server for this repo')),
4173 (b'', b'localssh', False, _(b'start an SSH server for this repo')),
4174 (b'', b'peer', b'', _(b'construct a specific version of the peer')),
4174 (b'', b'peer', b'', _(b'construct a specific version of the peer')),
4175 (
4175 (
4176 b'',
4176 b'',
4177 b'noreadstderr',
4177 b'noreadstderr',
4178 False,
4178 False,
4179 _(b'do not read from stderr of the remote'),
4179 _(b'do not read from stderr of the remote'),
4180 ),
4180 ),
4181 (
4181 (
4182 b'',
4182 b'',
4183 b'nologhandshake',
4183 b'nologhandshake',
4184 False,
4184 False,
4185 _(b'do not log I/O related to the peer handshake'),
4185 _(b'do not log I/O related to the peer handshake'),
4186 ),
4186 ),
4187 ]
4187 ]
4188 + cmdutil.remoteopts,
4188 + cmdutil.remoteopts,
4189 _(b'[PATH]'),
4189 _(b'[PATH]'),
4190 optionalrepo=True,
4190 optionalrepo=True,
4191 )
4191 )
4192 def debugwireproto(ui, repo, path=None, **opts):
4192 def debugwireproto(ui, repo, path=None, **opts):
4193 """send wire protocol commands to a server
4193 """send wire protocol commands to a server
4194
4194
4195 This command can be used to issue wire protocol commands to remote
4195 This command can be used to issue wire protocol commands to remote
4196 peers and to debug the raw data being exchanged.
4196 peers and to debug the raw data being exchanged.
4197
4197
4198 ``--localssh`` will start an SSH server against the current repository
4198 ``--localssh`` will start an SSH server against the current repository
4199 and connect to that. By default, the connection will perform a handshake
4199 and connect to that. By default, the connection will perform a handshake
4200 and establish an appropriate peer instance.
4200 and establish an appropriate peer instance.
4201
4201
4202 ``--peer`` can be used to bypass the handshake protocol and construct a
4202 ``--peer`` can be used to bypass the handshake protocol and construct a
4203 peer instance using the specified class type. Valid values are ``raw``,
4203 peer instance using the specified class type. Valid values are ``raw``,
4204 ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
4204 ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
4205 raw data payloads and don't support higher-level command actions.
4205 raw data payloads and don't support higher-level command actions.
4206
4206
4207 ``--noreadstderr`` can be used to disable automatic reading from stderr
4207 ``--noreadstderr`` can be used to disable automatic reading from stderr
4208 of the peer (for SSH connections only). Disabling automatic reading of
4208 of the peer (for SSH connections only). Disabling automatic reading of
4209 stderr is useful for making output more deterministic.
4209 stderr is useful for making output more deterministic.
4210
4210
4211 Commands are issued via a mini language which is specified via stdin.
4211 Commands are issued via a mini language which is specified via stdin.
4212 The language consists of individual actions to perform. An action is
4212 The language consists of individual actions to perform. An action is
4213 defined by a block. A block is defined as a line with no leading
4213 defined by a block. A block is defined as a line with no leading
4214 space followed by 0 or more lines with leading space. Blocks are
4214 space followed by 0 or more lines with leading space. Blocks are
4215 effectively a high-level command with additional metadata.
4215 effectively a high-level command with additional metadata.
4216
4216
4217 Lines beginning with ``#`` are ignored.
4217 Lines beginning with ``#`` are ignored.
4218
4218
4219 The following sections denote available actions.
4219 The following sections denote available actions.
4220
4220
4221 raw
4221 raw
4222 ---
4222 ---
4223
4223
4224 Send raw data to the server.
4224 Send raw data to the server.
4225
4225
4226 The block payload contains the raw data to send as one atomic send
4226 The block payload contains the raw data to send as one atomic send
4227 operation. The data may not actually be delivered in a single system
4227 operation. The data may not actually be delivered in a single system
4228 call: it depends on the abilities of the transport being used.
4228 call: it depends on the abilities of the transport being used.
4229
4229
4230 Each line in the block is de-indented and concatenated. Then, that
4230 Each line in the block is de-indented and concatenated. Then, that
4231 value is evaluated as a Python b'' literal. This allows the use of
4231 value is evaluated as a Python b'' literal. This allows the use of
4232 backslash escaping, etc.
4232 backslash escaping, etc.
4233
4233
4234 raw+
4234 raw+
4235 ----
4235 ----
4236
4236
4237 Behaves like ``raw`` except that it flushes output afterwards.
4237 Behaves like ``raw`` except that it flushes output afterwards.
4238
4238
4239 command <X>
4239 command <X>
4240 -----------
4240 -----------
4241
4241
4242 Send a request to run a named command, whose name follows the ``command``
4242 Send a request to run a named command, whose name follows the ``command``
4243 string.
4243 string.
4244
4244
4245 Arguments to the command are defined as lines in this block. The format of
4245 Arguments to the command are defined as lines in this block. The format of
4246 each line is ``<key> <value>``. e.g.::
4246 each line is ``<key> <value>``. e.g.::
4247
4247
4248 command listkeys
4248 command listkeys
4249 namespace bookmarks
4249 namespace bookmarks
4250
4250
4251 If the value begins with ``eval:``, it will be interpreted as a Python
4251 If the value begins with ``eval:``, it will be interpreted as a Python
4252 literal expression. Otherwise values are interpreted as Python b'' literals.
4252 literal expression. Otherwise values are interpreted as Python b'' literals.
4253 This allows sending complex types and encoding special byte sequences via
4253 This allows sending complex types and encoding special byte sequences via
4254 backslash escaping.
4254 backslash escaping.
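
For example, ``eval:`` allows passing a Python list as an argument value
(a sketch; ``known`` is a standard wire protocol command that takes a list
of nodes)::

    command known
        nodes eval:[]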

The following arguments have special meaning:

``PUSHFILE``
    When defined, the *push* mechanism of the peer will be used instead
    of the static request-response mechanism and the content of the
    file specified in the value of this argument will be sent as the
    command payload.

    This can be used to submit a local bundle file to the remote.
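
For example, a bundle produced earlier could be submitted with the
``unbundle`` command (a sketch; the bundle path and the ``heads`` value are
purely illustrative)::

    command unbundle
        PUSHFILE ../mybundle.hg
        heads eval:[b'force']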

batchbegin
----------

Instruct the peer to begin a batched send.

All ``command`` blocks are queued for execution until the next
``batchsubmit`` block.

batchsubmit
-----------

Submit previously queued ``command`` blocks as a batch request.

This action MUST be paired with a ``batchbegin`` action.
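
For example, two commands can be combined into a single batched request
(a sketch using standard wire protocol commands)::

    batchbegin
    command heads
    command listkeys
        namespace bookmarks
    batchsubmit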

httprequest <method> <path>
---------------------------

(HTTP peer only)

Send an HTTP request to the peer.

The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.

Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
headers to add to the request. e.g. ``Accept: foo``.

The following arguments are special:

``BODYFILE``
    The content of the file defined as the value to this argument will be
    transferred verbatim as the HTTP request body.

``frame <type> <flags> <payload>``
    Send a unified protocol frame as part of the request body.

    All frames will be collected and sent as the body to the HTTP
    request.
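
For example, a capabilities query can be issued as a plain HTTP request
with extra headers (a sketch; the query string follows the usual HTTP wire
protocol convention, and the header values are illustrative)::

    httprequest GET ?cmd=capabilities
        accept: application/mercurial-0.1
        user-agent: test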

close
-----

Close the connection to the server.

flush
-----

Flush data written to the server.

readavailable
-------------

Close the write end of the connection and read all available data from
the server.

If the connection to the server encompasses multiple pipes, we poll both
pipes and read available data.

readline
--------

Read a line of output from the server. If there are multiple output
pipes, reads only the main pipe.

ereadline
---------

Like ``readline``, but read from the stderr pipe, if available.

read <X>
--------

``read()`` N bytes from the server's main output pipe.

eread <X>
---------

``read()`` N bytes from the server's stderr pipe, if available.
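
Continuing the ``raw`` handshake sketch above, the server's reply can then
be consumed explicitly (how many lines and bytes to read depends entirely
on what the server sends back)::

    readline
    readline
    eread 100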

Specifying Unified Frame-Based Protocol Frames
----------------------------------------------

It is possible to emit *Unified Frame-Based Protocol* frames by using
special syntax.

A frame is composed of a type, flags, and a payload. These can be parsed
from a string of the form:

    <request-id> <stream-id> <stream-flags> <type> <flags> <payload>

``request-id`` and ``stream-id`` are integers defining the request and
stream identifiers.

``type`` can be an integer value for the frame type or the string name
of the type. The strings are defined in ``wireprotoframing.py``. e.g.
``command-name``.

``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
components. Each component (and there can be just one) can be an integer
or a flag name for stream flags or frame flags, respectively. Values are
resolved to integers and then bitwise OR'd together.

``payload`` represents the raw frame payload. If it begins with
``cbor:``, the following string is evaluated as Python code and the
resulting object is fed into a CBOR encoder. Otherwise it is interpreted
as a Python byte string literal.
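
For example, a command request frame carrying a CBOR payload could be
written as follows, e.g. as the value of a ``frame`` argument to an
``httprequest`` block (a sketch; the request and stream identifiers are
arbitrary, and ``heads`` is a standard command name)::

    frame 1 1 stream-begin command-request new cbor:{b'name': b'heads'}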
4372 """
4372 """
4373 opts = pycompat.byteskwargs(opts)
4373 opts = pycompat.byteskwargs(opts)
4374
4374
4375 if opts[b'localssh'] and not repo:
4375 if opts[b'localssh'] and not repo:
4376 raise error.Abort(_(b'--localssh requires a repository'))
4376 raise error.Abort(_(b'--localssh requires a repository'))
4377
4377
4378 if opts[b'peer'] and opts[b'peer'] not in (
4378 if opts[b'peer'] and opts[b'peer'] not in (
4379 b'raw',
4379 b'raw',
4380 b'http2',
4380 b'http2',
4381 b'ssh1',
4381 b'ssh1',
4382 b'ssh2',
4382 b'ssh2',
4383 ):
4383 ):
4384 raise error.Abort(
4384 raise error.Abort(
4385 _(b'invalid value for --peer'),
4385 _(b'invalid value for --peer'),
4386 hint=_(b'valid values are "raw", "http2", "ssh1", and "ssh2"'),
4387 )
4387 )
4388
4388
4389 if path and opts[b'localssh']:
4389 if path and opts[b'localssh']:
4390 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
4390 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
4391
4391
4392 if ui.interactive():
4392 if ui.interactive():
4393 ui.write(_(b'(waiting for commands on stdin)\n'))
4393 ui.write(_(b'(waiting for commands on stdin)\n'))
4394
4394
4395 blocks = list(_parsewirelangblocks(ui.fin))
4395 blocks = list(_parsewirelangblocks(ui.fin))
4396
4396
4397 proc = None
4397 proc = None
4398 stdin = None
4398 stdin = None
4399 stdout = None
4399 stdout = None
4400 stderr = None
4400 stderr = None
4401 opener = None
4401 opener = None
4402
4402
4403 if opts[b'localssh']:
4403 if opts[b'localssh']:
4404 # We start the SSH server in its own process so there is process
4404 # We start the SSH server in its own process so there is process
4405 # separation. This prevents a whole class of potential bugs around
4405 # separation. This prevents a whole class of potential bugs around
4406 # shared state from interfering with server operation.
4406 # shared state from interfering with server operation.
4407 args = procutil.hgcmd() + [
4407 args = procutil.hgcmd() + [
4408 b'-R',
4408 b'-R',
4409 repo.root,
4409 repo.root,
4410 b'debugserve',
4410 b'debugserve',
4411 b'--sshstdio',
4411 b'--sshstdio',
4412 ]
4412 ]
4413 proc = subprocess.Popen(
4413 proc = subprocess.Popen(
4414 pycompat.rapply(procutil.tonativestr, args),
4414 pycompat.rapply(procutil.tonativestr, args),
4415 stdin=subprocess.PIPE,
4415 stdin=subprocess.PIPE,
4416 stdout=subprocess.PIPE,
4416 stdout=subprocess.PIPE,
4417 stderr=subprocess.PIPE,
4417 stderr=subprocess.PIPE,
4418 bufsize=0,
4418 bufsize=0,
4419 )
4419 )
4420
4420
4421 stdin = proc.stdin
4421 stdin = proc.stdin
4422 stdout = proc.stdout
4422 stdout = proc.stdout
4423 stderr = proc.stderr
4423 stderr = proc.stderr
4424
4424
4425 # We turn the pipes into observers so we can log I/O.
4425 # We turn the pipes into observers so we can log I/O.
4426 if ui.verbose or opts[b'peer'] == b'raw':
4426 if ui.verbose or opts[b'peer'] == b'raw':
4427 stdin = util.makeloggingfileobject(
4427 stdin = util.makeloggingfileobject(
4428 ui, proc.stdin, b'i', logdata=True
4428 ui, proc.stdin, b'i', logdata=True
4429 )
4429 )
4430 stdout = util.makeloggingfileobject(
4430 stdout = util.makeloggingfileobject(
4431 ui, proc.stdout, b'o', logdata=True
4431 ui, proc.stdout, b'o', logdata=True
4432 )
4432 )
4433 stderr = util.makeloggingfileobject(
4433 stderr = util.makeloggingfileobject(
4434 ui, proc.stderr, b'e', logdata=True
4434 ui, proc.stderr, b'e', logdata=True
4435 )
4435 )
4436
4436
4437 # --localssh also implies the peer connection settings.
4437 # --localssh also implies the peer connection settings.
4438
4438
4439 url = b'ssh://localserver'
4439 url = b'ssh://localserver'
4440 autoreadstderr = not opts[b'noreadstderr']
4440 autoreadstderr = not opts[b'noreadstderr']
4441
4441
4442 if opts[b'peer'] == b'ssh1':
4442 if opts[b'peer'] == b'ssh1':
4443 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
4443 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
4444 peer = sshpeer.sshv1peer(
4444 peer = sshpeer.sshv1peer(
4445 ui,
4445 ui,
4446 url,
4446 url,
4447 proc,
4447 proc,
4448 stdin,
4448 stdin,
4449 stdout,
4449 stdout,
4450 stderr,
4450 stderr,
4451 None,
4451 None,
4452 autoreadstderr=autoreadstderr,
4452 autoreadstderr=autoreadstderr,
4453 )
4453 )
4454 elif opts[b'peer'] == b'ssh2':
4454 elif opts[b'peer'] == b'ssh2':
4455 ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
4455 ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
4456 peer = sshpeer.sshv2peer(
4456 peer = sshpeer.sshv2peer(
4457 ui,
4457 ui,
4458 url,
4458 url,
4459 proc,
4459 proc,
4460 stdin,
4460 stdin,
4461 stdout,
4461 stdout,
4462 stderr,
4462 stderr,
4463 None,
4463 None,
4464 autoreadstderr=autoreadstderr,
4464 autoreadstderr=autoreadstderr,
4465 )
4465 )
4466 elif opts[b'peer'] == b'raw':
4466 elif opts[b'peer'] == b'raw':
4467 ui.write(_(b'using raw connection to peer\n'))
4467 ui.write(_(b'using raw connection to peer\n'))
4468 peer = None
4468 peer = None
4469 else:
4469 else:
4470 ui.write(_(b'creating ssh peer from handshake results\n'))
4470 ui.write(_(b'creating ssh peer from handshake results\n'))
4471 peer = sshpeer.makepeer(
4471 peer = sshpeer.makepeer(
4472 ui,
4472 ui,
4473 url,
4473 url,
4474 proc,
4474 proc,
4475 stdin,
4475 stdin,
4476 stdout,
4476 stdout,
4477 stderr,
4477 stderr,
4478 autoreadstderr=autoreadstderr,
4478 autoreadstderr=autoreadstderr,
4479 )
4479 )
4480
4480
4481 elif path:
4481 elif path:
4482 # We bypass hg.peer() so we can proxy the sockets.
4482 # We bypass hg.peer() so we can proxy the sockets.
4483 # TODO consider not doing this because we skip
4483 # TODO consider not doing this because we skip
4484 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4484 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4485 u = util.url(path)
4485 u = util.url(path)
4486 if u.scheme != b'http':
4486 if u.scheme != b'http':
4487 raise error.Abort(_(b'only http:// paths are currently supported'))
4487 raise error.Abort(_(b'only http:// paths are currently supported'))
4488
4488
4489 url, authinfo = u.authinfo()
4489 url, authinfo = u.authinfo()
4490 openerargs = {
4490 openerargs = {
4491 'useragent': b'Mercurial debugwireproto',
4491 'useragent': b'Mercurial debugwireproto',
4492 }
4492 }
4493
4493
4494 # Turn pipes/sockets into observers so we can log I/O.
4494 # Turn pipes/sockets into observers so we can log I/O.
4495 if ui.verbose:
4495 if ui.verbose:
4496 openerargs.update(
4496 openerargs.update(
4497 {
4497 {
4498 'loggingfh': ui,
4498 'loggingfh': ui,
4499 'loggingname': b's',
4499 'loggingname': b's',
4500 'loggingopts': {
4500 'loggingopts': {
4501 'logdata': True,
4501 'logdata': True,
4502 'logdataapis': False,
4502 'logdataapis': False,
4503 },
4503 },
4504 }
4504 }
4505 )
4505 )
4506
4506
4507 if ui.debugflag:
4507 if ui.debugflag:
4508 openerargs['loggingopts']['logdataapis'] = True
4508 openerargs['loggingopts']['logdataapis'] = True
4509
4509
4510 # Don't send default headers when in raw mode. This allows us to
4510 # Don't send default headers when in raw mode. This allows us to
4511 # bypass most of the behavior of our URL handling code so we can
4511 # bypass most of the behavior of our URL handling code so we can
4512 # have near complete control over what's sent on the wire.
4512 # have near complete control over what's sent on the wire.
4513 if opts[b'peer'] == b'raw':
4513 if opts[b'peer'] == b'raw':
4514 openerargs['sendaccept'] = False
4514 openerargs['sendaccept'] = False
4515
4515
4516 opener = urlmod.opener(ui, authinfo, **openerargs)
4516 opener = urlmod.opener(ui, authinfo, **openerargs)
4517
4517
4518 if opts[b'peer'] == b'http2':
4518 if opts[b'peer'] == b'http2':
4519 ui.write(_(b'creating http peer for wire protocol version 2\n'))
4519 ui.write(_(b'creating http peer for wire protocol version 2\n'))
4520 # We go through makepeer() because we need an API descriptor for
4520 # We go through makepeer() because we need an API descriptor for
4521 # the peer instance to be useful.
4521 # the peer instance to be useful.
4522 with ui.configoverride(
4522 with ui.configoverride(
4523 {(b'experimental', b'httppeer.advertise-v2'): True}
4523 {(b'experimental', b'httppeer.advertise-v2'): True}
4524 ):
4524 ):
4525 if opts[b'nologhandshake']:
4525 if opts[b'nologhandshake']:
4526 ui.pushbuffer()
4526 ui.pushbuffer()
4527
4527
4528 peer = httppeer.makepeer(ui, path, opener=opener)
4528 peer = httppeer.makepeer(ui, path, opener=opener)
4529
4529
4530 if opts[b'nologhandshake']:
4530 if opts[b'nologhandshake']:
4531 ui.popbuffer()
4531 ui.popbuffer()
4532
4532
4533 if not isinstance(peer, httppeer.httpv2peer):
4533 if not isinstance(peer, httppeer.httpv2peer):
4534 raise error.Abort(
4534 raise error.Abort(
4535 _(
4535 _(
4536 b'could not instantiate HTTP peer for '
4536 b'could not instantiate HTTP peer for '
4537 b'wire protocol version 2'
4537 b'wire protocol version 2'
4538 ),
4538 ),
4539 hint=_(
4539 hint=_(
4540 b'the server may not have the feature '
4540 b'the server may not have the feature '
4541 b'enabled or is not allowing this '
4541 b'enabled or is not allowing this '
4542 b'client version'
4542 b'client version'
4543 ),
4543 ),
4544 )
4544 )
4545
4545
4546 elif opts[b'peer'] == b'raw':
4546 elif opts[b'peer'] == b'raw':
4547 ui.write(_(b'using raw connection to peer\n'))
4547 ui.write(_(b'using raw connection to peer\n'))
4548 peer = None
4548 peer = None
4549 elif opts[b'peer']:
4549 elif opts[b'peer']:
4550 raise error.Abort(
4550 raise error.Abort(
4551 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
4551 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
4552 )
4552 )
4553 else:
4553 else:
4554 peer = httppeer.makepeer(ui, path, opener=opener)
4554 peer = httppeer.makepeer(ui, path, opener=opener)
4555
4555
4556 # We /could/ populate stdin/stdout with sock.makefile()...
4556 # We /could/ populate stdin/stdout with sock.makefile()...
4557 else:
4557 else:
4558 raise error.Abort(_(b'unsupported connection configuration'))
4558 raise error.Abort(_(b'unsupported connection configuration'))
4559
4559
4560 batchedcommands = None
4560 batchedcommands = None
4561
4561
4562 # Now perform actions based on the parsed wire language instructions.
4562 # Now perform actions based on the parsed wire language instructions.
4563 for action, lines in blocks:
4563 for action, lines in blocks:
4564 if action in (b'raw', b'raw+'):
4564 if action in (b'raw', b'raw+'):
4565 if not stdin:
4565 if not stdin:
4566 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4566 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4567
4567
4568 # Concatenate the data together.
4568 # Concatenate the data together.
4569 data = b''.join(l.lstrip() for l in lines)
4569 data = b''.join(l.lstrip() for l in lines)
4570 data = stringutil.unescapestr(data)
4570 data = stringutil.unescapestr(data)
4571 stdin.write(data)
4571 stdin.write(data)
4572
4572
4573 if action == b'raw+':
4573 if action == b'raw+':
4574 stdin.flush()
4574 stdin.flush()
4575 elif action == b'flush':
4575 elif action == b'flush':
4576 if not stdin:
4576 if not stdin:
4577 raise error.Abort(_(b'cannot call flush on this peer'))
4577 raise error.Abort(_(b'cannot call flush on this peer'))
4578 stdin.flush()
4578 stdin.flush()
4579 elif action.startswith(b'command'):
4579 elif action.startswith(b'command'):
4580 if not peer:
4580 if not peer:
4581 raise error.Abort(
4581 raise error.Abort(
4582 _(
4582 _(
4583 b'cannot send commands unless peer instance '
4583 b'cannot send commands unless peer instance '
4584 b'is available'
4584 b'is available'
4585 )
4585 )
4586 )
4586 )
4587
4587
4588 command = action.split(b' ', 1)[1]
4588 command = action.split(b' ', 1)[1]
4589
4589
4590 args = {}
4590 args = {}
4591 for line in lines:
4591 for line in lines:
4592 # We need to allow empty values.
4592 # We need to allow empty values.
4593 fields = line.lstrip().split(b' ', 1)
4593 fields = line.lstrip().split(b' ', 1)
4594 if len(fields) == 1:
4594 if len(fields) == 1:
4595 key = fields[0]
4595 key = fields[0]
4596 value = b''
4596 value = b''
4597 else:
4597 else:
4598 key, value = fields
4598 key, value = fields
4599
4599
4600 if value.startswith(b'eval:'):
4600 if value.startswith(b'eval:'):
4601 value = stringutil.evalpythonliteral(value[5:])
4601 value = stringutil.evalpythonliteral(value[5:])
4602 else:
4602 else:
4603 value = stringutil.unescapestr(value)
4603 value = stringutil.unescapestr(value)
4604
4604
4605 args[key] = value
4605 args[key] = value
4606
4606
4607 if batchedcommands is not None:
4607 if batchedcommands is not None:
4608 batchedcommands.append((command, args))
4608 batchedcommands.append((command, args))
4609 continue
4609 continue
4610
4610
4611 ui.status(_(b'sending %s command\n') % command)
4611 ui.status(_(b'sending %s command\n') % command)
4612
4612
4613 if b'PUSHFILE' in args:
4613 if b'PUSHFILE' in args:
4614 with open(args[b'PUSHFILE'], 'rb') as fh:
4614 with open(args[b'PUSHFILE'], 'rb') as fh:
4615 del args[b'PUSHFILE']
4615 del args[b'PUSHFILE']
4616 res, output = peer._callpush(
4616 res, output = peer._callpush(
4617 command, fh, **pycompat.strkwargs(args)
4617 command, fh, **pycompat.strkwargs(args)
4618 )
4618 )
4619 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4619 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4620 ui.status(
4620 ui.status(
4621 _(b'remote output: %s\n') % stringutil.escapestr(output)
4621 _(b'remote output: %s\n') % stringutil.escapestr(output)
4622 )
4622 )
4623 else:
4623 else:
4624 with peer.commandexecutor() as e:
4624 with peer.commandexecutor() as e:
4625 res = e.callcommand(command, args).result()
4625 res = e.callcommand(command, args).result()
4626
4626
4627 if isinstance(res, wireprotov2peer.commandresponse):
4627 if isinstance(res, wireprotov2peer.commandresponse):
4628 val = res.objects()
4628 val = res.objects()
4629 ui.status(
4629 ui.status(
4630 _(b'response: %s\n')
4630 _(b'response: %s\n')
4631 % stringutil.pprint(val, bprefix=True, indent=2)
4631 % stringutil.pprint(val, bprefix=True, indent=2)
4632 )
4632 )
4633 else:
4633 else:
4634 ui.status(
4634 ui.status(
4635 _(b'response: %s\n')
4635 _(b'response: %s\n')
4636 % stringutil.pprint(res, bprefix=True, indent=2)
4636 % stringutil.pprint(res, bprefix=True, indent=2)
4637 )
4637 )
4638
4638
4639 elif action == b'batchbegin':
4639 elif action == b'batchbegin':
4640 if batchedcommands is not None:
4640 if batchedcommands is not None:
4641 raise error.Abort(_(b'nested batchbegin not allowed'))
4641 raise error.Abort(_(b'nested batchbegin not allowed'))
4642
4642
4643 batchedcommands = []
4643 batchedcommands = []
4644 elif action == b'batchsubmit':
4644 elif action == b'batchsubmit':
4645 # There is a batching API we could go through. But it would be
4645 # There is a batching API we could go through. But it would be
4646 # difficult to normalize requests into function calls. It is easier
4646 # difficult to normalize requests into function calls. It is easier
4647 # to bypass this layer and normalize to commands + args.
4647 # to bypass this layer and normalize to commands + args.
4648 ui.status(
4648 ui.status(
4649 _(b'sending batch with %d sub-commands\n')
4649 _(b'sending batch with %d sub-commands\n')
4650 % len(batchedcommands)
4650 % len(batchedcommands)
4651 )
4651 )
4652 assert peer is not None
4652 assert peer is not None
4653 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4653 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4654 ui.status(
4654 ui.status(
4655 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4655 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4656 )
4656 )
4657
4657
4658 batchedcommands = None
4658 batchedcommands = None
4659
4659
4660 elif action.startswith(b'httprequest '):
4660 elif action.startswith(b'httprequest '):
4661 if not opener:
4661 if not opener:
4662 raise error.Abort(
4662 raise error.Abort(
4663 _(b'cannot use httprequest without an HTTP peer')
4663 _(b'cannot use httprequest without an HTTP peer')
4664 )
4664 )
4665
4665
4666 request = action.split(b' ', 2)
4666 request = action.split(b' ', 2)
4667 if len(request) != 3:
4667 if len(request) != 3:
4668 raise error.Abort(
4668 raise error.Abort(
4669 _(
4669 _(
4670 b'invalid httprequest: expected format is '
4670 b'invalid httprequest: expected format is '
4671 b'"httprequest <method> <path>'
4671 b'"httprequest <method> <path>'
4672 )
4672 )
4673 )
4673 )
4674
4674
4675 method, httppath = request[1:]
4675 method, httppath = request[1:]
4676 headers = {}
4676 headers = {}
4677 body = None
4677 body = None
4678 frames = []
4678 frames = []
4679 for line in lines:
4679 for line in lines:
4680 line = line.lstrip()
4680 line = line.lstrip()
4681 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4681 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4682 if m:
4682 if m:
4683 # Headers need to use native strings.
4683 # Headers need to use native strings.
4684 key = pycompat.strurl(m.group(1))
4684 key = pycompat.strurl(m.group(1))
4685 value = pycompat.strurl(m.group(2))
4685 value = pycompat.strurl(m.group(2))
4686 headers[key] = value
4686 headers[key] = value
4687 continue
4687 continue
4688
4688
4689 if line.startswith(b'BODYFILE '):
4689 if line.startswith(b'BODYFILE '):
4690 with open(line.split(b' ', 1)[1], 'rb') as fh:
4691 body = fh.read()
4691 body = fh.read()
4692 elif line.startswith(b'frame '):
4692 elif line.startswith(b'frame '):
4693 frame = wireprotoframing.makeframefromhumanstring(
4693 frame = wireprotoframing.makeframefromhumanstring(
4694 line[len(b'frame ') :]
4694 line[len(b'frame ') :]
4695 )
4695 )
4696
4696
4697 frames.append(frame)
4697 frames.append(frame)
4698 else:
4698 else:
4699 raise error.Abort(
4699 raise error.Abort(
4700 _(b'unknown argument to httprequest: %s') % line
4700 _(b'unknown argument to httprequest: %s') % line
4701 )
4701 )
4702
4702
4703 url = path + httppath
4703 url = path + httppath
4704
4704
4705 if frames:
4705 if frames:
4706 body = b''.join(bytes(f) for f in frames)
4706 body = b''.join(bytes(f) for f in frames)
4707
4707
4708 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4708 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4709
4709
4710 # urllib.Request insists on using has_data() as a proxy for
4710 # urllib.Request insists on using has_data() as a proxy for
4711 # determining the request method. Override that to use our
4711 # determining the request method. Override that to use our
4712 # explicitly requested method.
4712 # explicitly requested method.
4713 req.get_method = lambda: pycompat.sysstr(method)
4713 req.get_method = lambda: pycompat.sysstr(method)
4714
4714
4715 try:
4715 try:
4716 res = opener.open(req)
4716 res = opener.open(req)
4717 body = res.read()
4717 body = res.read()
4718 except util.urlerr.urlerror as e:
4718 except util.urlerr.urlerror as e:
4719 # read() method must be called, but only exists in Python 2
4719 # read() method must be called, but only exists in Python 2
4720 getattr(e, 'read', lambda: None)()
4720 getattr(e, 'read', lambda: None)()
4721 continue
4721 continue
4722
4722
4723 ct = res.headers.get('Content-Type')
4723 ct = res.headers.get('Content-Type')
4724 if ct == 'application/mercurial-cbor':
4724 if ct == 'application/mercurial-cbor':
4725 ui.write(
4725 ui.write(
4726 _(b'cbor> %s\n')
4726 _(b'cbor> %s\n')
4727 % stringutil.pprint(
4727 % stringutil.pprint(
4728 cborutil.decodeall(body), bprefix=True, indent=2
4728 cborutil.decodeall(body), bprefix=True, indent=2
4729 )
4729 )
4730 )
4730 )
4731
4731
4732 elif action == b'close':
4732 elif action == b'close':
4733 assert peer is not None
4733 assert peer is not None
4734 peer.close()
4734 peer.close()
4735 elif action == b'readavailable':
4735 elif action == b'readavailable':
4736 if not stdout or not stderr:
4736 if not stdout or not stderr:
4737 raise error.Abort(
4737 raise error.Abort(
4738 _(b'readavailable not available on this peer')
4738 _(b'readavailable not available on this peer')
4739 )
4739 )
4740
4740
4741 stdin.close()
4741 stdin.close()
4742 stdout.read()
4742 stdout.read()
4743 stderr.read()
4743 stderr.read()
4744
4744
4745 elif action == b'readline':
4745 elif action == b'readline':
4746 if not stdout:
4746 if not stdout:
4747 raise error.Abort(_(b'readline not available on this peer'))
4747 raise error.Abort(_(b'readline not available on this peer'))
4748 stdout.readline()
4748 stdout.readline()
4749 elif action == b'ereadline':
4749 elif action == b'ereadline':
4750 if not stderr:
4750 if not stderr:
4751 raise error.Abort(_(b'ereadline not available on this peer'))
4751 raise error.Abort(_(b'ereadline not available on this peer'))
4752 stderr.readline()
4752 stderr.readline()
4753 elif action.startswith(b'read '):
4753 elif action.startswith(b'read '):
4754 count = int(action.split(b' ', 1)[1])
4754 count = int(action.split(b' ', 1)[1])
4755 if not stdout:
4755 if not stdout:
4756 raise error.Abort(_(b'read not available on this peer'))
4756 raise error.Abort(_(b'read not available on this peer'))
4757 stdout.read(count)
4757 stdout.read(count)
4758 elif action.startswith(b'eread '):
4758 elif action.startswith(b'eread '):
4759 count = int(action.split(b' ', 1)[1])
4759 count = int(action.split(b' ', 1)[1])
4760 if not stderr:
4760 if not stderr:
4761 raise error.Abort(_(b'eread not available on this peer'))
4761 raise error.Abort(_(b'eread not available on this peer'))
4762 stderr.read(count)
4762 stderr.read(count)
4763 else:
4763 else:
4764 raise error.Abort(_(b'unknown action: %s') % action)
4764 raise error.Abort(_(b'unknown action: %s') % action)
4765
4765
4766 if batchedcommands is not None:
4766 if batchedcommands is not None:
4767 raise error.Abort(_(b'unclosed "batchbegin" request'))
4767 raise error.Abort(_(b'unclosed "batchbegin" request'))
4768
4768
4769 if peer:
4769 if peer:
4770 peer.close()
4770 peer.close()
4771
4771
4772 if proc:
4772 if proc:
4773 proc.kill()
4773 proc.kill()
@@ -1,2756 +1,2775 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import weakref
11 import weakref
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 hex,
15 hex,
16 nullid,
16 nullid,
17 nullrev,
17 nullrev,
18 )
18 )
19 from . import (
19 from . import (
20 bookmarks as bookmod,
20 bookmarks as bookmod,
21 bundle2,
21 bundle2,
22 bundlecaches,
22 bundlecaches,
23 changegroup,
23 changegroup,
24 discovery,
24 discovery,
25 error,
25 error,
26 exchangev2,
26 exchangev2,
27 lock as lockmod,
27 lock as lockmod,
28 logexchange,
28 logexchange,
29 narrowspec,
29 narrowspec,
30 obsolete,
30 obsolete,
31 obsutil,
31 obsutil,
32 phases,
32 phases,
33 pushkey,
33 pushkey,
34 pycompat,
34 pycompat,
35 requirements,
35 requirements,
36 scmutil,
36 scmutil,
37 streamclone,
37 streamclone,
38 url as urlmod,
38 url as urlmod,
39 util,
39 util,
40 wireprototypes,
40 wireprototypes,
41 )
41 )
42 from .utils import (
42 from .utils import (
43 hashutil,
43 hashutil,
44 stringutil,
44 stringutil,
45 )
45 )
46
46
47 urlerr = util.urlerr
47 urlerr = util.urlerr
48 urlreq = util.urlreq
48 urlreq = util.urlreq
49
49
50 _NARROWACL_SECTION = b'narrowacl'
50 _NARROWACL_SECTION = b'narrowacl'
51
51
52
52
53 def readbundle(ui, fh, fname, vfs=None):
53 def readbundle(ui, fh, fname, vfs=None):
54 header = changegroup.readexactly(fh, 4)
54 header = changegroup.readexactly(fh, 4)
55
55
56 alg = None
56 alg = None
57 if not fname:
57 if not fname:
58 fname = b"stream"
58 fname = b"stream"
59 if not header.startswith(b'HG') and header.startswith(b'\0'):
59 if not header.startswith(b'HG') and header.startswith(b'\0'):
60 fh = changegroup.headerlessfixup(fh, header)
60 fh = changegroup.headerlessfixup(fh, header)
61 header = b"HG10"
61 header = b"HG10"
62 alg = b'UN'
62 alg = b'UN'
63 elif vfs:
63 elif vfs:
64 fname = vfs.join(fname)
64 fname = vfs.join(fname)
65
65
66 magic, version = header[0:2], header[2:4]
66 magic, version = header[0:2], header[2:4]
67
67
68 if magic != b'HG':
68 if magic != b'HG':
69 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
69 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
70 if version == b'10':
70 if version == b'10':
71 if alg is None:
71 if alg is None:
72 alg = changegroup.readexactly(fh, 2)
72 alg = changegroup.readexactly(fh, 2)
73 return changegroup.cg1unpacker(fh, alg)
73 return changegroup.cg1unpacker(fh, alg)
74 elif version.startswith(b'2'):
74 elif version.startswith(b'2'):
75 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
75 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
76 elif version == b'S1':
76 elif version == b'S1':
77 return streamclone.streamcloneapplier(fh)
77 return streamclone.streamcloneapplier(fh)
78 else:
78 else:
79 raise error.Abort(
79 raise error.Abort(
80 _(b'%s: unknown bundle version %s') % (fname, version)
80 _(b'%s: unknown bundle version %s') % (fname, version)
81 )
81 )
82
82
83
83
84 def getbundlespec(ui, fh):
84 def getbundlespec(ui, fh):
85 """Infer the bundlespec from a bundle file handle.
85 """Infer the bundlespec from a bundle file handle.
86
86
87 The input file handle is seeked and the original seek position is not
87 The input file handle is seeked and the original seek position is not
88 restored.
88 restored.
89 """
89 """
90
90
91 def speccompression(alg):
91 def speccompression(alg):
92 try:
92 try:
93 return util.compengines.forbundletype(alg).bundletype()[0]
93 return util.compengines.forbundletype(alg).bundletype()[0]
94 except KeyError:
94 except KeyError:
95 return None
95 return None
96
96
97 b = readbundle(ui, fh, None)
97 b = readbundle(ui, fh, None)
98 if isinstance(b, changegroup.cg1unpacker):
98 if isinstance(b, changegroup.cg1unpacker):
99 alg = b._type
99 alg = b._type
100 if alg == b'_truncatedBZ':
100 if alg == b'_truncatedBZ':
101 alg = b'BZ'
101 alg = b'BZ'
102 comp = speccompression(alg)
102 comp = speccompression(alg)
103 if not comp:
103 if not comp:
104 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
104 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
105 return b'%s-v1' % comp
105 return b'%s-v1' % comp
106 elif isinstance(b, bundle2.unbundle20):
106 elif isinstance(b, bundle2.unbundle20):
107 if b'Compression' in b.params:
107 if b'Compression' in b.params:
108 comp = speccompression(b.params[b'Compression'])
108 comp = speccompression(b.params[b'Compression'])
109 if not comp:
109 if not comp:
110 raise error.Abort(
110 raise error.Abort(
111 _(b'unknown compression algorithm: %s') % comp
111 _(b'unknown compression algorithm: %s') % comp
112 )
112 )
113 else:
113 else:
114 comp = b'none'
114 comp = b'none'
115
115
116 version = None
116 version = None
117 for part in b.iterparts():
117 for part in b.iterparts():
118 if part.type == b'changegroup':
118 if part.type == b'changegroup':
119 version = part.params[b'version']
119 version = part.params[b'version']
120 if version in (b'01', b'02'):
120 if version in (b'01', b'02'):
121 version = b'v2'
121 version = b'v2'
122 else:
122 else:
123 raise error.Abort(
123 raise error.Abort(
124 _(
124 _(
125 b'changegroup version %s does not have '
125 b'changegroup version %s does not have '
126 b'a known bundlespec'
126 b'a known bundlespec'
127 )
127 )
128 % version,
128 % version,
129 hint=_(b'try upgrading your Mercurial client'),
129 hint=_(b'try upgrading your Mercurial client'),
130 )
130 )
131 elif part.type == b'stream2' and version is None:
131 elif part.type == b'stream2' and version is None:
132 # A stream2 part requires to be part of a v2 bundle
132 # A stream2 part requires to be part of a v2 bundle
133 requirements = urlreq.unquote(part.params[b'requirements'])
133 requirements = urlreq.unquote(part.params[b'requirements'])
134 splitted = requirements.split()
134 splitted = requirements.split()
135 params = bundle2._formatrequirementsparams(splitted)
135 params = bundle2._formatrequirementsparams(splitted)
136 return b'none-v2;stream=v2;%s' % params
136 return b'none-v2;stream=v2;%s' % params
137
137
138 if not version:
138 if not version:
139 raise error.Abort(
139 raise error.Abort(
140 _(b'could not identify changegroup version in bundle')
140 _(b'could not identify changegroup version in bundle')
141 )
141 )
142
142
143 return b'%s-%s' % (comp, version)
143 return b'%s-%s' % (comp, version)
144 elif isinstance(b, streamclone.streamcloneapplier):
144 elif isinstance(b, streamclone.streamcloneapplier):
145 requirements = streamclone.readbundle1header(fh)[2]
145 requirements = streamclone.readbundle1header(fh)[2]
146 formatted = bundle2._formatrequirementsparams(requirements)
146 formatted = bundle2._formatrequirementsparams(requirements)
147 return b'none-packed1;%s' % formatted
147 return b'none-packed1;%s' % formatted
148 else:
148 else:
149 raise error.Abort(_(b'unknown bundle type: %s') % b)
149 raise error.Abort(_(b'unknown bundle type: %s') % b)
150
150
151
151
152 def _computeoutgoing(repo, heads, common):
152 def _computeoutgoing(repo, heads, common):
153 """Computes which revs are outgoing given a set of common
153 """Computes which revs are outgoing given a set of common
154 and a set of heads.
154 and a set of heads.
155
155
156 This is a separate function so extensions can have access to
156 This is a separate function so extensions can have access to
157 the logic.
157 the logic.
158
158
159 Returns a discovery.outgoing object.
159 Returns a discovery.outgoing object.
160 """
160 """
161 cl = repo.changelog
161 cl = repo.changelog
162 if common:
162 if common:
163 hasnode = cl.hasnode
163 hasnode = cl.hasnode
164 common = [n for n in common if hasnode(n)]
164 common = [n for n in common if hasnode(n)]
165 else:
165 else:
166 common = [nullid]
166 common = [nullid]
167 if not heads:
167 if not heads:
168 heads = cl.heads()
168 heads = cl.heads()
169 return discovery.outgoing(repo, common, heads)
169 return discovery.outgoing(repo, common, heads)
170
170
171
171
172 def _checkpublish(pushop):
172 def _checkpublish(pushop):
173 repo = pushop.repo
173 repo = pushop.repo
174 ui = repo.ui
174 ui = repo.ui
175 behavior = ui.config(b'experimental', b'auto-publish')
175 behavior = ui.config(b'experimental', b'auto-publish')
176 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
176 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
177 return
177 return
178 remotephases = listkeys(pushop.remote, b'phases')
178 remotephases = listkeys(pushop.remote, b'phases')
179 if not remotephases.get(b'publishing', False):
179 if not remotephases.get(b'publishing', False):
180 return
180 return
181
181
182 if pushop.revs is None:
182 if pushop.revs is None:
183 published = repo.filtered(b'served').revs(b'not public()')
183 published = repo.filtered(b'served').revs(b'not public()')
184 else:
184 else:
185 published = repo.revs(b'::%ln - public()', pushop.revs)
185 published = repo.revs(b'::%ln - public()', pushop.revs)
186 if published:
186 if published:
187 if behavior == b'warn':
187 if behavior == b'warn':
188 ui.warn(
188 ui.warn(
189 _(b'%i changesets about to be published\n') % len(published)
189 _(b'%i changesets about to be published\n') % len(published)
190 )
190 )
191 elif behavior == b'confirm':
191 elif behavior == b'confirm':
192 if ui.promptchoice(
192 if ui.promptchoice(
193 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
193 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
194 % len(published)
194 % len(published)
195 ):
195 ):
196 raise error.CanceledError(_(b'user quit'))
196 raise error.CanceledError(_(b'user quit'))
197 elif behavior == b'abort':
197 elif behavior == b'abort':
198 msg = _(b'push would publish %i changesets') % len(published)
198 msg = _(b'push would publish %i changesets') % len(published)
199 hint = _(
199 hint = _(
200 b"use --publish or adjust 'experimental.auto-publish'"
200 b"use --publish or adjust 'experimental.auto-publish'"
201 b" config"
201 b" config"
202 )
202 )
203 raise error.Abort(msg, hint=hint)
203 raise error.Abort(msg, hint=hint)
204
204
205
205
206 def _forcebundle1(op):
206 def _forcebundle1(op):
207 """return true if a pull/push must use bundle1
207 """return true if a pull/push must use bundle1
208
208
209 This function is used to allow testing of the older bundle version"""
209 This function is used to allow testing of the older bundle version"""
210 ui = op.repo.ui
210 ui = op.repo.ui
211 # The goal of this config is to allow developers to choose the bundle
212 # version used during exchange. This is especially handy during tests.
213 # Value is a list of bundle version to be picked from, highest version
213 # Value is a list of bundle version to be picked from, highest version
214 # should be used.
214 # should be used.
215 #
215 #
216 # developer config: devel.legacy.exchange
216 # developer config: devel.legacy.exchange
217 exchange = ui.configlist(b'devel', b'legacy.exchange')
217 exchange = ui.configlist(b'devel', b'legacy.exchange')
218 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
218 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
219 return forcebundle1 or not op.remote.capable(b'bundle2')
219 return forcebundle1 or not op.remote.capable(b'bundle2')
220
220
221
221
222 class pushoperation(object):
222 class pushoperation(object):
223 """A object that represent a single push operation
223 """A object that represent a single push operation
224
224
225 Its purpose is to carry push-related state and very common operations.
226
226
227 A new pushoperation should be created at the beginning of each push and
227 A new pushoperation should be created at the beginning of each push and
228 discarded afterward.
228 discarded afterward.
229 """
229 """
230
230
231 def __init__(
231 def __init__(
232 self,
232 self,
233 repo,
233 repo,
234 remote,
234 remote,
235 force=False,
235 force=False,
236 revs=None,
236 revs=None,
237 newbranch=False,
237 newbranch=False,
238 bookmarks=(),
238 bookmarks=(),
239 publish=False,
239 publish=False,
240 pushvars=None,
240 pushvars=None,
241 ):
241 ):
242 # repo we push from
242 # repo we push from
243 self.repo = repo
243 self.repo = repo
244 self.ui = repo.ui
244 self.ui = repo.ui
245 # repo we push to
245 # repo we push to
246 self.remote = remote
246 self.remote = remote
247 # force option provided
247 # force option provided
248 self.force = force
248 self.force = force
249 # revs to be pushed (None is "all")
249 # revs to be pushed (None is "all")
250 self.revs = revs
250 self.revs = revs
251 # bookmark explicitly pushed
251 # bookmark explicitly pushed
252 self.bookmarks = bookmarks
252 self.bookmarks = bookmarks
253 # allow push of new branch
253 # allow push of new branch
254 self.newbranch = newbranch
254 self.newbranch = newbranch
255 # step already performed
255 # step already performed
256 # (used to check what steps have been already performed through bundle2)
256 # (used to check what steps have been already performed through bundle2)
257 self.stepsdone = set()
257 self.stepsdone = set()
258 # Integer version of the changegroup push result
258 # Integer version of the changegroup push result
259 # - None means nothing to push
259 # - None means nothing to push
260 # - 0 means HTTP error
260 # - 0 means HTTP error
261 # - 1 means we pushed and remote head count is unchanged *or*
261 # - 1 means we pushed and remote head count is unchanged *or*
262 # we have outgoing changesets but refused to push
262 # we have outgoing changesets but refused to push
263 # - other values as described by addchangegroup()
263 # - other values as described by addchangegroup()
264 self.cgresult = None
264 self.cgresult = None
265 # Boolean value for the bookmark push
265 # Boolean value for the bookmark push
266 self.bkresult = None
266 self.bkresult = None
267 # discover.outgoing object (contains common and outgoing data)
267 # discover.outgoing object (contains common and outgoing data)
268 self.outgoing = None
268 self.outgoing = None
269 # all remote topological heads before the push
269 # all remote topological heads before the push
270 self.remoteheads = None
270 self.remoteheads = None
271 # Details of the remote branch pre and post push
271 # Details of the remote branch pre and post push
272 #
272 #
273 # mapping: {'branch': ([remoteheads],
273 # mapping: {'branch': ([remoteheads],
274 # [newheads],
274 # [newheads],
275 # [unsyncedheads],
275 # [unsyncedheads],
276 # [discardedheads])}
276 # [discardedheads])}
277 # - branch: the branch name
277 # - branch: the branch name
278 # - remoteheads: the list of remote heads known locally
278 # - remoteheads: the list of remote heads known locally
279 # None if the branch is new
279 # None if the branch is new
280 # - newheads: the new remote heads (known locally) with outgoing pushed
280 # - newheads: the new remote heads (known locally) with outgoing pushed
281 # - unsyncedheads: the list of remote heads unknown locally.
281 # - unsyncedheads: the list of remote heads unknown locally.
282 # - discardedheads: the list of remote heads made obsolete by the push
282 # - discardedheads: the list of remote heads made obsolete by the push
283 self.pushbranchmap = None
283 self.pushbranchmap = None
284 # testable as a boolean indicating if any nodes are missing locally.
284 # testable as a boolean indicating if any nodes are missing locally.
285 self.incoming = None
285 self.incoming = None
286 # summary of the remote phase situation
286 # summary of the remote phase situation
287 self.remotephases = None
287 self.remotephases = None
288 # phase changes that must be pushed alongside the changesets
289 self.outdatedphases = None
289 self.outdatedphases = None
290 # phase changes that must be pushed if the changeset push fails
291 self.fallbackoutdatedphases = None
291 self.fallbackoutdatedphases = None
292 # outgoing obsmarkers
292 # outgoing obsmarkers
293 self.outobsmarkers = set()
293 self.outobsmarkers = set()
294 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
294 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
295 self.outbookmarks = []
295 self.outbookmarks = []
296 # transaction manager
296 # transaction manager
297 self.trmanager = None
297 self.trmanager = None
298 # map { pushkey partid -> callback handling failure}
298 # map { pushkey partid -> callback handling failure}
299 # used to handle exception from mandatory pushkey part failure
299 # used to handle exception from mandatory pushkey part failure
300 self.pkfailcb = {}
300 self.pkfailcb = {}
301 # an iterable of pushvars or None
301 # an iterable of pushvars or None
302 self.pushvars = pushvars
302 self.pushvars = pushvars
303 # publish pushed changesets
303 # publish pushed changesets
304 self.publish = publish
304 self.publish = publish
305
305
306 @util.propertycache
306 @util.propertycache
307 def futureheads(self):
307 def futureheads(self):
308 """future remote heads if the changeset push succeeds"""
308 """future remote heads if the changeset push succeeds"""
309 return self.outgoing.ancestorsof
309 return self.outgoing.ancestorsof
310
310
311 @util.propertycache
311 @util.propertycache
312 def fallbackheads(self):
312 def fallbackheads(self):
313 """future remote heads if the changeset push fails"""
313 """future remote heads if the changeset push fails"""
314 if self.revs is None:
314 if self.revs is None:
315 # no target to push, all common heads are relevant
316 return self.outgoing.commonheads
316 return self.outgoing.commonheads
317 unfi = self.repo.unfiltered()
317 unfi = self.repo.unfiltered()
318 # I want cheads = heads(::ancestorsof and ::commonheads)
318 # I want cheads = heads(::ancestorsof and ::commonheads)
319 # (ancestorsof is revs with secret changeset filtered out)
319 # (ancestorsof is revs with secret changeset filtered out)
320 #
320 #
321 # This can be expressed as:
321 # This can be expressed as:
322 # cheads = ( (ancestorsof and ::commonheads)
322 # cheads = ( (ancestorsof and ::commonheads)
323 # + (commonheads and ::ancestorsof))"
323 # + (commonheads and ::ancestorsof))"
324 # )
324 # )
325 #
325 #
326 # while trying to push we already computed the following:
326 # while trying to push we already computed the following:
327 # common = (::commonheads)
327 # common = (::commonheads)
328 # missing = ((commonheads::ancestorsof) - commonheads)
328 # missing = ((commonheads::ancestorsof) - commonheads)
329 #
329 #
330 # We can pick:
330 # We can pick:
331 # * ancestorsof part of common (::commonheads)
331 # * ancestorsof part of common (::commonheads)
332 common = self.outgoing.common
332 common = self.outgoing.common
333 rev = self.repo.changelog.index.rev
333 rev = self.repo.changelog.index.rev
334 cheads = [node for node in self.revs if rev(node) in common]
334 cheads = [node for node in self.revs if rev(node) in common]
335 # and
335 # and
336 # * commonheads parents on missing
336 # * commonheads parents on missing
337 revset = unfi.set(
337 revset = unfi.set(
338 b'%ln and parents(roots(%ln))',
338 b'%ln and parents(roots(%ln))',
339 self.outgoing.commonheads,
339 self.outgoing.commonheads,
340 self.outgoing.missing,
340 self.outgoing.missing,
341 )
341 )
342 cheads.extend(c.node() for c in revset)
342 cheads.extend(c.node() for c in revset)
343 return cheads
343 return cheads
344
344
345 @property
345 @property
346 def commonheads(self):
346 def commonheads(self):
347 """set of all common heads after changeset bundle push"""
347 """set of all common heads after changeset bundle push"""
348 if self.cgresult:
348 if self.cgresult:
349 return self.futureheads
349 return self.futureheads
350 else:
350 else:
351 return self.fallbackheads
351 return self.fallbackheads
352
352
353
353
354 # mapping of message used when pushing bookmark
354 # mapping of message used when pushing bookmark
355 bookmsgmap = {
355 bookmsgmap = {
356 b'update': (
356 b'update': (
357 _(b"updating bookmark %s\n"),
357 _(b"updating bookmark %s\n"),
358 _(b'updating bookmark %s failed\n'),
358 _(b'updating bookmark %s failed\n'),
359 ),
359 ),
360 b'export': (
360 b'export': (
361 _(b"exporting bookmark %s\n"),
361 _(b"exporting bookmark %s\n"),
362 _(b'exporting bookmark %s failed\n'),
362 _(b'exporting bookmark %s failed\n'),
363 ),
363 ),
364 b'delete': (
364 b'delete': (
365 _(b"deleting remote bookmark %s\n"),
365 _(b"deleting remote bookmark %s\n"),
366 _(b'deleting remote bookmark %s failed\n'),
366 _(b'deleting remote bookmark %s failed\n'),
367 ),
367 ),
368 }
368 }
369
369
370
370
371 def push(
371 def push(
372 repo,
372 repo,
373 remote,
373 remote,
374 force=False,
374 force=False,
375 revs=None,
375 revs=None,
376 newbranch=False,
376 newbranch=False,
377 bookmarks=(),
377 bookmarks=(),
378 publish=False,
378 publish=False,
379 opargs=None,
379 opargs=None,
380 ):
380 ):
381 """Push outgoing changesets (limited by revs) from a local
381 """Push outgoing changesets (limited by revs) from a local
382 repository to remote. Return an integer:
382 repository to remote. Return an integer:
383 - None means nothing to push
383 - None means nothing to push
384 - 0 means HTTP error
384 - 0 means HTTP error
385 - 1 means we pushed and remote head count is unchanged *or*
385 - 1 means we pushed and remote head count is unchanged *or*
386 we have outgoing changesets but refused to push
386 we have outgoing changesets but refused to push
387 - other values as described by addchangegroup()
387 - other values as described by addchangegroup()
388 """
388 """
389 if opargs is None:
389 if opargs is None:
390 opargs = {}
390 opargs = {}
391 pushop = pushoperation(
391 pushop = pushoperation(
392 repo,
392 repo,
393 remote,
393 remote,
394 force,
394 force,
395 revs,
395 revs,
396 newbranch,
396 newbranch,
397 bookmarks,
397 bookmarks,
398 publish,
398 publish,
399 **pycompat.strkwargs(opargs)
399 **pycompat.strkwargs(opargs)
400 )
400 )
401 if pushop.remote.local():
401 if pushop.remote.local():
402 missing = (
402 missing = (
403 set(pushop.repo.requirements) - pushop.remote.local().supported
403 set(pushop.repo.requirements) - pushop.remote.local().supported
404 )
404 )
405 if missing:
405 if missing:
406 msg = _(
406 msg = _(
407 b"required features are not"
407 b"required features are not"
408 b" supported in the destination:"
408 b" supported in the destination:"
409 b" %s"
409 b" %s"
410 ) % (b', '.join(sorted(missing)))
410 ) % (b', '.join(sorted(missing)))
411 raise error.Abort(msg)
411 raise error.Abort(msg)
412
412
413 if not pushop.remote.canpush():
413 if not pushop.remote.canpush():
414 raise error.Abort(_(b"destination does not support push"))
414 raise error.Abort(_(b"destination does not support push"))
415
415
416 if not pushop.remote.capable(b'unbundle'):
416 if not pushop.remote.capable(b'unbundle'):
417 raise error.Abort(
417 raise error.Abort(
418 _(
418 _(
419 b'cannot push: destination does not support the '
419 b'cannot push: destination does not support the '
420 b'unbundle wire protocol command'
420 b'unbundle wire protocol command'
421 )
421 )
422 )
422 )
423
423
424 # get lock as we might write phase data
424 # get lock as we might write phase data
425 wlock = lock = None
425 wlock = lock = None
426 try:
426 try:
427 # bundle2 push may receive a reply bundle touching bookmarks
427 # bundle2 push may receive a reply bundle touching bookmarks
428 # requiring the wlock. Take it now to ensure proper ordering.
428 # requiring the wlock. Take it now to ensure proper ordering.
429 maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
429 maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
430 if (
430 if (
431 (not _forcebundle1(pushop))
431 (not _forcebundle1(pushop))
432 and maypushback
432 and maypushback
433 and not bookmod.bookmarksinstore(repo)
433 and not bookmod.bookmarksinstore(repo)
434 ):
434 ):
435 wlock = pushop.repo.wlock()
435 wlock = pushop.repo.wlock()
436 lock = pushop.repo.lock()
436 lock = pushop.repo.lock()
437 pushop.trmanager = transactionmanager(
437 pushop.trmanager = transactionmanager(
438 pushop.repo, b'push-response', pushop.remote.url()
438 pushop.repo, b'push-response', pushop.remote.url()
439 )
439 )
440 except error.LockUnavailable as err:
440 except error.LockUnavailable as err:
441 # source repo cannot be locked.
441 # source repo cannot be locked.
442 # We do not abort the push, but just disable the local phase
442 # We do not abort the push, but just disable the local phase
443 # synchronisation.
443 # synchronisation.
444 msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
444 msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
445 err
445 err
446 )
446 )
447 pushop.ui.debug(msg)
447 pushop.ui.debug(msg)
448
448
449 with wlock or util.nullcontextmanager():
449 with wlock or util.nullcontextmanager():
450 with lock or util.nullcontextmanager():
450 with lock or util.nullcontextmanager():
451 with pushop.trmanager or util.nullcontextmanager():
451 with pushop.trmanager or util.nullcontextmanager():
452 pushop.repo.checkpush(pushop)
452 pushop.repo.checkpush(pushop)
453 _checkpublish(pushop)
453 _checkpublish(pushop)
454 _pushdiscovery(pushop)
454 _pushdiscovery(pushop)
455 if not pushop.force:
455 if not pushop.force:
456 _checksubrepostate(pushop)
456 _checksubrepostate(pushop)
457 if not _forcebundle1(pushop):
457 if not _forcebundle1(pushop):
458 _pushbundle2(pushop)
458 _pushbundle2(pushop)
459 _pushchangeset(pushop)
459 _pushchangeset(pushop)
460 _pushsyncphase(pushop)
460 _pushsyncphase(pushop)
461 _pushobsolete(pushop)
461 _pushobsolete(pushop)
462 _pushbookmark(pushop)
462 _pushbookmark(pushop)
463
463
464 if repo.ui.configbool(b'experimental', b'remotenames'):
464 if repo.ui.configbool(b'experimental', b'remotenames'):
465 logexchange.pullremotenames(repo, remote)
465 logexchange.pullremotenames(repo, remote)
466
466
467 return pushop
467 return pushop
468
468
469
469
470 # list of steps to perform discovery before push
470 # list of steps to perform discovery before push
471 pushdiscoveryorder = []
471 pushdiscoveryorder = []
472
472
473 # Mapping between step name and function
473 # Mapping between step name and function
474 #
474 #
475 # This exists to help extensions wrap steps if necessary
475 # This exists to help extensions wrap steps if necessary
476 pushdiscoverymapping = {}
476 pushdiscoverymapping = {}
477
477
478
478
479 def pushdiscovery(stepname):
479 def pushdiscovery(stepname):
480 """decorator for function performing discovery before push
480 """decorator for function performing discovery before push
481
481
482 The function is added to the step -> function mapping and appended to the
482 The function is added to the step -> function mapping and appended to the
483 list of steps. Beware that decorated function will be added in order (this
483 list of steps. Beware that decorated function will be added in order (this
484 may matter).
484 may matter).
485
485
486 You can only use this decorator for a new step; if you want to wrap a step
486 You can only use this decorator for a new step; if you want to wrap a step
487 from an extension, change the pushdiscoverymapping dictionary directly."""
487 from an extension, change the pushdiscoverymapping dictionary directly."""
488
488
489 def dec(func):
489 def dec(func):
490 assert stepname not in pushdiscoverymapping
490 assert stepname not in pushdiscoverymapping
491 pushdiscoverymapping[stepname] = func
491 pushdiscoverymapping[stepname] = func
492 pushdiscoveryorder.append(stepname)
492 pushdiscoveryorder.append(stepname)
493 return func
493 return func
494
494
495 return dec
495 return dec
496
496
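As an illustration of the registration mechanism above, an extension could add its own discovery step like this; the step name and function are hypothetical, only the decorator usage mirrors the real code:

@pushdiscovery(b'example-audit')  # hypothetical step name
def _pushdiscoveryexampleaudit(pushop):
    """toy step: report how many changesets discovery found to push

    Steps run in registration order, so a step registered by an extension
    executes after the core steps below and can rely on pushop.outgoing.
    """
    pushop.ui.debug(
        b'audit: %d outgoing changesets\n' % len(pushop.outgoing.missing)
    )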
497
497
498 def _pushdiscovery(pushop):
498 def _pushdiscovery(pushop):
499 """Run all discovery steps"""
499 """Run all discovery steps"""
500 for stepname in pushdiscoveryorder:
500 for stepname in pushdiscoveryorder:
501 step = pushdiscoverymapping[stepname]
501 step = pushdiscoverymapping[stepname]
502 step(pushop)
502 step(pushop)
503
503
504
504
505 def _checksubrepostate(pushop):
505 def _checksubrepostate(pushop):
506 """Ensure all outgoing referenced subrepo revisions are present locally"""
506 """Ensure all outgoing referenced subrepo revisions are present locally"""
507 for n in pushop.outgoing.missing:
507 for n in pushop.outgoing.missing:
508 ctx = pushop.repo[n]
508 ctx = pushop.repo[n]
509
509
510 if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
510 if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
511 for subpath in sorted(ctx.substate):
511 for subpath in sorted(ctx.substate):
512 sub = ctx.sub(subpath)
512 sub = ctx.sub(subpath)
513 sub.verify(onpush=True)
513 sub.verify(onpush=True)
514
514
515
515
516 @pushdiscovery(b'changeset')
516 @pushdiscovery(b'changeset')
517 def _pushdiscoverychangeset(pushop):
517 def _pushdiscoverychangeset(pushop):
518 """discover the changeset that need to be pushed"""
518 """discover the changeset that need to be pushed"""
519 fci = discovery.findcommonincoming
519 fci = discovery.findcommonincoming
520 if pushop.revs:
520 if pushop.revs:
521 commoninc = fci(
521 commoninc = fci(
522 pushop.repo,
522 pushop.repo,
523 pushop.remote,
523 pushop.remote,
524 force=pushop.force,
524 force=pushop.force,
525 ancestorsof=pushop.revs,
525 ancestorsof=pushop.revs,
526 )
526 )
527 else:
527 else:
528 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
528 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
529 common, inc, remoteheads = commoninc
529 common, inc, remoteheads = commoninc
530 fco = discovery.findcommonoutgoing
530 fco = discovery.findcommonoutgoing
531 outgoing = fco(
531 outgoing = fco(
532 pushop.repo,
532 pushop.repo,
533 pushop.remote,
533 pushop.remote,
534 onlyheads=pushop.revs,
534 onlyheads=pushop.revs,
535 commoninc=commoninc,
535 commoninc=commoninc,
536 force=pushop.force,
536 force=pushop.force,
537 )
537 )
538 pushop.outgoing = outgoing
538 pushop.outgoing = outgoing
539 pushop.remoteheads = remoteheads
539 pushop.remoteheads = remoteheads
540 pushop.incoming = inc
540 pushop.incoming = inc
541
541
542
542
543 @pushdiscovery(b'phase')
543 @pushdiscovery(b'phase')
544 def _pushdiscoveryphase(pushop):
544 def _pushdiscoveryphase(pushop):
545 """discover the phase that needs to be pushed
545 """discover the phase that needs to be pushed
546
546
547 (computed for both success and failure case for changesets push)"""
547 (computed for both success and failure case for changesets push)"""
548 outgoing = pushop.outgoing
548 outgoing = pushop.outgoing
549 unfi = pushop.repo.unfiltered()
549 unfi = pushop.repo.unfiltered()
550 remotephases = listkeys(pushop.remote, b'phases')
550 remotephases = listkeys(pushop.remote, b'phases')
551
551
552 if (
552 if (
553 pushop.ui.configbool(b'ui', b'_usedassubrepo')
553 pushop.ui.configbool(b'ui', b'_usedassubrepo')
554 and remotephases # server supports phases
554 and remotephases # server supports phases
555 and not pushop.outgoing.missing # no changesets to be pushed
555 and not pushop.outgoing.missing # no changesets to be pushed
556 and remotephases.get(b'publishing', False)
556 and remotephases.get(b'publishing', False)
557 ):
557 ):
558 # When:
558 # When:
559 # - this is a subrepo push
559 # - this is a subrepo push
560 # - and remote support phase
560 # - and remote support phase
561 # - and no changeset are to be pushed
561 # - and no changeset are to be pushed
562 # - and remote is publishing
562 # - and remote is publishing
563 # We may be in issue 3781 case!
563 # We may be in issue 3781 case!
564 # We drop the possible phase synchronisation done by
564 # We drop the possible phase synchronisation done by
565 # courtesy to publish changesets possibly locally draft
565 # courtesy to publish changesets possibly locally draft
566 # on the remote.
566 # on the remote.
567 pushop.outdatedphases = []
567 pushop.outdatedphases = []
568 pushop.fallbackoutdatedphases = []
568 pushop.fallbackoutdatedphases = []
569 return
569 return
570
570
571 pushop.remotephases = phases.remotephasessummary(
571 pushop.remotephases = phases.remotephasessummary(
572 pushop.repo, pushop.fallbackheads, remotephases
572 pushop.repo, pushop.fallbackheads, remotephases
573 )
573 )
574 droots = pushop.remotephases.draftroots
574 droots = pushop.remotephases.draftroots
575
575
576 extracond = b''
576 extracond = b''
577 if not pushop.remotephases.publishing:
577 if not pushop.remotephases.publishing:
578 extracond = b' and public()'
578 extracond = b' and public()'
579 revset = b'heads((%%ln::%%ln) %s)' % extracond
579 revset = b'heads((%%ln::%%ln) %s)' % extracond
580 # Get the list of all revs draft on remote but public here.
580 # Get the list of all revs draft on remote but public here.
581 # XXX Beware that the revset breaks if droots is not strictly
581 # XXX Beware that the revset breaks if droots is not strictly
582 # XXX roots; we may want to ensure it is, but that is costly
582 # XXX roots; we may want to ensure it is, but that is costly
583 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
583 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
584 if not pushop.remotephases.publishing and pushop.publish:
584 if not pushop.remotephases.publishing and pushop.publish:
585 future = list(
585 future = list(
586 unfi.set(
586 unfi.set(
587 b'%ln and (not public() or %ln::)', pushop.futureheads, droots
587 b'%ln and (not public() or %ln::)', pushop.futureheads, droots
588 )
588 )
589 )
589 )
590 elif not outgoing.missing:
590 elif not outgoing.missing:
591 future = fallback
591 future = fallback
592 else:
592 else:
593 # add the changesets we are going to push as drafts
593 # add the changesets we are going to push as drafts
594 #
594 #
595 # should not be necessary for a publishing server, but because of an
595 # should not be necessary for a publishing server, but because of an
596 # issue fixed in xxxxx we have to do it anyway.
596 # issue fixed in xxxxx we have to do it anyway.
597 fdroots = list(
597 fdroots = list(
598 unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
598 unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
599 )
599 )
600 fdroots = [f.node() for f in fdroots]
600 fdroots = [f.node() for f in fdroots]
601 future = list(unfi.set(revset, fdroots, pushop.futureheads))
601 future = list(unfi.set(revset, fdroots, pushop.futureheads))
602 pushop.outdatedphases = future
602 pushop.outdatedphases = future
603 pushop.fallbackoutdatedphases = fallback
603 pushop.fallbackoutdatedphases = fallback
604
604
605
605
606 @pushdiscovery(b'obsmarker')
606 @pushdiscovery(b'obsmarker')
607 def _pushdiscoveryobsmarkers(pushop):
607 def _pushdiscoveryobsmarkers(pushop):
608 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
608 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
609 return
609 return
610
610
611 if not pushop.repo.obsstore:
611 if not pushop.repo.obsstore:
612 return
612 return
613
613
614 if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
614 if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
615 return
615 return
616
616
617 repo = pushop.repo
617 repo = pushop.repo
618 # very naive computation that can be quite expensive on a big repo;
618 # very naive computation that can be quite expensive on a big repo;
619 # however, evolution is currently slow on such repos anyway.
619 # however, evolution is currently slow on such repos anyway.
620 nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
620 nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
621 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
621 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
622
622
623
623
624 @pushdiscovery(b'bookmarks')
624 @pushdiscovery(b'bookmarks')
625 def _pushdiscoverybookmarks(pushop):
625 def _pushdiscoverybookmarks(pushop):
626 ui = pushop.ui
626 ui = pushop.ui
627 repo = pushop.repo.unfiltered()
627 repo = pushop.repo.unfiltered()
628 remote = pushop.remote
628 remote = pushop.remote
629 ui.debug(b"checking for updated bookmarks\n")
629 ui.debug(b"checking for updated bookmarks\n")
630 ancestors = ()
630 ancestors = ()
631 if pushop.revs:
631 if pushop.revs:
632 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
632 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
633 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
633 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
634
634
635 remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))
635 remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))
636
636
637 explicit = {
637 explicit = {
638 repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
638 repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
639 }
639 }
640
640
641 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
641 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
642 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
642 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
643
643
644
644
645 def _processcompared(pushop, pushed, explicit, remotebms, comp):
645 def _processcompared(pushop, pushed, explicit, remotebms, comp):
646 """take decision on bookmarks to push to the remote repo
646 """take decision on bookmarks to push to the remote repo
647
647
648 Exists to help extensions alter this behavior.
648 Exists to help extensions alter this behavior.
649 """
649 """
650 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
650 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
651
651
652 repo = pushop.repo
652 repo = pushop.repo
653
653
654 for b, scid, dcid in advsrc:
654 for b, scid, dcid in advsrc:
655 if b in explicit:
655 if b in explicit:
656 explicit.remove(b)
656 explicit.remove(b)
657 if not pushed or repo[scid].rev() in pushed:
657 if not pushed or repo[scid].rev() in pushed:
658 pushop.outbookmarks.append((b, dcid, scid))
658 pushop.outbookmarks.append((b, dcid, scid))
659 # search for added bookmarks
659 # search for added bookmarks
660 for b, scid, dcid in addsrc:
660 for b, scid, dcid in addsrc:
661 if b in explicit:
661 if b in explicit:
662 explicit.remove(b)
662 explicit.remove(b)
663 if bookmod.isdivergent(b):
663 if bookmod.isdivergent(b):
664 pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b)
664 pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b)
665 pushop.bkresult = 2
665 pushop.bkresult = 2
666 else:
666 else:
667 pushop.outbookmarks.append((b, b'', scid))
667 pushop.outbookmarks.append((b, b'', scid))
668 # search for overwritten bookmark
668 # search for overwritten bookmark
669 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
669 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
670 if b in explicit:
670 if b in explicit:
671 explicit.remove(b)
671 explicit.remove(b)
672 pushop.outbookmarks.append((b, dcid, scid))
672 pushop.outbookmarks.append((b, dcid, scid))
673 # search for bookmark to delete
673 # search for bookmark to delete
674 for b, scid, dcid in adddst:
674 for b, scid, dcid in adddst:
675 if b in explicit:
675 if b in explicit:
676 explicit.remove(b)
676 explicit.remove(b)
677 # treat as "deleted locally"
677 # treat as "deleted locally"
678 pushop.outbookmarks.append((b, dcid, b''))
678 pushop.outbookmarks.append((b, dcid, b''))
679 # identical bookmarks shouldn't get reported
679 # identical bookmarks shouldn't get reported
680 for b, scid, dcid in same:
680 for b, scid, dcid in same:
681 if b in explicit:
681 if b in explicit:
682 explicit.remove(b)
682 explicit.remove(b)
683
683
684 if explicit:
684 if explicit:
685 explicit = sorted(explicit)
685 explicit = sorted(explicit)
686 # we should probably list all of them
686 # we should probably list all of them
687 pushop.ui.warn(
687 pushop.ui.warn(
688 _(
688 _(
689 b'bookmark %s does not exist on the local '
689 b'bookmark %s does not exist on the local '
690 b'or remote repository!\n'
690 b'or remote repository!\n'
691 )
691 )
692 % explicit[0]
692 % explicit[0]
693 )
693 )
694 pushop.bkresult = 2
694 pushop.bkresult = 2
695
695
696 pushop.outbookmarks.sort()
696 pushop.outbookmarks.sort()
697
697
698
698
699 def _pushcheckoutgoing(pushop):
699 def _pushcheckoutgoing(pushop):
700 outgoing = pushop.outgoing
700 outgoing = pushop.outgoing
701 unfi = pushop.repo.unfiltered()
701 unfi = pushop.repo.unfiltered()
702 if not outgoing.missing:
702 if not outgoing.missing:
703 # nothing to push
703 # nothing to push
704 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
704 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
705 return False
705 return False
706 # something to push
706 # something to push
707 if not pushop.force:
707 if not pushop.force:
708 # if repo.obsstore is empty --> no obsolete markers,
708 # if repo.obsstore is empty --> no obsolete markers,
709 # so we can skip the iteration below
709 # so we can skip the iteration below
710 if unfi.obsstore:
710 if unfi.obsstore:
711 # these messages are defined here because of the 80-char line limit
711 # these messages are defined here because of the 80-char line limit
712 mso = _(b"push includes obsolete changeset: %s!")
712 mso = _(b"push includes obsolete changeset: %s!")
713 mspd = _(b"push includes phase-divergent changeset: %s!")
713 mspd = _(b"push includes phase-divergent changeset: %s!")
714 mscd = _(b"push includes content-divergent changeset: %s!")
714 mscd = _(b"push includes content-divergent changeset: %s!")
715 mst = {
715 mst = {
716 b"orphan": _(b"push includes orphan changeset: %s!"),
716 b"orphan": _(b"push includes orphan changeset: %s!"),
717 b"phase-divergent": mspd,
717 b"phase-divergent": mspd,
718 b"content-divergent": mscd,
718 b"content-divergent": mscd,
719 }
719 }
720 # If we are pushing and there is at least one
720 # If we are pushing and there is at least one
721 # obsolete or unstable changeset in missing, at
721 # obsolete or unstable changeset in missing, at
722 # least one of the missing heads will be obsolete or
722 # least one of the missing heads will be obsolete or
723 # unstable. So checking only the heads is ok.
723 # unstable. So checking only the heads is ok.
724 for node in outgoing.ancestorsof:
724 for node in outgoing.ancestorsof:
725 ctx = unfi[node]
725 ctx = unfi[node]
726 if ctx.obsolete():
726 if ctx.obsolete():
727 raise error.Abort(mso % ctx)
727 raise error.Abort(mso % ctx)
728 elif ctx.isunstable():
728 elif ctx.isunstable():
729 # TODO print more than one instability in the abort
729 # TODO print more than one instability in the abort
730 # message
730 # message
731 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
731 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
732
732
733 discovery.checkheads(pushop)
733 discovery.checkheads(pushop)
734 return True
734 return True
735
735
736
736
737 # List of names of steps to perform for an outgoing bundle2, order matters.
737 # List of names of steps to perform for an outgoing bundle2, order matters.
738 b2partsgenorder = []
738 b2partsgenorder = []
739
739
740 # Mapping between step name and function
740 # Mapping between step name and function
741 #
741 #
742 # This exists to help extensions wrap steps if necessary
742 # This exists to help extensions wrap steps if necessary
743 b2partsgenmapping = {}
743 b2partsgenmapping = {}
744
744
745
745
746 def b2partsgenerator(stepname, idx=None):
746 def b2partsgenerator(stepname, idx=None):
747 """decorator for function generating bundle2 part
747 """decorator for function generating bundle2 part
748
748
749 The function is added to the step -> function mapping and appended to the
749 The function is added to the step -> function mapping and appended to the
750 list of steps. Beware that decorated functions will be added in order
750 list of steps. Beware that decorated functions will be added in order
751 (this may matter).
751 (this may matter).
752
752
753 You can only use this decorator for new steps; if you want to wrap a step
753 You can only use this decorator for new steps; if you want to wrap a step
754 from an extension, modify the b2partsgenmapping dictionary directly."""
754 from an extension, modify the b2partsgenmapping dictionary directly."""
755
755
756 def dec(func):
756 def dec(func):
757 assert stepname not in b2partsgenmapping
757 assert stepname not in b2partsgenmapping
758 b2partsgenmapping[stepname] = func
758 b2partsgenmapping[stepname] = func
759 if idx is None:
759 if idx is None:
760 b2partsgenorder.append(stepname)
760 b2partsgenorder.append(stepname)
761 else:
761 else:
762 b2partsgenorder.insert(idx, stepname)
762 b2partsgenorder.insert(idx, stepname)
763 return func
763 return func
764
764
765 return dec
765 return dec
766
766
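As a sketch of how an extension might use this registration (the part name and generator below are hypothetical; only the decorator and bundler calls mirror the code above), passing idx=0 prepends the step so it runs before the core generators:

@b2partsgenerator(b'example-note', idx=0)  # hypothetical step name
def _pushb2examplenote(pushop, bundler):
    """toy generator: attach an advisory output part to the outgoing bundle

    Returning a callable would register it as a reply handler; returning
    None, as here, registers nothing.
    """
    bundler.newpart(b'output', data=b'example push note\n', mandatory=False)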
767
767
768 def _pushb2ctxcheckheads(pushop, bundler):
768 def _pushb2ctxcheckheads(pushop, bundler):
769 """Generate race condition checking parts
769 """Generate race condition checking parts
770
770
771 Exists as an independent function to aid extensions
771 Exists as an independent function to aid extensions
772 """
772 """
773 # * 'force' does not check for push races,
773 # * 'force' does not check for push races,
774 # * if we don't push anything, there is nothing to check.
774 # * if we don't push anything, there is nothing to check.
775 if not pushop.force and pushop.outgoing.ancestorsof:
775 if not pushop.force and pushop.outgoing.ancestorsof:
776 allowunrelated = b'related' in bundler.capabilities.get(
776 allowunrelated = b'related' in bundler.capabilities.get(
777 b'checkheads', ()
777 b'checkheads', ()
778 )
778 )
779 emptyremote = pushop.pushbranchmap is None
779 emptyremote = pushop.pushbranchmap is None
780 if not allowunrelated or emptyremote:
780 if not allowunrelated or emptyremote:
781 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
781 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
782 else:
782 else:
783 affected = set()
783 affected = set()
784 for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
784 for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
785 remoteheads, newheads, unsyncedheads, discardedheads = heads
785 remoteheads, newheads, unsyncedheads, discardedheads = heads
786 if remoteheads is not None:
786 if remoteheads is not None:
787 remote = set(remoteheads)
787 remote = set(remoteheads)
788 affected |= set(discardedheads) & remote
788 affected |= set(discardedheads) & remote
789 affected |= remote - set(newheads)
789 affected |= remote - set(newheads)
790 if affected:
790 if affected:
791 data = iter(sorted(affected))
791 data = iter(sorted(affected))
792 bundler.newpart(b'check:updated-heads', data=data)
792 bundler.newpart(b'check:updated-heads', data=data)
793
793
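The per-branch head bookkeeping above boils down to a small set computation; here is a minimal standalone sketch (assumed helper name, not Mercurial API) of which remote heads end up in the 'check:updated-heads' part:

def _affectedheads(remoteheads, newheads, discardedheads):
    # a remote head is affected if the push discards it or if it is no
    # longer present among the new heads
    remote = set(remoteheads)
    return (set(discardedheads) & remote) | (remote - set(newheads))

# e.g. remote heads {A, B}, new heads {A, C}, discarded heads {B}
# -> only B is affected and gets race-checked on the server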
794
794
795 def _pushing(pushop):
795 def _pushing(pushop):
796 """return True if we are pushing anything"""
796 """return True if we are pushing anything"""
797 return bool(
797 return bool(
798 pushop.outgoing.missing
798 pushop.outgoing.missing
799 or pushop.outdatedphases
799 or pushop.outdatedphases
800 or pushop.outobsmarkers
800 or pushop.outobsmarkers
801 or pushop.outbookmarks
801 or pushop.outbookmarks
802 )
802 )
803
803
804
804
805 @b2partsgenerator(b'check-bookmarks')
805 @b2partsgenerator(b'check-bookmarks')
806 def _pushb2checkbookmarks(pushop, bundler):
806 def _pushb2checkbookmarks(pushop, bundler):
807 """insert bookmark move checking"""
807 """insert bookmark move checking"""
808 if not _pushing(pushop) or pushop.force:
808 if not _pushing(pushop) or pushop.force:
809 return
809 return
810 b2caps = bundle2.bundle2caps(pushop.remote)
810 b2caps = bundle2.bundle2caps(pushop.remote)
811 hasbookmarkcheck = b'bookmarks' in b2caps
811 hasbookmarkcheck = b'bookmarks' in b2caps
812 if not (pushop.outbookmarks and hasbookmarkcheck):
812 if not (pushop.outbookmarks and hasbookmarkcheck):
813 return
813 return
814 data = []
814 data = []
815 for book, old, new in pushop.outbookmarks:
815 for book, old, new in pushop.outbookmarks:
816 data.append((book, old))
816 data.append((book, old))
817 checkdata = bookmod.binaryencode(data)
817 checkdata = bookmod.binaryencode(data)
818 bundler.newpart(b'check:bookmarks', data=checkdata)
818 bundler.newpart(b'check:bookmarks', data=checkdata)
819
819
820
820
821 @b2partsgenerator(b'check-phases')
821 @b2partsgenerator(b'check-phases')
822 def _pushb2checkphases(pushop, bundler):
822 def _pushb2checkphases(pushop, bundler):
823 """insert phase move checking"""
823 """insert phase move checking"""
824 if not _pushing(pushop) or pushop.force:
824 if not _pushing(pushop) or pushop.force:
825 return
825 return
826 b2caps = bundle2.bundle2caps(pushop.remote)
826 b2caps = bundle2.bundle2caps(pushop.remote)
827 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
827 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
828 if pushop.remotephases is not None and hasphaseheads:
828 if pushop.remotephases is not None and hasphaseheads:
829 # check that the remote phase has not changed
829 # check that the remote phase has not changed
830 checks = {p: [] for p in phases.allphases}
830 checks = {p: [] for p in phases.allphases}
831 checks[phases.public].extend(pushop.remotephases.publicheads)
831 checks[phases.public].extend(pushop.remotephases.publicheads)
832 checks[phases.draft].extend(pushop.remotephases.draftroots)
832 checks[phases.draft].extend(pushop.remotephases.draftroots)
833 if any(pycompat.itervalues(checks)):
833 if any(pycompat.itervalues(checks)):
834 for phase in checks:
834 for phase in checks:
835 checks[phase].sort()
835 checks[phase].sort()
836 checkdata = phases.binaryencode(checks)
836 checkdata = phases.binaryencode(checks)
837 bundler.newpart(b'check:phases', data=checkdata)
837 bundler.newpart(b'check:phases', data=checkdata)
838
838
839
839
840 @b2partsgenerator(b'changeset')
840 @b2partsgenerator(b'changeset')
841 def _pushb2ctx(pushop, bundler):
841 def _pushb2ctx(pushop, bundler):
842 """handle changegroup push through bundle2
842 """handle changegroup push through bundle2
843
843
844 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
844 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
845 """
845 """
846 if b'changesets' in pushop.stepsdone:
846 if b'changesets' in pushop.stepsdone:
847 return
847 return
848 pushop.stepsdone.add(b'changesets')
848 pushop.stepsdone.add(b'changesets')
849 # Send known heads to the server for race detection.
849 # Send known heads to the server for race detection.
850 if not _pushcheckoutgoing(pushop):
850 if not _pushcheckoutgoing(pushop):
851 return
851 return
852 pushop.repo.prepushoutgoinghooks(pushop)
852 pushop.repo.prepushoutgoinghooks(pushop)
853
853
854 _pushb2ctxcheckheads(pushop, bundler)
854 _pushb2ctxcheckheads(pushop, bundler)
855
855
856 b2caps = bundle2.bundle2caps(pushop.remote)
856 b2caps = bundle2.bundle2caps(pushop.remote)
857 version = b'01'
857 version = b'01'
858 cgversions = b2caps.get(b'changegroup')
858 cgversions = b2caps.get(b'changegroup')
859 if cgversions: # 3.1 and 3.2 ship with an empty value
859 if cgversions: # 3.1 and 3.2 ship with an empty value
860 cgversions = [
860 cgversions = [
861 v
861 v
862 for v in cgversions
862 for v in cgversions
863 if v in changegroup.supportedoutgoingversions(pushop.repo)
863 if v in changegroup.supportedoutgoingversions(pushop.repo)
864 ]
864 ]
865 if not cgversions:
865 if not cgversions:
866 raise error.Abort(_(b'no common changegroup version'))
866 raise error.Abort(_(b'no common changegroup version'))
867 version = max(cgversions)
867 version = max(cgversions)
868 cgstream = changegroup.makestream(
868 cgstream = changegroup.makestream(
869 pushop.repo, pushop.outgoing, version, b'push'
869 pushop.repo, pushop.outgoing, version, b'push'
870 )
870 )
871 cgpart = bundler.newpart(b'changegroup', data=cgstream)
871 cgpart = bundler.newpart(b'changegroup', data=cgstream)
872 if cgversions:
872 if cgversions:
873 cgpart.addparam(b'version', version)
873 cgpart.addparam(b'version', version)
874 if scmutil.istreemanifest(pushop.repo):
874 if scmutil.istreemanifest(pushop.repo):
875 cgpart.addparam(b'treemanifest', b'1')
875 cgpart.addparam(b'treemanifest', b'1')
876 if b'exp-sidedata-flag' in pushop.repo.requirements:
876 if b'exp-sidedata-flag' in pushop.repo.requirements:
877 cgpart.addparam(b'exp-sidedata', b'1')
877 cgpart.addparam(b'exp-sidedata', b'1')
878
878
879 def handlereply(op):
879 def handlereply(op):
880 """extract addchangegroup returns from server reply"""
880 """extract addchangegroup returns from server reply"""
881 cgreplies = op.records.getreplies(cgpart.id)
881 cgreplies = op.records.getreplies(cgpart.id)
882 assert len(cgreplies[b'changegroup']) == 1
882 assert len(cgreplies[b'changegroup']) == 1
883 pushop.cgresult = cgreplies[b'changegroup'][0][b'return']
883 pushop.cgresult = cgreplies[b'changegroup'][0][b'return']
884
884
885 return handlereply
885 return handlereply
886
886
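The changegroup version negotiation above can be summarised by this standalone sketch (assumed helper, not part of Mercurial): keep only the versions both sides support and pick the highest, falling back to '01' when the server advertises an empty 'changegroup' capability:

def _negotiatecgversion(servercgversions, localcgversions):
    # hg 3.1 and 3.2 advertise an empty value, meaning legacy '01' only
    if not servercgversions:
        return b'01'
    common = [v for v in servercgversions if v in localcgversions]
    if not common:
        raise ValueError('no common changegroup version')
    return max(common)

# _negotiatecgversion([b'01', b'02'], {b'01', b'02', b'03'}) == b'02'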
887
887
888 @b2partsgenerator(b'phase')
888 @b2partsgenerator(b'phase')
889 def _pushb2phases(pushop, bundler):
889 def _pushb2phases(pushop, bundler):
890 """handle phase push through bundle2"""
890 """handle phase push through bundle2"""
891 if b'phases' in pushop.stepsdone:
891 if b'phases' in pushop.stepsdone:
892 return
892 return
893 b2caps = bundle2.bundle2caps(pushop.remote)
893 b2caps = bundle2.bundle2caps(pushop.remote)
894 ui = pushop.repo.ui
894 ui = pushop.repo.ui
895
895
896 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
896 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
897 haspushkey = b'pushkey' in b2caps
897 haspushkey = b'pushkey' in b2caps
898 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
898 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
899
899
900 if hasphaseheads and not legacyphase:
900 if hasphaseheads and not legacyphase:
901 return _pushb2phaseheads(pushop, bundler)
901 return _pushb2phaseheads(pushop, bundler)
902 elif haspushkey:
902 elif haspushkey:
903 return _pushb2phasespushkey(pushop, bundler)
903 return _pushb2phasespushkey(pushop, bundler)
904
904
905
905
906 def _pushb2phaseheads(pushop, bundler):
906 def _pushb2phaseheads(pushop, bundler):
907 """push phase information through a bundle2 - binary part"""
907 """push phase information through a bundle2 - binary part"""
908 pushop.stepsdone.add(b'phases')
908 pushop.stepsdone.add(b'phases')
909 if pushop.outdatedphases:
909 if pushop.outdatedphases:
910 updates = {p: [] for p in phases.allphases}
910 updates = {p: [] for p in phases.allphases}
911 updates[0].extend(h.node() for h in pushop.outdatedphases)
911 updates[0].extend(h.node() for h in pushop.outdatedphases)
912 phasedata = phases.binaryencode(updates)
912 phasedata = phases.binaryencode(updates)
913 bundler.newpart(b'phase-heads', data=phasedata)
913 bundler.newpart(b'phase-heads', data=phasedata)
914
914
915
915
916 def _pushb2phasespushkey(pushop, bundler):
916 def _pushb2phasespushkey(pushop, bundler):
917 """push phase information through a bundle2 - pushkey part"""
917 """push phase information through a bundle2 - pushkey part"""
918 pushop.stepsdone.add(b'phases')
918 pushop.stepsdone.add(b'phases')
919 part2node = []
919 part2node = []
920
920
921 def handlefailure(pushop, exc):
921 def handlefailure(pushop, exc):
922 targetid = int(exc.partid)
922 targetid = int(exc.partid)
923 for partid, node in part2node:
923 for partid, node in part2node:
924 if partid == targetid:
924 if partid == targetid:
925 raise error.Abort(_(b'updating %s to public failed') % node)
925 raise error.Abort(_(b'updating %s to public failed') % node)
926
926
927 enc = pushkey.encode
927 enc = pushkey.encode
928 for newremotehead in pushop.outdatedphases:
928 for newremotehead in pushop.outdatedphases:
929 part = bundler.newpart(b'pushkey')
929 part = bundler.newpart(b'pushkey')
930 part.addparam(b'namespace', enc(b'phases'))
930 part.addparam(b'namespace', enc(b'phases'))
931 part.addparam(b'key', enc(newremotehead.hex()))
931 part.addparam(b'key', enc(newremotehead.hex()))
932 part.addparam(b'old', enc(b'%d' % phases.draft))
932 part.addparam(b'old', enc(b'%d' % phases.draft))
933 part.addparam(b'new', enc(b'%d' % phases.public))
933 part.addparam(b'new', enc(b'%d' % phases.public))
934 part2node.append((part.id, newremotehead))
934 part2node.append((part.id, newremotehead))
935 pushop.pkfailcb[part.id] = handlefailure
935 pushop.pkfailcb[part.id] = handlefailure
936
936
937 def handlereply(op):
937 def handlereply(op):
938 for partid, node in part2node:
938 for partid, node in part2node:
939 partrep = op.records.getreplies(partid)
939 partrep = op.records.getreplies(partid)
940 results = partrep[b'pushkey']
940 results = partrep[b'pushkey']
941 assert len(results) <= 1
941 assert len(results) <= 1
942 msg = None
942 msg = None
943 if not results:
943 if not results:
944 msg = _(b'server ignored update of %s to public!\n') % node
944 msg = _(b'server ignored update of %s to public!\n') % node
945 elif not int(results[0][b'return']):
945 elif not int(results[0][b'return']):
946 msg = _(b'updating %s to public failed!\n') % node
946 msg = _(b'updating %s to public failed!\n') % node
947 if msg is not None:
947 if msg is not None:
948 pushop.ui.warn(msg)
948 pushop.ui.warn(msg)
949
949
950 return handlereply
950 return handlereply
951
951
952
952
953 @b2partsgenerator(b'obsmarkers')
953 @b2partsgenerator(b'obsmarkers')
954 def _pushb2obsmarkers(pushop, bundler):
954 def _pushb2obsmarkers(pushop, bundler):
955 if b'obsmarkers' in pushop.stepsdone:
955 if b'obsmarkers' in pushop.stepsdone:
956 return
956 return
957 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
957 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
958 if obsolete.commonversion(remoteversions) is None:
958 if obsolete.commonversion(remoteversions) is None:
959 return
959 return
960 pushop.stepsdone.add(b'obsmarkers')
960 pushop.stepsdone.add(b'obsmarkers')
961 if pushop.outobsmarkers:
961 if pushop.outobsmarkers:
962 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
962 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
963 bundle2.buildobsmarkerspart(bundler, markers)
963 bundle2.buildobsmarkerspart(bundler, markers)
964
964
965
965
966 @b2partsgenerator(b'bookmarks')
966 @b2partsgenerator(b'bookmarks')
967 def _pushb2bookmarks(pushop, bundler):
967 def _pushb2bookmarks(pushop, bundler):
968 """handle bookmark push through bundle2"""
968 """handle bookmark push through bundle2"""
969 if b'bookmarks' in pushop.stepsdone:
969 if b'bookmarks' in pushop.stepsdone:
970 return
970 return
971 b2caps = bundle2.bundle2caps(pushop.remote)
971 b2caps = bundle2.bundle2caps(pushop.remote)
972
972
973 legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
973 legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
974 legacybooks = b'bookmarks' in legacy
974 legacybooks = b'bookmarks' in legacy
975
975
976 if not legacybooks and b'bookmarks' in b2caps:
976 if not legacybooks and b'bookmarks' in b2caps:
977 return _pushb2bookmarkspart(pushop, bundler)
977 return _pushb2bookmarkspart(pushop, bundler)
978 elif b'pushkey' in b2caps:
978 elif b'pushkey' in b2caps:
979 return _pushb2bookmarkspushkey(pushop, bundler)
979 return _pushb2bookmarkspushkey(pushop, bundler)
980
980
981
981
982 def _bmaction(old, new):
982 def _bmaction(old, new):
983 """small utility for bookmark pushing"""
983 """small utility for bookmark pushing"""
984 if not old:
984 if not old:
985 return b'export'
985 return b'export'
986 elif not new:
986 elif not new:
987 return b'delete'
987 return b'delete'
988 return b'update'
988 return b'update'
989
989
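A quick illustration of the mapping implemented by _bmaction above (the node values are hypothetical placeholders):

assert _bmaction(b'', b'aaa111') == b'export'        # bookmark added by push
assert _bmaction(b'aaa111', b'') == b'delete'        # bookmark removed
assert _bmaction(b'aaa111', b'bbb222') == b'update'  # bookmark moved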
990
990
991 def _abortonsecretctx(pushop, node, b):
991 def _abortonsecretctx(pushop, node, b):
992 """abort if a given bookmark points to a secret changeset"""
992 """abort if a given bookmark points to a secret changeset"""
993 if node and pushop.repo[node].phase() == phases.secret:
993 if node and pushop.repo[node].phase() == phases.secret:
994 raise error.Abort(
994 raise error.Abort(
995 _(b'cannot push bookmark %s as it points to a secret changeset') % b
995 _(b'cannot push bookmark %s as it points to a secret changeset') % b
996 )
996 )
997
997
998
998
999 def _pushb2bookmarkspart(pushop, bundler):
999 def _pushb2bookmarkspart(pushop, bundler):
1000 pushop.stepsdone.add(b'bookmarks')
1000 pushop.stepsdone.add(b'bookmarks')
1001 if not pushop.outbookmarks:
1001 if not pushop.outbookmarks:
1002 return
1002 return
1003
1003
1004 allactions = []
1004 allactions = []
1005 data = []
1005 data = []
1006 for book, old, new in pushop.outbookmarks:
1006 for book, old, new in pushop.outbookmarks:
1007 _abortonsecretctx(pushop, new, book)
1007 _abortonsecretctx(pushop, new, book)
1008 data.append((book, new))
1008 data.append((book, new))
1009 allactions.append((book, _bmaction(old, new)))
1009 allactions.append((book, _bmaction(old, new)))
1010 checkdata = bookmod.binaryencode(data)
1010 checkdata = bookmod.binaryencode(data)
1011 bundler.newpart(b'bookmarks', data=checkdata)
1011 bundler.newpart(b'bookmarks', data=checkdata)
1012
1012
1013 def handlereply(op):
1013 def handlereply(op):
1014 ui = pushop.ui
1014 ui = pushop.ui
1015 # if success
1015 # if success
1016 for book, action in allactions:
1016 for book, action in allactions:
1017 ui.status(bookmsgmap[action][0] % book)
1017 ui.status(bookmsgmap[action][0] % book)
1018
1018
1019 return handlereply
1019 return handlereply
1020
1020
1021
1021
1022 def _pushb2bookmarkspushkey(pushop, bundler):
1022 def _pushb2bookmarkspushkey(pushop, bundler):
1023 pushop.stepsdone.add(b'bookmarks')
1023 pushop.stepsdone.add(b'bookmarks')
1024 part2book = []
1024 part2book = []
1025 enc = pushkey.encode
1025 enc = pushkey.encode
1026
1026
1027 def handlefailure(pushop, exc):
1027 def handlefailure(pushop, exc):
1028 targetid = int(exc.partid)
1028 targetid = int(exc.partid)
1029 for partid, book, action in part2book:
1029 for partid, book, action in part2book:
1030 if partid == targetid:
1030 if partid == targetid:
1031 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1031 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1032 # we should not be called for a part we did not generate
1032 # we should not be called for a part we did not generate
1033 assert False
1033 assert False
1034
1034
1035 for book, old, new in pushop.outbookmarks:
1035 for book, old, new in pushop.outbookmarks:
1036 _abortonsecretctx(pushop, new, book)
1036 _abortonsecretctx(pushop, new, book)
1037 part = bundler.newpart(b'pushkey')
1037 part = bundler.newpart(b'pushkey')
1038 part.addparam(b'namespace', enc(b'bookmarks'))
1038 part.addparam(b'namespace', enc(b'bookmarks'))
1039 part.addparam(b'key', enc(book))
1039 part.addparam(b'key', enc(book))
1040 part.addparam(b'old', enc(hex(old)))
1040 part.addparam(b'old', enc(hex(old)))
1041 part.addparam(b'new', enc(hex(new)))
1041 part.addparam(b'new', enc(hex(new)))
1042 action = b'update'
1042 action = b'update'
1043 if not old:
1043 if not old:
1044 action = b'export'
1044 action = b'export'
1045 elif not new:
1045 elif not new:
1046 action = b'delete'
1046 action = b'delete'
1047 part2book.append((part.id, book, action))
1047 part2book.append((part.id, book, action))
1048 pushop.pkfailcb[part.id] = handlefailure
1048 pushop.pkfailcb[part.id] = handlefailure
1049
1049
1050 def handlereply(op):
1050 def handlereply(op):
1051 ui = pushop.ui
1051 ui = pushop.ui
1052 for partid, book, action in part2book:
1052 for partid, book, action in part2book:
1053 partrep = op.records.getreplies(partid)
1053 partrep = op.records.getreplies(partid)
1054 results = partrep[b'pushkey']
1054 results = partrep[b'pushkey']
1055 assert len(results) <= 1
1055 assert len(results) <= 1
1056 if not results:
1056 if not results:
1057 pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
1057 pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
1058 else:
1058 else:
1059 ret = int(results[0][b'return'])
1059 ret = int(results[0][b'return'])
1060 if ret:
1060 if ret:
1061 ui.status(bookmsgmap[action][0] % book)
1061 ui.status(bookmsgmap[action][0] % book)
1062 else:
1062 else:
1063 ui.warn(bookmsgmap[action][1] % book)
1063 ui.warn(bookmsgmap[action][1] % book)
1064 if pushop.bkresult is not None:
1064 if pushop.bkresult is not None:
1065 pushop.bkresult = 1
1065 pushop.bkresult = 1
1066
1066
1067 return handlereply
1067 return handlereply
1068
1068
1069
1069
1070 @b2partsgenerator(b'pushvars', idx=0)
1070 @b2partsgenerator(b'pushvars', idx=0)
1071 def _getbundlesendvars(pushop, bundler):
1071 def _getbundlesendvars(pushop, bundler):
1072 '''send shellvars via bundle2'''
1072 '''send shellvars via bundle2'''
1073 pushvars = pushop.pushvars
1073 pushvars = pushop.pushvars
1074 if pushvars:
1074 if pushvars:
1075 shellvars = {}
1075 shellvars = {}
1076 for raw in pushvars:
1076 for raw in pushvars:
1077 if b'=' not in raw:
1077 if b'=' not in raw:
1078 msg = (
1078 msg = (
1079 b"unable to parse variable '%s', should follow "
1079 b"unable to parse variable '%s', should follow "
1080 b"'KEY=VALUE' or 'KEY=' format"
1080 b"'KEY=VALUE' or 'KEY=' format"
1081 )
1081 )
1082 raise error.Abort(msg % raw)
1082 raise error.Abort(msg % raw)
1083 k, v = raw.split(b'=', 1)
1083 k, v = raw.split(b'=', 1)
1084 shellvars[k] = v
1084 shellvars[k] = v
1085
1085
1086 part = bundler.newpart(b'pushvars')
1086 part = bundler.newpart(b'pushvars')
1087
1087
1088 for key, value in pycompat.iteritems(shellvars):
1088 for key, value in pycompat.iteritems(shellvars):
1089 part.addparam(key, value, mandatory=False)
1089 part.addparam(key, value, mandatory=False)
1090
1090
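For reference, a standalone sketch of the KEY=VALUE parsing performed above (assumed helper, not Mercurial API); only the first '=' splits, so values may themselves contain '=':

def _parsepushvars(rawvars):
    shellvars = {}
    for raw in rawvars:
        if b'=' not in raw:
            raise ValueError(
                b"unable to parse variable '%s', expected KEY=VALUE" % raw
            )
        k, v = raw.split(b'=', 1)
        shellvars[k] = v
    return shellvars

# _parsepushvars([b'DEBUG=1', b'MSG=a=b']) == {b'DEBUG': b'1', b'MSG': b'a=b'}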
1091
1091
1092 def _pushbundle2(pushop):
1092 def _pushbundle2(pushop):
1093 """push data to the remote using bundle2
1093 """push data to the remote using bundle2
1094
1094
1095 The only currently supported type of data is changegroup but this will
1095 The only currently supported type of data is changegroup but this will
1096 evolve in the future."""
1096 evolve in the future."""
1097 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1097 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1098 pushback = pushop.trmanager and pushop.ui.configbool(
1098 pushback = pushop.trmanager and pushop.ui.configbool(
1099 b'experimental', b'bundle2.pushback'
1099 b'experimental', b'bundle2.pushback'
1100 )
1100 )
1101
1101
1102 # create reply capability
1102 # create reply capability
1103 capsblob = bundle2.encodecaps(
1103 capsblob = bundle2.encodecaps(
1104 bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
1104 bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
1105 )
1105 )
1106 bundler.newpart(b'replycaps', data=capsblob)
1106 bundler.newpart(b'replycaps', data=capsblob)
1107 replyhandlers = []
1107 replyhandlers = []
1108 for partgenname in b2partsgenorder:
1108 for partgenname in b2partsgenorder:
1109 partgen = b2partsgenmapping[partgenname]
1109 partgen = b2partsgenmapping[partgenname]
1110 ret = partgen(pushop, bundler)
1110 ret = partgen(pushop, bundler)
1111 if callable(ret):
1111 if callable(ret):
1112 replyhandlers.append(ret)
1112 replyhandlers.append(ret)
1113 # do not push if nothing to push
1113 # do not push if nothing to push
1114 if bundler.nbparts <= 1:
1114 if bundler.nbparts <= 1:
1115 return
1115 return
1116 stream = util.chunkbuffer(bundler.getchunks())
1116 stream = util.chunkbuffer(bundler.getchunks())
1117 try:
1117 try:
1118 try:
1118 try:
1119 with pushop.remote.commandexecutor() as e:
1119 with pushop.remote.commandexecutor() as e:
1120 reply = e.callcommand(
1120 reply = e.callcommand(
1121 b'unbundle',
1121 b'unbundle',
1122 {
1122 {
1123 b'bundle': stream,
1123 b'bundle': stream,
1124 b'heads': [b'force'],
1124 b'heads': [b'force'],
1125 b'url': pushop.remote.url(),
1125 b'url': pushop.remote.url(),
1126 },
1126 },
1127 ).result()
1127 ).result()
1128 except error.BundleValueError as exc:
1128 except error.BundleValueError as exc:
1129 raise error.Abort(_(b'missing support for %s') % exc)
1129 raise error.Abort(_(b'missing support for %s') % exc)
1130 try:
1130 try:
1131 trgetter = None
1131 trgetter = None
1132 if pushback:
1132 if pushback:
1133 trgetter = pushop.trmanager.transaction
1133 trgetter = pushop.trmanager.transaction
1134 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1134 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1135 except error.BundleValueError as exc:
1135 except error.BundleValueError as exc:
1136 raise error.Abort(_(b'missing support for %s') % exc)
1136 raise error.Abort(_(b'missing support for %s') % exc)
1137 except bundle2.AbortFromPart as exc:
1137 except bundle2.AbortFromPart as exc:
1138 pushop.ui.error(_(b'remote: %s\n') % exc)
1138 pushop.ui.error(_(b'remote: %s\n') % exc)
1139 if exc.hint is not None:
1139 if exc.hint is not None:
1140 pushop.ui.error(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
1140 pushop.ui.error(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
1141 raise error.Abort(_(b'push failed on remote'))
1141 raise error.Abort(_(b'push failed on remote'))
1142 except error.PushkeyFailed as exc:
1142 except error.PushkeyFailed as exc:
1143 partid = int(exc.partid)
1143 partid = int(exc.partid)
1144 if partid not in pushop.pkfailcb:
1144 if partid not in pushop.pkfailcb:
1145 raise
1145 raise
1146 pushop.pkfailcb[partid](pushop, exc)
1146 pushop.pkfailcb[partid](pushop, exc)
1147 for rephand in replyhandlers:
1147 for rephand in replyhandlers:
1148 rephand(op)
1148 rephand(op)
1149
1149
1150
1150
1151 def _pushchangeset(pushop):
1151 def _pushchangeset(pushop):
1152 """Make the actual push of changeset bundle to remote repo"""
1152 """Make the actual push of changeset bundle to remote repo"""
1153 if b'changesets' in pushop.stepsdone:
1153 if b'changesets' in pushop.stepsdone:
1154 return
1154 return
1155 pushop.stepsdone.add(b'changesets')
1155 pushop.stepsdone.add(b'changesets')
1156 if not _pushcheckoutgoing(pushop):
1156 if not _pushcheckoutgoing(pushop):
1157 return
1157 return
1158
1158
1159 # Should have verified this in push().
1159 # Should have verified this in push().
1160 assert pushop.remote.capable(b'unbundle')
1160 assert pushop.remote.capable(b'unbundle')
1161
1161
1162 pushop.repo.prepushoutgoinghooks(pushop)
1162 pushop.repo.prepushoutgoinghooks(pushop)
1163 outgoing = pushop.outgoing
1163 outgoing = pushop.outgoing
1164 # TODO: get bundlecaps from remote
1164 # TODO: get bundlecaps from remote
1165 bundlecaps = None
1165 bundlecaps = None
1166 # create a changegroup from local
1166 # create a changegroup from local
1167 if pushop.revs is None and not (
1167 if pushop.revs is None and not (
1168 outgoing.excluded or pushop.repo.changelog.filteredrevs
1168 outgoing.excluded or pushop.repo.changelog.filteredrevs
1169 ):
1169 ):
1170 # push everything,
1170 # push everything,
1171 # use the fast path, no race possible on push
1171 # use the fast path, no race possible on push
1172 cg = changegroup.makechangegroup(
1172 cg = changegroup.makechangegroup(
1173 pushop.repo,
1173 pushop.repo,
1174 outgoing,
1174 outgoing,
1175 b'01',
1175 b'01',
1176 b'push',
1176 b'push',
1177 fastpath=True,
1177 fastpath=True,
1178 bundlecaps=bundlecaps,
1178 bundlecaps=bundlecaps,
1179 )
1179 )
1180 else:
1180 else:
1181 cg = changegroup.makechangegroup(
1181 cg = changegroup.makechangegroup(
1182 pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
1182 pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
1183 )
1183 )
1184
1184
1185 # apply changegroup to remote
1185 # apply changegroup to remote
1186 # the local repo finds the heads on the server and works out which
1186 # the local repo finds the heads on the server and works out which
1187 # revs it must push. Once the revs are transferred, if the server
1187 # revs it must push. Once the revs are transferred, if the server
1188 # finds it has different heads (someone else won the
1188 # finds it has different heads (someone else won the
1189 # commit/push race), the server aborts.
1189 # commit/push race), the server aborts.
1190 if pushop.force:
1190 if pushop.force:
1191 remoteheads = [b'force']
1191 remoteheads = [b'force']
1192 else:
1192 else:
1193 remoteheads = pushop.remoteheads
1193 remoteheads = pushop.remoteheads
1194 # ssh: return remote's addchangegroup()
1194 # ssh: return remote's addchangegroup()
1195 # http: return remote's addchangegroup() or 0 for error
1195 # http: return remote's addchangegroup() or 0 for error
1196 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1196 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1197
1197
1198
1198
1199 def _pushsyncphase(pushop):
1199 def _pushsyncphase(pushop):
1200 """synchronise phase information locally and remotely"""
1200 """synchronise phase information locally and remotely"""
1201 cheads = pushop.commonheads
1201 cheads = pushop.commonheads
1202 # even when we don't push, exchanging phase data is useful
1202 # even when we don't push, exchanging phase data is useful
1203 remotephases = listkeys(pushop.remote, b'phases')
1203 remotephases = listkeys(pushop.remote, b'phases')
1204 if (
1204 if (
1205 pushop.ui.configbool(b'ui', b'_usedassubrepo')
1205 pushop.ui.configbool(b'ui', b'_usedassubrepo')
1206 and remotephases # server supports phases
1206 and remotephases # server supports phases
1207 and pushop.cgresult is None # nothing was pushed
1207 and pushop.cgresult is None # nothing was pushed
1208 and remotephases.get(b'publishing', False)
1208 and remotephases.get(b'publishing', False)
1209 ):
1209 ):
1210 # When:
1210 # When:
1211 # - this is a subrepo push
1211 # - this is a subrepo push
1212 # - and the remote supports phases
1212 # - and the remote supports phases
1213 # - and no changeset was pushed
1213 # - and no changeset was pushed
1214 # - and the remote is publishing
1214 # - and the remote is publishing
1215 # We may be in the issue 3871 case!
1215 # We may be in the issue 3871 case!
1216 # We drop the phase synchronisation normally done as a courtesy;
1216 # We drop the phase synchronisation normally done as a courtesy;
1217 # it would otherwise publish changesets that are still draft
1217 # it would otherwise publish changesets that are still draft
1218 # locally because the remote is publishing.
1218 # locally because the remote is publishing.
1219 remotephases = {b'publishing': b'True'}
1219 remotephases = {b'publishing': b'True'}
1220 if not remotephases: # old server or public only reply from non-publishing
1220 if not remotephases: # old server or public only reply from non-publishing
1221 _localphasemove(pushop, cheads)
1221 _localphasemove(pushop, cheads)
1222 # don't push any phase data as there is nothing to push
1222 # don't push any phase data as there is nothing to push
1223 else:
1223 else:
1224 ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
1224 ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
1225 pheads, droots = ana
1225 pheads, droots = ana
1226 ### Apply remote phase on local
1226 ### Apply remote phase on local
1227 if remotephases.get(b'publishing', False):
1227 if remotephases.get(b'publishing', False):
1228 _localphasemove(pushop, cheads)
1228 _localphasemove(pushop, cheads)
1229 else: # publish = False
1229 else: # publish = False
1230 _localphasemove(pushop, pheads)
1230 _localphasemove(pushop, pheads)
1231 _localphasemove(pushop, cheads, phases.draft)
1231 _localphasemove(pushop, cheads, phases.draft)
1232 ### Apply local phase on remote
1232 ### Apply local phase on remote
1233
1233
1234 if pushop.cgresult:
1234 if pushop.cgresult:
1235 if b'phases' in pushop.stepsdone:
1235 if b'phases' in pushop.stepsdone:
1236 # phases already pushed through bundle2
1236 # phases already pushed through bundle2
1237 return
1237 return
1238 outdated = pushop.outdatedphases
1238 outdated = pushop.outdatedphases
1239 else:
1239 else:
1240 outdated = pushop.fallbackoutdatedphases
1240 outdated = pushop.fallbackoutdatedphases
1241
1241
1242 pushop.stepsdone.add(b'phases')
1242 pushop.stepsdone.add(b'phases')
1243
1243
1244 # filter heads already turned public by the push
1244 # filter heads already turned public by the push
1245 outdated = [c for c in outdated if c.node() not in pheads]
1245 outdated = [c for c in outdated if c.node() not in pheads]
1246 # fallback to independent pushkey command
1246 # fallback to independent pushkey command
1247 for newremotehead in outdated:
1247 for newremotehead in outdated:
1248 with pushop.remote.commandexecutor() as e:
1248 with pushop.remote.commandexecutor() as e:
1249 r = e.callcommand(
1249 r = e.callcommand(
1250 b'pushkey',
1250 b'pushkey',
1251 {
1251 {
1252 b'namespace': b'phases',
1252 b'namespace': b'phases',
1253 b'key': newremotehead.hex(),
1253 b'key': newremotehead.hex(),
1254 b'old': b'%d' % phases.draft,
1254 b'old': b'%d' % phases.draft,
1255 b'new': b'%d' % phases.public,
1255 b'new': b'%d' % phases.public,
1256 },
1256 },
1257 ).result()
1257 ).result()
1258
1258
1259 if not r:
1259 if not r:
1260 pushop.ui.warn(
1260 pushop.ui.warn(
1261 _(b'updating %s to public failed!\n') % newremotehead
1261 _(b'updating %s to public failed!\n') % newremotehead
1262 )
1262 )
1263
1263
1264
1264
1265 def _localphasemove(pushop, nodes, phase=phases.public):
1265 def _localphasemove(pushop, nodes, phase=phases.public):
1266 """move <nodes> to <phase> in the local source repo"""
1266 """move <nodes> to <phase> in the local source repo"""
1267 if pushop.trmanager:
1267 if pushop.trmanager:
1268 phases.advanceboundary(
1268 phases.advanceboundary(
1269 pushop.repo, pushop.trmanager.transaction(), phase, nodes
1269 pushop.repo, pushop.trmanager.transaction(), phase, nodes
1270 )
1270 )
1271 else:
1271 else:
1272 # repo is not locked, do not change any phases!
1272 # repo is not locked, do not change any phases!
1273 # Inform the user that phases should have been moved when
1273 # Inform the user that phases should have been moved when
1274 # applicable.
1274 # applicable.
1275 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1275 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1276 phasestr = phases.phasenames[phase]
1276 phasestr = phases.phasenames[phase]
1277 if actualmoves:
1277 if actualmoves:
1278 pushop.ui.status(
1278 pushop.ui.status(
1279 _(
1279 _(
1280 b'cannot lock source repo, skipping '
1280 b'cannot lock source repo, skipping '
1281 b'local %s phase update\n'
1281 b'local %s phase update\n'
1282 )
1282 )
1283 % phasestr
1283 % phasestr
1284 )
1284 )
1285
1285
1286
1286
1287 def _pushobsolete(pushop):
1287 def _pushobsolete(pushop):
1288 """utility function to push obsolete markers to a remote"""
1288 """utility function to push obsolete markers to a remote"""
1289 if b'obsmarkers' in pushop.stepsdone:
1289 if b'obsmarkers' in pushop.stepsdone:
1290 return
1290 return
1291 repo = pushop.repo
1291 repo = pushop.repo
1292 remote = pushop.remote
1292 remote = pushop.remote
1293 pushop.stepsdone.add(b'obsmarkers')
1293 pushop.stepsdone.add(b'obsmarkers')
1294 if pushop.outobsmarkers:
1294 if pushop.outobsmarkers:
1295 pushop.ui.debug(b'try to push obsolete markers to remote\n')
1295 pushop.ui.debug(b'try to push obsolete markers to remote\n')
1296 rslts = []
1296 rslts = []
1297 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1297 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1298 remotedata = obsolete._pushkeyescape(markers)
1298 remotedata = obsolete._pushkeyescape(markers)
1299 for key in sorted(remotedata, reverse=True):
1299 for key in sorted(remotedata, reverse=True):
1300 # reverse sort to ensure we end with dump0
1300 # reverse sort to ensure we end with dump0
1301 data = remotedata[key]
1301 data = remotedata[key]
1302 rslts.append(remote.pushkey(b'obsolete', key, b'', data))
1302 rslts.append(remote.pushkey(b'obsolete', key, b'', data))
1303 if [r for r in rslts if not r]:
1303 if [r for r in rslts if not r]:
1304 msg = _(b'failed to push some obsolete markers!\n')
1304 msg = _(b'failed to push some obsolete markers!\n')
1305 repo.ui.warn(msg)
1305 repo.ui.warn(msg)
1306
1306
1307
1307
1308 def _pushbookmark(pushop):
1308 def _pushbookmark(pushop):
1309 """Update bookmark position on remote"""
1309 """Update bookmark position on remote"""
1310 if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
1310 if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
1311 return
1311 return
1312 pushop.stepsdone.add(b'bookmarks')
1312 pushop.stepsdone.add(b'bookmarks')
1313 ui = pushop.ui
1313 ui = pushop.ui
1314 remote = pushop.remote
1314 remote = pushop.remote
1315
1315
1316 for b, old, new in pushop.outbookmarks:
1316 for b, old, new in pushop.outbookmarks:
1317 action = b'update'
1317 action = b'update'
1318 if not old:
1318 if not old:
1319 action = b'export'
1319 action = b'export'
1320 elif not new:
1320 elif not new:
1321 action = b'delete'
1321 action = b'delete'
1322
1322
1323 with remote.commandexecutor() as e:
1323 with remote.commandexecutor() as e:
1324 r = e.callcommand(
1324 r = e.callcommand(
1325 b'pushkey',
1325 b'pushkey',
1326 {
1326 {
1327 b'namespace': b'bookmarks',
1327 b'namespace': b'bookmarks',
1328 b'key': b,
1328 b'key': b,
1329 b'old': hex(old),
1329 b'old': hex(old),
1330 b'new': hex(new),
1330 b'new': hex(new),
1331 },
1331 },
1332 ).result()
1332 ).result()
1333
1333
1334 if r:
1334 if r:
1335 ui.status(bookmsgmap[action][0] % b)
1335 ui.status(bookmsgmap[action][0] % b)
1336 else:
1336 else:
1337 ui.warn(bookmsgmap[action][1] % b)
1337 ui.warn(bookmsgmap[action][1] % b)
1338 # discovery can have set the value from an invalid entry
1338 # discovery can have set the value from an invalid entry
1339 if pushop.bkresult is not None:
1339 if pushop.bkresult is not None:
1340 pushop.bkresult = 1
1340 pushop.bkresult = 1
1341
1341
1342
1342
1343 class pulloperation(object):
1343 class pulloperation(object):
1344 """A object that represent a single pull operation
1344 """A object that represent a single pull operation
1345
1345
1346 It purpose is to carry pull related state and very common operation.
1346 It purpose is to carry pull related state and very common operation.
1347
1347
1348 A new should be created at the beginning of each pull and discarded
1348 A new should be created at the beginning of each pull and discarded
1349 afterward.
1349 afterward.
1350 """
1350 """
1351
1351
1352 def __init__(
1352 def __init__(
1353 self,
1353 self,
1354 repo,
1354 repo,
1355 remote,
1355 remote,
1356 heads=None,
1356 heads=None,
1357 force=False,
1357 force=False,
1358 bookmarks=(),
1358 bookmarks=(),
1359 remotebookmarks=None,
1359 remotebookmarks=None,
1360 streamclonerequested=None,
1360 streamclonerequested=None,
1361 includepats=None,
1361 includepats=None,
1362 excludepats=None,
1362 excludepats=None,
1363 depth=None,
1363 depth=None,
1364 ):
1364 ):
1365 # repo we pull into
1365 # repo we pull into
1366 self.repo = repo
1366 self.repo = repo
1367 # repo we pull from
1367 # repo we pull from
1368 self.remote = remote
1368 self.remote = remote
1369 # revision we try to pull (None is "all")
1369 # revision we try to pull (None is "all")
1370 self.heads = heads
1370 self.heads = heads
1371 # bookmark pulled explicitly
1371 # bookmark pulled explicitly
1372 self.explicitbookmarks = [
1372 self.explicitbookmarks = [
1373 repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
1373 repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
1374 ]
1374 ]
1375 # do we force pull?
1375 # do we force pull?
1376 self.force = force
1376 self.force = force
1377 # whether a streaming clone was requested
1377 # whether a streaming clone was requested
1378 self.streamclonerequested = streamclonerequested
1378 self.streamclonerequested = streamclonerequested
1379 # transaction manager
1379 # transaction manager
1380 self.trmanager = None
1380 self.trmanager = None
1381 # set of common changeset between local and remote before pull
1381 # set of common changeset between local and remote before pull
1382 self.common = None
1382 self.common = None
1383 # set of pulled head
1383 # set of pulled head
1384 self.rheads = None
1384 self.rheads = None
1385 # list of missing changeset to fetch remotely
1385 # list of missing changeset to fetch remotely
1386 self.fetch = None
1386 self.fetch = None
1387 # remote bookmarks data
1387 # remote bookmarks data
1388 self.remotebookmarks = remotebookmarks
1388 self.remotebookmarks = remotebookmarks
1389 # result of changegroup pulling (used as return code by pull)
1389 # result of changegroup pulling (used as return code by pull)
1390 self.cgresult = None
1390 self.cgresult = None
1391 # list of step already done
1391 # list of step already done
1392 self.stepsdone = set()
1392 self.stepsdone = set()
1393 # Whether we attempted a clone from pre-generated bundles.
1393 # Whether we attempted a clone from pre-generated bundles.
1394 self.clonebundleattempted = False
1394 self.clonebundleattempted = False
1395 # Set of file patterns to include.
1395 # Set of file patterns to include.
1396 self.includepats = includepats
1396 self.includepats = includepats
1397 # Set of file patterns to exclude.
1397 # Set of file patterns to exclude.
1398 self.excludepats = excludepats
1398 self.excludepats = excludepats
1399 # Number of ancestor changesets to pull from each pulled head.
1399 # Number of ancestor changesets to pull from each pulled head.
1400 self.depth = depth
1400 self.depth = depth
1401
1401
1402 @util.propertycache
1402 @util.propertycache
1403 def pulledsubset(self):
1403 def pulledsubset(self):
1404 """heads of the set of changeset target by the pull"""
1404 """heads of the set of changeset target by the pull"""
1405 # compute target subset
1405 # compute target subset
1406 if self.heads is None:
1406 if self.heads is None:
1407 # We pulled every thing possible
1407 # We pulled every thing possible
1408 # sync on everything common
1408 # sync on everything common
1409 c = set(self.common)
1409 c = set(self.common)
1410 ret = list(self.common)
1410 ret = list(self.common)
1411 for n in self.rheads:
1411 for n in self.rheads:
1412 if n not in c:
1412 if n not in c:
1413 ret.append(n)
1413 ret.append(n)
1414 return ret
1414 return ret
1415 else:
1415 else:
1416 # We pulled a specific subset
1416 # We pulled a specific subset
1417 # sync on this subset
1417 # sync on this subset
1418 return self.heads
1418 return self.heads
1419
1419
1420 @util.propertycache
1420 @util.propertycache
1421 def canusebundle2(self):
1421 def canusebundle2(self):
1422 return not _forcebundle1(self)
1422 return not _forcebundle1(self)
1423
1423
1424 @util.propertycache
1424 @util.propertycache
1425 def remotebundle2caps(self):
1425 def remotebundle2caps(self):
1426 return bundle2.bundle2caps(self.remote)
1426 return bundle2.bundle2caps(self.remote)
1427
1427
1428 def gettransaction(self):
1428 def gettransaction(self):
1429 # deprecated; talk to trmanager directly
1429 # deprecated; talk to trmanager directly
1430 return self.trmanager.transaction()
1430 return self.trmanager.transaction()
1431
1431
1432
1432
class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""

    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs[b'source'] = self.source
            self._tr.hookargs[b'url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()


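# Editor's note: a minimal usage sketch of the class above, not part of this
# module. It relies only on what is visible here: the (repo, source, url)
# constructor and the util.transactional context-manager behaviour (close on
# success, release otherwise), which pull() below also exercises.
#
#     trmanager = transactionmanager(repo, b'pull', remote.url())
#     with trmanager:
#         tr = trmanager.transaction()  # created lazily, reused on later calls
#         ...  # apply incoming data under this transaction

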
def listkeys(remote, namespace):
    with remote.commandexecutor() as e:
        return e.callcommand(b'listkeys', {b'namespace': namespace}).result()


def _fullpullbundle2(repo, pullop):
    # The server may send a partial reply, i.e. when inlining
    # pre-computed bundles. In that case, update the common
    # set based on the results and pull another bundle.
    #
    # There are two indicators that the process is finished:
    # - no changeset has been added, or
    # - all remote heads are known locally.
    # The head check must use the unfiltered view as obsoletion
    # markers can hide heads.
    unfi = repo.unfiltered()
    unficl = unfi.changelog

    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
        return {ctx.node() for ctx in res}

    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
        return {ctx.node() for ctx in res}

    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if requirements.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            break
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common


def add_confirm_callback(repo, pullop):
    """adds a finalize callback to the transaction which can be used to show
    stats to the user and confirm the pull before committing the transaction"""

    tr = pullop.trmanager.transaction()
    scmutil.registersummarycallback(
        repo, tr, txnname=b'pull', as_validator=True
    )
    reporef = weakref.ref(repo.unfiltered())

    def prompt(tr):
        repo = reporef()
        cm = _(b'accept incoming changes (yn)?$$ &Yes $$ &No')
        if repo.ui.promptchoice(cm):
            raise error.Abort(b"user aborted")

    tr.addvalidator(b'900-pull-prompt', prompt)


def pull(
    repo,
    remote,
    heads=None,
    force=False,
    bookmarks=(),
    opargs=None,
    streamclonerequested=None,
    includepats=None,
    excludepats=None,
    depth=None,
    confirm=None,
):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.
    ``includepats`` and ``excludepats`` define explicit file patterns to
    include and exclude in storage, respectively. If not defined, narrow
    patterns from the repo instance are used, if available.
    ``depth`` is an integer indicating the DAG depth of history we're
    interested in. If defined, for each revision specified in ``heads``, we
    will fetch up to this many of its ancestors and data associated with them.
    ``confirm`` is a boolean indicating whether the pull should be confirmed
    before committing the transaction. This overrides HGPLAIN.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}

    # We allow the narrow patterns to be passed in explicitly to provide more
    # flexibility for API consumers.
    if includepats or excludepats:
        includepats = includepats or set()
        excludepats = excludepats or set()
    else:
        includepats, excludepats = repo.narrowpats

    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    pullop = pulloperation(
        repo,
        remote,
        heads,
        force,
        bookmarks=bookmarks,
        streamclonerequested=streamclonerequested,
        includepats=includepats,
        excludepats=excludepats,
        depth=depth,
        **pycompat.strkwargs(opargs)
    )

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _(
                b"required features are not"
                b" supported in the destination:"
                b" %s"
            ) % (b', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
    wlock = util.nullcontextmanager()
    if not bookmod.bookmarksinstore(repo):
        wlock = repo.wlock()
    with wlock, repo.lock(), pullop.trmanager:
        if confirm or (
            repo.ui.configbool(b"pull", b"confirm") and not repo.ui.plain()
        ):
            add_confirm_callback(repo, pullop)

        # Use the modern wire protocol, if available.
        if remote.capable(b'command-changesetdata'):
            exchangev2.pull(pullop)
        else:
            # This should ideally be in _pullbundle2(). However, it needs to run
            # before discovery to avoid extra work.
            _maybeapplyclonebundle(pullop)
            streamclone.maybeperformlegacystreamclone(pullop)
            _pulldiscovery(pullop)
            if pullop.canusebundle2:
                _fullpullbundle2(repo, pullop)
            _pullchangeset(pullop)
            _pullphase(pullop)
            _pullbookmarks(pullop)
            _pullobsolete(pullop)

        # storing remotenames
        if repo.ui.configbool(b'experimental', b'remotenames'):
            logexchange.pullremotenames(repo, remote)

    return pullop


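# Editor's note: a hedged sketch of driving the pull() API above from calling
# code. ``hg.peer`` comes from mercurial.hg and ``source_url`` is a
# placeholder; neither is defined in this module and both are assumed here
# purely for illustration.
#
#     other = hg.peer(repo, {}, source_url)
#     try:
#         pullop = pull(repo, other, heads=None, bookmarks=[b'@'])
#         if pullop.cgresult == 0:
#             repo.ui.status(b'no changesets pulled\n')
#     finally:
#         other.close()

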
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}


def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order (this
    may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pulldiscovery dictionary directly."""

    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func

    return dec


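# Editor's note: a small sketch of registering an additional discovery step
# with the decorator above. The step name and function are hypothetical and
# shown only to illustrate the mechanism described in the docstring.
#
#     @pulldiscovery(b'my-extra-step')
#     def _pulldiscoverymystep(pullop):
#         pullop.repo.ui.debug(b'running extra pull discovery\n')

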
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)


@pulldiscovery(b'b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice to
        # new implementations.
        return
    books = listkeys(pullop.remote, b'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)


@pulldiscovery(b'changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will handle all discovery
    at some point."""
    tmp = discovery.findcommonincoming(
        pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
    )
    common, fetch, rheads = tmp
    has_node = pullop.repo.unfiltered().changelog.index.has_node
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological number of round
        # trips for a huge number of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we will not be able to
        # detect it.
        scommon = set(common)
        for n in rheads:
            if has_node(n):
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads


def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroups."""
    kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs[b'common'] = pullop.common
    kwargs[b'heads'] = pullop.heads or pullop.rheads

    # check whether the server supports narrow, then add includepats and
    # excludepats
    servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
    if servernarrow and pullop.includepats:
        kwargs[b'includepats'] = pullop.includepats
    if servernarrow and pullop.excludepats:
        kwargs[b'excludepats'] = pullop.excludepats

    if streaming:
        kwargs[b'cg'] = False
        kwargs[b'stream'] = True
        pullop.stepsdone.add(b'changegroup')
        pullop.stepsdone.add(b'phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add(b'changegroup')

        kwargs[b'cg'] = pullop.fetch

        legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
        hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
        if not legacyphase and hasbinaryphase:
            kwargs[b'phases'] = True
            pullop.stepsdone.add(b'phases')

        if b'listkeys' in pullop.remotebundle2caps:
            if b'phases' not in pullop.stepsdone:
                kwargs[b'listkeys'] = [b'phases']

    bookmarksrequested = False
    legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
    hasbinarybook = b'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add(b'request-bookmarks')

    if (
        b'request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark
        and hasbinarybook
    ):
        kwargs[b'bookmarks'] = True
        bookmarksrequested = True

    if b'listkeys' in pullop.remotebundle2caps:
        if b'request-bookmarks' not in pullop.stepsdone:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add(b'request-bookmarks')
            kwargs.setdefault(b'listkeys', []).append(b'bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (
        pullop.remote.capable(b'clonebundles')
        and pullop.heads is None
        and list(pullop.common) == [nullid]
    ):
        kwargs[b'cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_(b'streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_(b"requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs[b'obsmarkers'] = True
            pullop.stepsdone.add(b'obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args[b'source'] = b'pull'
        bundle = e.callcommand(b'getbundle', args).result()

        try:
            op = bundle2.bundleoperation(
                pullop.repo, pullop.gettransaction, source=b'pull'
            )
            op.modes[b'bookmarks'] = b'records'
            bundle2.processbundle(pullop.repo, bundle, op=op)
        except bundle2.AbortFromPart as exc:
            pullop.repo.ui.error(_(b'remote: abort: %s\n') % exc)
            raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
        except error.BundleValueError as exc:
            raise error.Abort(_(b'missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records[b'listkeys']:
        if namespace == b'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        books = {}
        for record in op.records[b'bookmarks']:
            books[record[b'bookmark']] = record[b"node"]
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records[b'listkeys']:
            if namespace == b'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data was either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)


def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""


def _pullchangeset(pullop):
    """pull changesets from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so we
    # don't open a transaction for nothing and don't break a future useful
    # rollback call
    if b'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_(b"no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_(b"requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable(b'getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle(
            b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
        )
    elif pullop.heads is None:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand(
                b'changegroup',
                {
                    b'nodes': pullop.fetch,
                    b'source': b'pull',
                },
            ).result()

    elif not pullop.remote.capable(b'changegroupsubset'):
        raise error.Abort(
            _(
                b"partial pull cannot be done because "
                b"other repository doesn't support "
                b"changegroupsubset."
            )
        )
    else:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand(
                b'changegroupsubset',
                {
                    b'bases': pullop.fetch,
                    b'heads': pullop.heads,
                    b'source': b'pull',
                },
            ).result()

    bundleop = bundle2.applybundle(
        pullop.repo, cg, tr, b'pull', pullop.remote.url()
    )
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)


def _pullphase(pullop):
    # Get remote phases data from remote
    if b'phases' in pullop.stepsdone:
        return
    remotephases = listkeys(pullop.remote, b'phases')
    _pullapplyphases(pullop, remotephases)


def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if b'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'phases')
    publishing = bool(remotephases.get(b'publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(
            pullop.repo, pullop.pulledsubset, remotephases
        )
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.index.get_rev
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)


def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if b'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(
        repo.ui,
        repo,
        remotebookmarks,
        pullop.remote.url(),
        pullop.gettransaction,
        explicit=pullop.explicitbookmarks,
    )


def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    `gettransaction` is a function that returns the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if b'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add(b'obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, b'obsolete')
        if b'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith(b'dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr


def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION,
        username + b'.includes',
        ui.configlist(_NARROWACL_SECTION, b'default.includes'),
    )
    user_excludes = ui.configlist(
        _NARROWACL_SECTION,
        username + b'.excludes',
        ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
    )
    if not user_includes:
        raise error.Abort(
            _(b"%s configuration for user %s is empty")
            % (_NARROWACL_SECTION, username)
        )

    user_includes = [
        b'path:.' if p == b'*' else b'path:' + p for p in user_includes
    ]
    user_excludes = [
        b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
    ]

    req_includes = set(kwargs.get('includepats', []))
    req_excludes = set(kwargs.get('excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes
    )

    if invalid_includes:
        raise error.Abort(
            _(b"The following includes are not accessible for %s: %s")
            % (username, stringutil.pprint(invalid_includes))
        )

    new_args = {}
    new_args.update(kwargs)
    new_args['narrow'] = True
    new_args['narrow_acl'] = True
    new_args['includepats'] = req_includes
    if req_excludes:
        new_args['excludepats'] = req_excludes

    return new_args


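# Editor's note: the configlist() lookups above imply a config section (named
# by the _NARROWACL_SECTION constant defined earlier in this module) holding
# per-user ``<user>.includes`` / ``<user>.excludes`` keys with ``default.*``
# fallbacks, where ``*`` means everything (rewritten to ``path:.``). A hedged
# hgrc sketch, with the section name and paths assumed purely for illustration:
#
#     [narrowacl]
#     default.includes = public
#     alice.includes = public, team
#     alice.excludes = team/secrets

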
def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
        May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
        most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
          need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
          the narrowspec. The client needs these as non-ellipsis nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
          narrowchangegroup to produce ellipsis nodes with the
          correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    clrev = cl.rev

    commonrevs = {clrev(n) for n in common} | {nullrev}
    headsrevs = {clrev(n) for n in heads}

    if depth:
        revdepth = {h: 0 for h in headsrevs}

    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child  # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs(
                b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
            )
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort(
            _(
                b'Failed to split up ellipsis node! head: %d, '
                b'roots: %d %d %d'
            )
            % (head, r1, r2, r3)
        )

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    for rev in visit:
        clrev = cl.changelogrevision(rev)
        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots


2206 def caps20to10(repo, role):
2206 def caps20to10(repo, role):
2207 """return a set with appropriate options to use bundle20 during getbundle"""
2207 """return a set with appropriate options to use bundle20 during getbundle"""
2208 caps = {b'HG20'}
2208 caps = {b'HG20'}
2209 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2209 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2210 caps.add(b'bundle2=' + urlreq.quote(capsblob))
2210 caps.add(b'bundle2=' + urlreq.quote(capsblob))
2211 return caps
2211 return caps
2212
2212
2213
2213
2214 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2214 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2215 getbundle2partsorder = []
2215 getbundle2partsorder = []
2216
2216
2217 # Mapping between step name and function
2217 # Mapping between step name and function
2218 #
2218 #
2219 # This exists to help extensions wrap steps if necessary
2219 # This exists to help extensions wrap steps if necessary
2220 getbundle2partsmapping = {}
2220 getbundle2partsmapping = {}
2221
2221
2222
2222
2223 def getbundle2partsgenerator(stepname, idx=None):
2223 def getbundle2partsgenerator(stepname, idx=None):
2224 """decorator for function generating bundle2 part for getbundle
2224 """decorator for function generating bundle2 part for getbundle
2225
2225
2226 The function is added to the step -> function mapping and appended to the
2226 The function is added to the step -> function mapping and appended to the
2227 list of steps. Beware that decorated functions will be added in order
2227 list of steps. Beware that decorated functions will be added in order
2228 (this may matter).
2228 (this may matter).
2229
2229
2230 You can only use this decorator for new steps; if you want to wrap a step
2230 You can only use this decorator for new steps; if you want to wrap a step
2231 from an extension, attach to the getbundle2partsmapping dictionary directly."""
2231 from an extension, attach to the getbundle2partsmapping dictionary directly."""
2232
2232
2233 def dec(func):
2233 def dec(func):
2234 assert stepname not in getbundle2partsmapping
2234 assert stepname not in getbundle2partsmapping
2235 getbundle2partsmapping[stepname] = func
2235 getbundle2partsmapping[stepname] = func
2236 if idx is None:
2236 if idx is None:
2237 getbundle2partsorder.append(stepname)
2237 getbundle2partsorder.append(stepname)
2238 else:
2238 else:
2239 getbundle2partsorder.insert(idx, stepname)
2239 getbundle2partsorder.insert(idx, stepname)
2240 return func
2240 return func
2241
2241
2242 return dec
2242 return dec
2243
2243
2244
2244
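For illustration only, here is a minimal sketch of how third-party code could use this registry; the step name, part name and keyword argument are invented, and only getbundle2partsgenerator and getbundle2partsmapping come from this module:

from mercurial import exchange

@exchange.getbundle2partsgenerator(b'myext-example')
def _getbundleexamplepart(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    # Emit a made-up part only when the (hypothetical) argument asks for it.
    if not kwargs.get('myext_example', False):
        return
    bundler.newpart(b'myext:example', data=b'payload', mandatory=False)

# Wrapping an existing step, as the docstring suggests, goes through the
# mapping directly instead of the decorator.
origcg = exchange.getbundle2partsmapping[b'changegroup']

def wrappedcg(bundler, repo, source, **kwargs):
    repo.ui.debug(b'about to generate the changegroup part\n')
    return origcg(bundler, repo, source, **kwargs)

exchange.getbundle2partsmapping[b'changegroup'] = wrappedcg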
2245 def bundle2requested(bundlecaps):
2245 def bundle2requested(bundlecaps):
2246 if bundlecaps is not None:
2246 if bundlecaps is not None:
2247 return any(cap.startswith(b'HG2') for cap in bundlecaps)
2247 return any(cap.startswith(b'HG2') for cap in bundlecaps)
2248 return False
2248 return False
2249
2249
2250
2250
2251 def getbundlechunks(
2251 def getbundlechunks(
2252 repo, source, heads=None, common=None, bundlecaps=None, **kwargs
2252 repo,
2253 source,
2254 heads=None,
2255 common=None,
2256 bundlecaps=None,
2257 remote_sidedata=None,
2258 **kwargs
2253 ):
2259 ):
2254 """Return chunks constituting a bundle's raw data.
2260 """Return chunks constituting a bundle's raw data.
2255
2261
2256 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2262 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2257 passed.
2263 passed.
2258
2264
2259 Returns a 2-tuple of a dict with metadata about the generated bundle
2265 Returns a 2-tuple of a dict with metadata about the generated bundle
2260 and an iterator over raw chunks (of varying sizes).
2266 and an iterator over raw chunks (of varying sizes).
2261 """
2267 """
2262 kwargs = pycompat.byteskwargs(kwargs)
2268 kwargs = pycompat.byteskwargs(kwargs)
2263 info = {}
2269 info = {}
2264 usebundle2 = bundle2requested(bundlecaps)
2270 usebundle2 = bundle2requested(bundlecaps)
2265 # bundle10 case
2271 # bundle10 case
2266 if not usebundle2:
2272 if not usebundle2:
2267 if bundlecaps and not kwargs.get(b'cg', True):
2273 if bundlecaps and not kwargs.get(b'cg', True):
2268 raise ValueError(
2274 raise ValueError(
2269 _(b'request for bundle10 must include changegroup')
2275 _(b'request for bundle10 must include changegroup')
2270 )
2276 )
2271
2277
2272 if kwargs:
2278 if kwargs:
2273 raise ValueError(
2279 raise ValueError(
2274 _(b'unsupported getbundle arguments: %s')
2280 _(b'unsupported getbundle arguments: %s')
2275 % b', '.join(sorted(kwargs.keys()))
2281 % b', '.join(sorted(kwargs.keys()))
2276 )
2282 )
2277 outgoing = _computeoutgoing(repo, heads, common)
2283 outgoing = _computeoutgoing(repo, heads, common)
2278 info[b'bundleversion'] = 1
2284 info[b'bundleversion'] = 1
2279 return (
2285 return (
2280 info,
2286 info,
2281 changegroup.makestream(
2287 changegroup.makestream(
2282 repo, outgoing, b'01', source, bundlecaps=bundlecaps
2288 repo,
2289 outgoing,
2290 b'01',
2291 source,
2292 bundlecaps=bundlecaps,
2293 remote_sidedata=remote_sidedata,
2283 ),
2294 ),
2284 )
2295 )
2285
2296
2286 # bundle20 case
2297 # bundle20 case
2287 info[b'bundleversion'] = 2
2298 info[b'bundleversion'] = 2
2288 b2caps = {}
2299 b2caps = {}
2289 for bcaps in bundlecaps:
2300 for bcaps in bundlecaps:
2290 if bcaps.startswith(b'bundle2='):
2301 if bcaps.startswith(b'bundle2='):
2291 blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
2302 blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
2292 b2caps.update(bundle2.decodecaps(blob))
2303 b2caps.update(bundle2.decodecaps(blob))
2293 bundler = bundle2.bundle20(repo.ui, b2caps)
2304 bundler = bundle2.bundle20(repo.ui, b2caps)
2294
2305
2295 kwargs[b'heads'] = heads
2306 kwargs[b'heads'] = heads
2296 kwargs[b'common'] = common
2307 kwargs[b'common'] = common
2297
2308
2298 for name in getbundle2partsorder:
2309 for name in getbundle2partsorder:
2299 func = getbundle2partsmapping[name]
2310 func = getbundle2partsmapping[name]
2300 func(
2311 func(
2301 bundler,
2312 bundler,
2302 repo,
2313 repo,
2303 source,
2314 source,
2304 bundlecaps=bundlecaps,
2315 bundlecaps=bundlecaps,
2305 b2caps=b2caps,
2316 b2caps=b2caps,
2317 remote_sidedata=remote_sidedata,
2306 **pycompat.strkwargs(kwargs)
2318 **pycompat.strkwargs(kwargs)
2307 )
2319 )
2308
2320
2309 info[b'prefercompressed'] = bundler.prefercompressed
2321 info[b'prefercompressed'] = bundler.prefercompressed
2310
2322
2311 return info, bundler.getchunks()
2323 return info, bundler.getchunks()
2312
2324
2313
2325
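As a rough sketch of the calling contract, assuming a writable transport object named out (the real callers live in the wire protocol layer, which is not shown here):

def write_bundle(repo, out, heads=None, common=None, bundlecaps=None):
    # info describes the bundle (e.g. info[b'bundleversion'] is 1 or 2);
    # the second element is a generator of raw chunks to stream out.
    info, chunks = getbundlechunks(
        repo, b'serve', heads=heads, common=common, bundlecaps=bundlecaps
    )
    for chunk in chunks:
        out.write(chunk)
    return info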
2314 @getbundle2partsgenerator(b'stream2')
2326 @getbundle2partsgenerator(b'stream2')
2315 def _getbundlestream2(bundler, repo, *args, **kwargs):
2327 def _getbundlestream2(bundler, repo, *args, **kwargs):
2316 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2328 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2317
2329
2318
2330
2319 @getbundle2partsgenerator(b'changegroup')
2331 @getbundle2partsgenerator(b'changegroup')
2320 def _getbundlechangegrouppart(
2332 def _getbundlechangegrouppart(
2321 bundler,
2333 bundler,
2322 repo,
2334 repo,
2323 source,
2335 source,
2324 bundlecaps=None,
2336 bundlecaps=None,
2325 b2caps=None,
2337 b2caps=None,
2326 heads=None,
2338 heads=None,
2327 common=None,
2339 common=None,
2340 remote_sidedata=None,
2328 **kwargs
2341 **kwargs
2329 ):
2342 ):
2330 """add a changegroup part to the requested bundle"""
2343 """add a changegroup part to the requested bundle"""
2331 if not kwargs.get('cg', True) or not b2caps:
2344 if not kwargs.get('cg', True) or not b2caps:
2332 return
2345 return
2333
2346
2334 version = b'01'
2347 version = b'01'
2335 cgversions = b2caps.get(b'changegroup')
2348 cgversions = b2caps.get(b'changegroup')
2336 if cgversions: # 3.1 and 3.2 ship with an empty value
2349 if cgversions: # 3.1 and 3.2 ship with an empty value
2337 cgversions = [
2350 cgversions = [
2338 v
2351 v
2339 for v in cgversions
2352 for v in cgversions
2340 if v in changegroup.supportedoutgoingversions(repo)
2353 if v in changegroup.supportedoutgoingversions(repo)
2341 ]
2354 ]
2342 if not cgversions:
2355 if not cgversions:
2343 raise error.Abort(_(b'no common changegroup version'))
2356 raise error.Abort(_(b'no common changegroup version'))
2344 version = max(cgversions)
2357 version = max(cgversions)
2345
2358
2346 outgoing = _computeoutgoing(repo, heads, common)
2359 outgoing = _computeoutgoing(repo, heads, common)
2347 if not outgoing.missing:
2360 if not outgoing.missing:
2348 return
2361 return
2349
2362
2350 if kwargs.get('narrow', False):
2363 if kwargs.get('narrow', False):
2351 include = sorted(filter(bool, kwargs.get('includepats', [])))
2364 include = sorted(filter(bool, kwargs.get('includepats', [])))
2352 exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
2365 exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
2353 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2366 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2354 else:
2367 else:
2355 matcher = None
2368 matcher = None
2356
2369
2357 cgstream = changegroup.makestream(
2370 cgstream = changegroup.makestream(
2358 repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
2371 repo,
2372 outgoing,
2373 version,
2374 source,
2375 bundlecaps=bundlecaps,
2376 matcher=matcher,
2377 remote_sidedata=remote_sidedata,
2359 )
2378 )
2360
2379
2361 part = bundler.newpart(b'changegroup', data=cgstream)
2380 part = bundler.newpart(b'changegroup', data=cgstream)
2362 if cgversions:
2381 if cgversions:
2363 part.addparam(b'version', version)
2382 part.addparam(b'version', version)
2364
2383
2365 part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
2384 part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
2366
2385
2367 if scmutil.istreemanifest(repo):
2386 if scmutil.istreemanifest(repo):
2368 part.addparam(b'treemanifest', b'1')
2387 part.addparam(b'treemanifest', b'1')
2369
2388
2370 if b'exp-sidedata-flag' in repo.requirements:
2389 if b'exp-sidedata-flag' in repo.requirements:
2371 part.addparam(b'exp-sidedata', b'1')
2390 part.addparam(b'exp-sidedata', b'1')
2372
2391
2373 if (
2392 if (
2374 kwargs.get('narrow', False)
2393 kwargs.get('narrow', False)
2375 and kwargs.get('narrow_acl', False)
2394 and kwargs.get('narrow_acl', False)
2376 and (include or exclude)
2395 and (include or exclude)
2377 ):
2396 ):
2378 # this is mandatory because otherwise ACL clients won't work
2397 # this is mandatory because otherwise ACL clients won't work
2379 narrowspecpart = bundler.newpart(b'Narrow:responsespec')
2398 narrowspecpart = bundler.newpart(b'Narrow:responsespec')
2380 narrowspecpart.data = b'%s\0%s' % (
2399 narrowspecpart.data = b'%s\0%s' % (
2381 b'\n'.join(include),
2400 b'\n'.join(include),
2382 b'\n'.join(exclude),
2401 b'\n'.join(exclude),
2383 )
2402 )
2384
2403
2385
2404
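The version negotiation above amounts to taking the highest changegroup version both sides understand; a toy illustration with invented capability values (b'04' would only be chosen once both ends advertise it):

server_supported = {b'01', b'02', b'03'}          # supportedoutgoingversions()
client_advertised = [b'01', b'02', b'03', b'04']  # b2caps.get(b'changegroup')

common_versions = [v for v in client_advertised if v in server_supported]
if not common_versions:
    raise ValueError('no common changegroup version')
version = max(common_versions)  # b'03' in this invented example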
2386 @getbundle2partsgenerator(b'bookmarks')
2405 @getbundle2partsgenerator(b'bookmarks')
2387 def _getbundlebookmarkpart(
2406 def _getbundlebookmarkpart(
2388 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2407 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2389 ):
2408 ):
2390 """add a bookmark part to the requested bundle"""
2409 """add a bookmark part to the requested bundle"""
2391 if not kwargs.get('bookmarks', False):
2410 if not kwargs.get('bookmarks', False):
2392 return
2411 return
2393 if not b2caps or b'bookmarks' not in b2caps:
2412 if not b2caps or b'bookmarks' not in b2caps:
2394 raise error.Abort(_(b'no common bookmarks exchange method'))
2413 raise error.Abort(_(b'no common bookmarks exchange method'))
2395 books = bookmod.listbinbookmarks(repo)
2414 books = bookmod.listbinbookmarks(repo)
2396 data = bookmod.binaryencode(books)
2415 data = bookmod.binaryencode(books)
2397 if data:
2416 if data:
2398 bundler.newpart(b'bookmarks', data=data)
2417 bundler.newpart(b'bookmarks', data=data)
2399
2418
2400
2419
2401 @getbundle2partsgenerator(b'listkeys')
2420 @getbundle2partsgenerator(b'listkeys')
2402 def _getbundlelistkeysparts(
2421 def _getbundlelistkeysparts(
2403 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2422 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2404 ):
2423 ):
2405 """add parts containing listkeys namespaces to the requested bundle"""
2424 """add parts containing listkeys namespaces to the requested bundle"""
2406 listkeys = kwargs.get('listkeys', ())
2425 listkeys = kwargs.get('listkeys', ())
2407 for namespace in listkeys:
2426 for namespace in listkeys:
2408 part = bundler.newpart(b'listkeys')
2427 part = bundler.newpart(b'listkeys')
2409 part.addparam(b'namespace', namespace)
2428 part.addparam(b'namespace', namespace)
2410 keys = repo.listkeys(namespace).items()
2429 keys = repo.listkeys(namespace).items()
2411 part.data = pushkey.encodekeys(keys)
2430 part.data = pushkey.encodekeys(keys)
2412
2431
2413
2432
2414 @getbundle2partsgenerator(b'obsmarkers')
2433 @getbundle2partsgenerator(b'obsmarkers')
2415 def _getbundleobsmarkerpart(
2434 def _getbundleobsmarkerpart(
2416 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2435 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2417 ):
2436 ):
2418 """add an obsolescence markers part to the requested bundle"""
2437 """add an obsolescence markers part to the requested bundle"""
2419 if kwargs.get('obsmarkers', False):
2438 if kwargs.get('obsmarkers', False):
2420 if heads is None:
2439 if heads is None:
2421 heads = repo.heads()
2440 heads = repo.heads()
2422 subset = [c.node() for c in repo.set(b'::%ln', heads)]
2441 subset = [c.node() for c in repo.set(b'::%ln', heads)]
2423 markers = repo.obsstore.relevantmarkers(subset)
2442 markers = repo.obsstore.relevantmarkers(subset)
2424 markers = obsutil.sortedmarkers(markers)
2443 markers = obsutil.sortedmarkers(markers)
2425 bundle2.buildobsmarkerspart(bundler, markers)
2444 bundle2.buildobsmarkerspart(bundler, markers)
2426
2445
2427
2446
2428 @getbundle2partsgenerator(b'phases')
2447 @getbundle2partsgenerator(b'phases')
2429 def _getbundlephasespart(
2448 def _getbundlephasespart(
2430 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2449 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2431 ):
2450 ):
2432 """add phase heads part to the requested bundle"""
2451 """add phase heads part to the requested bundle"""
2433 if kwargs.get('phases', False):
2452 if kwargs.get('phases', False):
2434 if not b2caps or b'heads' not in b2caps.get(b'phases'):
2453 if not b2caps or b'heads' not in b2caps.get(b'phases'):
2435 raise error.Abort(_(b'no common phases exchange method'))
2454 raise error.Abort(_(b'no common phases exchange method'))
2436 if heads is None:
2455 if heads is None:
2437 heads = repo.heads()
2456 heads = repo.heads()
2438
2457
2439 headsbyphase = collections.defaultdict(set)
2458 headsbyphase = collections.defaultdict(set)
2440 if repo.publishing():
2459 if repo.publishing():
2441 headsbyphase[phases.public] = heads
2460 headsbyphase[phases.public] = heads
2442 else:
2461 else:
2443 # find the appropriate heads to move
2462 # find the appropriate heads to move
2444
2463
2445 phase = repo._phasecache.phase
2464 phase = repo._phasecache.phase
2446 node = repo.changelog.node
2465 node = repo.changelog.node
2447 rev = repo.changelog.rev
2466 rev = repo.changelog.rev
2448 for h in heads:
2467 for h in heads:
2449 headsbyphase[phase(repo, rev(h))].add(h)
2468 headsbyphase[phase(repo, rev(h))].add(h)
2450 seenphases = list(headsbyphase.keys())
2469 seenphases = list(headsbyphase.keys())
2451
2470
2452 # We do not handle anything but public and draft phases for now
2471 # We do not handle anything but public and draft phases for now
2453 if seenphases:
2472 if seenphases:
2454 assert max(seenphases) <= phases.draft
2473 assert max(seenphases) <= phases.draft
2455
2474
2456 # if client is pulling non-public changesets, we need to find
2475 # if client is pulling non-public changesets, we need to find
2457 # intermediate public heads.
2476 # intermediate public heads.
2458 draftheads = headsbyphase.get(phases.draft, set())
2477 draftheads = headsbyphase.get(phases.draft, set())
2459 if draftheads:
2478 if draftheads:
2460 publicheads = headsbyphase.get(phases.public, set())
2479 publicheads = headsbyphase.get(phases.public, set())
2461
2480
2462 revset = b'heads(only(%ln, %ln) and public())'
2481 revset = b'heads(only(%ln, %ln) and public())'
2463 extraheads = repo.revs(revset, draftheads, publicheads)
2482 extraheads = repo.revs(revset, draftheads, publicheads)
2464 for r in extraheads:
2483 for r in extraheads:
2465 headsbyphase[phases.public].add(node(r))
2484 headsbyphase[phases.public].add(node(r))
2466
2485
2467 # transform data in a format used by the encoding function
2486 # transform data in a format used by the encoding function
2468 phasemapping = {
2487 phasemapping = {
2469 phase: sorted(headsbyphase[phase]) for phase in phases.allphases
2488 phase: sorted(headsbyphase[phase]) for phase in phases.allphases
2470 }
2489 }
2471
2490
2472 # generate the actual part
2491 # generate the actual part
2473 phasedata = phases.binaryencode(phasemapping)
2492 phasedata = phases.binaryencode(phasemapping)
2474 bundler.newpart(b'phase-heads', data=phasedata)
2493 bundler.newpart(b'phase-heads', data=phasedata)
2475
2494
2476
2495
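In isolation, the head grouping above is just a bucket-by-phase step followed by sorting; a toy version with invented phase numbers (0 public, 1 draft) and fake 20-byte nodes:

import collections

heads_with_phase = [(b'\x11' * 20, 0), (b'\x22' * 20, 1), (b'\x33' * 20, 1)]

headsbyphase = collections.defaultdict(set)
for node, phase in heads_with_phase:
    headsbyphase[phase].add(node)

# Sorted node lists per phase, the shape handed to phases.binaryencode() above.
phasemapping = {phase: sorted(nodes) for phase, nodes in headsbyphase.items()}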
2477 @getbundle2partsgenerator(b'hgtagsfnodes')
2496 @getbundle2partsgenerator(b'hgtagsfnodes')
2478 def _getbundletagsfnodes(
2497 def _getbundletagsfnodes(
2479 bundler,
2498 bundler,
2480 repo,
2499 repo,
2481 source,
2500 source,
2482 bundlecaps=None,
2501 bundlecaps=None,
2483 b2caps=None,
2502 b2caps=None,
2484 heads=None,
2503 heads=None,
2485 common=None,
2504 common=None,
2486 **kwargs
2505 **kwargs
2487 ):
2506 ):
2488 """Transfer the .hgtags filenodes mapping.
2507 """Transfer the .hgtags filenodes mapping.
2489
2508
2490 Only values for heads in this bundle will be transferred.
2509 Only values for heads in this bundle will be transferred.
2491
2510
2492 The part data consists of pairs of 20 byte changeset node and .hgtags
2511 The part data consists of pairs of 20 byte changeset node and .hgtags
2493 filenodes raw values.
2512 filenodes raw values.
2494 """
2513 """
2495 # Don't send unless:
2514 # Don't send unless:
2496 # - changesets are being exchanged,
2515 # - changesets are being exchanged,
2497 # - the client supports it.
2516 # - the client supports it.
2498 if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
2517 if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
2499 return
2518 return
2500
2519
2501 outgoing = _computeoutgoing(repo, heads, common)
2520 outgoing = _computeoutgoing(repo, heads, common)
2502 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2521 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2503
2522
2504
2523
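Since the part body is just consecutive (changeset node, .hgtags filenode) 20-byte pairs, decoding it on the receiving side is straightforward; a small illustrative decoder based only on the docstring above:

def iter_hgtagsfnodes_pairs(data):
    # data: raw part payload; every 40 bytes is one node/filenode pair.
    assert len(data) % 40 == 0
    for offset in range(0, len(data), 40):
        yield data[offset:offset + 20], data[offset + 20:offset + 40]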
2505 @getbundle2partsgenerator(b'cache:rev-branch-cache')
2524 @getbundle2partsgenerator(b'cache:rev-branch-cache')
2506 def _getbundlerevbranchcache(
2525 def _getbundlerevbranchcache(
2507 bundler,
2526 bundler,
2508 repo,
2527 repo,
2509 source,
2528 source,
2510 bundlecaps=None,
2529 bundlecaps=None,
2511 b2caps=None,
2530 b2caps=None,
2512 heads=None,
2531 heads=None,
2513 common=None,
2532 common=None,
2514 **kwargs
2533 **kwargs
2515 ):
2534 ):
2516 """Transfer the rev-branch-cache mapping
2535 """Transfer the rev-branch-cache mapping
2517
2536
2518 The payload is a series of data related to each branch
2537 The payload is a series of data related to each branch
2519
2538
2520 1) branch name length
2539 1) branch name length
2521 2) number of open heads
2540 2) number of open heads
2522 3) number of closed heads
2541 3) number of closed heads
2523 4) open heads nodes
2542 4) open heads nodes
2524 5) closed heads nodes
2543 5) closed heads nodes
2525 """
2544 """
2526 # Don't send unless:
2545 # Don't send unless:
2527 # - changesets are being exchanged,
2546 # - changesets are being exchanged,
2528 # - the client supports it.
2547 # - the client supports it.
2529 # - narrow bundle isn't in play (not currently compatible).
2548 # - narrow bundle isn't in play (not currently compatible).
2530 if (
2549 if (
2531 not kwargs.get('cg', True)
2550 not kwargs.get('cg', True)
2532 or not b2caps
2551 or not b2caps
2533 or b'rev-branch-cache' not in b2caps
2552 or b'rev-branch-cache' not in b2caps
2534 or kwargs.get('narrow', False)
2553 or kwargs.get('narrow', False)
2535 or repo.ui.has_section(_NARROWACL_SECTION)
2554 or repo.ui.has_section(_NARROWACL_SECTION)
2536 ):
2555 ):
2537 return
2556 return
2538
2557
2539 outgoing = _computeoutgoing(repo, heads, common)
2558 outgoing = _computeoutgoing(repo, heads, common)
2540 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2559 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2541
2560
2542
2561
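Based only on the field list in the docstring above, one branch entry could be serialized roughly as follows; this is an illustrative sketch, the authoritative encoder is bundle2.addpartrevbranchcache and its exact layout may differ:

import struct

def encode_branch_entry(branch, openheads, closedheads):
    # Pack the documented counts, then the branch name and the raw nodes.
    header = struct.pack(
        '>III', len(branch), len(openheads), len(closedheads)
    )
    return header + branch + b''.join(openheads) + b''.join(closedheads)

# Invented sample: one open and one closed 20-byte head on branch 'default'.
entry = encode_branch_entry(b'default', [b'\x11' * 20], [b'\x22' * 20])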
2543 def check_heads(repo, their_heads, context):
2562 def check_heads(repo, their_heads, context):
2544 """check if the heads of a repo have been modified
2563 """check if the heads of a repo have been modified
2545
2564
2546 Used by peer for unbundling.
2565 Used by peer for unbundling.
2547 """
2566 """
2548 heads = repo.heads()
2567 heads = repo.heads()
2549 heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()
2568 heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()
2550 if not (
2569 if not (
2551 their_heads == [b'force']
2570 their_heads == [b'force']
2552 or their_heads == heads
2571 or their_heads == heads
2553 or their_heads == [b'hashed', heads_hash]
2572 or their_heads == [b'hashed', heads_hash]
2554 ):
2573 ):
2555 # someone else committed/pushed/unbundled while we
2574 # someone else committed/pushed/unbundled while we
2556 # were transferring data
2575 # were transferring data
2557 raise error.PushRaced(
2576 raise error.PushRaced(
2558 b'repository changed while %s - please try again' % context
2577 b'repository changed while %s - please try again' % context
2559 )
2578 )
2560
2579
2561
2580
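The [b'hashed', ...] form accepted above is just a sha1 over the sorted binary heads the client saw on the remote; a client-side sketch using the standard library (the hashutil.sha1 wrapper used by the server produces the same digest here):

import hashlib

def hashed_heads_token(observed_heads):
    # observed_heads: list of 20-byte binary changeset nodes from the remote.
    digest = hashlib.sha1(b''.join(sorted(observed_heads))).digest()
    return [b'hashed', digest]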
2562 def unbundle(repo, cg, heads, source, url):
2581 def unbundle(repo, cg, heads, source, url):
2563 """Apply a bundle to a repo.
2582 """Apply a bundle to a repo.
2564
2583
2565 This function makes sure the repo is locked during the application and has a
2584 This function makes sure the repo is locked during the application and has a
2566 mechanism to check that no push race occurred between the creation of the
2585 mechanism to check that no push race occurred between the creation of the
2567 bundle and its application.
2586 bundle and its application.
2568
2587
2569 If the push was raced, a PushRaced exception is raised."""
2588 If the push was raced, a PushRaced exception is raised."""
2570 r = 0
2589 r = 0
2571 # need a transaction when processing a bundle2 stream
2590 # need a transaction when processing a bundle2 stream
2572 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2591 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2573 lockandtr = [None, None, None]
2592 lockandtr = [None, None, None]
2574 recordout = None
2593 recordout = None
2575 # quick fix for output mismatch with bundle2 in 3.4
2594 # quick fix for output mismatch with bundle2 in 3.4
2576 captureoutput = repo.ui.configbool(
2595 captureoutput = repo.ui.configbool(
2577 b'experimental', b'bundle2-output-capture'
2596 b'experimental', b'bundle2-output-capture'
2578 )
2597 )
2579 if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
2598 if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
2580 captureoutput = True
2599 captureoutput = True
2581 try:
2600 try:
2582 # note: outside bundle1, 'heads' is expected to be empty and this
2601 # note: outside bundle1, 'heads' is expected to be empty and this
2583 # 'check_heads' call will be a no-op
2602 # 'check_heads' call will be a no-op
2584 check_heads(repo, heads, b'uploading changes')
2603 check_heads(repo, heads, b'uploading changes')
2585 # push can proceed
2604 # push can proceed
2586 if not isinstance(cg, bundle2.unbundle20):
2605 if not isinstance(cg, bundle2.unbundle20):
2587 # legacy case: bundle1 (changegroup 01)
2606 # legacy case: bundle1 (changegroup 01)
2588 txnname = b"\n".join([source, util.hidepassword(url)])
2607 txnname = b"\n".join([source, util.hidepassword(url)])
2589 with repo.lock(), repo.transaction(txnname) as tr:
2608 with repo.lock(), repo.transaction(txnname) as tr:
2590 op = bundle2.applybundle(repo, cg, tr, source, url)
2609 op = bundle2.applybundle(repo, cg, tr, source, url)
2591 r = bundle2.combinechangegroupresults(op)
2610 r = bundle2.combinechangegroupresults(op)
2592 else:
2611 else:
2593 r = None
2612 r = None
2594 try:
2613 try:
2595
2614
2596 def gettransaction():
2615 def gettransaction():
2597 if not lockandtr[2]:
2616 if not lockandtr[2]:
2598 if not bookmod.bookmarksinstore(repo):
2617 if not bookmod.bookmarksinstore(repo):
2599 lockandtr[0] = repo.wlock()
2618 lockandtr[0] = repo.wlock()
2600 lockandtr[1] = repo.lock()
2619 lockandtr[1] = repo.lock()
2601 lockandtr[2] = repo.transaction(source)
2620 lockandtr[2] = repo.transaction(source)
2602 lockandtr[2].hookargs[b'source'] = source
2621 lockandtr[2].hookargs[b'source'] = source
2603 lockandtr[2].hookargs[b'url'] = url
2622 lockandtr[2].hookargs[b'url'] = url
2604 lockandtr[2].hookargs[b'bundle2'] = b'1'
2623 lockandtr[2].hookargs[b'bundle2'] = b'1'
2605 return lockandtr[2]
2624 return lockandtr[2]
2606
2625
2607 # Do greedy locking by default until we're satisfied with lazy
2626 # Do greedy locking by default until we're satisfied with lazy
2608 # locking.
2627 # locking.
2609 if not repo.ui.configbool(
2628 if not repo.ui.configbool(
2610 b'experimental', b'bundle2lazylocking'
2629 b'experimental', b'bundle2lazylocking'
2611 ):
2630 ):
2612 gettransaction()
2631 gettransaction()
2613
2632
2614 op = bundle2.bundleoperation(
2633 op = bundle2.bundleoperation(
2615 repo,
2634 repo,
2616 gettransaction,
2635 gettransaction,
2617 captureoutput=captureoutput,
2636 captureoutput=captureoutput,
2618 source=b'push',
2637 source=b'push',
2619 )
2638 )
2620 try:
2639 try:
2621 op = bundle2.processbundle(repo, cg, op=op)
2640 op = bundle2.processbundle(repo, cg, op=op)
2622 finally:
2641 finally:
2623 r = op.reply
2642 r = op.reply
2624 if captureoutput and r is not None:
2643 if captureoutput and r is not None:
2625 repo.ui.pushbuffer(error=True, subproc=True)
2644 repo.ui.pushbuffer(error=True, subproc=True)
2626
2645
2627 def recordout(output):
2646 def recordout(output):
2628 r.newpart(b'output', data=output, mandatory=False)
2647 r.newpart(b'output', data=output, mandatory=False)
2629
2648
2630 if lockandtr[2] is not None:
2649 if lockandtr[2] is not None:
2631 lockandtr[2].close()
2650 lockandtr[2].close()
2632 except BaseException as exc:
2651 except BaseException as exc:
2633 exc.duringunbundle2 = True
2652 exc.duringunbundle2 = True
2634 if captureoutput and r is not None:
2653 if captureoutput and r is not None:
2635 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2654 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2636
2655
2637 def recordout(output):
2656 def recordout(output):
2638 part = bundle2.bundlepart(
2657 part = bundle2.bundlepart(
2639 b'output', data=output, mandatory=False
2658 b'output', data=output, mandatory=False
2640 )
2659 )
2641 parts.append(part)
2660 parts.append(part)
2642
2661
2643 raise
2662 raise
2644 finally:
2663 finally:
2645 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2664 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2646 if recordout is not None:
2665 if recordout is not None:
2647 recordout(repo.ui.popbuffer())
2666 recordout(repo.ui.popbuffer())
2648 return r
2667 return r
2649
2668
2650
2669
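The lockandtr list above is the list-as-mutable-cell idiom: a nested function cannot rebind an outer local without nonlocal (which this Python 2 compatible code avoids), but it can assign into a shared list. Stripped of the Mercurial specifics, the pattern is simply:

cell = [None]

def get_resource():
    # Create the expensive resource on first use, then keep reusing it;
    # object() stands in for the wlock/lock/transaction setup above.
    if cell[0] is None:
        cell[0] = object()
    return cell[0]

assert get_resource() is get_resource()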
2651 def _maybeapplyclonebundle(pullop):
2670 def _maybeapplyclonebundle(pullop):
2652 """Apply a clone bundle from a remote, if possible."""
2671 """Apply a clone bundle from a remote, if possible."""
2653
2672
2654 repo = pullop.repo
2673 repo = pullop.repo
2655 remote = pullop.remote
2674 remote = pullop.remote
2656
2675
2657 if not repo.ui.configbool(b'ui', b'clonebundles'):
2676 if not repo.ui.configbool(b'ui', b'clonebundles'):
2658 return
2677 return
2659
2678
2660 # Only run if local repo is empty.
2679 # Only run if local repo is empty.
2661 if len(repo):
2680 if len(repo):
2662 return
2681 return
2663
2682
2664 if pullop.heads:
2683 if pullop.heads:
2665 return
2684 return
2666
2685
2667 if not remote.capable(b'clonebundles'):
2686 if not remote.capable(b'clonebundles'):
2668 return
2687 return
2669
2688
2670 with remote.commandexecutor() as e:
2689 with remote.commandexecutor() as e:
2671 res = e.callcommand(b'clonebundles', {}).result()
2690 res = e.callcommand(b'clonebundles', {}).result()
2672
2691
2673 # If we call the wire protocol command, that's good enough to record the
2692 # If we call the wire protocol command, that's good enough to record the
2674 # attempt.
2693 # attempt.
2675 pullop.clonebundleattempted = True
2694 pullop.clonebundleattempted = True
2676
2695
2677 entries = bundlecaches.parseclonebundlesmanifest(repo, res)
2696 entries = bundlecaches.parseclonebundlesmanifest(repo, res)
2678 if not entries:
2697 if not entries:
2679 repo.ui.note(
2698 repo.ui.note(
2680 _(
2699 _(
2681 b'no clone bundles available on remote; '
2700 b'no clone bundles available on remote; '
2682 b'falling back to regular clone\n'
2701 b'falling back to regular clone\n'
2683 )
2702 )
2684 )
2703 )
2685 return
2704 return
2686
2705
2687 entries = bundlecaches.filterclonebundleentries(
2706 entries = bundlecaches.filterclonebundleentries(
2688 repo, entries, streamclonerequested=pullop.streamclonerequested
2707 repo, entries, streamclonerequested=pullop.streamclonerequested
2689 )
2708 )
2690
2709
2691 if not entries:
2710 if not entries:
2692 # There is a thundering herd concern here. However, if a server
2711 # There is a thundering herd concern here. However, if a server
2693 # operator doesn't advertise bundles appropriate for its clients,
2712 # operator doesn't advertise bundles appropriate for its clients,
2694 # they deserve what's coming. Furthermore, from a client's
2713 # they deserve what's coming. Furthermore, from a client's
2695 # perspective, no automatic fallback would mean not being able to
2714 # perspective, no automatic fallback would mean not being able to
2696 # clone!
2715 # clone!
2697 repo.ui.warn(
2716 repo.ui.warn(
2698 _(
2717 _(
2699 b'no compatible clone bundles available on server; '
2718 b'no compatible clone bundles available on server; '
2700 b'falling back to regular clone\n'
2719 b'falling back to regular clone\n'
2701 )
2720 )
2702 )
2721 )
2703 repo.ui.warn(
2722 repo.ui.warn(
2704 _(b'(you may want to report this to the server operator)\n')
2723 _(b'(you may want to report this to the server operator)\n')
2705 )
2724 )
2706 return
2725 return
2707
2726
2708 entries = bundlecaches.sortclonebundleentries(repo.ui, entries)
2727 entries = bundlecaches.sortclonebundleentries(repo.ui, entries)
2709
2728
2710 url = entries[0][b'URL']
2729 url = entries[0][b'URL']
2711 repo.ui.status(_(b'applying clone bundle from %s\n') % url)
2730 repo.ui.status(_(b'applying clone bundle from %s\n') % url)
2712 if trypullbundlefromurl(repo.ui, repo, url):
2731 if trypullbundlefromurl(repo.ui, repo, url):
2713 repo.ui.status(_(b'finished applying clone bundle\n'))
2732 repo.ui.status(_(b'finished applying clone bundle\n'))
2714 # Bundle failed.
2733 # Bundle failed.
2715 #
2734 #
2716 # We abort by default to avoid the thundering herd of
2735 # We abort by default to avoid the thundering herd of
2717 # clients flooding a server that was expecting expensive
2736 # clients flooding a server that was expecting expensive
2718 # clone load to be offloaded.
2737 # clone load to be offloaded.
2719 elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
2738 elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
2720 repo.ui.warn(_(b'falling back to normal clone\n'))
2739 repo.ui.warn(_(b'falling back to normal clone\n'))
2721 else:
2740 else:
2722 raise error.Abort(
2741 raise error.Abort(
2723 _(b'error applying bundle'),
2742 _(b'error applying bundle'),
2724 hint=_(
2743 hint=_(
2725 b'if this error persists, consider contacting '
2744 b'if this error persists, consider contacting '
2726 b'the server operator or disable clone '
2745 b'the server operator or disable clone '
2727 b'bundles via '
2746 b'bundles via '
2728 b'"--config ui.clonebundles=false"'
2747 b'"--config ui.clonebundles=false"'
2729 ),
2748 ),
2730 )
2749 )
2731
2750
2732
2751
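For context, the manifest returned by the clonebundles command and handed to parseclonebundlesmanifest is a plain-text listing, one bundle per line, each a URL followed by optional KEY=VALUE attributes; the entries below are invented and only meant to show the shape:

manifest = (
    b'https://cdn.example.com/full.hg BUNDLESPEC=zstd-v2\n'
    b'https://cdn.example.com/stream.hg BUNDLESPEC=none-packed1\n'
)

The code above then filters the parsed entries against local requirements, sorts them by preference, and tries entries[0][b'URL'] first.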
2733 def trypullbundlefromurl(ui, repo, url):
2752 def trypullbundlefromurl(ui, repo, url):
2734 """Attempt to apply a bundle from a URL."""
2753 """Attempt to apply a bundle from a URL."""
2735 with repo.lock(), repo.transaction(b'bundleurl') as tr:
2754 with repo.lock(), repo.transaction(b'bundleurl') as tr:
2736 try:
2755 try:
2737 fh = urlmod.open(ui, url)
2756 fh = urlmod.open(ui, url)
2738 cg = readbundle(ui, fh, b'stream')
2757 cg = readbundle(ui, fh, b'stream')
2739
2758
2740 if isinstance(cg, streamclone.streamcloneapplier):
2759 if isinstance(cg, streamclone.streamcloneapplier):
2741 cg.apply(repo)
2760 cg.apply(repo)
2742 else:
2761 else:
2743 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
2762 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
2744 return True
2763 return True
2745 except urlerr.httperror as e:
2764 except urlerr.httperror as e:
2746 ui.warn(
2765 ui.warn(
2747 _(b'HTTP error fetching bundle: %s\n')
2766 _(b'HTTP error fetching bundle: %s\n')
2748 % stringutil.forcebytestr(e)
2767 % stringutil.forcebytestr(e)
2749 )
2768 )
2750 except urlerr.urlerror as e:
2769 except urlerr.urlerror as e:
2751 ui.warn(
2770 ui.warn(
2752 _(b'error fetching bundle: %s\n')
2771 _(b'error fetching bundle: %s\n')
2753 % stringutil.forcebytestr(e.reason)
2772 % stringutil.forcebytestr(e.reason)
2754 )
2773 )
2755
2774
2756 return False
2775 return False
@@ -1,799 +1,807 b''
1 # exchangev2.py - repository exchange for wire protocol version 2
1 # exchangev2.py - repository exchange for wire protocol version 2
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import weakref
11 import weakref
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 nullid,
15 nullid,
16 short,
16 short,
17 )
17 )
18 from . import (
18 from . import (
19 bookmarks,
19 bookmarks,
20 error,
20 error,
21 mdiff,
21 mdiff,
22 narrowspec,
22 narrowspec,
23 phases,
23 phases,
24 pycompat,
24 pycompat,
25 requirements as requirementsmod,
25 requirements as requirementsmod,
26 setdiscovery,
26 setdiscovery,
27 )
27 )
28 from .interfaces import repository
28 from .interfaces import repository
29
29
30
30
31 def pull(pullop):
31 def pull(pullop):
32 """Pull using wire protocol version 2."""
32 """Pull using wire protocol version 2."""
33 repo = pullop.repo
33 repo = pullop.repo
34 remote = pullop.remote
34 remote = pullop.remote
35
35
36 usingrawchangelogandmanifest = _checkuserawstorefiledata(pullop)
36 usingrawchangelogandmanifest = _checkuserawstorefiledata(pullop)
37
37
38 # If this is a clone and it was requested to perform a "stream clone",
38 # If this is a clone and it was requested to perform a "stream clone",
39 # we obtain the raw files data from the remote then fall back to an
39 # we obtain the raw files data from the remote then fall back to an
40 # incremental pull. This is somewhat hacky and is not nearly robust enough
40 # incremental pull. This is somewhat hacky and is not nearly robust enough
41 # for long-term usage.
41 # for long-term usage.
42 if usingrawchangelogandmanifest:
42 if usingrawchangelogandmanifest:
43 with repo.transaction(b'clone'):
43 with repo.transaction(b'clone'):
44 _fetchrawstorefiles(repo, remote)
44 _fetchrawstorefiles(repo, remote)
45 repo.invalidate(clearfilecache=True)
45 repo.invalidate(clearfilecache=True)
46
46
47 tr = pullop.trmanager.transaction()
47 tr = pullop.trmanager.transaction()
48
48
49 # We don't use the repo's narrow matcher here because the patterns passed
49 # We don't use the repo's narrow matcher here because the patterns passed
50 # to exchange.pull() could be different.
50 # to exchange.pull() could be different.
51 narrowmatcher = narrowspec.match(
51 narrowmatcher = narrowspec.match(
52 repo.root,
52 repo.root,
53 # Empty maps to nevermatcher. So always
53 # Empty maps to nevermatcher. So always
54 # set includes if missing.
54 # set includes if missing.
55 pullop.includepats or {b'path:.'},
55 pullop.includepats or {b'path:.'},
56 pullop.excludepats,
56 pullop.excludepats,
57 )
57 )
58
58
59 if pullop.includepats or pullop.excludepats:
59 if pullop.includepats or pullop.excludepats:
60 pathfilter = {}
60 pathfilter = {}
61 if pullop.includepats:
61 if pullop.includepats:
62 pathfilter[b'include'] = sorted(pullop.includepats)
62 pathfilter[b'include'] = sorted(pullop.includepats)
63 if pullop.excludepats:
63 if pullop.excludepats:
64 pathfilter[b'exclude'] = sorted(pullop.excludepats)
64 pathfilter[b'exclude'] = sorted(pullop.excludepats)
65 else:
65 else:
66 pathfilter = None
66 pathfilter = None
67
67
68 # Figure out what needs to be fetched.
68 # Figure out what needs to be fetched.
69 common, fetch, remoteheads = _pullchangesetdiscovery(
69 common, fetch, remoteheads = _pullchangesetdiscovery(
70 repo, remote, pullop.heads, abortwhenunrelated=pullop.force
70 repo, remote, pullop.heads, abortwhenunrelated=pullop.force
71 )
71 )
72
72
73 # And fetch the data.
73 # And fetch the data.
74 pullheads = pullop.heads or remoteheads
74 pullheads = pullop.heads or remoteheads
75 csetres = _fetchchangesets(repo, tr, remote, common, fetch, pullheads)
75 csetres = _fetchchangesets(repo, tr, remote, common, fetch, pullheads)
76
76
77 # New revisions are written to the changelog. But all other updates
77 # New revisions are written to the changelog. But all other updates
78 # are deferred. Do those now.
78 # are deferred. Do those now.
79
79
80 # Ensure all new changesets are draft by default. If the repo is
80 # Ensure all new changesets are draft by default. If the repo is
81 # publishing, the phase will be adjusted by the loop below.
81 # publishing, the phase will be adjusted by the loop below.
82 if csetres[b'added']:
82 if csetres[b'added']:
83 phases.registernew(
83 phases.registernew(
84 repo, tr, phases.draft, [repo[n].rev() for n in csetres[b'added']]
84 repo, tr, phases.draft, [repo[n].rev() for n in csetres[b'added']]
85 )
85 )
86
86
87 # And adjust the phase of all changesets accordingly.
87 # And adjust the phase of all changesets accordingly.
88 for phasenumber, phase in phases.phasenames.items():
88 for phasenumber, phase in phases.phasenames.items():
89 if phase == b'secret' or not csetres[b'nodesbyphase'][phase]:
89 if phase == b'secret' or not csetres[b'nodesbyphase'][phase]:
90 continue
90 continue
91
91
92 phases.advanceboundary(
92 phases.advanceboundary(
93 repo,
93 repo,
94 tr,
94 tr,
95 phasenumber,
95 phasenumber,
96 csetres[b'nodesbyphase'][phase],
96 csetres[b'nodesbyphase'][phase],
97 )
97 )
98
98
99 # Write bookmark updates.
99 # Write bookmark updates.
100 bookmarks.updatefromremote(
100 bookmarks.updatefromremote(
101 repo.ui,
101 repo.ui,
102 repo,
102 repo,
103 csetres[b'bookmarks'],
103 csetres[b'bookmarks'],
104 remote.url(),
104 remote.url(),
105 pullop.gettransaction,
105 pullop.gettransaction,
106 explicit=pullop.explicitbookmarks,
106 explicit=pullop.explicitbookmarks,
107 )
107 )
108
108
109 manres = _fetchmanifests(repo, tr, remote, csetres[b'manifestnodes'])
109 manres = _fetchmanifests(repo, tr, remote, csetres[b'manifestnodes'])
110
110
111 # We don't properly support shallow changesets and manifests yet. So we apply
111 # We don't properly support shallow changesets and manifests yet. So we apply
112 # depth limiting locally.
112 # depth limiting locally.
113 if pullop.depth:
113 if pullop.depth:
114 relevantcsetnodes = set()
114 relevantcsetnodes = set()
115 clnode = repo.changelog.node
115 clnode = repo.changelog.node
116
116
117 for rev in repo.revs(
117 for rev in repo.revs(
118 b'ancestors(%ln, %s)', pullheads, pullop.depth - 1
118 b'ancestors(%ln, %s)', pullheads, pullop.depth - 1
119 ):
119 ):
120 relevantcsetnodes.add(clnode(rev))
120 relevantcsetnodes.add(clnode(rev))
121
121
122 csetrelevantfilter = lambda n: n in relevantcsetnodes
122 csetrelevantfilter = lambda n: n in relevantcsetnodes
123
123
124 else:
124 else:
125 csetrelevantfilter = lambda n: True
125 csetrelevantfilter = lambda n: True
126
126
127 # If obtaining the raw store files, we need to scan the full repo to
127 # If obtaining the raw store files, we need to scan the full repo to
128 # derive all the changesets, manifests, and linkrevs.
128 # derive all the changesets, manifests, and linkrevs.
129 if usingrawchangelogandmanifest:
129 if usingrawchangelogandmanifest:
130 csetsforfiles = []
130 csetsforfiles = []
131 mnodesforfiles = []
131 mnodesforfiles = []
132 manifestlinkrevs = {}
132 manifestlinkrevs = {}
133
133
134 for rev in repo:
134 for rev in repo:
135 ctx = repo[rev]
135 ctx = repo[rev]
136 node = ctx.node()
136 node = ctx.node()
137
137
138 if not csetrelevantfilter(node):
138 if not csetrelevantfilter(node):
139 continue
139 continue
140
140
141 mnode = ctx.manifestnode()
141 mnode = ctx.manifestnode()
142
142
143 csetsforfiles.append(node)
143 csetsforfiles.append(node)
144 mnodesforfiles.append(mnode)
144 mnodesforfiles.append(mnode)
145 manifestlinkrevs[mnode] = rev
145 manifestlinkrevs[mnode] = rev
146
146
147 else:
147 else:
148 csetsforfiles = [n for n in csetres[b'added'] if csetrelevantfilter(n)]
148 csetsforfiles = [n for n in csetres[b'added'] if csetrelevantfilter(n)]
149 mnodesforfiles = manres[b'added']
149 mnodesforfiles = manres[b'added']
150 manifestlinkrevs = manres[b'linkrevs']
150 manifestlinkrevs = manres[b'linkrevs']
151
151
152 # Find all file nodes referenced by added manifests and fetch those
152 # Find all file nodes referenced by added manifests and fetch those
153 # revisions.
153 # revisions.
154 fnodes = _derivefilesfrommanifests(repo, narrowmatcher, mnodesforfiles)
154 fnodes = _derivefilesfrommanifests(repo, narrowmatcher, mnodesforfiles)
155 _fetchfilesfromcsets(
155 _fetchfilesfromcsets(
156 repo,
156 repo,
157 tr,
157 tr,
158 remote,
158 remote,
159 pathfilter,
159 pathfilter,
160 fnodes,
160 fnodes,
161 csetsforfiles,
161 csetsforfiles,
162 manifestlinkrevs,
162 manifestlinkrevs,
163 shallow=bool(pullop.depth),
163 shallow=bool(pullop.depth),
164 )
164 )
165
165
166
166
167 def _checkuserawstorefiledata(pullop):
167 def _checkuserawstorefiledata(pullop):
168 """Check whether we should use rawstorefiledata command to retrieve data."""
168 """Check whether we should use rawstorefiledata command to retrieve data."""
169
169
170 repo = pullop.repo
170 repo = pullop.repo
171 remote = pullop.remote
171 remote = pullop.remote
172
172
173 # Command to obtain raw store data isn't available.
173 # Command to obtain raw store data isn't available.
174 if b'rawstorefiledata' not in remote.apidescriptor[b'commands']:
174 if b'rawstorefiledata' not in remote.apidescriptor[b'commands']:
175 return False
175 return False
176
176
177 # Only honor if user requested stream clone operation.
177 # Only honor if user requested stream clone operation.
178 if not pullop.streamclonerequested:
178 if not pullop.streamclonerequested:
179 return False
179 return False
180
180
181 # Only works on empty repos.
181 # Only works on empty repos.
182 if len(repo):
182 if len(repo):
183 return False
183 return False
184
184
185 # TODO This is super hacky. There needs to be a storage API for this. We
185 # TODO This is super hacky. There needs to be a storage API for this. We
186 # also need to check for compatibility with the remote.
186 # also need to check for compatibility with the remote.
187 if requirementsmod.REVLOGV1_REQUIREMENT not in repo.requirements:
187 if requirementsmod.REVLOGV1_REQUIREMENT not in repo.requirements:
188 return False
188 return False
189
189
190 return True
190 return True
191
191
192
192
193 def _fetchrawstorefiles(repo, remote):
193 def _fetchrawstorefiles(repo, remote):
194 with remote.commandexecutor() as e:
194 with remote.commandexecutor() as e:
195 objs = e.callcommand(
195 objs = e.callcommand(
196 b'rawstorefiledata',
196 b'rawstorefiledata',
197 {
197 {
198 b'files': [b'changelog', b'manifestlog'],
198 b'files': [b'changelog', b'manifestlog'],
199 },
199 },
200 ).result()
200 ).result()
201
201
202 # First object is a summary of files data that follows.
202 # First object is a summary of files data that follows.
203 overall = next(objs)
203 overall = next(objs)
204
204
205 progress = repo.ui.makeprogress(
205 progress = repo.ui.makeprogress(
206 _(b'clone'), total=overall[b'totalsize'], unit=_(b'bytes')
206 _(b'clone'), total=overall[b'totalsize'], unit=_(b'bytes')
207 )
207 )
208 with progress:
208 with progress:
209 progress.update(0)
209 progress.update(0)
210
210
211 # Next are pairs of file metadata, data.
211 # Next are pairs of file metadata, data.
212 while True:
212 while True:
213 try:
213 try:
214 filemeta = next(objs)
214 filemeta = next(objs)
215 except StopIteration:
215 except StopIteration:
216 break
216 break
217
217
218 for k in (b'location', b'path', b'size'):
218 for k in (b'location', b'path', b'size'):
219 if k not in filemeta:
219 if k not in filemeta:
220 raise error.Abort(
220 raise error.Abort(
221 _(b'remote file data missing key: %s') % k
221 _(b'remote file data missing key: %s') % k
222 )
222 )
223
223
224 if filemeta[b'location'] == b'store':
224 if filemeta[b'location'] == b'store':
225 vfs = repo.svfs
225 vfs = repo.svfs
226 else:
226 else:
227 raise error.Abort(
227 raise error.Abort(
228 _(b'invalid location for raw file data: %s')
228 _(b'invalid location for raw file data: %s')
229 % filemeta[b'location']
229 % filemeta[b'location']
230 )
230 )
231
231
232 bytesremaining = filemeta[b'size']
232 bytesremaining = filemeta[b'size']
233
233
234 with vfs.open(filemeta[b'path'], b'wb') as fh:
234 with vfs.open(filemeta[b'path'], b'wb') as fh:
235 while True:
235 while True:
236 try:
236 try:
237 chunk = next(objs)
237 chunk = next(objs)
238 except StopIteration:
238 except StopIteration:
239 break
239 break
240
240
241 bytesremaining -= len(chunk)
241 bytesremaining -= len(chunk)
242
242
243 if bytesremaining < 0:
243 if bytesremaining < 0:
244 raise error.Abort(
244 raise error.Abort(
245 _(
245 _(
246 b'received invalid number of bytes for file '
246 b'received invalid number of bytes for file '
247 b'data; expected %d, got extra'
247 b'data; expected %d, got extra'
248 )
248 )
249 % filemeta[b'size']
249 % filemeta[b'size']
250 )
250 )
251
251
252 progress.increment(step=len(chunk))
252 progress.increment(step=len(chunk))
253 fh.write(chunk)
253 fh.write(chunk)
254
254
255 try:
255 try:
256 if chunk.islast:
256 if chunk.islast:
257 break
257 break
258 except AttributeError:
258 except AttributeError:
259 raise error.Abort(
259 raise error.Abort(
260 _(
260 _(
261 b'did not receive indefinite length bytestring '
261 b'did not receive indefinite length bytestring '
262 b'for file data'
262 b'for file data'
263 )
263 )
264 )
264 )
265
265
266 if bytesremaining:
266 if bytesremaining:
267 raise error.Abort(
267 raise error.Abort(
268 _(
268 _(
269 b'received invalid number of bytes for '
269 b'received invalid number of bytes for '
270 b'file data; expected %d, got %d'
270 b'file data; expected %d, got %d'
271 )
271 )
272 % (
272 % (
273 filemeta[b'size'],
273 filemeta[b'size'],
274 filemeta[b'size'] - bytesremaining,
274 filemeta[b'size'] - bytesremaining,
275 )
275 )
276 )
276 )
277
277
278
278
279 def _pullchangesetdiscovery(repo, remote, heads, abortwhenunrelated=True):
279 def _pullchangesetdiscovery(repo, remote, heads, abortwhenunrelated=True):
280 """Determine which changesets need to be pulled."""
280 """Determine which changesets need to be pulled."""
281
281
282 if heads:
282 if heads:
283 knownnode = repo.changelog.hasnode
283 knownnode = repo.changelog.hasnode
284 if all(knownnode(head) for head in heads):
284 if all(knownnode(head) for head in heads):
285 return heads, False, heads
285 return heads, False, heads
286
286
287 # TODO wire protocol version 2 is capable of more efficient discovery
287 # TODO wire protocol version 2 is capable of more efficient discovery
288 # than setdiscovery. Consider implementing something better.
288 # than setdiscovery. Consider implementing something better.
289 common, fetch, remoteheads = setdiscovery.findcommonheads(
289 common, fetch, remoteheads = setdiscovery.findcommonheads(
290 repo.ui, repo, remote, abortwhenunrelated=abortwhenunrelated
290 repo.ui, repo, remote, abortwhenunrelated=abortwhenunrelated
291 )
291 )
292
292
293 common = set(common)
293 common = set(common)
294 remoteheads = set(remoteheads)
294 remoteheads = set(remoteheads)
295
295
296 # If a remote head is filtered locally, put it back in the common set.
296 # If a remote head is filtered locally, put it back in the common set.
297 # See the comment in exchange._pulldiscoverychangegroup() for more.
297 # See the comment in exchange._pulldiscoverychangegroup() for more.
298
298
299 if fetch and remoteheads:
299 if fetch and remoteheads:
300 has_node = repo.unfiltered().changelog.index.has_node
300 has_node = repo.unfiltered().changelog.index.has_node
301
301
302 common |= {head for head in remoteheads if has_node(head)}
302 common |= {head for head in remoteheads if has_node(head)}
303
303
304 if set(remoteheads).issubset(common):
304 if set(remoteheads).issubset(common):
305 fetch = []
305 fetch = []
306
306
307 common.discard(nullid)
307 common.discard(nullid)
308
308
309 return common, fetch, remoteheads
309 return common, fetch, remoteheads
310
310
311
311
312 def _fetchchangesets(repo, tr, remote, common, fetch, remoteheads):
312 def _fetchchangesets(repo, tr, remote, common, fetch, remoteheads):
313 # TODO consider adding a step here where we obtain the DAG shape first
313 # TODO consider adding a step here where we obtain the DAG shape first
314 # (or ask the server to slice changesets into chunks for us) so that
314 # (or ask the server to slice changesets into chunks for us) so that
315 # we can perform multiple fetches in batches. This will facilitate
315 # we can perform multiple fetches in batches. This will facilitate
316 # resuming interrupted clones, higher server-side cache hit rates due
316 # resuming interrupted clones, higher server-side cache hit rates due
317 # to smaller segments, etc.
317 # to smaller segments, etc.
318 with remote.commandexecutor() as e:
318 with remote.commandexecutor() as e:
319 objs = e.callcommand(
319 objs = e.callcommand(
320 b'changesetdata',
320 b'changesetdata',
321 {
321 {
322 b'revisions': [
322 b'revisions': [
323 {
323 {
324 b'type': b'changesetdagrange',
324 b'type': b'changesetdagrange',
325 b'roots': sorted(common),
325 b'roots': sorted(common),
326 b'heads': sorted(remoteheads),
326 b'heads': sorted(remoteheads),
327 }
327 }
328 ],
328 ],
329 b'fields': {b'bookmarks', b'parents', b'phase', b'revision'},
329 b'fields': {b'bookmarks', b'parents', b'phase', b'revision'},
330 },
330 },
331 ).result()
331 ).result()
332
332
333 # The context manager waits on all response data when exiting. So
333 # The context manager waits on all response data when exiting. So
334 # we need to remain in the context manager in order to stream data.
334 # we need to remain in the context manager in order to stream data.
335 return _processchangesetdata(repo, tr, objs)
335 return _processchangesetdata(repo, tr, objs)
336
336
337
337
def _processchangesetdata(repo, tr, objs):
    repo.hook(b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs))

    urepo = repo.unfiltered()
    cl = urepo.changelog

    cl.delayupdate(tr)

    # The first emitted object is a header describing the data that
    # follows.
    meta = next(objs)

    progress = repo.ui.makeprogress(
        _(b'changesets'), unit=_(b'chunks'), total=meta.get(b'totalitems')
    )

    manifestnodes = {}
    added = []

    def linkrev(node):
        repo.ui.debug(b'add changeset %s\n' % short(node))
        # Linkrev for changelog is always self.
        return len(cl)

    def ondupchangeset(cl, rev):
        added.append(cl.node(rev))

    def onchangeset(cl, rev):
        progress.increment()

        revision = cl.changelogrevision(rev)
        added.append(cl.node(rev))

        # We need to preserve the mapping of changelog revision to node
        # so we can set the linkrev accordingly when manifests are added.
        manifestnodes[rev] = revision.manifest

        repo.register_changeset(rev, revision)

    nodesbyphase = {phase: set() for phase in phases.phasenames.values()}
    remotebookmarks = {}

    # addgroup() expects an 8-tuple describing revisions. This normalizes
    # the wire data to that format.
    #
    # This loop also aggregates non-revision metadata, such as phase
    # data.
    def iterrevisions():
        for cset in objs:
            node = cset[b'node']

            if b'phase' in cset:
                nodesbyphase[cset[b'phase']].add(node)

            for mark in cset.get(b'bookmarks', []):
                remotebookmarks[mark] = node

            # TODO add mechanism for extensions to examine records so they
            # can siphon off custom data fields.

            extrafields = {}

            for field, size in cset.get(b'fieldsfollowing', []):
                extrafields[field] = next(objs)

            # Some entries might be metadata-only updates.
            if b'revision' not in extrafields:
                continue

            data = extrafields[b'revision']

            yield (
                node,
                cset[b'parents'][0],
                cset[b'parents'][1],
                # Linknode is always itself for changesets.
                cset[b'node'],
                # We always send full revisions. So delta base is not set.
                nullid,
                mdiff.trivialdiffheader(len(data)) + data,
                # Flags not yet supported.
                0,
                # Sidedata not yet supported
                {},
            )

    cl.addgroup(
        iterrevisions(),
        linkrev,
        weakref.proxy(tr),
        alwayscache=True,
        addrevisioncb=onchangeset,
        duplicaterevisioncb=ondupchangeset,
    )

    progress.complete()

    return {
        b'added': added,
        b'nodesbyphase': nodesbyphase,
        b'bookmarks': remotebookmarks,
        b'manifestnodes': manifestnodes,
    }

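# A minimal sketch of one entry yielded by iterrevisions() above and handed
# to cl.addgroup(); only the shape (now an 8-tuple ending with a sidedata
# mapping) is taken from the code, the values are placeholders:
#
#   (
#       node,                                        # changeset node
#       p1, p2,                                      # parent nodes
#       node,                                        # linknode == itself
#       nullid,                                      # delta base (full text)
#       mdiff.trivialdiffheader(len(text)) + text,   # "delta" of a full text
#       0,                                           # flags
#       {},                                          # sidedata, empty for now
#   )
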
def _fetchmanifests(repo, tr, remote, manifestnodes):
    rootmanifest = repo.manifestlog.getstorage(b'')

    # Some manifests can be shared between changesets. Filter out revisions
    # we already know about.
    fetchnodes = []
    linkrevs = {}
    seen = set()

    for clrev, node in sorted(pycompat.iteritems(manifestnodes)):
        if node in seen:
            continue

        try:
            rootmanifest.rev(node)
        except error.LookupError:
            fetchnodes.append(node)
            linkrevs[node] = clrev

        seen.add(node)

    # TODO handle tree manifests

    # addgroup() expects an 8-tuple describing revisions. This normalizes
    # the wire data to that format.
    def iterrevisions(objs, progress):
        for manifest in objs:
            node = manifest[b'node']

            extrafields = {}

            for field, size in manifest.get(b'fieldsfollowing', []):
                extrafields[field] = next(objs)

            if b'delta' in extrafields:
                basenode = manifest[b'deltabasenode']
                delta = extrafields[b'delta']
            elif b'revision' in extrafields:
                basenode = nullid
                revision = extrafields[b'revision']
                delta = mdiff.trivialdiffheader(len(revision)) + revision
            else:
                continue

            yield (
                node,
                manifest[b'parents'][0],
                manifest[b'parents'][1],
                # The value passed in is passed to the lookup function passed
                # to addgroup(). We already have a map of manifest node to
                # changelog revision number. So we just pass in the
                # manifest node here and use linkrevs.__getitem__ as the
                # resolution function.
                node,
                basenode,
                delta,
                # Flags not yet supported.
                0,
                # Sidedata not yet supported.
                {},
            )

            progress.increment()

    progress = repo.ui.makeprogress(
        _(b'manifests'), unit=_(b'chunks'), total=len(fetchnodes)
    )

    commandmeta = remote.apidescriptor[b'commands'][b'manifestdata']
    batchsize = commandmeta.get(b'recommendedbatchsize', 10000)
    # TODO make size configurable on client?

    # We send commands 1 at a time to the remote. This is not the most
    # efficient because we incur a round trip at the end of each batch.
    # However, the existing frame-based reactor keeps consuming server
    # data in the background. And this results in response data buffering
    # in memory. This can consume gigabytes of memory.
    # TODO send multiple commands in a request once background buffering
    # issues are resolved.

    added = []

    for i in pycompat.xrange(0, len(fetchnodes), batchsize):
        batch = [node for node in fetchnodes[i : i + batchsize]]
        if not batch:
            continue

        with remote.commandexecutor() as e:
            objs = e.callcommand(
                b'manifestdata',
                {
                    b'tree': b'',
                    b'nodes': batch,
                    b'fields': {b'parents', b'revision'},
                    b'haveparents': True,
                },
            ).result()

            # Chomp off header object.
            next(objs)

            def onchangeset(cl, rev):
                added.append(cl.node(rev))

            rootmanifest.addgroup(
                iterrevisions(objs, progress),
                linkrevs.__getitem__,
                weakref.proxy(tr),
                addrevisioncb=onchangeset,
                duplicaterevisioncb=onchangeset,
            )

    progress.complete()

    return {
        b'added': added,
        b'linkrevs': linkrevs,
    }

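# How iterrevisions() above normalizes the two possible manifest payloads
# into a delta for addgroup() (a sketch; the sizes and node values are
# placeholders):
#
#   server sent b'delta'    -> basenode = manifest[b'deltabasenode']
#                              delta    = raw delta bytes
#   server sent b'revision' -> basenode = nullid
#                              delta    = mdiff.trivialdiffheader(len(text)) + text
#
# In both cases the yielded entry is
# (node, p1, p2, node, basenode, delta, 0, {}), matching the 8-tuple shape
# used for changesets.
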
def _derivefilesfrommanifests(repo, matcher, manifestnodes):
    """Determine what file nodes are relevant given a set of manifest nodes.

    Returns a dict mapping file paths to dicts of file node to first manifest
    node.
    """
    ml = repo.manifestlog
    fnodes = collections.defaultdict(dict)

    progress = repo.ui.makeprogress(
        _(b'scanning manifests'), total=len(manifestnodes)
    )

    with progress:
        for manifestnode in manifestnodes:
            m = ml.get(b'', manifestnode)

            # TODO this will pull in unwanted nodes because it takes the storage
            # delta into consideration. What we really want is something that
            # takes the delta between the manifest's parents. And ideally we
            # would ignore file nodes that are known locally. For now, ignore
            # both these limitations. This will result in incremental fetches
            # requesting data we already have. So this is far from ideal.
            md = m.readfast()

            for path, fnode in md.items():
                if matcher(path):
                    fnodes[path].setdefault(fnode, manifestnode)

            progress.increment()

    return fnodes

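# Shape of the mapping returned by _derivefilesfrommanifests() (the paths and
# nodes are illustrative placeholders):
#
#   {
#       b'dir/a.txt': {<filenode1>: <manifestnodeA>, <filenode2>: <manifestnodeB>},
#       b'b.txt': {<filenode3>: <manifestnodeA>},
#   }
#
# For every matched path, each file node maps to the first manifest node it
# was seen in; the callers below use that manifest node to look up a linkrev.
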
def _fetchfiles(repo, tr, remote, fnodes, linkrevs):
    """Fetch file data from explicit file revisions."""

    def iterrevisions(objs, progress):
        for filerevision in objs:
            node = filerevision[b'node']

            extrafields = {}

            for field, size in filerevision.get(b'fieldsfollowing', []):
                extrafields[field] = next(objs)

            if b'delta' in extrafields:
                basenode = filerevision[b'deltabasenode']
                delta = extrafields[b'delta']
            elif b'revision' in extrafields:
                basenode = nullid
                revision = extrafields[b'revision']
                delta = mdiff.trivialdiffheader(len(revision)) + revision
            else:
                continue

            yield (
                node,
                filerevision[b'parents'][0],
                filerevision[b'parents'][1],
                node,
                basenode,
                delta,
                # Flags not yet supported.
                0,
                # Sidedata not yet supported.
                {},
            )

            progress.increment()

    progress = repo.ui.makeprogress(
        _(b'files'),
        unit=_(b'chunks'),
        total=sum(len(v) for v in pycompat.itervalues(fnodes)),
    )

    # TODO make batch size configurable
    batchsize = 10000
    fnodeslist = [x for x in sorted(fnodes.items())]

    for i in pycompat.xrange(0, len(fnodeslist), batchsize):
        batch = [x for x in fnodeslist[i : i + batchsize]]
        if not batch:
            continue

        with remote.commandexecutor() as e:
            fs = []
            locallinkrevs = {}

            for path, nodes in batch:
                fs.append(
                    (
                        path,
                        e.callcommand(
                            b'filedata',
                            {
                                b'path': path,
                                b'nodes': sorted(nodes),
                                b'fields': {b'parents', b'revision'},
                                b'haveparents': True,
                            },
                        ),
                    )
                )

                locallinkrevs[path] = {
                    node: linkrevs[manifestnode]
                    for node, manifestnode in pycompat.iteritems(nodes)
                }

            for path, f in fs:
                objs = f.result()

                # Chomp off header objects.
                next(objs)

                store = repo.file(path)
                store.addgroup(
                    iterrevisions(objs, progress),
                    locallinkrevs[path].__getitem__,
                    weakref.proxy(tr),
                )

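# A small sketch of the linkrev bookkeeping used by _fetchfiles() above, with
# hypothetical values: for a batch entry (path, nodes), where
#
#   nodes    = {<filenode1>: <manifestnodeX>}       # from fnodes[path]
#   linkrevs = {<manifestnodeX>: 42}                # from _fetchmanifests()
#
# the code builds locallinkrevs[path] = {<filenode1>: 42} and passes
# locallinkrevs[path].__getitem__ to store.addgroup() so every incoming file
# revision is linked to changelog revision 42.
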
def _fetchfilesfromcsets(
    repo, tr, remote, pathfilter, fnodes, csets, manlinkrevs, shallow=False
):
    """Fetch file data from explicit changeset revisions."""

    def iterrevisions(objs, remaining, progress):
        while remaining:
            filerevision = next(objs)

            node = filerevision[b'node']

            extrafields = {}

            for field, size in filerevision.get(b'fieldsfollowing', []):
                extrafields[field] = next(objs)

            if b'delta' in extrafields:
                basenode = filerevision[b'deltabasenode']
                delta = extrafields[b'delta']
            elif b'revision' in extrafields:
                basenode = nullid
                revision = extrafields[b'revision']
                delta = mdiff.trivialdiffheader(len(revision)) + revision
            else:
                continue

            if b'linknode' in filerevision:
                linknode = filerevision[b'linknode']
            else:
                linknode = node

            yield (
                node,
                filerevision[b'parents'][0],
                filerevision[b'parents'][1],
                linknode,
                basenode,
                delta,
                # Flags not yet supported.
                0,
                # Sidedata not yet supported.
                {},
            )

            progress.increment()
            remaining -= 1

    progress = repo.ui.makeprogress(
        _(b'files'),
        unit=_(b'chunks'),
        total=sum(len(v) for v in pycompat.itervalues(fnodes)),
    )

    commandmeta = remote.apidescriptor[b'commands'][b'filesdata']
    batchsize = commandmeta.get(b'recommendedbatchsize', 50000)

    shallowfiles = repository.REPO_FEATURE_SHALLOW_FILE_STORAGE in repo.features
    fields = {b'parents', b'revision'}
    clrev = repo.changelog.rev

    # There are no guarantees that we'll have ancestor revisions if
    # a) this repo has shallow file storage, or b) shallow data fetching is
    # enabled. Force remote to not delta against possibly unknown revisions
    # when these conditions hold.
    haveparents = not (shallowfiles or shallow)

    # Similarly, we may not have calculated linkrevs for all incoming file
    # revisions. Ask the remote to do work for us in this case.
    if not haveparents:
        fields.add(b'linknode')

    for i in pycompat.xrange(0, len(csets), batchsize):
        batch = [x for x in csets[i : i + batchsize]]
        if not batch:
            continue

        with remote.commandexecutor() as e:
            args = {
                b'revisions': [
                    {
                        b'type': b'changesetexplicit',
                        b'nodes': batch,
                    }
                ],
                b'fields': fields,
                b'haveparents': haveparents,
            }

            if pathfilter:
                args[b'pathfilter'] = pathfilter

            objs = e.callcommand(b'filesdata', args).result()

            # First object is an overall header.
            overall = next(objs)

            # We have overall['totalpaths'] segments.
            for i in pycompat.xrange(overall[b'totalpaths']):
                header = next(objs)

                path = header[b'path']
                store = repo.file(path)

                linkrevs = {
                    fnode: manlinkrevs[mnode]
                    for fnode, mnode in pycompat.iteritems(fnodes[path])
                }

                def getlinkrev(node):
                    if node in linkrevs:
                        return linkrevs[node]
                    else:
                        return clrev(node)

                store.addgroup(
                    iterrevisions(objs, header[b'totalitems'], progress),
                    getlinkrev,
                    weakref.proxy(tr),
                    maybemissingparents=shallow,
                )
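
# A rough sketch of how the helpers in this hunk fit together during a pull;
# the calling code lives elsewhere in this module and is not part of this
# hunk, so treat the exact call sites and argument names as assumptions:
#
#   csetres = _processchangesetdata(repo, tr, objs)
#   manres = _fetchmanifests(repo, tr, remote, csetres[b'manifestnodes'])
#   fnodes = _derivefilesfrommanifests(repo, matcher, manres[b'added'])
#   # older servers: per-path b'filedata' requests
#   _fetchfiles(repo, tr, remote, fnodes, manres[b'linkrevs'])
#   # servers exposing b'filesdata': fetch by changeset instead
#   _fetchfilesfromcsets(
#       repo, tr, remote, pathfilter, fnodes, csetres[b'added'],
#       manres[b'linkrevs'], shallow=shallow,
#   )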