discovery: change users of `outgoing.missingheads` to `outgoing.ancestorsof`...
Manuel Jacob
r45704:c93dd9d9 default
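Context for the hunk around line 469 below: the only change visible in the truncated diff is the keyword used in the `discovery.outgoing(...)` call inside `_rebundle()`; the value passed is unchanged, only the argument name moves from `missingheads` to `ancestorsof`. A minimal sketch of that call site follows (the helper name `make_outgoing` and its bare parameters are illustrative only, not part of the actual change):

    from mercurial import discovery

    def make_outgoing(bundlerepo, bundleroots, unknownhead):
        # Old spelling (before this changeset):
        #   discovery.outgoing(bundlerepo, commonheads=bundleroots,
        #                      missingheads=[unknownhead])
        # New spelling (this changeset): same value, renamed keyword.
        return discovery.outgoing(
            bundlerepo, commonheads=bundleroots, ancestorsof=[unknownhead]
        )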

The requested changes are too big and the content was truncated; the diff below is incomplete.

@@ -1,1344 +1,1344 b''
1 # Infinite push
1 # Infinite push
2 #
2 #
3 # Copyright 2016 Facebook, Inc.
3 # Copyright 2016 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """ store some pushes in a remote blob store on the server (EXPERIMENTAL)
7 """ store some pushes in a remote blob store on the server (EXPERIMENTAL)
8
8
9 IMPORTANT: if you use this extension, please contact
9 IMPORTANT: if you use this extension, please contact
10 mercurial-devel@mercurial-scm.org ASAP. This extension is believed to
10 mercurial-devel@mercurial-scm.org ASAP. This extension is believed to
11 be unused and barring learning of users of this functionality, we will
11 be unused and barring learning of users of this functionality, we will
12 delete this code at the end of 2020.
12 delete this code at the end of 2020.
13
13
14 [infinitepush]
14 [infinitepush]
15 # Server-side and client-side option. Pattern of the infinitepush bookmark
15 # Server-side and client-side option. Pattern of the infinitepush bookmark
16 branchpattern = PATTERN
16 branchpattern = PATTERN
17
17
18 # Server or client
18 # Server or client
19 server = False
19 server = False
20
20
21 # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
21 # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
22 indextype = disk
22 indextype = disk
23
23
24 # Server-side option. Used only if indextype=sql.
24 # Server-side option. Used only if indextype=sql.
25 # Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
25 # Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
26 sqlhost = IP:PORT:DB_NAME:USER:PASSWORD
26 sqlhost = IP:PORT:DB_NAME:USER:PASSWORD
27
27
28 # Server-side option. Used only if indextype=disk.
28 # Server-side option. Used only if indextype=disk.
29 # Filesystem path to the index store
29 # Filesystem path to the index store
30 indexpath = PATH
30 indexpath = PATH
31
31
32 # Server-side option. Possible values: 'disk' or 'external'
32 # Server-side option. Possible values: 'disk' or 'external'
33 # Fails if not set
33 # Fails if not set
34 storetype = disk
34 storetype = disk
35
35
36 # Server-side option.
36 # Server-side option.
37 # Path to the binary that will save bundle to the bundlestore
37 # Path to the binary that will save bundle to the bundlestore
38 # Formatted cmd line will be passed to it (see `put_args`)
38 # Formatted cmd line will be passed to it (see `put_args`)
39 put_binary = put
39 put_binary = put
40
40
41 # Server-side option. Used only if storetype=external.
41 # Server-side option. Used only if storetype=external.
42 # Format cmd-line string for put binary. Placeholder: {filename}
42 # Format cmd-line string for put binary. Placeholder: {filename}
43 put_args = {filename}
43 put_args = {filename}
44
44
45 # Server-side option.
45 # Server-side option.
46 # Path to the binary that gets a bundle from the bundlestore.
46 # Path to the binary that gets a bundle from the bundlestore.
47 # Formatted cmd line will be passed to it (see `get_args`)
47 # Formatted cmd line will be passed to it (see `get_args`)
48 get_binary = get
48 get_binary = get
49
49
50 # Server-side option. Used only if storetype=external.
50 # Server-side option. Used only if storetype=external.
51 # Format cmd-line string for get binary. Placeholders: {filename} {handle}
51 # Format cmd-line string for get binary. Placeholders: {filename} {handle}
52 get_args = {filename} {handle}
52 get_args = {filename} {handle}
53
53
54 # Server-side option
54 # Server-side option
55 logfile = FILE
55 logfile = FILE
56
56
57 # Server-side option
57 # Server-side option
58 loglevel = DEBUG
58 loglevel = DEBUG
59
59
60 # Server-side option. Used only if indextype=sql.
60 # Server-side option. Used only if indextype=sql.
61 # Sets mysql wait_timeout option.
61 # Sets mysql wait_timeout option.
62 waittimeout = 300
62 waittimeout = 300
63
63
64 # Server-side option. Used only if indextype=sql.
64 # Server-side option. Used only if indextype=sql.
65 # Sets mysql innodb_lock_wait_timeout option.
65 # Sets mysql innodb_lock_wait_timeout option.
66 locktimeout = 120
66 locktimeout = 120
67
67
68 # Server-side option. Used only if indextype=sql.
68 # Server-side option. Used only if indextype=sql.
69 # Name of the repository
69 # Name of the repository
70 reponame = ''
70 reponame = ''
71
71
72 # Client-side option. Used by --list-remote option. List of remote scratch
72 # Client-side option. Used by --list-remote option. List of remote scratch
73 # patterns to list if no patterns are specified.
73 # patterns to list if no patterns are specified.
74 defaultremotepatterns = ['*']
74 defaultremotepatterns = ['*']
75
75
76 # Instructs infinitepush to forward all received bundle2 parts to the
76 # Instructs infinitepush to forward all received bundle2 parts to the
77 # bundle for storage. Defaults to False.
77 # bundle for storage. Defaults to False.
78 storeallparts = True
78 storeallparts = True
79
79
80 # Routes each incoming push to the bundlestore. Defaults to False
80 # Routes each incoming push to the bundlestore. Defaults to False
81 pushtobundlestore = True
81 pushtobundlestore = True
82
82
83 [remotenames]
83 [remotenames]
84 # Client-side option
84 # Client-side option
85 # This option should be set only if remotenames extension is enabled.
85 # This option should be set only if remotenames extension is enabled.
86 # Whether remote bookmarks are tracked by remotenames extension.
86 # Whether remote bookmarks are tracked by remotenames extension.
87 bookmarks = True
87 bookmarks = True
88 """
88 """
89
89
90 from __future__ import absolute_import
90 from __future__ import absolute_import
91
91
92 import collections
92 import collections
93 import contextlib
93 import contextlib
94 import errno
94 import errno
95 import functools
95 import functools
96 import logging
96 import logging
97 import os
97 import os
98 import random
98 import random
99 import re
99 import re
100 import socket
100 import socket
101 import subprocess
101 import subprocess
102 import time
102 import time
103
103
104 from mercurial.node import (
104 from mercurial.node import (
105 bin,
105 bin,
106 hex,
106 hex,
107 )
107 )
108
108
109 from mercurial.i18n import _
109 from mercurial.i18n import _
110
110
111 from mercurial.pycompat import (
111 from mercurial.pycompat import (
112 getattr,
112 getattr,
113 open,
113 open,
114 )
114 )
115
115
116 from mercurial.utils import (
116 from mercurial.utils import (
117 procutil,
117 procutil,
118 stringutil,
118 stringutil,
119 )
119 )
120
120
121 from mercurial import (
121 from mercurial import (
122 bundle2,
122 bundle2,
123 changegroup,
123 changegroup,
124 commands,
124 commands,
125 discovery,
125 discovery,
126 encoding,
126 encoding,
127 error,
127 error,
128 exchange,
128 exchange,
129 extensions,
129 extensions,
130 hg,
130 hg,
131 localrepo,
131 localrepo,
132 phases,
132 phases,
133 pushkey,
133 pushkey,
134 pycompat,
134 pycompat,
135 registrar,
135 registrar,
136 util,
136 util,
137 wireprototypes,
137 wireprototypes,
138 wireprotov1peer,
138 wireprotov1peer,
139 wireprotov1server,
139 wireprotov1server,
140 )
140 )
141
141
142 from . import (
142 from . import (
143 bundleparts,
143 bundleparts,
144 common,
144 common,
145 )
145 )
146
146
147 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
147 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
148 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
148 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
149 # be specifying the version(s) of Mercurial they are tested with, or
149 # be specifying the version(s) of Mercurial they are tested with, or
150 # leave the attribute unspecified.
150 # leave the attribute unspecified.
151 testedwith = b'ships-with-hg-core'
151 testedwith = b'ships-with-hg-core'
152
152
153 configtable = {}
153 configtable = {}
154 configitem = registrar.configitem(configtable)
154 configitem = registrar.configitem(configtable)
155
155
156 configitem(
156 configitem(
157 b'infinitepush', b'server', default=False,
157 b'infinitepush', b'server', default=False,
158 )
158 )
159 configitem(
159 configitem(
160 b'infinitepush', b'storetype', default=b'',
160 b'infinitepush', b'storetype', default=b'',
161 )
161 )
162 configitem(
162 configitem(
163 b'infinitepush', b'indextype', default=b'',
163 b'infinitepush', b'indextype', default=b'',
164 )
164 )
165 configitem(
165 configitem(
166 b'infinitepush', b'indexpath', default=b'',
166 b'infinitepush', b'indexpath', default=b'',
167 )
167 )
168 configitem(
168 configitem(
169 b'infinitepush', b'storeallparts', default=False,
169 b'infinitepush', b'storeallparts', default=False,
170 )
170 )
171 configitem(
171 configitem(
172 b'infinitepush', b'reponame', default=b'',
172 b'infinitepush', b'reponame', default=b'',
173 )
173 )
174 configitem(
174 configitem(
175 b'scratchbranch', b'storepath', default=b'',
175 b'scratchbranch', b'storepath', default=b'',
176 )
176 )
177 configitem(
177 configitem(
178 b'infinitepush', b'branchpattern', default=b'',
178 b'infinitepush', b'branchpattern', default=b'',
179 )
179 )
180 configitem(
180 configitem(
181 b'infinitepush', b'pushtobundlestore', default=False,
181 b'infinitepush', b'pushtobundlestore', default=False,
182 )
182 )
183 configitem(
183 configitem(
184 b'experimental', b'server-bundlestore-bookmark', default=b'',
184 b'experimental', b'server-bundlestore-bookmark', default=b'',
185 )
185 )
186 configitem(
186 configitem(
187 b'experimental', b'infinitepush-scratchpush', default=False,
187 b'experimental', b'infinitepush-scratchpush', default=False,
188 )
188 )
189
189
190 experimental = b'experimental'
190 experimental = b'experimental'
191 configbookmark = b'server-bundlestore-bookmark'
191 configbookmark = b'server-bundlestore-bookmark'
192 configscratchpush = b'infinitepush-scratchpush'
192 configscratchpush = b'infinitepush-scratchpush'
193
193
194 scratchbranchparttype = bundleparts.scratchbranchparttype
194 scratchbranchparttype = bundleparts.scratchbranchparttype
195 revsetpredicate = registrar.revsetpredicate()
195 revsetpredicate = registrar.revsetpredicate()
196 templatekeyword = registrar.templatekeyword()
196 templatekeyword = registrar.templatekeyword()
197 _scratchbranchmatcher = lambda x: False
197 _scratchbranchmatcher = lambda x: False
198 _maybehash = re.compile('^[a-f0-9]+$').search
198 _maybehash = re.compile('^[a-f0-9]+$').search
199
199
200
200
201 def _buildexternalbundlestore(ui):
201 def _buildexternalbundlestore(ui):
202 put_args = ui.configlist(b'infinitepush', b'put_args', [])
202 put_args = ui.configlist(b'infinitepush', b'put_args', [])
203 put_binary = ui.config(b'infinitepush', b'put_binary')
203 put_binary = ui.config(b'infinitepush', b'put_binary')
204 if not put_binary:
204 if not put_binary:
205 raise error.Abort(b'put binary is not specified')
205 raise error.Abort(b'put binary is not specified')
206 get_args = ui.configlist(b'infinitepush', b'get_args', [])
206 get_args = ui.configlist(b'infinitepush', b'get_args', [])
207 get_binary = ui.config(b'infinitepush', b'get_binary')
207 get_binary = ui.config(b'infinitepush', b'get_binary')
208 if not get_binary:
208 if not get_binary:
209 raise error.Abort(b'get binary is not specified')
209 raise error.Abort(b'get binary is not specified')
210 from . import store
210 from . import store
211
211
212 return store.externalbundlestore(put_binary, put_args, get_binary, get_args)
212 return store.externalbundlestore(put_binary, put_args, get_binary, get_args)
213
213
214
214
215 def _buildsqlindex(ui):
215 def _buildsqlindex(ui):
216 sqlhost = ui.config(b'infinitepush', b'sqlhost')
216 sqlhost = ui.config(b'infinitepush', b'sqlhost')
217 if not sqlhost:
217 if not sqlhost:
218 raise error.Abort(_(b'please set infinitepush.sqlhost'))
218 raise error.Abort(_(b'please set infinitepush.sqlhost'))
219 host, port, db, user, password = sqlhost.split(b':')
219 host, port, db, user, password = sqlhost.split(b':')
220 reponame = ui.config(b'infinitepush', b'reponame')
220 reponame = ui.config(b'infinitepush', b'reponame')
221 if not reponame:
221 if not reponame:
222 raise error.Abort(_(b'please set infinitepush.reponame'))
222 raise error.Abort(_(b'please set infinitepush.reponame'))
223
223
224 logfile = ui.config(b'infinitepush', b'logfile', b'')
224 logfile = ui.config(b'infinitepush', b'logfile', b'')
225 waittimeout = ui.configint(b'infinitepush', b'waittimeout', 300)
225 waittimeout = ui.configint(b'infinitepush', b'waittimeout', 300)
226 locktimeout = ui.configint(b'infinitepush', b'locktimeout', 120)
226 locktimeout = ui.configint(b'infinitepush', b'locktimeout', 120)
227 from . import sqlindexapi
227 from . import sqlindexapi
228
228
229 return sqlindexapi.sqlindexapi(
229 return sqlindexapi.sqlindexapi(
230 reponame,
230 reponame,
231 host,
231 host,
232 port,
232 port,
233 db,
233 db,
234 user,
234 user,
235 password,
235 password,
236 logfile,
236 logfile,
237 _getloglevel(ui),
237 _getloglevel(ui),
238 waittimeout=waittimeout,
238 waittimeout=waittimeout,
239 locktimeout=locktimeout,
239 locktimeout=locktimeout,
240 )
240 )
241
241
242
242
243 def _getloglevel(ui):
243 def _getloglevel(ui):
244 loglevel = ui.config(b'infinitepush', b'loglevel', b'DEBUG')
244 loglevel = ui.config(b'infinitepush', b'loglevel', b'DEBUG')
245 numeric_loglevel = getattr(logging, loglevel.upper(), None)
245 numeric_loglevel = getattr(logging, loglevel.upper(), None)
246 if not isinstance(numeric_loglevel, int):
246 if not isinstance(numeric_loglevel, int):
247 raise error.Abort(_(b'invalid log level %s') % loglevel)
247 raise error.Abort(_(b'invalid log level %s') % loglevel)
248 return numeric_loglevel
248 return numeric_loglevel
249
249
250
250
251 def _tryhoist(ui, remotebookmark):
251 def _tryhoist(ui, remotebookmark):
252 '''returns a bookmark with the hoisted part removed
252 '''returns a bookmark with the hoisted part removed
253
253
254 The remotenames extension has a 'hoist' config that allows using remote
254 The remotenames extension has a 'hoist' config that allows using remote
255 bookmarks without specifying the remote path. For example, 'hg update master'
255 bookmarks without specifying the remote path. For example, 'hg update master'
256 works as well as 'hg update remote/master'. We want to allow the same in
256 works as well as 'hg update remote/master'. We want to allow the same in
257 infinitepush.
257 infinitepush.
258 '''
258 '''
259
259
260 if common.isremotebooksenabled(ui):
260 if common.isremotebooksenabled(ui):
261 hoist = ui.config(b'remotenames', b'hoistedpeer') + b'/'
261 hoist = ui.config(b'remotenames', b'hoistedpeer') + b'/'
262 if remotebookmark.startswith(hoist):
262 if remotebookmark.startswith(hoist):
263 return remotebookmark[len(hoist) :]
263 return remotebookmark[len(hoist) :]
264 return remotebookmark
264 return remotebookmark
265
265
266
266
267 class bundlestore(object):
267 class bundlestore(object):
268 def __init__(self, repo):
268 def __init__(self, repo):
269 self._repo = repo
269 self._repo = repo
270 storetype = self._repo.ui.config(b'infinitepush', b'storetype')
270 storetype = self._repo.ui.config(b'infinitepush', b'storetype')
271 if storetype == b'disk':
271 if storetype == b'disk':
272 from . import store
272 from . import store
273
273
274 self.store = store.filebundlestore(self._repo.ui, self._repo)
274 self.store = store.filebundlestore(self._repo.ui, self._repo)
275 elif storetype == b'external':
275 elif storetype == b'external':
276 self.store = _buildexternalbundlestore(self._repo.ui)
276 self.store = _buildexternalbundlestore(self._repo.ui)
277 else:
277 else:
278 raise error.Abort(
278 raise error.Abort(
279 _(b'unknown infinitepush store type specified %s') % storetype
279 _(b'unknown infinitepush store type specified %s') % storetype
280 )
280 )
281
281
282 indextype = self._repo.ui.config(b'infinitepush', b'indextype')
282 indextype = self._repo.ui.config(b'infinitepush', b'indextype')
283 if indextype == b'disk':
283 if indextype == b'disk':
284 from . import fileindexapi
284 from . import fileindexapi
285
285
286 self.index = fileindexapi.fileindexapi(self._repo)
286 self.index = fileindexapi.fileindexapi(self._repo)
287 elif indextype == b'sql':
287 elif indextype == b'sql':
288 self.index = _buildsqlindex(self._repo.ui)
288 self.index = _buildsqlindex(self._repo.ui)
289 else:
289 else:
290 raise error.Abort(
290 raise error.Abort(
291 _(b'unknown infinitepush index type specified %s') % indextype
291 _(b'unknown infinitepush index type specified %s') % indextype
292 )
292 )
293
293
294
294
295 def _isserver(ui):
295 def _isserver(ui):
296 return ui.configbool(b'infinitepush', b'server')
296 return ui.configbool(b'infinitepush', b'server')
297
297
298
298
299 def reposetup(ui, repo):
299 def reposetup(ui, repo):
300 if _isserver(ui) and repo.local():
300 if _isserver(ui) and repo.local():
301 repo.bundlestore = bundlestore(repo)
301 repo.bundlestore = bundlestore(repo)
302
302
303
303
304 def extsetup(ui):
304 def extsetup(ui):
305 commonsetup(ui)
305 commonsetup(ui)
306 if _isserver(ui):
306 if _isserver(ui):
307 serverextsetup(ui)
307 serverextsetup(ui)
308 else:
308 else:
309 clientextsetup(ui)
309 clientextsetup(ui)
310
310
311
311
312 def commonsetup(ui):
312 def commonsetup(ui):
313 wireprotov1server.commands[b'listkeyspatterns'] = (
313 wireprotov1server.commands[b'listkeyspatterns'] = (
314 wireprotolistkeyspatterns,
314 wireprotolistkeyspatterns,
315 b'namespace patterns',
315 b'namespace patterns',
316 )
316 )
317 scratchbranchpat = ui.config(b'infinitepush', b'branchpattern')
317 scratchbranchpat = ui.config(b'infinitepush', b'branchpattern')
318 if scratchbranchpat:
318 if scratchbranchpat:
319 global _scratchbranchmatcher
319 global _scratchbranchmatcher
320 kind, pat, _scratchbranchmatcher = stringutil.stringmatcher(
320 kind, pat, _scratchbranchmatcher = stringutil.stringmatcher(
321 scratchbranchpat
321 scratchbranchpat
322 )
322 )
323
323
324
324
325 def serverextsetup(ui):
325 def serverextsetup(ui):
326 origpushkeyhandler = bundle2.parthandlermapping[b'pushkey']
326 origpushkeyhandler = bundle2.parthandlermapping[b'pushkey']
327
327
328 def newpushkeyhandler(*args, **kwargs):
328 def newpushkeyhandler(*args, **kwargs):
329 bundle2pushkey(origpushkeyhandler, *args, **kwargs)
329 bundle2pushkey(origpushkeyhandler, *args, **kwargs)
330
330
331 newpushkeyhandler.params = origpushkeyhandler.params
331 newpushkeyhandler.params = origpushkeyhandler.params
332 bundle2.parthandlermapping[b'pushkey'] = newpushkeyhandler
332 bundle2.parthandlermapping[b'pushkey'] = newpushkeyhandler
333
333
334 orighandlephasehandler = bundle2.parthandlermapping[b'phase-heads']
334 orighandlephasehandler = bundle2.parthandlermapping[b'phase-heads']
335 newphaseheadshandler = lambda *args, **kwargs: bundle2handlephases(
335 newphaseheadshandler = lambda *args, **kwargs: bundle2handlephases(
336 orighandlephasehandler, *args, **kwargs
336 orighandlephasehandler, *args, **kwargs
337 )
337 )
338 newphaseheadshandler.params = orighandlephasehandler.params
338 newphaseheadshandler.params = orighandlephasehandler.params
339 bundle2.parthandlermapping[b'phase-heads'] = newphaseheadshandler
339 bundle2.parthandlermapping[b'phase-heads'] = newphaseheadshandler
340
340
341 extensions.wrapfunction(
341 extensions.wrapfunction(
342 localrepo.localrepository, b'listkeys', localrepolistkeys
342 localrepo.localrepository, b'listkeys', localrepolistkeys
343 )
343 )
344 wireprotov1server.commands[b'lookup'] = (
344 wireprotov1server.commands[b'lookup'] = (
345 _lookupwrap(wireprotov1server.commands[b'lookup'][0]),
345 _lookupwrap(wireprotov1server.commands[b'lookup'][0]),
346 b'key',
346 b'key',
347 )
347 )
348 extensions.wrapfunction(exchange, b'getbundlechunks', getbundlechunks)
348 extensions.wrapfunction(exchange, b'getbundlechunks', getbundlechunks)
349
349
350 extensions.wrapfunction(bundle2, b'processparts', processparts)
350 extensions.wrapfunction(bundle2, b'processparts', processparts)
351
351
352
352
353 def clientextsetup(ui):
353 def clientextsetup(ui):
354 entry = extensions.wrapcommand(commands.table, b'push', _push)
354 entry = extensions.wrapcommand(commands.table, b'push', _push)
355
355
356 entry[1].append(
356 entry[1].append(
357 (
357 (
358 b'',
358 b'',
359 b'bundle-store',
359 b'bundle-store',
360 None,
360 None,
361 _(b'force push to go to bundle store (EXPERIMENTAL)'),
361 _(b'force push to go to bundle store (EXPERIMENTAL)'),
362 )
362 )
363 )
363 )
364
364
365 extensions.wrapcommand(commands.table, b'pull', _pull)
365 extensions.wrapcommand(commands.table, b'pull', _pull)
366
366
367 extensions.wrapfunction(discovery, b'checkheads', _checkheads)
367 extensions.wrapfunction(discovery, b'checkheads', _checkheads)
368
368
369 wireprotov1peer.wirepeer.listkeyspatterns = listkeyspatterns
369 wireprotov1peer.wirepeer.listkeyspatterns = listkeyspatterns
370
370
371 partorder = exchange.b2partsgenorder
371 partorder = exchange.b2partsgenorder
372 index = partorder.index(b'changeset')
372 index = partorder.index(b'changeset')
373 partorder.insert(
373 partorder.insert(
374 index, partorder.pop(partorder.index(scratchbranchparttype))
374 index, partorder.pop(partorder.index(scratchbranchparttype))
375 )
375 )
376
376
377
377
378 def _checkheads(orig, pushop):
378 def _checkheads(orig, pushop):
379 if pushop.ui.configbool(experimental, configscratchpush, False):
379 if pushop.ui.configbool(experimental, configscratchpush, False):
380 return
380 return
381 return orig(pushop)
381 return orig(pushop)
382
382
383
383
384 def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
384 def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
385 patterns = wireprototypes.decodelist(patterns)
385 patterns = wireprototypes.decodelist(patterns)
386 d = pycompat.iteritems(repo.listkeys(encoding.tolocal(namespace), patterns))
386 d = pycompat.iteritems(repo.listkeys(encoding.tolocal(namespace), patterns))
387 return pushkey.encodekeys(d)
387 return pushkey.encodekeys(d)
388
388
389
389
390 def localrepolistkeys(orig, self, namespace, patterns=None):
390 def localrepolistkeys(orig, self, namespace, patterns=None):
391 if namespace == b'bookmarks' and patterns:
391 if namespace == b'bookmarks' and patterns:
392 index = self.bundlestore.index
392 index = self.bundlestore.index
393 results = {}
393 results = {}
394 bookmarks = orig(self, namespace)
394 bookmarks = orig(self, namespace)
395 for pattern in patterns:
395 for pattern in patterns:
396 results.update(index.getbookmarks(pattern))
396 results.update(index.getbookmarks(pattern))
397 if pattern.endswith(b'*'):
397 if pattern.endswith(b'*'):
398 pattern = b're:^' + pattern[:-1] + b'.*'
398 pattern = b're:^' + pattern[:-1] + b'.*'
399 kind, pat, matcher = stringutil.stringmatcher(pattern)
399 kind, pat, matcher = stringutil.stringmatcher(pattern)
400 for bookmark, node in pycompat.iteritems(bookmarks):
400 for bookmark, node in pycompat.iteritems(bookmarks):
401 if matcher(bookmark):
401 if matcher(bookmark):
402 results[bookmark] = node
402 results[bookmark] = node
403 return results
403 return results
404 else:
404 else:
405 return orig(self, namespace)
405 return orig(self, namespace)
406
406
407
407
408 @wireprotov1peer.batchable
408 @wireprotov1peer.batchable
409 def listkeyspatterns(self, namespace, patterns):
409 def listkeyspatterns(self, namespace, patterns):
410 if not self.capable(b'pushkey'):
410 if not self.capable(b'pushkey'):
411 yield {}, None
411 yield {}, None
412 f = wireprotov1peer.future()
412 f = wireprotov1peer.future()
413 self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
413 self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
414 yield {
414 yield {
415 b'namespace': encoding.fromlocal(namespace),
415 b'namespace': encoding.fromlocal(namespace),
416 b'patterns': wireprototypes.encodelist(patterns),
416 b'patterns': wireprototypes.encodelist(patterns),
417 }, f
417 }, f
418 d = f.value
418 d = f.value
419 self.ui.debug(
419 self.ui.debug(
420 b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
420 b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
421 )
421 )
422 yield pushkey.decodekeys(d)
422 yield pushkey.decodekeys(d)
423
423
424
424
425 def _readbundlerevs(bundlerepo):
425 def _readbundlerevs(bundlerepo):
426 return list(bundlerepo.revs(b'bundle()'))
426 return list(bundlerepo.revs(b'bundle()'))
427
427
428
428
429 def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
429 def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
430 '''Tells remotefilelog to include all changed files in the changegroup
430 '''Tells remotefilelog to include all changed files in the changegroup
431
431
432 By default remotefilelog doesn't include file content in the changegroup,
432 By default remotefilelog doesn't include file content in the changegroup,
433 but we need to include it if we are fetching from the bundlestore.
433 but we need to include it if we are fetching from the bundlestore.
434 '''
434 '''
435 changedfiles = set()
435 changedfiles = set()
436 cl = bundlerepo.changelog
436 cl = bundlerepo.changelog
437 for r in bundlerevs:
437 for r in bundlerevs:
438 # [3] means changed files
438 # [3] means changed files
439 changedfiles.update(cl.read(r)[3])
439 changedfiles.update(cl.read(r)[3])
440 if not changedfiles:
440 if not changedfiles:
441 return bundlecaps
441 return bundlecaps
442
442
443 changedfiles = b'\0'.join(changedfiles)
443 changedfiles = b'\0'.join(changedfiles)
444 newcaps = []
444 newcaps = []
445 appended = False
445 appended = False
446 for cap in bundlecaps or []:
446 for cap in bundlecaps or []:
447 if cap.startswith(b'excludepattern='):
447 if cap.startswith(b'excludepattern='):
448 newcaps.append(b'\0'.join((cap, changedfiles)))
448 newcaps.append(b'\0'.join((cap, changedfiles)))
449 appended = True
449 appended = True
450 else:
450 else:
451 newcaps.append(cap)
451 newcaps.append(cap)
452 if not appended:
452 if not appended:
453 # excludepattern cap not found. Just append it
453 # excludepattern cap not found. Just append it
454 newcaps.append(b'excludepattern=' + changedfiles)
454 newcaps.append(b'excludepattern=' + changedfiles)
455
455
456 return newcaps
456 return newcaps
457
457
458
458
459 def _rebundle(bundlerepo, bundleroots, unknownhead):
459 def _rebundle(bundlerepo, bundleroots, unknownhead):
460 '''
460 '''
461 A bundle may include more revisions than the user requested: for example,
461 A bundle may include more revisions than the user requested: for example,
462 if the user asks for a revision but the bundle also contains its descendants.
462 if the user asks for a revision but the bundle also contains its descendants.
463 This function filters out all revisions that the user did not request.
463 This function filters out all revisions that the user did not request.
464 '''
464 '''
465 parts = []
465 parts = []
466
466
467 version = b'02'
467 version = b'02'
468 outgoing = discovery.outgoing(
468 outgoing = discovery.outgoing(
469 bundlerepo, commonheads=bundleroots, missingheads=[unknownhead]
469 bundlerepo, commonheads=bundleroots, ancestorsof=[unknownhead]
470 )
470 )
471 cgstream = changegroup.makestream(bundlerepo, outgoing, version, b'pull')
471 cgstream = changegroup.makestream(bundlerepo, outgoing, version, b'pull')
472 cgstream = util.chunkbuffer(cgstream).read()
472 cgstream = util.chunkbuffer(cgstream).read()
473 cgpart = bundle2.bundlepart(b'changegroup', data=cgstream)
473 cgpart = bundle2.bundlepart(b'changegroup', data=cgstream)
474 cgpart.addparam(b'version', version)
474 cgpart.addparam(b'version', version)
475 parts.append(cgpart)
475 parts.append(cgpart)
476
476
477 return parts
477 return parts
478
478
479
479
480 def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
480 def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
481 cl = bundlerepo.changelog
481 cl = bundlerepo.changelog
482 bundleroots = []
482 bundleroots = []
483 for rev in bundlerevs:
483 for rev in bundlerevs:
484 node = cl.node(rev)
484 node = cl.node(rev)
485 parents = cl.parents(node)
485 parents = cl.parents(node)
486 for parent in parents:
486 for parent in parents:
487 # include all revs that exist in the main repo
487 # include all revs that exist in the main repo
488 # to make sure that the bundle can be applied client-side
488 # to make sure that the bundle can be applied client-side
489 if parent in oldrepo:
489 if parent in oldrepo:
490 bundleroots.append(parent)
490 bundleroots.append(parent)
491 return bundleroots
491 return bundleroots
492
492
493
493
494 def _needsrebundling(head, bundlerepo):
494 def _needsrebundling(head, bundlerepo):
495 bundleheads = list(bundlerepo.revs(b'heads(bundle())'))
495 bundleheads = list(bundlerepo.revs(b'heads(bundle())'))
496 return not (
496 return not (
497 len(bundleheads) == 1 and bundlerepo[bundleheads[0]].node() == head
497 len(bundleheads) == 1 and bundlerepo[bundleheads[0]].node() == head
498 )
498 )
499
499
500
500
501 def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
501 def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
502 '''generates the bundle that will be sent to the user
502 '''generates the bundle that will be sent to the user
503
503
504 returns a tuple with the raw bundle string and the bundle type
504 returns a tuple with the raw bundle string and the bundle type
505 '''
505 '''
506 parts = []
506 parts = []
507 if not _needsrebundling(head, bundlerepo):
507 if not _needsrebundling(head, bundlerepo):
508 with util.posixfile(bundlefile, b"rb") as f:
508 with util.posixfile(bundlefile, b"rb") as f:
509 unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
509 unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
510 if isinstance(unbundler, changegroup.cg1unpacker):
510 if isinstance(unbundler, changegroup.cg1unpacker):
511 part = bundle2.bundlepart(
511 part = bundle2.bundlepart(
512 b'changegroup', data=unbundler._stream.read()
512 b'changegroup', data=unbundler._stream.read()
513 )
513 )
514 part.addparam(b'version', b'01')
514 part.addparam(b'version', b'01')
515 parts.append(part)
515 parts.append(part)
516 elif isinstance(unbundler, bundle2.unbundle20):
516 elif isinstance(unbundler, bundle2.unbundle20):
517 haschangegroup = False
517 haschangegroup = False
518 for part in unbundler.iterparts():
518 for part in unbundler.iterparts():
519 if part.type == b'changegroup':
519 if part.type == b'changegroup':
520 haschangegroup = True
520 haschangegroup = True
521 newpart = bundle2.bundlepart(part.type, data=part.read())
521 newpart = bundle2.bundlepart(part.type, data=part.read())
522 for key, value in pycompat.iteritems(part.params):
522 for key, value in pycompat.iteritems(part.params):
523 newpart.addparam(key, value)
523 newpart.addparam(key, value)
524 parts.append(newpart)
524 parts.append(newpart)
525
525
526 if not haschangegroup:
526 if not haschangegroup:
527 raise error.Abort(
527 raise error.Abort(
528 b'unexpected bundle without changegroup part, '
528 b'unexpected bundle without changegroup part, '
529 + b'head: %s' % hex(head),
529 + b'head: %s' % hex(head),
530 hint=b'report to administrator',
530 hint=b'report to administrator',
531 )
531 )
532 else:
532 else:
533 raise error.Abort(b'unknown bundle type')
533 raise error.Abort(b'unknown bundle type')
534 else:
534 else:
535 parts = _rebundle(bundlerepo, bundleroots, head)
535 parts = _rebundle(bundlerepo, bundleroots, head)
536
536
537 return parts
537 return parts
538
538
539
539
540 def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
540 def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
541 heads = heads or []
541 heads = heads or []
542 # newheads are parents of roots of scratch bundles that were requested
542 # newheads are parents of roots of scratch bundles that were requested
543 newphases = {}
543 newphases = {}
544 scratchbundles = []
544 scratchbundles = []
545 newheads = []
545 newheads = []
546 scratchheads = []
546 scratchheads = []
547 nodestobundle = {}
547 nodestobundle = {}
548 allbundlestocleanup = []
548 allbundlestocleanup = []
549 try:
549 try:
550 for head in heads:
550 for head in heads:
551 if not repo.changelog.index.has_node(head):
551 if not repo.changelog.index.has_node(head):
552 if head not in nodestobundle:
552 if head not in nodestobundle:
553 newbundlefile = common.downloadbundle(repo, head)
553 newbundlefile = common.downloadbundle(repo, head)
554 bundlepath = b"bundle:%s+%s" % (repo.root, newbundlefile)
554 bundlepath = b"bundle:%s+%s" % (repo.root, newbundlefile)
555 bundlerepo = hg.repository(repo.ui, bundlepath)
555 bundlerepo = hg.repository(repo.ui, bundlepath)
556
556
557 allbundlestocleanup.append((bundlerepo, newbundlefile))
557 allbundlestocleanup.append((bundlerepo, newbundlefile))
558 bundlerevs = set(_readbundlerevs(bundlerepo))
558 bundlerevs = set(_readbundlerevs(bundlerepo))
559 bundlecaps = _includefilelogstobundle(
559 bundlecaps = _includefilelogstobundle(
560 bundlecaps, bundlerepo, bundlerevs, repo.ui
560 bundlecaps, bundlerepo, bundlerevs, repo.ui
561 )
561 )
562 cl = bundlerepo.changelog
562 cl = bundlerepo.changelog
563 bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
563 bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
564 for rev in bundlerevs:
564 for rev in bundlerevs:
565 node = cl.node(rev)
565 node = cl.node(rev)
566 newphases[hex(node)] = str(phases.draft)
566 newphases[hex(node)] = str(phases.draft)
567 nodestobundle[node] = (
567 nodestobundle[node] = (
568 bundlerepo,
568 bundlerepo,
569 bundleroots,
569 bundleroots,
570 newbundlefile,
570 newbundlefile,
571 )
571 )
572
572
573 scratchbundles.append(
573 scratchbundles.append(
574 _generateoutputparts(head, *nodestobundle[head])
574 _generateoutputparts(head, *nodestobundle[head])
575 )
575 )
576 newheads.extend(bundleroots)
576 newheads.extend(bundleroots)
577 scratchheads.append(head)
577 scratchheads.append(head)
578 finally:
578 finally:
579 for bundlerepo, bundlefile in allbundlestocleanup:
579 for bundlerepo, bundlefile in allbundlestocleanup:
580 bundlerepo.close()
580 bundlerepo.close()
581 try:
581 try:
582 os.unlink(bundlefile)
582 os.unlink(bundlefile)
583 except (IOError, OSError):
583 except (IOError, OSError):
584 # if we can't cleanup the file then just ignore the error,
584 # if we can't cleanup the file then just ignore the error,
585 # no need to fail
585 # no need to fail
586 pass
586 pass
587
587
588 pullfrombundlestore = bool(scratchbundles)
588 pullfrombundlestore = bool(scratchbundles)
589 wrappedchangegrouppart = False
589 wrappedchangegrouppart = False
590 wrappedlistkeys = False
590 wrappedlistkeys = False
591 oldchangegrouppart = exchange.getbundle2partsmapping[b'changegroup']
591 oldchangegrouppart = exchange.getbundle2partsmapping[b'changegroup']
592 try:
592 try:
593
593
594 def _changegrouppart(bundler, *args, **kwargs):
594 def _changegrouppart(bundler, *args, **kwargs):
595 # Order is important here. First add non-scratch part
595 # Order is important here. First add non-scratch part
596 # and only then add parts with scratch bundles because
596 # and only then add parts with scratch bundles because
597 # non-scratch part contains parents of roots of scratch bundles.
597 # non-scratch part contains parents of roots of scratch bundles.
598 result = oldchangegrouppart(bundler, *args, **kwargs)
598 result = oldchangegrouppart(bundler, *args, **kwargs)
599 for bundle in scratchbundles:
599 for bundle in scratchbundles:
600 for part in bundle:
600 for part in bundle:
601 bundler.addpart(part)
601 bundler.addpart(part)
602 return result
602 return result
603
603
604 exchange.getbundle2partsmapping[b'changegroup'] = _changegrouppart
604 exchange.getbundle2partsmapping[b'changegroup'] = _changegrouppart
605 wrappedchangegrouppart = True
605 wrappedchangegrouppart = True
606
606
607 def _listkeys(orig, self, namespace):
607 def _listkeys(orig, self, namespace):
608 origvalues = orig(self, namespace)
608 origvalues = orig(self, namespace)
609 if namespace == b'phases' and pullfrombundlestore:
609 if namespace == b'phases' and pullfrombundlestore:
610 if origvalues.get(b'publishing') == b'True':
610 if origvalues.get(b'publishing') == b'True':
611 # Make repo non-publishing to preserve draft phase
611 # Make repo non-publishing to preserve draft phase
612 del origvalues[b'publishing']
612 del origvalues[b'publishing']
613 origvalues.update(newphases)
613 origvalues.update(newphases)
614 return origvalues
614 return origvalues
615
615
616 extensions.wrapfunction(
616 extensions.wrapfunction(
617 localrepo.localrepository, b'listkeys', _listkeys
617 localrepo.localrepository, b'listkeys', _listkeys
618 )
618 )
619 wrappedlistkeys = True
619 wrappedlistkeys = True
620 heads = list((set(newheads) | set(heads)) - set(scratchheads))
620 heads = list((set(newheads) | set(heads)) - set(scratchheads))
621 result = orig(
621 result = orig(
622 repo, source, heads=heads, bundlecaps=bundlecaps, **kwargs
622 repo, source, heads=heads, bundlecaps=bundlecaps, **kwargs
623 )
623 )
624 finally:
624 finally:
625 if wrappedchangegrouppart:
625 if wrappedchangegrouppart:
626 exchange.getbundle2partsmapping[b'changegroup'] = oldchangegrouppart
626 exchange.getbundle2partsmapping[b'changegroup'] = oldchangegrouppart
627 if wrappedlistkeys:
627 if wrappedlistkeys:
628 extensions.unwrapfunction(
628 extensions.unwrapfunction(
629 localrepo.localrepository, b'listkeys', _listkeys
629 localrepo.localrepository, b'listkeys', _listkeys
630 )
630 )
631 return result
631 return result
632
632
633
633
634 def _lookupwrap(orig):
634 def _lookupwrap(orig):
635 def _lookup(repo, proto, key):
635 def _lookup(repo, proto, key):
636 localkey = encoding.tolocal(key)
636 localkey = encoding.tolocal(key)
637
637
638 if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
638 if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
639 scratchnode = repo.bundlestore.index.getnode(localkey)
639 scratchnode = repo.bundlestore.index.getnode(localkey)
640 if scratchnode:
640 if scratchnode:
641 return b"%d %s\n" % (1, scratchnode)
641 return b"%d %s\n" % (1, scratchnode)
642 else:
642 else:
643 return b"%d %s\n" % (
643 return b"%d %s\n" % (
644 0,
644 0,
645 b'scratch branch %s not found' % localkey,
645 b'scratch branch %s not found' % localkey,
646 )
646 )
647 else:
647 else:
648 try:
648 try:
649 r = hex(repo.lookup(localkey))
649 r = hex(repo.lookup(localkey))
650 return b"%d %s\n" % (1, r)
650 return b"%d %s\n" % (1, r)
651 except Exception as inst:
651 except Exception as inst:
652 if repo.bundlestore.index.getbundle(localkey):
652 if repo.bundlestore.index.getbundle(localkey):
653 return b"%d %s\n" % (1, localkey)
653 return b"%d %s\n" % (1, localkey)
654 else:
654 else:
655 r = stringutil.forcebytestr(inst)
655 r = stringutil.forcebytestr(inst)
656 return b"%d %s\n" % (0, r)
656 return b"%d %s\n" % (0, r)
657
657
658 return _lookup
658 return _lookup
659
659
660
660
661 def _pull(orig, ui, repo, source=b"default", **opts):
661 def _pull(orig, ui, repo, source=b"default", **opts):
662 opts = pycompat.byteskwargs(opts)
662 opts = pycompat.byteskwargs(opts)
663 # Copy paste from `pull` command
663 # Copy paste from `pull` command
664 source, branches = hg.parseurl(ui.expandpath(source), opts.get(b'branch'))
664 source, branches = hg.parseurl(ui.expandpath(source), opts.get(b'branch'))
665
665
666 scratchbookmarks = {}
666 scratchbookmarks = {}
667 unfi = repo.unfiltered()
667 unfi = repo.unfiltered()
668 unknownnodes = []
668 unknownnodes = []
669 for rev in opts.get(b'rev', []):
669 for rev in opts.get(b'rev', []):
670 if rev not in unfi:
670 if rev not in unfi:
671 unknownnodes.append(rev)
671 unknownnodes.append(rev)
672 if opts.get(b'bookmark'):
672 if opts.get(b'bookmark'):
673 bookmarks = []
673 bookmarks = []
674 revs = opts.get(b'rev') or []
674 revs = opts.get(b'rev') or []
675 for bookmark in opts.get(b'bookmark'):
675 for bookmark in opts.get(b'bookmark'):
676 if _scratchbranchmatcher(bookmark):
676 if _scratchbranchmatcher(bookmark):
677 # rev is not known yet
677 # rev is not known yet
678 # it will be fetched with listkeyspatterns next
678 # it will be fetched with listkeyspatterns next
679 scratchbookmarks[bookmark] = b'REVTOFETCH'
679 scratchbookmarks[bookmark] = b'REVTOFETCH'
680 else:
680 else:
681 bookmarks.append(bookmark)
681 bookmarks.append(bookmark)
682
682
683 if scratchbookmarks:
683 if scratchbookmarks:
684 other = hg.peer(repo, opts, source)
684 other = hg.peer(repo, opts, source)
685 fetchedbookmarks = other.listkeyspatterns(
685 fetchedbookmarks = other.listkeyspatterns(
686 b'bookmarks', patterns=scratchbookmarks
686 b'bookmarks', patterns=scratchbookmarks
687 )
687 )
688 for bookmark in scratchbookmarks:
688 for bookmark in scratchbookmarks:
689 if bookmark not in fetchedbookmarks:
689 if bookmark not in fetchedbookmarks:
690 raise error.Abort(
690 raise error.Abort(
691 b'remote bookmark %s not found!' % bookmark
691 b'remote bookmark %s not found!' % bookmark
692 )
692 )
693 scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
693 scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
694 revs.append(fetchedbookmarks[bookmark])
694 revs.append(fetchedbookmarks[bookmark])
695 opts[b'bookmark'] = bookmarks
695 opts[b'bookmark'] = bookmarks
696 opts[b'rev'] = revs
696 opts[b'rev'] = revs
697
697
698 if scratchbookmarks or unknownnodes:
698 if scratchbookmarks or unknownnodes:
699 # Set anyincoming to True
699 # Set anyincoming to True
700 extensions.wrapfunction(
700 extensions.wrapfunction(
701 discovery, b'findcommonincoming', _findcommonincoming
701 discovery, b'findcommonincoming', _findcommonincoming
702 )
702 )
703 try:
703 try:
704 # Remote scratch bookmarks will be deleted because remotenames doesn't
704 # Remote scratch bookmarks will be deleted because remotenames doesn't
705 # know about them. Let's save them before the pull and restore them after
705 # know about them. Let's save them before the pull and restore them after
706 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
706 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
707 result = orig(ui, repo, source, **pycompat.strkwargs(opts))
707 result = orig(ui, repo, source, **pycompat.strkwargs(opts))
708 # TODO(stash): race condition is possible
708 # TODO(stash): race condition is possible
709 # if scratch bookmarks was updated right after orig.
709 # if scratch bookmarks was updated right after orig.
710 # But that's unlikely and shouldn't be harmful.
710 # But that's unlikely and shouldn't be harmful.
711 if common.isremotebooksenabled(ui):
711 if common.isremotebooksenabled(ui):
712 remotescratchbookmarks.update(scratchbookmarks)
712 remotescratchbookmarks.update(scratchbookmarks)
713 _saveremotebookmarks(repo, remotescratchbookmarks, source)
713 _saveremotebookmarks(repo, remotescratchbookmarks, source)
714 else:
714 else:
715 _savelocalbookmarks(repo, scratchbookmarks)
715 _savelocalbookmarks(repo, scratchbookmarks)
716 return result
716 return result
717 finally:
717 finally:
718 if scratchbookmarks:
718 if scratchbookmarks:
719 extensions.unwrapfunction(discovery, b'findcommonincoming')
719 extensions.unwrapfunction(discovery, b'findcommonincoming')
720
720
721
721
722 def _readscratchremotebookmarks(ui, repo, other):
722 def _readscratchremotebookmarks(ui, repo, other):
723 if common.isremotebooksenabled(ui):
723 if common.isremotebooksenabled(ui):
724 remotenamesext = extensions.find(b'remotenames')
724 remotenamesext = extensions.find(b'remotenames')
725 remotepath = remotenamesext.activepath(repo.ui, other)
725 remotepath = remotenamesext.activepath(repo.ui, other)
726 result = {}
726 result = {}
727 # Let's refresh remotenames to make sure we have it up to date
727 # Let's refresh remotenames to make sure we have it up to date
728 # Seems that `repo.names['remotebookmarks']` may return stale bookmarks
728 # Seems that `repo.names['remotebookmarks']` may return stale bookmarks
729 # and that results in deleting scratch bookmarks. Our best guess at how to
729 # and that results in deleting scratch bookmarks. Our best guess at how to
730 # fix it is to use `clearnames()`
730 # fix it is to use `clearnames()`
731 repo._remotenames.clearnames()
731 repo._remotenames.clearnames()
732 for remotebookmark in repo.names[b'remotebookmarks'].listnames(repo):
732 for remotebookmark in repo.names[b'remotebookmarks'].listnames(repo):
733 path, bookname = remotenamesext.splitremotename(remotebookmark)
733 path, bookname = remotenamesext.splitremotename(remotebookmark)
734 if path == remotepath and _scratchbranchmatcher(bookname):
734 if path == remotepath and _scratchbranchmatcher(bookname):
735 nodes = repo.names[b'remotebookmarks'].nodes(
735 nodes = repo.names[b'remotebookmarks'].nodes(
736 repo, remotebookmark
736 repo, remotebookmark
737 )
737 )
738 if nodes:
738 if nodes:
739 result[bookname] = hex(nodes[0])
739 result[bookname] = hex(nodes[0])
740 return result
740 return result
741 else:
741 else:
742 return {}
742 return {}
743
743
744
744
745 def _saveremotebookmarks(repo, newbookmarks, remote):
745 def _saveremotebookmarks(repo, newbookmarks, remote):
746 remotenamesext = extensions.find(b'remotenames')
746 remotenamesext = extensions.find(b'remotenames')
747 remotepath = remotenamesext.activepath(repo.ui, remote)
747 remotepath = remotenamesext.activepath(repo.ui, remote)
748 branches = collections.defaultdict(list)
748 branches = collections.defaultdict(list)
749 bookmarks = {}
749 bookmarks = {}
750 remotenames = remotenamesext.readremotenames(repo)
750 remotenames = remotenamesext.readremotenames(repo)
751 for hexnode, nametype, remote, rname in remotenames:
751 for hexnode, nametype, remote, rname in remotenames:
752 if remote != remotepath:
752 if remote != remotepath:
753 continue
753 continue
754 if nametype == b'bookmarks':
754 if nametype == b'bookmarks':
755 if rname in newbookmarks:
755 if rname in newbookmarks:
756 # It's possible if we have a normal bookmark that matches
756 # It's possible if we have a normal bookmark that matches
757 # scratch branch pattern. In this case just use the current
757 # scratch branch pattern. In this case just use the current
758 # bookmark node
758 # bookmark node
759 del newbookmarks[rname]
759 del newbookmarks[rname]
760 bookmarks[rname] = hexnode
760 bookmarks[rname] = hexnode
761 elif nametype == b'branches':
761 elif nametype == b'branches':
762 # saveremotenames expects 20 byte binary nodes for branches
762 # saveremotenames expects 20 byte binary nodes for branches
763 branches[rname].append(bin(hexnode))
763 branches[rname].append(bin(hexnode))
764
764
765 for bookmark, hexnode in pycompat.iteritems(newbookmarks):
765 for bookmark, hexnode in pycompat.iteritems(newbookmarks):
766 bookmarks[bookmark] = hexnode
766 bookmarks[bookmark] = hexnode
767 remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)
767 remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)
768
768
769
769
770 def _savelocalbookmarks(repo, bookmarks):
770 def _savelocalbookmarks(repo, bookmarks):
771 if not bookmarks:
771 if not bookmarks:
772 return
772 return
773 with repo.wlock(), repo.lock(), repo.transaction(b'bookmark') as tr:
773 with repo.wlock(), repo.lock(), repo.transaction(b'bookmark') as tr:
774 changes = []
774 changes = []
775 for scratchbook, node in pycompat.iteritems(bookmarks):
775 for scratchbook, node in pycompat.iteritems(bookmarks):
776 changectx = repo[node]
776 changectx = repo[node]
777 changes.append((scratchbook, changectx.node()))
777 changes.append((scratchbook, changectx.node()))
778 repo._bookmarks.applychanges(repo, tr, changes)
778 repo._bookmarks.applychanges(repo, tr, changes)
779
779
780
780
781 def _findcommonincoming(orig, *args, **kwargs):
781 def _findcommonincoming(orig, *args, **kwargs):
782 common, inc, remoteheads = orig(*args, **kwargs)
782 common, inc, remoteheads = orig(*args, **kwargs)
783 return common, True, remoteheads
783 return common, True, remoteheads
784
784
785
785
786 def _push(orig, ui, repo, dest=None, *args, **opts):
786 def _push(orig, ui, repo, dest=None, *args, **opts):
787 opts = pycompat.byteskwargs(opts)
787 opts = pycompat.byteskwargs(opts)
788 bookmark = opts.get(b'bookmark')
788 bookmark = opts.get(b'bookmark')
789 # we only support pushing one infinitepush bookmark at once
789 # we only support pushing one infinitepush bookmark at once
790 if len(bookmark) == 1:
790 if len(bookmark) == 1:
791 bookmark = bookmark[0]
791 bookmark = bookmark[0]
792 else:
792 else:
793 bookmark = b''
793 bookmark = b''
794
794
795 oldphasemove = None
795 oldphasemove = None
796 overrides = {(experimental, configbookmark): bookmark}
796 overrides = {(experimental, configbookmark): bookmark}
797
797
798 with ui.configoverride(overrides, b'infinitepush'):
798 with ui.configoverride(overrides, b'infinitepush'):
799 scratchpush = opts.get(b'bundle_store')
799 scratchpush = opts.get(b'bundle_store')
800 if _scratchbranchmatcher(bookmark):
800 if _scratchbranchmatcher(bookmark):
801 scratchpush = True
801 scratchpush = True
802 # bundle2 can be sent back after push (for example, bundle2
802 # bundle2 can be sent back after push (for example, bundle2
803 # containing `pushkey` part to update bookmarks)
803 # containing `pushkey` part to update bookmarks)
804 ui.setconfig(experimental, b'bundle2.pushback', True)
804 ui.setconfig(experimental, b'bundle2.pushback', True)
805
805
806 if scratchpush:
806 if scratchpush:
807 # this is an infinitepush; we don't want the bookmark to be applied,
807 # this is an infinitepush; we don't want the bookmark to be applied,
808 # rather it should be stored in the bundlestore
808 # rather it should be stored in the bundlestore
809 opts[b'bookmark'] = []
809 opts[b'bookmark'] = []
810 ui.setconfig(experimental, configscratchpush, True)
810 ui.setconfig(experimental, configscratchpush, True)
811 oldphasemove = extensions.wrapfunction(
811 oldphasemove = extensions.wrapfunction(
812 exchange, b'_localphasemove', _phasemove
812 exchange, b'_localphasemove', _phasemove
813 )
813 )
814 # Copy-paste from `push` command
814 # Copy-paste from `push` command
815 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
815 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
816 if not path:
816 if not path:
817 raise error.Abort(
817 raise error.Abort(
818 _(b'default repository not configured!'),
818 _(b'default repository not configured!'),
819 hint=_(b"see 'hg help config.paths'"),
819 hint=_(b"see 'hg help config.paths'"),
820 )
820 )
821 destpath = path.pushloc or path.loc
821 destpath = path.pushloc or path.loc
822 # Remote scratch bookmarks will be deleted because remotenames doesn't
822 # Remote scratch bookmarks will be deleted because remotenames doesn't
823 # know about them. Let's save them before the push and restore them after
823 # know about them. Let's save them before the push and restore them after
824 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
824 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
825 result = orig(ui, repo, dest, *args, **pycompat.strkwargs(opts))
825 result = orig(ui, repo, dest, *args, **pycompat.strkwargs(opts))
826 if common.isremotebooksenabled(ui):
826 if common.isremotebooksenabled(ui):
827 if bookmark and scratchpush:
827 if bookmark and scratchpush:
828 other = hg.peer(repo, opts, destpath)
828 other = hg.peer(repo, opts, destpath)
829 fetchedbookmarks = other.listkeyspatterns(
829 fetchedbookmarks = other.listkeyspatterns(
830 b'bookmarks', patterns=[bookmark]
830 b'bookmarks', patterns=[bookmark]
831 )
831 )
832 remotescratchbookmarks.update(fetchedbookmarks)
832 remotescratchbookmarks.update(fetchedbookmarks)
833 _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
833 _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
834 if oldphasemove:
834 if oldphasemove:
835 exchange._localphasemove = oldphasemove
835 exchange._localphasemove = oldphasemove
836 return result
836 return result
837
837
838
838
839 def _deleteinfinitepushbookmarks(ui, repo, path, names):
839 def _deleteinfinitepushbookmarks(ui, repo, path, names):
840 """Prune remote names by removing the bookmarks we don't want anymore,
840 """Prune remote names by removing the bookmarks we don't want anymore,
841 then writing the result back to disk
841 then writing the result back to disk
842 """
842 """
843 remotenamesext = extensions.find(b'remotenames')
843 remotenamesext = extensions.find(b'remotenames')
844
844
845 # remotename format is:
845 # remotename format is:
846 # (node, nametype ("branches" or "bookmarks"), remote, name)
846 # (node, nametype ("branches" or "bookmarks"), remote, name)
847 nametype_idx = 1
847 nametype_idx = 1
848 remote_idx = 2
848 remote_idx = 2
849 name_idx = 3
849 name_idx = 3
850 remotenames = [
850 remotenames = [
851 remotename
851 remotename
852 for remotename in remotenamesext.readremotenames(repo)
852 for remotename in remotenamesext.readremotenames(repo)
853 if remotename[remote_idx] == path
853 if remotename[remote_idx] == path
854 ]
854 ]
855 remote_bm_names = [
855 remote_bm_names = [
856 remotename[name_idx]
856 remotename[name_idx]
857 for remotename in remotenames
857 for remotename in remotenames
858 if remotename[nametype_idx] == b"bookmarks"
858 if remotename[nametype_idx] == b"bookmarks"
859 ]
859 ]
860
860
861 for name in names:
861 for name in names:
862 if name not in remote_bm_names:
862 if name not in remote_bm_names:
863 raise error.Abort(
863 raise error.Abort(
864 _(
864 _(
865 b"infinitepush bookmark '{}' does not exist "
865 b"infinitepush bookmark '{}' does not exist "
866 b"in path '{}'"
866 b"in path '{}'"
867 ).format(name, path)
867 ).format(name, path)
868 )
868 )
869
869
870 bookmarks = {}
870 bookmarks = {}
871 branches = collections.defaultdict(list)
871 branches = collections.defaultdict(list)
872 for node, nametype, remote, name in remotenames:
872 for node, nametype, remote, name in remotenames:
873 if nametype == b"bookmarks" and name not in names:
873 if nametype == b"bookmarks" and name not in names:
874 bookmarks[name] = node
874 bookmarks[name] = node
875 elif nametype == b"branches":
875 elif nametype == b"branches":
876 # saveremotenames wants binary nodes for branches
876 # saveremotenames wants binary nodes for branches
877 branches[name].append(bin(node))
877 branches[name].append(bin(node))
878
878
879 remotenamesext.saveremotenames(repo, path, branches, bookmarks)
879 remotenamesext.saveremotenames(repo, path, branches, bookmarks)
880
880
881
881
882 def _phasemove(orig, pushop, nodes, phase=phases.public):
882 def _phasemove(orig, pushop, nodes, phase=phases.public):
883 """prevent commits from being marked public
883 """prevent commits from being marked public
884
884
885 Since these are going to a scratch branch, they aren't really being
885 Since these are going to a scratch branch, they aren't really being
886 published."""
886 published."""
887
887
888 if phase != phases.public:
888 if phase != phases.public:
889 orig(pushop, nodes, phase)
889 orig(pushop, nodes, phase)
890
890
891
891
892 @exchange.b2partsgenerator(scratchbranchparttype)
892 @exchange.b2partsgenerator(scratchbranchparttype)
893 def partgen(pushop, bundler):
893 def partgen(pushop, bundler):
894 bookmark = pushop.ui.config(experimental, configbookmark)
894 bookmark = pushop.ui.config(experimental, configbookmark)
895 scratchpush = pushop.ui.configbool(experimental, configscratchpush)
895 scratchpush = pushop.ui.configbool(experimental, configscratchpush)
896 if b'changesets' in pushop.stepsdone or not scratchpush:
896 if b'changesets' in pushop.stepsdone or not scratchpush:
897 return
897 return
898
898
899 if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
899 if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
900 return
900 return
901
901
902 pushop.stepsdone.add(b'changesets')
902 pushop.stepsdone.add(b'changesets')
903 if not pushop.outgoing.missing:
903 if not pushop.outgoing.missing:
904 pushop.ui.status(_(b'no changes found\n'))
904 pushop.ui.status(_(b'no changes found\n'))
905 pushop.cgresult = 0
905 pushop.cgresult = 0
906 return
906 return
907
907
908 # This parameter tells the server that the following bundle is an
908 # This parameter tells the server that the following bundle is an
909 # infinitepush. This lets it switch the part processing to our infinitepush
909 # infinitepush. This lets it switch the part processing to our infinitepush
910 # code path.
910 # code path.
911 bundler.addparam(b"infinitepush", b"True")
911 bundler.addparam(b"infinitepush", b"True")
912
912
913 scratchparts = bundleparts.getscratchbranchparts(
913 scratchparts = bundleparts.getscratchbranchparts(
914 pushop.repo, pushop.remote, pushop.outgoing, pushop.ui, bookmark
914 pushop.repo, pushop.remote, pushop.outgoing, pushop.ui, bookmark
915 )
915 )
916
916
917 for scratchpart in scratchparts:
917 for scratchpart in scratchparts:
918 bundler.addpart(scratchpart)
918 bundler.addpart(scratchpart)
919
919
920 def handlereply(op):
920 def handlereply(op):
921 # server either succeeds or aborts; no code to read
921 # server either succeeds or aborts; no code to read
922 pushop.cgresult = 1
922 pushop.cgresult = 1
923
923
924 return handlereply
924 return handlereply
925
925
926
926
927 bundle2.capabilities[bundleparts.scratchbranchparttype] = ()
927 bundle2.capabilities[bundleparts.scratchbranchparttype] = ()
928
928
929
929
930 def _getrevs(bundle, oldnode, force, bookmark):
930 def _getrevs(bundle, oldnode, force, bookmark):
931 b'extracts and validates the revs to be imported'
931 b'extracts and validates the revs to be imported'
932 revs = [bundle[r] for r in bundle.revs(b'sort(bundle())')]
932 revs = [bundle[r] for r in bundle.revs(b'sort(bundle())')]
933
933
934 # new bookmark
934 # new bookmark
935 if oldnode is None:
935 if oldnode is None:
936 return revs
936 return revs
937
937
938 # Fast forward update
938 # Fast forward update
939 if oldnode in bundle and list(bundle.set(b'bundle() & %s::', oldnode)):
939 if oldnode in bundle and list(bundle.set(b'bundle() & %s::', oldnode)):
940 return revs
940 return revs
941
941
942 return revs
942 return revs
943
943
944
944
945 @contextlib.contextmanager
945 @contextlib.contextmanager
946 def logservicecall(logger, service, **kwargs):
946 def logservicecall(logger, service, **kwargs):
947 start = time.time()
947 start = time.time()
948 logger(service, eventtype=b'start', **kwargs)
948 logger(service, eventtype=b'start', **kwargs)
949 try:
949 try:
950 yield
950 yield
951 logger(
951 logger(
952 service,
952 service,
953 eventtype=b'success',
953 eventtype=b'success',
954 elapsedms=(time.time() - start) * 1000,
954 elapsedms=(time.time() - start) * 1000,
955 **kwargs
955 **kwargs
956 )
956 )
957 except Exception as e:
957 except Exception as e:
958 logger(
958 logger(
959 service,
959 service,
960 eventtype=b'failure',
960 eventtype=b'failure',
961 elapsedms=(time.time() - start) * 1000,
961 elapsedms=(time.time() - start) * 1000,
962 errormsg=stringutil.forcebytestr(e),
962 errormsg=stringutil.forcebytestr(e),
963 **kwargs
963 **kwargs
964 )
964 )
965 raise
965 raise
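# Illustrative usage sketch (editorial addition, mirroring how storebundle()
# uses this helper below): the context manager emits paired 'start'/'success'
# (or 'failure') events together with the elapsed time in milliseconds:
#
#     with logservicecall(log, b'bundlestore', bundlesize=len(bundledata)):
#         key = store.write(bundledata)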
966
966
967
967
968 def _getorcreateinfinitepushlogger(op):
968 def _getorcreateinfinitepushlogger(op):
969 logger = op.records[b'infinitepushlogger']
969 logger = op.records[b'infinitepushlogger']
970 if not logger:
970 if not logger:
971 ui = op.repo.ui
971 ui = op.repo.ui
972 try:
972 try:
973 username = procutil.getuser()
973 username = procutil.getuser()
974 except Exception:
974 except Exception:
975 username = b'unknown'
975 username = b'unknown'
976 # Generate random request id to be able to find all logged entries
976 # Generate random request id to be able to find all logged entries
977 # for the same request. Since requestid is pseudo-generated it may
977 # for the same request. Since requestid is pseudo-generated it may
978 # not be unique, but we assume that (hostname, username, requestid)
978 # not be unique, but we assume that (hostname, username, requestid)
979 # is unique.
979 # is unique.
980 random.seed()
980 random.seed()
981 requestid = random.randint(0, 2000000000)
981 requestid = random.randint(0, 2000000000)
982 hostname = socket.gethostname()
982 hostname = socket.gethostname()
983 logger = functools.partial(
983 logger = functools.partial(
984 ui.log,
984 ui.log,
985 b'infinitepush',
985 b'infinitepush',
986 user=username,
986 user=username,
987 requestid=requestid,
987 requestid=requestid,
988 hostname=hostname,
988 hostname=hostname,
989 reponame=ui.config(b'infinitepush', b'reponame'),
989 reponame=ui.config(b'infinitepush', b'reponame'),
990 )
990 )
991 op.records.add(b'infinitepushlogger', logger)
991 op.records.add(b'infinitepushlogger', logger)
992 else:
992 else:
993 logger = logger[0]
993 logger = logger[0]
994 return logger
994 return logger
995
995
996
996
997 def storetobundlestore(orig, repo, op, unbundler):
997 def storetobundlestore(orig, repo, op, unbundler):
998 """stores the incoming bundle coming from push command to the bundlestore
998 """stores the incoming bundle coming from push command to the bundlestore
999 instead of applying on the revlogs"""
999 instead of applying on the revlogs"""
1000
1000
1001 repo.ui.status(_(b"storing changesets on the bundlestore\n"))
1001 repo.ui.status(_(b"storing changesets on the bundlestore\n"))
1002 bundler = bundle2.bundle20(repo.ui)
1002 bundler = bundle2.bundle20(repo.ui)
1003
1003
1004 # processing each part and storing it in bundler
1004 # processing each part and storing it in bundler
1005 with bundle2.partiterator(repo, op, unbundler) as parts:
1005 with bundle2.partiterator(repo, op, unbundler) as parts:
1006 for part in parts:
1006 for part in parts:
1007 bundlepart = None
1007 bundlepart = None
1008 if part.type == b'replycaps':
1008 if part.type == b'replycaps':
1009 # This configures the current operation to allow reply parts.
1009 # This configures the current operation to allow reply parts.
1010 bundle2._processpart(op, part)
1010 bundle2._processpart(op, part)
1011 else:
1011 else:
1012 bundlepart = bundle2.bundlepart(part.type, data=part.read())
1012 bundlepart = bundle2.bundlepart(part.type, data=part.read())
1013 for key, value in pycompat.iteritems(part.params):
1013 for key, value in pycompat.iteritems(part.params):
1014 bundlepart.addparam(key, value)
1014 bundlepart.addparam(key, value)
1015
1015
1016 # Certain parts require a response
1016 # Certain parts require a response
1017 if part.type in (b'pushkey', b'changegroup'):
1017 if part.type in (b'pushkey', b'changegroup'):
1018 if op.reply is not None:
1018 if op.reply is not None:
1019 rpart = op.reply.newpart(b'reply:%s' % part.type)
1019 rpart = op.reply.newpart(b'reply:%s' % part.type)
1020 rpart.addparam(
1020 rpart.addparam(
1021 b'in-reply-to', b'%d' % part.id, mandatory=False
1021 b'in-reply-to', b'%d' % part.id, mandatory=False
1022 )
1022 )
1023 rpart.addparam(b'return', b'1', mandatory=False)
1023 rpart.addparam(b'return', b'1', mandatory=False)
1024
1024
1025 op.records.add(part.type, {b'return': 1,})
1025 op.records.add(part.type, {b'return': 1,})
1026 if bundlepart:
1026 if bundlepart:
1027 bundler.addpart(bundlepart)
1027 bundler.addpart(bundlepart)
1028
1028
1029 # storing the bundle in the bundlestore
1029 # storing the bundle in the bundlestore
1030 buf = util.chunkbuffer(bundler.getchunks())
1030 buf = util.chunkbuffer(bundler.getchunks())
1031 fd, bundlefile = pycompat.mkstemp()
1031 fd, bundlefile = pycompat.mkstemp()
1032 try:
1032 try:
1033 try:
1033 try:
1034 fp = os.fdopen(fd, 'wb')
1034 fp = os.fdopen(fd, 'wb')
1035 fp.write(buf.read())
1035 fp.write(buf.read())
1036 finally:
1036 finally:
1037 fp.close()
1037 fp.close()
1038 storebundle(op, {}, bundlefile)
1038 storebundle(op, {}, bundlefile)
1039 finally:
1039 finally:
1040 try:
1040 try:
1041 os.unlink(bundlefile)
1041 os.unlink(bundlefile)
1042 except Exception:
1042 except Exception:
1043 # we would rather see the original exception
1043 # we would rather see the original exception
1044 pass
1044 pass
1045
1045
1046
1046
1047 def processparts(orig, repo, op, unbundler):
1047 def processparts(orig, repo, op, unbundler):
1048
1048
1049 # make sure we don't wrap processparts in case of `hg unbundle`
1049 # make sure we don't wrap processparts in case of `hg unbundle`
1050 if op.source == b'unbundle':
1050 if op.source == b'unbundle':
1051 return orig(repo, op, unbundler)
1051 return orig(repo, op, unbundler)
1052
1052
1053 # this server routes each push to bundle store
1053 # this server routes each push to bundle store
1054 if repo.ui.configbool(b'infinitepush', b'pushtobundlestore'):
1054 if repo.ui.configbool(b'infinitepush', b'pushtobundlestore'):
1055 return storetobundlestore(orig, repo, op, unbundler)
1055 return storetobundlestore(orig, repo, op, unbundler)
1056
1056
1057 if unbundler.params.get(b'infinitepush') != b'True':
1057 if unbundler.params.get(b'infinitepush') != b'True':
1058 return orig(repo, op, unbundler)
1058 return orig(repo, op, unbundler)
1059
1059
1060 handleallparts = repo.ui.configbool(b'infinitepush', b'storeallparts')
1060 handleallparts = repo.ui.configbool(b'infinitepush', b'storeallparts')
1061
1061
1062 bundler = bundle2.bundle20(repo.ui)
1062 bundler = bundle2.bundle20(repo.ui)
1063 cgparams = None
1063 cgparams = None
1064 with bundle2.partiterator(repo, op, unbundler) as parts:
1064 with bundle2.partiterator(repo, op, unbundler) as parts:
1065 for part in parts:
1065 for part in parts:
1066 bundlepart = None
1066 bundlepart = None
1067 if part.type == b'replycaps':
1067 if part.type == b'replycaps':
1068 # This configures the current operation to allow reply parts.
1068 # This configures the current operation to allow reply parts.
1069 bundle2._processpart(op, part)
1069 bundle2._processpart(op, part)
1070 elif part.type == bundleparts.scratchbranchparttype:
1070 elif part.type == bundleparts.scratchbranchparttype:
1071 # Scratch branch parts need to be converted to normal
1071 # Scratch branch parts need to be converted to normal
1072 # changegroup parts, and the extra parameters stored for later
1072 # changegroup parts, and the extra parameters stored for later
1073 # when we upload to the store. Eventually those parameters will
1073 # when we upload to the store. Eventually those parameters will
1074 # be put on the actual bundle instead of this part, then we can
1074 # be put on the actual bundle instead of this part, then we can
1075 # send a vanilla changegroup instead of the scratchbranch part.
1075 # send a vanilla changegroup instead of the scratchbranch part.
1076 cgversion = part.params.get(b'cgversion', b'01')
1076 cgversion = part.params.get(b'cgversion', b'01')
1077 bundlepart = bundle2.bundlepart(
1077 bundlepart = bundle2.bundlepart(
1078 b'changegroup', data=part.read()
1078 b'changegroup', data=part.read()
1079 )
1079 )
1080 bundlepart.addparam(b'version', cgversion)
1080 bundlepart.addparam(b'version', cgversion)
1081 cgparams = part.params
1081 cgparams = part.params
1082
1082
1083 # If we're not dumping all parts into the new bundle, we need to
1083 # If we're not dumping all parts into the new bundle, we need to
1084 # alert the future pushkey and phase-heads handler to skip
1084 # alert the future pushkey and phase-heads handler to skip
1085 # the part.
1085 # the part.
1086 if not handleallparts:
1086 if not handleallparts:
1087 op.records.add(
1087 op.records.add(
1088 scratchbranchparttype + b'_skippushkey', True
1088 scratchbranchparttype + b'_skippushkey', True
1089 )
1089 )
1090 op.records.add(
1090 op.records.add(
1091 scratchbranchparttype + b'_skipphaseheads', True
1091 scratchbranchparttype + b'_skipphaseheads', True
1092 )
1092 )
1093 else:
1093 else:
1094 if handleallparts:
1094 if handleallparts:
1095 # Ideally we would not process any parts, and instead just
1095 # Ideally we would not process any parts, and instead just
1096 # forward them to the bundle for storage, but since this
1096 # forward them to the bundle for storage, but since this
1097 # differs from previous behavior, we need to put it behind a
1097 # differs from previous behavior, we need to put it behind a
1098 # config flag for incremental rollout.
1098 # config flag for incremental rollout.
1099 bundlepart = bundle2.bundlepart(part.type, data=part.read())
1099 bundlepart = bundle2.bundlepart(part.type, data=part.read())
1100 for key, value in pycompat.iteritems(part.params):
1100 for key, value in pycompat.iteritems(part.params):
1101 bundlepart.addparam(key, value)
1101 bundlepart.addparam(key, value)
1102
1102
1103 # Certain parts require a response
1103 # Certain parts require a response
1104 if part.type == b'pushkey':
1104 if part.type == b'pushkey':
1105 if op.reply is not None:
1105 if op.reply is not None:
1106 rpart = op.reply.newpart(b'reply:pushkey')
1106 rpart = op.reply.newpart(b'reply:pushkey')
1107 rpart.addparam(
1107 rpart.addparam(
1108 b'in-reply-to', str(part.id), mandatory=False
1108 b'in-reply-to', str(part.id), mandatory=False
1109 )
1109 )
1110 rpart.addparam(b'return', b'1', mandatory=False)
1110 rpart.addparam(b'return', b'1', mandatory=False)
1111 else:
1111 else:
1112 bundle2._processpart(op, part)
1112 bundle2._processpart(op, part)
1113
1113
1114 if handleallparts:
1114 if handleallparts:
1115 op.records.add(part.type, {b'return': 1,})
1115 op.records.add(part.type, {b'return': 1,})
1116 if bundlepart:
1116 if bundlepart:
1117 bundler.addpart(bundlepart)
1117 bundler.addpart(bundlepart)
1118
1118
1119 # If commits were sent, store them
1119 # If commits were sent, store them
1120 if cgparams:
1120 if cgparams:
1121 buf = util.chunkbuffer(bundler.getchunks())
1121 buf = util.chunkbuffer(bundler.getchunks())
1122 fd, bundlefile = pycompat.mkstemp()
1122 fd, bundlefile = pycompat.mkstemp()
1123 try:
1123 try:
1124 try:
1124 try:
1125 fp = os.fdopen(fd, 'wb')
1125 fp = os.fdopen(fd, 'wb')
1126 fp.write(buf.read())
1126 fp.write(buf.read())
1127 finally:
1127 finally:
1128 fp.close()
1128 fp.close()
1129 storebundle(op, cgparams, bundlefile)
1129 storebundle(op, cgparams, bundlefile)
1130 finally:
1130 finally:
1131 try:
1131 try:
1132 os.unlink(bundlefile)
1132 os.unlink(bundlefile)
1133 except Exception:
1133 except Exception:
1134 # we would rather see the original exception
1134 # we would rather see the original exception
1135 pass
1135 pass
1136
1136
1137
1137
1138 def storebundle(op, params, bundlefile):
1138 def storebundle(op, params, bundlefile):
1139 log = _getorcreateinfinitepushlogger(op)
1139 log = _getorcreateinfinitepushlogger(op)
1140 parthandlerstart = time.time()
1140 parthandlerstart = time.time()
1141 log(scratchbranchparttype, eventtype=b'start')
1141 log(scratchbranchparttype, eventtype=b'start')
1142 index = op.repo.bundlestore.index
1142 index = op.repo.bundlestore.index
1143 store = op.repo.bundlestore.store
1143 store = op.repo.bundlestore.store
1144 op.records.add(scratchbranchparttype + b'_skippushkey', True)
1144 op.records.add(scratchbranchparttype + b'_skippushkey', True)
1145
1145
1146 bundle = None
1146 bundle = None
1147 try: # guards bundle
1147 try: # guards bundle
1148 bundlepath = b"bundle:%s+%s" % (op.repo.root, bundlefile)
1148 bundlepath = b"bundle:%s+%s" % (op.repo.root, bundlefile)
1149 bundle = hg.repository(op.repo.ui, bundlepath)
1149 bundle = hg.repository(op.repo.ui, bundlepath)
1150
1150
1151 bookmark = params.get(b'bookmark')
1151 bookmark = params.get(b'bookmark')
1152 bookprevnode = params.get(b'bookprevnode', b'')
1152 bookprevnode = params.get(b'bookprevnode', b'')
1153 force = params.get(b'force')
1153 force = params.get(b'force')
1154
1154
1155 if bookmark:
1155 if bookmark:
1156 oldnode = index.getnode(bookmark)
1156 oldnode = index.getnode(bookmark)
1157 else:
1157 else:
1158 oldnode = None
1158 oldnode = None
1159 bundleheads = bundle.revs(b'heads(bundle())')
1159 bundleheads = bundle.revs(b'heads(bundle())')
1160 if bookmark and len(bundleheads) > 1:
1160 if bookmark and len(bundleheads) > 1:
1161 raise error.Abort(
1161 raise error.Abort(
1162 _(b'cannot push more than one head to a scratch branch')
1162 _(b'cannot push more than one head to a scratch branch')
1163 )
1163 )
1164
1164
1165 revs = _getrevs(bundle, oldnode, force, bookmark)
1165 revs = _getrevs(bundle, oldnode, force, bookmark)
1166
1166
1167 # Notify the user of what is being pushed
1167 # Notify the user of what is being pushed
1168 plural = b's' if len(revs) > 1 else b''
1168 plural = b's' if len(revs) > 1 else b''
1169 op.repo.ui.warn(_(b"pushing %d commit%s:\n") % (len(revs), plural))
1169 op.repo.ui.warn(_(b"pushing %d commit%s:\n") % (len(revs), plural))
1170 maxoutput = 10
1170 maxoutput = 10
1171 for i in range(0, min(len(revs), maxoutput)):
1171 for i in range(0, min(len(revs), maxoutput)):
1172 firstline = bundle[revs[i]].description().split(b'\n')[0][:50]
1172 firstline = bundle[revs[i]].description().split(b'\n')[0][:50]
1173 op.repo.ui.warn(b" %s %s\n" % (revs[i], firstline))
1173 op.repo.ui.warn(b" %s %s\n" % (revs[i], firstline))
1174
1174
1175 if len(revs) > maxoutput + 1:
1175 if len(revs) > maxoutput + 1:
1176 op.repo.ui.warn(b" ...\n")
1176 op.repo.ui.warn(b" ...\n")
1177 firstline = bundle[revs[-1]].description().split(b'\n')[0][:50]
1177 firstline = bundle[revs[-1]].description().split(b'\n')[0][:50]
1178 op.repo.ui.warn(b" %s %s\n" % (revs[-1], firstline))
1178 op.repo.ui.warn(b" %s %s\n" % (revs[-1], firstline))
1179
1179
1180 nodesctx = [bundle[rev] for rev in revs]
1180 nodesctx = [bundle[rev] for rev in revs]
1181 inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
1181 inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
1182 if bundleheads:
1182 if bundleheads:
1183 newheadscount = sum(not inindex(rev) for rev in bundleheads)
1183 newheadscount = sum(not inindex(rev) for rev in bundleheads)
1184 else:
1184 else:
1185 newheadscount = 0
1185 newheadscount = 0
1186 # If there's a bookmark specified, there should be only one head,
1186 # If there's a bookmark specified, there should be only one head,
1187 # so we choose the last node, which will be that head.
1187 # so we choose the last node, which will be that head.
1188 # If a bug or malicious client allows there to be a bookmark
1188 # If a bug or malicious client allows there to be a bookmark
1189 # with multiple heads, we will place the bookmark on the last head.
1189 # with multiple heads, we will place the bookmark on the last head.
1190 bookmarknode = nodesctx[-1].hex() if nodesctx else None
1190 bookmarknode = nodesctx[-1].hex() if nodesctx else None
1191 key = None
1191 key = None
1192 if newheadscount:
1192 if newheadscount:
1193 with open(bundlefile, b'rb') as f:
1193 with open(bundlefile, b'rb') as f:
1194 bundledata = f.read()
1194 bundledata = f.read()
1195 with logservicecall(
1195 with logservicecall(
1196 log, b'bundlestore', bundlesize=len(bundledata)
1196 log, b'bundlestore', bundlesize=len(bundledata)
1197 ):
1197 ):
1198 bundlesizelimit = 100 * 1024 * 1024 # 100 MB
1198 bundlesizelimit = 100 * 1024 * 1024 # 100 MB
1199 if len(bundledata) > bundlesizelimit:
1199 if len(bundledata) > bundlesizelimit:
1200 error_msg = (
1200 error_msg = (
1201 b'bundle is too big: %d bytes. '
1201 b'bundle is too big: %d bytes. '
1202 + b'max allowed size is 100 MB'
1202 + b'max allowed size is 100 MB'
1203 )
1203 )
1204 raise error.Abort(error_msg % (len(bundledata),))
1204 raise error.Abort(error_msg % (len(bundledata),))
1205 key = store.write(bundledata)
1205 key = store.write(bundledata)
1206
1206
1207 with logservicecall(log, b'index', newheadscount=newheadscount), index:
1207 with logservicecall(log, b'index', newheadscount=newheadscount), index:
1208 if key:
1208 if key:
1209 index.addbundle(key, nodesctx)
1209 index.addbundle(key, nodesctx)
1210 if bookmark:
1210 if bookmark:
1211 index.addbookmark(bookmark, bookmarknode)
1211 index.addbookmark(bookmark, bookmarknode)
1212 _maybeaddpushbackpart(
1212 _maybeaddpushbackpart(
1213 op, bookmark, bookmarknode, bookprevnode, params
1213 op, bookmark, bookmarknode, bookprevnode, params
1214 )
1214 )
1215 log(
1215 log(
1216 scratchbranchparttype,
1216 scratchbranchparttype,
1217 eventtype=b'success',
1217 eventtype=b'success',
1218 elapsedms=(time.time() - parthandlerstart) * 1000,
1218 elapsedms=(time.time() - parthandlerstart) * 1000,
1219 )
1219 )
1220
1220
1221 except Exception as e:
1221 except Exception as e:
1222 log(
1222 log(
1223 scratchbranchparttype,
1223 scratchbranchparttype,
1224 eventtype=b'failure',
1224 eventtype=b'failure',
1225 elapsedms=(time.time() - parthandlerstart) * 1000,
1225 elapsedms=(time.time() - parthandlerstart) * 1000,
1226 errormsg=stringutil.forcebytestr(e),
1226 errormsg=stringutil.forcebytestr(e),
1227 )
1227 )
1228 raise
1228 raise
1229 finally:
1229 finally:
1230 if bundle:
1230 if bundle:
1231 bundle.close()
1231 bundle.close()
1232
1232
1233
1233
1234 @bundle2.parthandler(
1234 @bundle2.parthandler(
1235 scratchbranchparttype,
1235 scratchbranchparttype,
1236 (
1236 (
1237 b'bookmark',
1237 b'bookmark',
1238 b'bookprevnode',
1238 b'bookprevnode',
1239 b'force',
1239 b'force',
1240 b'pushbackbookmarks',
1240 b'pushbackbookmarks',
1241 b'cgversion',
1241 b'cgversion',
1242 ),
1242 ),
1243 )
1243 )
1244 def bundle2scratchbranch(op, part):
1244 def bundle2scratchbranch(op, part):
1245 '''unbundle a bundle2 part containing a changegroup to store'''
1245 '''unbundle a bundle2 part containing a changegroup to store'''
1246
1246
1247 bundler = bundle2.bundle20(op.repo.ui)
1247 bundler = bundle2.bundle20(op.repo.ui)
1248 cgversion = part.params.get(b'cgversion', b'01')
1248 cgversion = part.params.get(b'cgversion', b'01')
1249 cgpart = bundle2.bundlepart(b'changegroup', data=part.read())
1249 cgpart = bundle2.bundlepart(b'changegroup', data=part.read())
1250 cgpart.addparam(b'version', cgversion)
1250 cgpart.addparam(b'version', cgversion)
1251 bundler.addpart(cgpart)
1251 bundler.addpart(cgpart)
1252 buf = util.chunkbuffer(bundler.getchunks())
1252 buf = util.chunkbuffer(bundler.getchunks())
1253
1253
1254 fd, bundlefile = pycompat.mkstemp()
1254 fd, bundlefile = pycompat.mkstemp()
1255 try:
1255 try:
1256 try:
1256 try:
1257 fp = os.fdopen(fd, 'wb')
1257 fp = os.fdopen(fd, 'wb')
1258 fp.write(buf.read())
1258 fp.write(buf.read())
1259 finally:
1259 finally:
1260 fp.close()
1260 fp.close()
1261 storebundle(op, part.params, bundlefile)
1261 storebundle(op, part.params, bundlefile)
1262 finally:
1262 finally:
1263 try:
1263 try:
1264 os.unlink(bundlefile)
1264 os.unlink(bundlefile)
1265 except OSError as e:
1265 except OSError as e:
1266 if e.errno != errno.ENOENT:
1266 if e.errno != errno.ENOENT:
1267 raise
1267 raise
1268
1268
1269 return 1
1269 return 1
1270
1270
1271
1271
1272 def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
1272 def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
1273 if params.get(b'pushbackbookmarks'):
1273 if params.get(b'pushbackbookmarks'):
1274 if op.reply and b'pushback' in op.reply.capabilities:
1274 if op.reply and b'pushback' in op.reply.capabilities:
1275 params = {
1275 params = {
1276 b'namespace': b'bookmarks',
1276 b'namespace': b'bookmarks',
1277 b'key': bookmark,
1277 b'key': bookmark,
1278 b'new': newnode,
1278 b'new': newnode,
1279 b'old': oldnode,
1279 b'old': oldnode,
1280 }
1280 }
1281 op.reply.newpart(
1281 op.reply.newpart(
1282 b'pushkey', mandatoryparams=pycompat.iteritems(params)
1282 b'pushkey', mandatoryparams=pycompat.iteritems(params)
1283 )
1283 )
1284
1284
1285
1285
1286 def bundle2pushkey(orig, op, part):
1286 def bundle2pushkey(orig, op, part):
1287 '''Wrapper of bundle2.handlepushkey()
1287 '''Wrapper of bundle2.handlepushkey()
1288
1288
1289 The only goal is to skip calling the original function if flag is set.
1289 The only goal is to skip calling the original function if flag is set.
1290 It's set if infinitepush push is happening.
1290 It's set if infinitepush push is happening.
1291 '''
1291 '''
1292 if op.records[scratchbranchparttype + b'_skippushkey']:
1292 if op.records[scratchbranchparttype + b'_skippushkey']:
1293 if op.reply is not None:
1293 if op.reply is not None:
1294 rpart = op.reply.newpart(b'reply:pushkey')
1294 rpart = op.reply.newpart(b'reply:pushkey')
1295 rpart.addparam(b'in-reply-to', str(part.id), mandatory=False)
1295 rpart.addparam(b'in-reply-to', str(part.id), mandatory=False)
1296 rpart.addparam(b'return', b'1', mandatory=False)
1296 rpart.addparam(b'return', b'1', mandatory=False)
1297 return 1
1297 return 1
1298
1298
1299 return orig(op, part)
1299 return orig(op, part)
1300
1300
1301
1301
1302 def bundle2handlephases(orig, op, part):
1302 def bundle2handlephases(orig, op, part):
1303 '''Wrapper of bundle2.handlephases()
1303 '''Wrapper of bundle2.handlephases()
1304
1304
1305 The only goal is to skip calling the original function if flag is set.
1305 The only goal is to skip calling the original function if flag is set.
1306 It's set if infinitepush push is happening.
1306 It's set if infinitepush push is happening.
1307 '''
1307 '''
1308
1308
1309 if op.records[scratchbranchparttype + b'_skipphaseheads']:
1309 if op.records[scratchbranchparttype + b'_skipphaseheads']:
1310 return
1310 return
1311
1311
1312 return orig(op, part)
1312 return orig(op, part)
1313
1313
1314
1314
1315 def _asyncsavemetadata(root, nodes):
1315 def _asyncsavemetadata(root, nodes):
1316 '''starts a separate process that fills metadata for the nodes
1316 '''starts a separate process that fills metadata for the nodes
1317
1317
1318 This function creates a separate process and doesn't wait for its
1318 This function creates a separate process and doesn't wait for its
1319 completion. This was done to avoid slowing down pushes
1319 completion. This was done to avoid slowing down pushes
1320 '''
1320 '''
1321
1321
1322 maxnodes = 50
1322 maxnodes = 50
1323 if len(nodes) > maxnodes:
1323 if len(nodes) > maxnodes:
1324 return
1324 return
1325 nodesargs = []
1325 nodesargs = []
1326 for node in nodes:
1326 for node in nodes:
1327 nodesargs.append(b'--node')
1327 nodesargs.append(b'--node')
1328 nodesargs.append(node)
1328 nodesargs.append(node)
1329 with open(os.devnull, b'w+b') as devnull:
1329 with open(os.devnull, b'w+b') as devnull:
1330 cmdline = [
1330 cmdline = [
1331 util.hgexecutable(),
1331 util.hgexecutable(),
1332 b'debugfillinfinitepushmetadata',
1332 b'debugfillinfinitepushmetadata',
1333 b'-R',
1333 b'-R',
1334 root,
1334 root,
1335 ] + nodesargs
1335 ] + nodesargs
1336 # Process will run in background. We don't care about the return code
1336 # Process will run in background. We don't care about the return code
1337 subprocess.Popen(
1337 subprocess.Popen(
1338 pycompat.rapply(procutil.tonativestr, cmdline),
1338 pycompat.rapply(procutil.tonativestr, cmdline),
1339 close_fds=True,
1339 close_fds=True,
1340 shell=False,
1340 shell=False,
1341 stdin=devnull,
1341 stdin=devnull,
1342 stdout=devnull,
1342 stdout=devnull,
1343 stderr=devnull,
1343 stderr=devnull,
1344 )
1344 )
@@ -1,2584 +1,2584 b''
1 # bundle2.py - generic container format to transmit arbitrary data.
1 # bundle2.py - generic container format to transmit arbitrary data.
2 #
2 #
3 # Copyright 2013 Facebook, Inc.
3 # Copyright 2013 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """Handling of the new bundle2 format
7 """Handling of the new bundle2 format
8
8
9 The goal of bundle2 is to act as an atomic packet to transmit a set of
9 The goal of bundle2 is to act as an atomic packet to transmit a set of
10 payloads in an application-agnostic way. It consists of a sequence of "parts"
10 payloads in an application-agnostic way. It consists of a sequence of "parts"
11 that will be handed to and processed by the application layer.
11 that will be handed to and processed by the application layer.
12
12
13
13
14 General format architecture
14 General format architecture
15 ===========================
15 ===========================
16
16
17 The format is structured as follows
17 The format is structured as follows
18
18
19 - magic string
19 - magic string
20 - stream level parameters
20 - stream level parameters
21 - payload parts (any number)
21 - payload parts (any number)
22 - end of stream marker.
22 - end of stream marker.
23
23
24 The binary format
24 The binary format
25 ============================
25 ============================
26
26
27 All numbers are unsigned and big-endian.
27 All numbers are unsigned and big-endian.
28
28
29 stream level parameters
29 stream level parameters
30 ------------------------
30 ------------------------
31
31
32 Binary format is as follows
32 Binary format is as follows
33
33
34 :params size: int32
34 :params size: int32
35
35
36 The total number of Bytes used by the parameters
36 The total number of Bytes used by the parameters
37
37
38 :params value: arbitrary number of Bytes
38 :params value: arbitrary number of Bytes
39
39
40 A blob of `params size` containing the serialized version of all stream level
40 A blob of `params size` containing the serialized version of all stream level
41 parameters.
41 parameters.
42
42
43 The blob contains a space-separated list of parameters. Parameters with a value
43 The blob contains a space-separated list of parameters. Parameters with a value
44 are stored in the form `<name>=<value>`. Both name and value are urlquoted.
44 are stored in the form `<name>=<value>`. Both name and value are urlquoted.
45
45
46 Empty names are obviously forbidden.
46 Empty names are obviously forbidden.
47
47
48 Names MUST start with a letter. If this first letter is lower case, the
48 Names MUST start with a letter. If this first letter is lower case, the
49 parameter is advisory and can be safely ignored. However, when the first
49 parameter is advisory and can be safely ignored. However, when the first
50 letter is capital, the parameter is mandatory and the bundling process MUST
50 letter is capital, the parameter is mandatory and the bundling process MUST
51 stop if it is not able to process it.
51 stop if it is not able to process it.
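For instance, a hypothetical parameter named 'comp' would be advisory and could
safely be ignored, while one named 'Comp' would be mandatory and would abort the
bundling process if it could not be handled.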
52
52
53 Stream parameters use a simple textual format for two main reasons:
53 Stream parameters use a simple textual format for two main reasons:
54
54
55 - Stream level parameters should remain simple and we want to discourage any
55 - Stream level parameters should remain simple and we want to discourage any
56 crazy usage.
56 crazy usage.
57 - Textual data allows easy human inspection of a bundle2 header in case of
57 - Textual data allows easy human inspection of a bundle2 header in case of
58 trouble.
58 trouble.
59
59
60 Any application-level options MUST go into a bundle2 part instead.
60 Any application-level options MUST go into a bundle2 part instead.
61
61
62 Payload part
62 Payload part
63 ------------------------
63 ------------------------
64
64
65 Binary format is as follows
65 Binary format is as follows
66
66
67 :header size: int32
67 :header size: int32
68
68
69 The total number of Bytes used by the part header. When the header is empty
69 The total number of Bytes used by the part header. When the header is empty
70 (size = 0) this is interpreted as the end of stream marker.
70 (size = 0) this is interpreted as the end of stream marker.
71
71
72 :header:
72 :header:
73
73
74 The header defines how to interpret the part. It contains two pieces of
74 The header defines how to interpret the part. It contains two pieces of
75 data: the part type and the part parameters.
75 data: the part type and the part parameters.
76
76
77 The part type is used to route to an application level handler that can
77 The part type is used to route to an application level handler that can
78 interpret the payload.
78 interpret the payload.
79
79
80 Part parameters are passed to the application level handler. They are
80 Part parameters are passed to the application level handler. They are
81 meant to convey information that will help the application level object to
81 meant to convey information that will help the application level object to
82 interpret the part payload.
82 interpret the part payload.
83
83
84 The binary format of the header is as follows
84 The binary format of the header is as follows
85
85
86 :typesize: (one byte)
86 :typesize: (one byte)
87
87
88 :parttype: alphanumerical part name (restricted to [a-zA-Z0-9_:-]*)
88 :parttype: alphanumerical part name (restricted to [a-zA-Z0-9_:-]*)
89
89
90 :partid: A 32bits integer (unique in the bundle) that can be used to refer
90 :partid: A 32bits integer (unique in the bundle) that can be used to refer
91 to this part.
91 to this part.
92
92
93 :parameters:
93 :parameters:
94
94
95 A part's parameters may have arbitrary content; the binary structure is::
95 A part's parameters may have arbitrary content; the binary structure is::
96
96
97 <mandatory-count><advisory-count><param-sizes><param-data>
97 <mandatory-count><advisory-count><param-sizes><param-data>
98
98
99 :mandatory-count: 1 byte, number of mandatory parameters
99 :mandatory-count: 1 byte, number of mandatory parameters
100
100
101 :advisory-count: 1 byte, number of advisory parameters
101 :advisory-count: 1 byte, number of advisory parameters
102
102
103 :param-sizes:
103 :param-sizes:
104
104
105 N couples of bytes, where N is the total number of parameters. Each
105 N couples of bytes, where N is the total number of parameters. Each
106 couple contains (<size-of-key>, <size-of-value>) for one parameter.
106 couple contains (<size-of-key>, <size-of-value>) for one parameter.
107
107
108 :param-data:
108 :param-data:
109
109
110 A blob of bytes from which each parameter key and value can be
110 A blob of bytes from which each parameter key and value can be
111 retrieved using the list of size couples stored in the previous
111 retrieved using the list of size couples stored in the previous
112 field.
112 field.
113
113
114 Mandatory parameters come first, then the advisory ones.
114 Mandatory parameters come first, then the advisory ones.
115
115
116 Each parameter's key MUST be unique within the part.
116 Each parameter's key MUST be unique within the part.
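As a purely illustrative example, a part carrying one mandatory parameter
'version' with value '02' and no advisory parameters would encode a
mandatory-count of 1, an advisory-count of 0, a single (7, 2) size couple, and
the param-data blob 'version02'.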
117
117
118 :payload:
118 :payload:
119
119
120 payload is a series of `<chunksize><chunkdata>`.
120 payload is a series of `<chunksize><chunkdata>`.
121
121
122 `chunksize` is an int32, `chunkdata` is plain bytes (as many as
122 `chunksize` is an int32, `chunkdata` is plain bytes (as many as
123 `chunksize` says). The payload part is concluded by a zero size chunk.
123 `chunksize` says). The payload part is concluded by a zero size chunk.
124
124
125 The current implementation always produces either zero or one chunk.
125 The current implementation always produces either zero or one chunk.
126 This is an implementation limitation that will ultimately be lifted.
126 This is an implementation limitation that will ultimately be lifted.
127
127
128 `chunksize` can be negative to trigger special case processing. No such
128 `chunksize` can be negative to trigger special case processing. No such
129 processing is in place yet.
129 processing is in place yet.
130
130
131 Bundle processing
131 Bundle processing
132 ============================
132 ============================
133
133
134 Each part is processed in order using a "part handler". Handlers are registered
134 Each part is processed in order using a "part handler". Handlers are registered
135 for a certain part type.
135 for a certain part type.
136
136
137 The matching of a part to its handler is case insensitive. The case of the
137 The matching of a part to its handler is case insensitive. The case of the
138 part type is used to know if a part is mandatory or advisory. If the Part type
138 part type is used to know if a part is mandatory or advisory. If the Part type
139 contains any uppercase char it is considered mandatory. When no handler is
139 contains any uppercase char it is considered mandatory. When no handler is
140 known for a Mandatory part, the process is aborted and an exception is raised.
140 known for a Mandatory part, the process is aborted and an exception is raised.
141 If the part is advisory and no handler is known, the part is ignored. When the
141 If the part is advisory and no handler is known, the part is ignored. When the
142 process is aborted, the full bundle is still read from the stream to keep the
142 process is aborted, the full bundle is still read from the stream to keep the
143 channel usable. But none of the parts read after an abort are processed. In the
143 channel usable. But none of the parts read after an abort are processed. In the
144 future, dropping the stream may become an option for channels we do not care to
144 future, dropping the stream may become an option for channels we do not care to
145 preserve.
145 preserve.
146 """
146 """
147
147
148 from __future__ import absolute_import, division
148 from __future__ import absolute_import, division
149
149
150 import collections
150 import collections
151 import errno
151 import errno
152 import os
152 import os
153 import re
153 import re
154 import string
154 import string
155 import struct
155 import struct
156 import sys
156 import sys
157
157
158 from .i18n import _
158 from .i18n import _
159 from . import (
159 from . import (
160 bookmarks,
160 bookmarks,
161 changegroup,
161 changegroup,
162 encoding,
162 encoding,
163 error,
163 error,
164 node as nodemod,
164 node as nodemod,
165 obsolete,
165 obsolete,
166 phases,
166 phases,
167 pushkey,
167 pushkey,
168 pycompat,
168 pycompat,
169 scmutil,
169 scmutil,
170 streamclone,
170 streamclone,
171 tags,
171 tags,
172 url,
172 url,
173 util,
173 util,
174 )
174 )
175 from .utils import stringutil
175 from .utils import stringutil
176
176
177 urlerr = util.urlerr
177 urlerr = util.urlerr
178 urlreq = util.urlreq
178 urlreq = util.urlreq
179
179
180 _pack = struct.pack
180 _pack = struct.pack
181 _unpack = struct.unpack
181 _unpack = struct.unpack
182
182
183 _fstreamparamsize = b'>i'
183 _fstreamparamsize = b'>i'
184 _fpartheadersize = b'>i'
184 _fpartheadersize = b'>i'
185 _fparttypesize = b'>B'
185 _fparttypesize = b'>B'
186 _fpartid = b'>I'
186 _fpartid = b'>I'
187 _fpayloadsize = b'>i'
187 _fpayloadsize = b'>i'
188 _fpartparamcount = b'>BB'
188 _fpartparamcount = b'>BB'
189
189
190 preferedchunksize = 32768
190 preferedchunksize = 32768
191
191
192 _parttypeforbidden = re.compile(b'[^a-zA-Z0-9_:-]')
192 _parttypeforbidden = re.compile(b'[^a-zA-Z0-9_:-]')
193
193
194
194
195 def outdebug(ui, message):
195 def outdebug(ui, message):
196 """debug regarding output stream (bundling)"""
196 """debug regarding output stream (bundling)"""
197 if ui.configbool(b'devel', b'bundle2.debug'):
197 if ui.configbool(b'devel', b'bundle2.debug'):
198 ui.debug(b'bundle2-output: %s\n' % message)
198 ui.debug(b'bundle2-output: %s\n' % message)
199
199
200
200
201 def indebug(ui, message):
201 def indebug(ui, message):
202 """debug on input stream (unbundling)"""
202 """debug on input stream (unbundling)"""
203 if ui.configbool(b'devel', b'bundle2.debug'):
203 if ui.configbool(b'devel', b'bundle2.debug'):
204 ui.debug(b'bundle2-input: %s\n' % message)
204 ui.debug(b'bundle2-input: %s\n' % message)
205
205
206
206
207 def validateparttype(parttype):
207 def validateparttype(parttype):
208 """raise ValueError if a parttype contains invalid character"""
208 """raise ValueError if a parttype contains invalid character"""
209 if _parttypeforbidden.search(parttype):
209 if _parttypeforbidden.search(parttype):
210 raise ValueError(parttype)
210 raise ValueError(parttype)
211
211
212
212
213 def _makefpartparamsizes(nbparams):
213 def _makefpartparamsizes(nbparams):
214 """return a struct format to read part parameter sizes
214 """return a struct format to read part parameter sizes
215
215
216 The number of parameters is variable, so we need to build that format
216 The number of parameters is variable, so we need to build that format
217 dynamically.
217 dynamically.
218 """
218 """
219 return b'>' + (b'BB' * nbparams)
219 return b'>' + (b'BB' * nbparams)
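# For example (editorial note): _makefpartparamsizes(2) returns b'>BBBB',
# which struct.unpack() can use to read the two (key size, value size) byte
# pairs from a two-parameter part header.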
220
220
221
221
222 parthandlermapping = {}
222 parthandlermapping = {}
223
223
224
224
225 def parthandler(parttype, params=()):
225 def parthandler(parttype, params=()):
226 """decorator that register a function as a bundle2 part handler
226 """decorator that register a function as a bundle2 part handler
227
227
228 eg::
228 eg::
229
229
230 @parthandler('myparttype', ('mandatory', 'param', 'handled'))
230 @parthandler('myparttype', ('mandatory', 'param', 'handled'))
231 def myparttypehandler(...):
231 def myparttypehandler(...):
232 '''process a part of type "my part".'''
232 '''process a part of type "my part".'''
233 ...
233 ...
234 """
234 """
235 validateparttype(parttype)
235 validateparttype(parttype)
236
236
237 def _decorator(func):
237 def _decorator(func):
238 lparttype = parttype.lower() # enforce lower case matching.
238 lparttype = parttype.lower() # enforce lower case matching.
239 assert lparttype not in parthandlermapping
239 assert lparttype not in parthandlermapping
240 parthandlermapping[lparttype] = func
240 parthandlermapping[lparttype] = func
241 func.params = frozenset(params)
241 func.params = frozenset(params)
242 return func
242 return func
243
243
244 return _decorator
244 return _decorator
245
245
246
246
247 class unbundlerecords(object):
247 class unbundlerecords(object):
248 """keep record of what happens during and unbundle
248 """keep record of what happens during and unbundle
249
249
250 New records are added using `records.add('cat', obj)`, where 'cat' is a
250 New records are added using `records.add('cat', obj)`, where 'cat' is a
251 category of record and obj is an arbitrary object.
251 category of record and obj is an arbitrary object.
252
252
253 `records['cat']` will return all entries of this category 'cat'.
253 `records['cat']` will return all entries of this category 'cat'.
254
254
255 Iterating on the object itself will yield `('category', obj)` tuples
255 Iterating on the object itself will yield `('category', obj)` tuples
256 for all entries.
256 for all entries.
257
257
258 All iterations happen in chronological order.
258 All iterations happen in chronological order.
259 """
259 """
260
260
261 def __init__(self):
261 def __init__(self):
262 self._categories = {}
262 self._categories = {}
263 self._sequences = []
263 self._sequences = []
264 self._replies = {}
264 self._replies = {}
265
265
266 def add(self, category, entry, inreplyto=None):
266 def add(self, category, entry, inreplyto=None):
267 """add a new record of a given category.
267 """add a new record of a given category.
268
268
269 The entry can then be retrieved in the list returned by
269 The entry can then be retrieved in the list returned by
270 self['category']."""
270 self['category']."""
271 self._categories.setdefault(category, []).append(entry)
271 self._categories.setdefault(category, []).append(entry)
272 self._sequences.append((category, entry))
272 self._sequences.append((category, entry))
273 if inreplyto is not None:
273 if inreplyto is not None:
274 self.getreplies(inreplyto).add(category, entry)
274 self.getreplies(inreplyto).add(category, entry)
275
275
276 def getreplies(self, partid):
276 def getreplies(self, partid):
277 """get the records that are replies to a specific part"""
277 """get the records that are replies to a specific part"""
278 return self._replies.setdefault(partid, unbundlerecords())
278 return self._replies.setdefault(partid, unbundlerecords())
279
279
280 def __getitem__(self, cat):
280 def __getitem__(self, cat):
281 return tuple(self._categories.get(cat, ()))
281 return tuple(self._categories.get(cat, ()))
282
282
283 def __iter__(self):
283 def __iter__(self):
284 return iter(self._sequences)
284 return iter(self._sequences)
285
285
286 def __len__(self):
286 def __len__(self):
287 return len(self._sequences)
287 return len(self._sequences)
288
288
289 def __nonzero__(self):
289 def __nonzero__(self):
290 return bool(self._sequences)
290 return bool(self._sequences)
291
291
292 __bool__ = __nonzero__
292 __bool__ = __nonzero__
293
293
294
294
295 class bundleoperation(object):
295 class bundleoperation(object):
296 """an object that represents a single bundling process
296 """an object that represents a single bundling process
297
297
298 Its purpose is to carry unbundle-related objects and states.
298 Its purpose is to carry unbundle-related objects and states.
299
299
300 A new object should be created at the beginning of each bundle processing.
300 A new object should be created at the beginning of each bundle processing.
301 The object is to be returned by the processing function.
301 The object is to be returned by the processing function.
302
302
303 The object has very little content now; it will ultimately contain:
303 The object has very little content now; it will ultimately contain:
304 * an access to the repo the bundle is applied to,
304 * an access to the repo the bundle is applied to,
305 * a ui object,
305 * a ui object,
306 * a way to retrieve a transaction to add changes to the repo,
306 * a way to retrieve a transaction to add changes to the repo,
307 * a way to record the result of processing each part,
307 * a way to record the result of processing each part,
308 * a way to construct a bundle response when applicable.
308 * a way to construct a bundle response when applicable.
309 """
309 """
310
310
311 def __init__(self, repo, transactiongetter, captureoutput=True, source=b''):
311 def __init__(self, repo, transactiongetter, captureoutput=True, source=b''):
312 self.repo = repo
312 self.repo = repo
313 self.ui = repo.ui
313 self.ui = repo.ui
314 self.records = unbundlerecords()
314 self.records = unbundlerecords()
315 self.reply = None
315 self.reply = None
316 self.captureoutput = captureoutput
316 self.captureoutput = captureoutput
317 self.hookargs = {}
317 self.hookargs = {}
318 self._gettransaction = transactiongetter
318 self._gettransaction = transactiongetter
319 # carries value that can modify part behavior
319 # carries value that can modify part behavior
320 self.modes = {}
320 self.modes = {}
321 self.source = source
321 self.source = source
322
322
323 def gettransaction(self):
323 def gettransaction(self):
324 transaction = self._gettransaction()
324 transaction = self._gettransaction()
325
325
326 if self.hookargs:
326 if self.hookargs:
327 # the ones added to the transaction supersede those added
327 # the ones added to the transaction supersede those added
328 # to the operation.
328 # to the operation.
329 self.hookargs.update(transaction.hookargs)
329 self.hookargs.update(transaction.hookargs)
330 transaction.hookargs = self.hookargs
330 transaction.hookargs = self.hookargs
331
331
332 # mark the hookargs as flushed. further attempts to add to
332 # mark the hookargs as flushed. further attempts to add to
333 # hookargs will result in an abort.
333 # hookargs will result in an abort.
334 self.hookargs = None
334 self.hookargs = None
335
335
336 return transaction
336 return transaction
337
337
338 def addhookargs(self, hookargs):
338 def addhookargs(self, hookargs):
339 if self.hookargs is None:
339 if self.hookargs is None:
340 raise error.ProgrammingError(
340 raise error.ProgrammingError(
341 b'attempted to add hookargs to '
341 b'attempted to add hookargs to '
342 b'operation after transaction started'
342 b'operation after transaction started'
343 )
343 )
344 self.hookargs.update(hookargs)
344 self.hookargs.update(hookargs)
345
345
346
346
347 class TransactionUnavailable(RuntimeError):
347 class TransactionUnavailable(RuntimeError):
348 pass
348 pass
349
349
350
350
351 def _notransaction():
351 def _notransaction():
352 """default method to get a transaction while processing a bundle
352 """default method to get a transaction while processing a bundle
353
353
354 Raise an exception to highlight the fact that no transaction was expected
354 Raise an exception to highlight the fact that no transaction was expected
355 to be created"""
355 to be created"""
356 raise TransactionUnavailable()
356 raise TransactionUnavailable()
357
357
358
358
359 def applybundle(repo, unbundler, tr, source, url=None, **kwargs):
359 def applybundle(repo, unbundler, tr, source, url=None, **kwargs):
360 # transform me into unbundler.apply() as soon as the freeze is lifted
360 # transform me into unbundler.apply() as soon as the freeze is lifted
361 if isinstance(unbundler, unbundle20):
361 if isinstance(unbundler, unbundle20):
362 tr.hookargs[b'bundle2'] = b'1'
362 tr.hookargs[b'bundle2'] = b'1'
363 if source is not None and b'source' not in tr.hookargs:
363 if source is not None and b'source' not in tr.hookargs:
364 tr.hookargs[b'source'] = source
364 tr.hookargs[b'source'] = source
365 if url is not None and b'url' not in tr.hookargs:
365 if url is not None and b'url' not in tr.hookargs:
366 tr.hookargs[b'url'] = url
366 tr.hookargs[b'url'] = url
367 return processbundle(repo, unbundler, lambda: tr, source=source)
367 return processbundle(repo, unbundler, lambda: tr, source=source)
368 else:
368 else:
369 # the transactiongetter won't be used, but we might as well set it
369 # the transactiongetter won't be used, but we might as well set it
370 op = bundleoperation(repo, lambda: tr, source=source)
370 op = bundleoperation(repo, lambda: tr, source=source)
371 _processchangegroup(op, unbundler, tr, source, url, **kwargs)
371 _processchangegroup(op, unbundler, tr, source, url, **kwargs)
372 return op
372 return op
373
373
374
374
375 class partiterator(object):
375 class partiterator(object):
376 def __init__(self, repo, op, unbundler):
376 def __init__(self, repo, op, unbundler):
377 self.repo = repo
377 self.repo = repo
378 self.op = op
378 self.op = op
379 self.unbundler = unbundler
379 self.unbundler = unbundler
380 self.iterator = None
380 self.iterator = None
381 self.count = 0
381 self.count = 0
382 self.current = None
382 self.current = None
383
383
384 def __enter__(self):
384 def __enter__(self):
385 def func():
385 def func():
386 itr = enumerate(self.unbundler.iterparts(), 1)
386 itr = enumerate(self.unbundler.iterparts(), 1)
387 for count, p in itr:
387 for count, p in itr:
388 self.count = count
388 self.count = count
389 self.current = p
389 self.current = p
390 yield p
390 yield p
391 p.consume()
391 p.consume()
392 self.current = None
392 self.current = None
393
393
394 self.iterator = func()
394 self.iterator = func()
395 return self.iterator
395 return self.iterator
396
396
397 def __exit__(self, type, exc, tb):
397 def __exit__(self, type, exc, tb):
398 if not self.iterator:
398 if not self.iterator:
399 return
399 return
400
400
401 # Only gracefully abort in a normal exception situation. User aborts
401 # Only gracefully abort in a normal exception situation. User aborts
402 # like Ctrl+C throw a KeyboardInterrupt which is not a base Exception,
402 # like Ctrl+C throw a KeyboardInterrupt which is not a base Exception,
403 # and should not gracefully cleanup.
403 # and should not gracefully cleanup.
404 if isinstance(exc, Exception):
404 if isinstance(exc, Exception):
405 # Any exceptions seeking to the end of the bundle at this point are
405 # Any exceptions seeking to the end of the bundle at this point are
406 # almost certainly related to the underlying stream being bad.
406 # almost certainly related to the underlying stream being bad.
407 # And, chances are that the exception we're handling is related to
407 # And, chances are that the exception we're handling is related to
408 # getting in that bad state. So, we swallow the seeking error and
408 # getting in that bad state. So, we swallow the seeking error and
409 # re-raise the original error.
409 # re-raise the original error.
410 seekerror = False
410 seekerror = False
411 try:
411 try:
412 if self.current:
412 if self.current:
413 # consume the part content to not corrupt the stream.
413 # consume the part content to not corrupt the stream.
414 self.current.consume()
414 self.current.consume()
415
415
416 for part in self.iterator:
416 for part in self.iterator:
417 # consume the bundle content
417 # consume the bundle content
418 part.consume()
418 part.consume()
419 except Exception:
419 except Exception:
420 seekerror = True
420 seekerror = True
421
421
422 # Small hack to let caller code distinguish exceptions from bundle2
422 # Small hack to let caller code distinguish exceptions from bundle2
423 # processing from processing the old format. This is mostly needed
423 # processing from processing the old format. This is mostly needed
424 # to handle different return codes to unbundle according to the type
424 # to handle different return codes to unbundle according to the type
425 # of bundle. We should probably clean up or drop this return code
425 # of bundle. We should probably clean up or drop this return code
426 # craziness in a future version.
426 # craziness in a future version.
427 exc.duringunbundle2 = True
427 exc.duringunbundle2 = True
428 salvaged = []
428 salvaged = []
429 replycaps = None
429 replycaps = None
430 if self.op.reply is not None:
430 if self.op.reply is not None:
431 salvaged = self.op.reply.salvageoutput()
431 salvaged = self.op.reply.salvageoutput()
432 replycaps = self.op.reply.capabilities
432 replycaps = self.op.reply.capabilities
433 exc._replycaps = replycaps
433 exc._replycaps = replycaps
434 exc._bundle2salvagedoutput = salvaged
434 exc._bundle2salvagedoutput = salvaged
435
435
436 # Re-raising from a variable loses the original stack. So only use
436 # Re-raising from a variable loses the original stack. So only use
437 # that form if we need to.
437 # that form if we need to.
438 if seekerror:
438 if seekerror:
439 raise exc
439 raise exc
440
440
441 self.repo.ui.debug(
441 self.repo.ui.debug(
442 b'bundle2-input-bundle: %i parts total\n' % self.count
442 b'bundle2-input-bundle: %i parts total\n' % self.count
443 )
443 )
444
444
445
445
446 def processbundle(repo, unbundler, transactiongetter=None, op=None, source=b''):
446 def processbundle(repo, unbundler, transactiongetter=None, op=None, source=b''):
447 """This function process a bundle, apply effect to/from a repo
447 """This function process a bundle, apply effect to/from a repo
448
448
449 It iterates over each part then searches for and uses the proper handling
449 It iterates over each part then searches for and uses the proper handling
450 code to process the part. Parts are processed in order.
450 code to process the part. Parts are processed in order.
451
451
452 Unknown Mandatory part will abort the process.
452 Unknown Mandatory part will abort the process.
453
453
454 It is temporarily possible to provide a prebuilt bundleoperation to the
454 It is temporarily possible to provide a prebuilt bundleoperation to the
455 function. This is used to ensure output is properly propagated in case of
455 function. This is used to ensure output is properly propagated in case of
456 an error during the unbundling. This output capturing part will likely be
456 an error during the unbundling. This output capturing part will likely be
457 reworked and this ability will probably go away in the process.
457 reworked and this ability will probably go away in the process.
458 """
458 """
459 if op is None:
459 if op is None:
460 if transactiongetter is None:
460 if transactiongetter is None:
461 transactiongetter = _notransaction
461 transactiongetter = _notransaction
462 op = bundleoperation(repo, transactiongetter, source=source)
462 op = bundleoperation(repo, transactiongetter, source=source)
463 # todo:
463 # todo:
464 # - replace this with an init function soon.
464 # - replace this with an init function soon.
465 # - exception catching
465 # - exception catching
466 unbundler.params
466 unbundler.params
467 if repo.ui.debugflag:
467 if repo.ui.debugflag:
468 msg = [b'bundle2-input-bundle:']
468 msg = [b'bundle2-input-bundle:']
469 if unbundler.params:
469 if unbundler.params:
470 msg.append(b' %i params' % len(unbundler.params))
470 msg.append(b' %i params' % len(unbundler.params))
471 if op._gettransaction is None or op._gettransaction is _notransaction:
471 if op._gettransaction is None or op._gettransaction is _notransaction:
472 msg.append(b' no-transaction')
472 msg.append(b' no-transaction')
473 else:
473 else:
474 msg.append(b' with-transaction')
474 msg.append(b' with-transaction')
475 msg.append(b'\n')
475 msg.append(b'\n')
476 repo.ui.debug(b''.join(msg))
476 repo.ui.debug(b''.join(msg))
477
477
478 processparts(repo, op, unbundler)
478 processparts(repo, op, unbundler)
479
479
480 return op
480 return op
481
481
482
482
483 def processparts(repo, op, unbundler):
483 def processparts(repo, op, unbundler):
484 with partiterator(repo, op, unbundler) as parts:
484 with partiterator(repo, op, unbundler) as parts:
485 for part in parts:
485 for part in parts:
486 _processpart(op, part)
486 _processpart(op, part)
487
487
488
488
489 def _processchangegroup(op, cg, tr, source, url, **kwargs):
489 def _processchangegroup(op, cg, tr, source, url, **kwargs):
490 ret = cg.apply(op.repo, tr, source, url, **kwargs)
490 ret = cg.apply(op.repo, tr, source, url, **kwargs)
491 op.records.add(b'changegroup', {b'return': ret,})
491 op.records.add(b'changegroup', {b'return': ret,})
492 return ret
492 return ret
493
493
494
494
495 def _gethandler(op, part):
495 def _gethandler(op, part):
496 status = b'unknown' # used by debug output
496 status = b'unknown' # used by debug output
497 try:
497 try:
498 handler = parthandlermapping.get(part.type)
498 handler = parthandlermapping.get(part.type)
499 if handler is None:
499 if handler is None:
500 status = b'unsupported-type'
500 status = b'unsupported-type'
501 raise error.BundleUnknownFeatureError(parttype=part.type)
501 raise error.BundleUnknownFeatureError(parttype=part.type)
502 indebug(op.ui, b'found a handler for part %s' % part.type)
502 indebug(op.ui, b'found a handler for part %s' % part.type)
503 unknownparams = part.mandatorykeys - handler.params
503 unknownparams = part.mandatorykeys - handler.params
504 if unknownparams:
504 if unknownparams:
505 unknownparams = list(unknownparams)
505 unknownparams = list(unknownparams)
506 unknownparams.sort()
506 unknownparams.sort()
507 status = b'unsupported-params (%s)' % b', '.join(unknownparams)
507 status = b'unsupported-params (%s)' % b', '.join(unknownparams)
508 raise error.BundleUnknownFeatureError(
508 raise error.BundleUnknownFeatureError(
509 parttype=part.type, params=unknownparams
509 parttype=part.type, params=unknownparams
510 )
510 )
511 status = b'supported'
511 status = b'supported'
512 except error.BundleUnknownFeatureError as exc:
512 except error.BundleUnknownFeatureError as exc:
513 if part.mandatory: # mandatory parts
513 if part.mandatory: # mandatory parts
514 raise
514 raise
515 indebug(op.ui, b'ignoring unsupported advisory part %s' % exc)
515 indebug(op.ui, b'ignoring unsupported advisory part %s' % exc)
516 return # skip to part processing
516 return # skip to part processing
517 finally:
517 finally:
518 if op.ui.debugflag:
518 if op.ui.debugflag:
519 msg = [b'bundle2-input-part: "%s"' % part.type]
519 msg = [b'bundle2-input-part: "%s"' % part.type]
520 if not part.mandatory:
520 if not part.mandatory:
521 msg.append(b' (advisory)')
521 msg.append(b' (advisory)')
522 nbmp = len(part.mandatorykeys)
522 nbmp = len(part.mandatorykeys)
523 nbap = len(part.params) - nbmp
523 nbap = len(part.params) - nbmp
524 if nbmp or nbap:
524 if nbmp or nbap:
525 msg.append(b' (params:')
525 msg.append(b' (params:')
526 if nbmp:
526 if nbmp:
527 msg.append(b' %i mandatory' % nbmp)
527 msg.append(b' %i mandatory' % nbmp)
528 if nbap:
528 if nbap:
529 msg.append(b' %i advisory' % nbap)
529 msg.append(b' %i advisory' % nbap)
530 msg.append(b')')
530 msg.append(b')')
531 msg.append(b' %s\n' % status)
531 msg.append(b' %s\n' % status)
532 op.ui.debug(b''.join(msg))
532 op.ui.debug(b''.join(msg))
533
533
534 return handler
534 return handler
535
535
536
536
537 def _processpart(op, part):
537 def _processpart(op, part):
538 """process a single part from a bundle
538 """process a single part from a bundle
539
539
540 The part is guaranteed to have been fully consumed when the function exits
540 The part is guaranteed to have been fully consumed when the function exits
541 (even if an exception is raised)."""
541 (even if an exception is raised)."""
542 handler = _gethandler(op, part)
542 handler = _gethandler(op, part)
543 if handler is None:
543 if handler is None:
544 return
544 return
545
545
546 # handler is called outside the above try block so that we don't
546 # handler is called outside the above try block so that we don't
547 # risk catching KeyErrors from anything other than the
547 # risk catching KeyErrors from anything other than the
548 # parthandlermapping lookup (any KeyError raised by handler()
548 # parthandlermapping lookup (any KeyError raised by handler()
549 # itself represents a defect of a different variety).
549 # itself represents a defect of a different variety).
550 output = None
550 output = None
551 if op.captureoutput and op.reply is not None:
551 if op.captureoutput and op.reply is not None:
552 op.ui.pushbuffer(error=True, subproc=True)
552 op.ui.pushbuffer(error=True, subproc=True)
553 output = b''
553 output = b''
554 try:
554 try:
555 handler(op, part)
555 handler(op, part)
556 finally:
556 finally:
557 if output is not None:
557 if output is not None:
558 output = op.ui.popbuffer()
558 output = op.ui.popbuffer()
559 if output:
559 if output:
560 outpart = op.reply.newpart(b'output', data=output, mandatory=False)
560 outpart = op.reply.newpart(b'output', data=output, mandatory=False)
561 outpart.addparam(
561 outpart.addparam(
562 b'in-reply-to', pycompat.bytestr(part.id), mandatory=False
562 b'in-reply-to', pycompat.bytestr(part.id), mandatory=False
563 )
563 )
564
564
565
565
566 def decodecaps(blob):
566 def decodecaps(blob):
567 """decode a bundle2 caps bytes blob into a dictionary
567 """decode a bundle2 caps bytes blob into a dictionary
568
568
569 The blob is a list of capabilities (one per line)
569 The blob is a list of capabilities (one per line)
570 Capabilities may have values using a line of the form::
570 Capabilities may have values using a line of the form::
571
571
572 capability=value1,value2,value3
572 capability=value1,value2,value3
573
573
574 The values are always a list."""
574 The values are always a list."""
575 caps = {}
575 caps = {}
576 for line in blob.splitlines():
576 for line in blob.splitlines():
577 if not line:
577 if not line:
578 continue
578 continue
579 if b'=' not in line:
579 if b'=' not in line:
580 key, vals = line, ()
580 key, vals = line, ()
581 else:
581 else:
582 key, vals = line.split(b'=', 1)
582 key, vals = line.split(b'=', 1)
583 vals = vals.split(b',')
583 vals = vals.split(b',')
584 key = urlreq.unquote(key)
584 key = urlreq.unquote(key)
585 vals = [urlreq.unquote(v) for v in vals]
585 vals = [urlreq.unquote(v) for v in vals]
586 caps[key] = vals
586 caps[key] = vals
587 return caps
587 return caps
588
588
589
589
590 def encodecaps(caps):
590 def encodecaps(caps):
591 """encode a bundle2 caps dictionary into a bytes blob"""
591 """encode a bundle2 caps dictionary into a bytes blob"""
592 chunks = []
592 chunks = []
593 for ca in sorted(caps):
593 for ca in sorted(caps):
594 vals = caps[ca]
594 vals = caps[ca]
595 ca = urlreq.quote(ca)
595 ca = urlreq.quote(ca)
596 vals = [urlreq.quote(v) for v in vals]
596 vals = [urlreq.quote(v) for v in vals]
597 if vals:
597 if vals:
598 ca = b"%s=%s" % (ca, b','.join(vals))
598 ca = b"%s=%s" % (ca, b','.join(vals))
599 chunks.append(ca)
599 chunks.append(ca)
600 return b'\n'.join(chunks)
600 return b'\n'.join(chunks)
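# Editor's note: a small round-trip sketch for the two helpers above
# (illustrative values only). Capabilities without values decode to an empty
# list; valued ones decode to a list of byte strings:
#
#   caps = {b'HG20': [], b'changegroup': [b'01', b'02']}
#   blob = encodecaps(caps)        # b'HG20\nchangegroup=01,02'
#   assert decodecaps(blob) == caps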
601
601
602
602
603 bundletypes = {
603 bundletypes = {
604 b"": (b"", b'UN'), # only when using unbundle on ssh and old http servers
604 b"": (b"", b'UN'), # only when using unbundle on ssh and old http servers
605 # since the unification ssh accepts a header but there
605 # since the unification ssh accepts a header but there
606 # is no capability signaling it.
606 # is no capability signaling it.
607 b"HG20": (), # special-cased below
607 b"HG20": (), # special-cased below
608 b"HG10UN": (b"HG10UN", b'UN'),
608 b"HG10UN": (b"HG10UN", b'UN'),
609 b"HG10BZ": (b"HG10", b'BZ'),
609 b"HG10BZ": (b"HG10", b'BZ'),
610 b"HG10GZ": (b"HG10GZ", b'GZ'),
610 b"HG10GZ": (b"HG10GZ", b'GZ'),
611 }
611 }
612
612
613 # hgweb uses this list to communicate its preferred type
613 # hgweb uses this list to communicate its preferred type
614 bundlepriority = [b'HG10GZ', b'HG10BZ', b'HG10UN']
614 bundlepriority = [b'HG10GZ', b'HG10BZ', b'HG10UN']
615
615
616
616
617 class bundle20(object):
617 class bundle20(object):
618 """represent an outgoing bundle2 container
618 """represent an outgoing bundle2 container
619
619
620 Use the `addparam` method to add a stream level parameter, and `newpart` to
620 Use the `addparam` method to add a stream level parameter, and `newpart` to
621 populate it. Then call `getchunks` to retrieve all the binary chunks of
621 populate it. Then call `getchunks` to retrieve all the binary chunks of
622 data that compose the bundle2 container."""
622 data that compose the bundle2 container."""
623
623
624 _magicstring = b'HG20'
624 _magicstring = b'HG20'
625
625
626 def __init__(self, ui, capabilities=()):
626 def __init__(self, ui, capabilities=()):
627 self.ui = ui
627 self.ui = ui
628 self._params = []
628 self._params = []
629 self._parts = []
629 self._parts = []
630 self.capabilities = dict(capabilities)
630 self.capabilities = dict(capabilities)
631 self._compengine = util.compengines.forbundletype(b'UN')
631 self._compengine = util.compengines.forbundletype(b'UN')
632 self._compopts = None
632 self._compopts = None
633 # If compression is being handled by a consumer of the raw
633 # If compression is being handled by a consumer of the raw
634 # data (e.g. the wire protocol), unsetting this flag tells
634 # data (e.g. the wire protocol), unsetting this flag tells
635 # consumers that the bundle is best left uncompressed.
635 # consumers that the bundle is best left uncompressed.
636 self.prefercompressed = True
636 self.prefercompressed = True
637
637
638 def setcompression(self, alg, compopts=None):
638 def setcompression(self, alg, compopts=None):
639 """setup core part compression to <alg>"""
639 """setup core part compression to <alg>"""
640 if alg in (None, b'UN'):
640 if alg in (None, b'UN'):
641 return
641 return
642 assert not any(n.lower() == b'compression' for n, v in self._params)
642 assert not any(n.lower() == b'compression' for n, v in self._params)
643 self.addparam(b'Compression', alg)
643 self.addparam(b'Compression', alg)
644 self._compengine = util.compengines.forbundletype(alg)
644 self._compengine = util.compengines.forbundletype(alg)
645 self._compopts = compopts
645 self._compopts = compopts
646
646
647 @property
647 @property
648 def nbparts(self):
648 def nbparts(self):
649 """total number of parts added to the bundler"""
649 """total number of parts added to the bundler"""
650 return len(self._parts)
650 return len(self._parts)
651
651
652 # methods used to define the bundle2 content
652 # methods used to define the bundle2 content
653 def addparam(self, name, value=None):
653 def addparam(self, name, value=None):
654 """add a stream level parameter"""
654 """add a stream level parameter"""
655 if not name:
655 if not name:
656 raise error.ProgrammingError(b'empty parameter name')
656 raise error.ProgrammingError(b'empty parameter name')
657 if name[0:1] not in pycompat.bytestr(
657 if name[0:1] not in pycompat.bytestr(
658 string.ascii_letters # pytype: disable=wrong-arg-types
658 string.ascii_letters # pytype: disable=wrong-arg-types
659 ):
659 ):
660 raise error.ProgrammingError(
660 raise error.ProgrammingError(
661 b'non letter first character: %s' % name
661 b'non letter first character: %s' % name
662 )
662 )
663 self._params.append((name, value))
663 self._params.append((name, value))
664
664
665 def addpart(self, part):
665 def addpart(self, part):
666 """add a new part to the bundle2 container
666 """add a new part to the bundle2 container
667
667
668 Parts contain the actual applicative payload.
668 Parts contain the actual applicative payload.
669 assert part.id is None
669 assert part.id is None
670 part.id = len(self._parts) # very cheap counter
670 part.id = len(self._parts) # very cheap counter
671 self._parts.append(part)
671 self._parts.append(part)
672
672
673 def newpart(self, typeid, *args, **kwargs):
673 def newpart(self, typeid, *args, **kwargs):
674 """create a new part and add it to the containers
674 """create a new part and add it to the containers
675
675
676 The part is directly added to the container. For now, this means
676 The part is directly added to the container. For now, this means
677 that any failure to properly initialize the part after calling
677 that any failure to properly initialize the part after calling
678 ``newpart`` should result in a failure of the whole bundling process.
678 ``newpart`` should result in a failure of the whole bundling process.
679
679
680 You can still fall back to manually creating and adding a part if you need better
680 You can still fall back to manually creating and adding a part if you need better
681 control."""
681 control."""
682 part = bundlepart(typeid, *args, **kwargs)
682 part = bundlepart(typeid, *args, **kwargs)
683 self.addpart(part)
683 self.addpart(part)
684 return part
684 return part
685
685
686 # methods used to generate the bundle2 stream
686 # methods used to generate the bundle2 stream
687 def getchunks(self):
687 def getchunks(self):
688 if self.ui.debugflag:
688 if self.ui.debugflag:
689 msg = [b'bundle2-output-bundle: "%s",' % self._magicstring]
689 msg = [b'bundle2-output-bundle: "%s",' % self._magicstring]
690 if self._params:
690 if self._params:
691 msg.append(b' (%i params)' % len(self._params))
691 msg.append(b' (%i params)' % len(self._params))
692 msg.append(b' %i parts total\n' % len(self._parts))
692 msg.append(b' %i parts total\n' % len(self._parts))
693 self.ui.debug(b''.join(msg))
693 self.ui.debug(b''.join(msg))
694 outdebug(self.ui, b'start emission of %s stream' % self._magicstring)
694 outdebug(self.ui, b'start emission of %s stream' % self._magicstring)
695 yield self._magicstring
695 yield self._magicstring
696 param = self._paramchunk()
696 param = self._paramchunk()
697 outdebug(self.ui, b'bundle parameter: %s' % param)
697 outdebug(self.ui, b'bundle parameter: %s' % param)
698 yield _pack(_fstreamparamsize, len(param))
698 yield _pack(_fstreamparamsize, len(param))
699 if param:
699 if param:
700 yield param
700 yield param
701 for chunk in self._compengine.compressstream(
701 for chunk in self._compengine.compressstream(
702 self._getcorechunk(), self._compopts
702 self._getcorechunk(), self._compopts
703 ):
703 ):
704 yield chunk
704 yield chunk
705
705
706 def _paramchunk(self):
706 def _paramchunk(self):
707 """return a encoded version of all stream parameters"""
707 """return a encoded version of all stream parameters"""
708 blocks = []
708 blocks = []
709 for par, value in self._params:
709 for par, value in self._params:
710 par = urlreq.quote(par)
710 par = urlreq.quote(par)
711 if value is not None:
711 if value is not None:
712 value = urlreq.quote(value)
712 value = urlreq.quote(value)
713 par = b'%s=%s' % (par, value)
713 par = b'%s=%s' % (par, value)
714 blocks.append(par)
714 blocks.append(par)
715 return b' '.join(blocks)
715 return b' '.join(blocks)
716
716
717 def _getcorechunk(self):
717 def _getcorechunk(self):
718 """yield chunk for the core part of the bundle
718 """yield chunk for the core part of the bundle
719
719
720 (all but headers and parameters)"""
720 (all but headers and parameters)"""
721 outdebug(self.ui, b'start of parts')
721 outdebug(self.ui, b'start of parts')
722 for part in self._parts:
722 for part in self._parts:
723 outdebug(self.ui, b'bundle part: "%s"' % part.type)
723 outdebug(self.ui, b'bundle part: "%s"' % part.type)
724 for chunk in part.getchunks(ui=self.ui):
724 for chunk in part.getchunks(ui=self.ui):
725 yield chunk
725 yield chunk
726 outdebug(self.ui, b'end of bundle')
726 outdebug(self.ui, b'end of bundle')
727 yield _pack(_fpartheadersize, 0)
727 yield _pack(_fpartheadersize, 0)
728
728
729 def salvageoutput(self):
729 def salvageoutput(self):
730 """return a list with a copy of all output parts in the bundle
730 """return a list with a copy of all output parts in the bundle
731
731
732 This is meant to be used during error handling to make sure we preserve
732 This is meant to be used during error handling to make sure we preserve
733 server output"""
733 server output"""
734 salvaged = []
734 salvaged = []
735 for part in self._parts:
735 for part in self._parts:
736 if part.type.startswith(b'output'):
736 if part.type.startswith(b'output'):
737 salvaged.append(part.copy())
737 salvaged.append(part.copy())
738 return salvaged
738 return salvaged
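# Editor's note: a minimal sketch of assembling a bundle with the class above
# (illustrative only; `ui` stands for any mercurial.ui.ui instance, and the
# part type and payload are arbitrary examples):
#
#   bundler = bundle20(ui)
#   bundler.newpart(b'output', data=b'hello from the server', mandatory=False)
#   raw = b''.join(bundler.getchunks())   # b'HG20', stream params, then parts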
739
739
740
740
741 class unpackermixin(object):
741 class unpackermixin(object):
742 """A mixin to extract bytes and struct data from a stream"""
742 """A mixin to extract bytes and struct data from a stream"""
743
743
744 def __init__(self, fp):
744 def __init__(self, fp):
745 self._fp = fp
745 self._fp = fp
746
746
747 def _unpack(self, format):
747 def _unpack(self, format):
748 """unpack this struct format from the stream
748 """unpack this struct format from the stream
749
749
750 This method is meant for internal usage by the bundle2 protocol only.
750 This method is meant for internal usage by the bundle2 protocol only.
751 It directly manipulates the low level stream, including bundle2 level
751 It directly manipulates the low level stream, including bundle2 level
752 instructions.
752 instructions.
753
753
754 Do not use it to implement higher-level logic or methods."""
754 Do not use it to implement higher-level logic or methods."""
755 data = self._readexact(struct.calcsize(format))
755 data = self._readexact(struct.calcsize(format))
756 return _unpack(format, data)
756 return _unpack(format, data)
757
757
758 def _readexact(self, size):
758 def _readexact(self, size):
759 """read exactly <size> bytes from the stream
759 """read exactly <size> bytes from the stream
760
760
761 This method is meant for internal usage by the bundle2 protocol only.
761 This method is meant for internal usage by the bundle2 protocol only.
761 It directly manipulates the low level stream, including bundle2 level
761 It directly manipulates the low level stream, including bundle2 level
762 instructions.
762 instructions.
764
764
765 Do not use it to implement higher-level logic or methods."""
765 Do not use it to implement higher-level logic or methods."""
766 return changegroup.readexactly(self._fp, size)
766 return changegroup.readexactly(self._fp, size)
767
767
768
768
769 def getunbundler(ui, fp, magicstring=None):
769 def getunbundler(ui, fp, magicstring=None):
770 """return a valid unbundler object for a given magicstring"""
770 """return a valid unbundler object for a given magicstring"""
771 if magicstring is None:
771 if magicstring is None:
772 magicstring = changegroup.readexactly(fp, 4)
772 magicstring = changegroup.readexactly(fp, 4)
773 magic, version = magicstring[0:2], magicstring[2:4]
773 magic, version = magicstring[0:2], magicstring[2:4]
774 if magic != b'HG':
774 if magic != b'HG':
775 ui.debug(
775 ui.debug(
776 b"error: invalid magic: %r (version %r), should be 'HG'\n"
776 b"error: invalid magic: %r (version %r), should be 'HG'\n"
777 % (magic, version)
777 % (magic, version)
778 )
778 )
779 raise error.Abort(_(b'not a Mercurial bundle'))
779 raise error.Abort(_(b'not a Mercurial bundle'))
780 unbundlerclass = formatmap.get(version)
780 unbundlerclass = formatmap.get(version)
781 if unbundlerclass is None:
781 if unbundlerclass is None:
782 raise error.Abort(_(b'unknown bundle version %s') % version)
782 raise error.Abort(_(b'unknown bundle version %s') % version)
783 unbundler = unbundlerclass(ui, fp)
783 unbundler = unbundlerclass(ui, fp)
784 indebug(ui, b'start processing of %s stream' % magicstring)
784 indebug(ui, b'start processing of %s stream' % magicstring)
785 return unbundler
785 return unbundler
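# Editor's note: reading such a stream back (sketch, continuing the bundle20
# example above; `raw` and `ui` are assumptions carried over from it):
#
#   import io
#   unbundler = getunbundler(ui, io.BytesIO(raw))
#   for part in unbundler.iterparts():
#       ui.debug(b'saw part %s (id %d)\n' % (part.type, part.id))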
786
786
787
787
788 class unbundle20(unpackermixin):
788 class unbundle20(unpackermixin):
789 """interpret a bundle2 stream
789 """interpret a bundle2 stream
790
790
791 This class is fed with a binary stream and yields parts through its
791 This class is fed with a binary stream and yields parts through its
792 `iterparts` methods."""
792 `iterparts` methods."""
793
793
794 _magicstring = b'HG20'
794 _magicstring = b'HG20'
795
795
796 def __init__(self, ui, fp):
796 def __init__(self, ui, fp):
797 """If header is specified, we do not read it out of the stream."""
797 """If header is specified, we do not read it out of the stream."""
798 self.ui = ui
798 self.ui = ui
799 self._compengine = util.compengines.forbundletype(b'UN')
799 self._compengine = util.compengines.forbundletype(b'UN')
800 self._compressed = None
800 self._compressed = None
801 super(unbundle20, self).__init__(fp)
801 super(unbundle20, self).__init__(fp)
802
802
803 @util.propertycache
803 @util.propertycache
804 def params(self):
804 def params(self):
805 """dictionary of stream level parameters"""
805 """dictionary of stream level parameters"""
806 indebug(self.ui, b'reading bundle2 stream parameters')
806 indebug(self.ui, b'reading bundle2 stream parameters')
807 params = {}
807 params = {}
808 paramssize = self._unpack(_fstreamparamsize)[0]
808 paramssize = self._unpack(_fstreamparamsize)[0]
809 if paramssize < 0:
809 if paramssize < 0:
810 raise error.BundleValueError(
810 raise error.BundleValueError(
811 b'negative bundle param size: %i' % paramssize
811 b'negative bundle param size: %i' % paramssize
812 )
812 )
813 if paramssize:
813 if paramssize:
814 params = self._readexact(paramssize)
814 params = self._readexact(paramssize)
815 params = self._processallparams(params)
815 params = self._processallparams(params)
816 return params
816 return params
817
817
818 def _processallparams(self, paramsblock):
818 def _processallparams(self, paramsblock):
819 """"""
819 """"""
820 params = util.sortdict()
820 params = util.sortdict()
821 for p in paramsblock.split(b' '):
821 for p in paramsblock.split(b' '):
822 p = p.split(b'=', 1)
822 p = p.split(b'=', 1)
823 p = [urlreq.unquote(i) for i in p]
823 p = [urlreq.unquote(i) for i in p]
824 if len(p) < 2:
824 if len(p) < 2:
825 p.append(None)
825 p.append(None)
826 self._processparam(*p)
826 self._processparam(*p)
827 params[p[0]] = p[1]
827 params[p[0]] = p[1]
828 return params
828 return params
829
829
830 def _processparam(self, name, value):
830 def _processparam(self, name, value):
831 """process a parameter, applying its effect if needed
831 """process a parameter, applying its effect if needed
832
832
833 Parameters starting with a lower case letter are advisory and will be
833 Parameters starting with a lower case letter are advisory and will be
834 ignored when unknown. Those starting with an upper case letter are
834 ignored when unknown. Those starting with an upper case letter are
835 mandatory, and this function will raise a KeyError when they are unknown.
835 mandatory, and this function will raise a KeyError when they are unknown.
836
836
837 Note: no options are currently supported. Any input will either be
837 Note: no options are currently supported. Any input will either be
838 ignored or cause a failure.
838 ignored or cause a failure.
839 """
839 """
840 if not name:
840 if not name:
841 raise ValueError('empty parameter name')
841 raise ValueError('empty parameter name')
842 if name[0:1] not in pycompat.bytestr(
842 if name[0:1] not in pycompat.bytestr(
843 string.ascii_letters # pytype: disable=wrong-arg-types
843 string.ascii_letters # pytype: disable=wrong-arg-types
844 ):
844 ):
845 raise ValueError('non letter first character: %s' % name)
845 raise ValueError('non letter first character: %s' % name)
846 try:
846 try:
847 handler = b2streamparamsmap[name.lower()]
847 handler = b2streamparamsmap[name.lower()]
848 except KeyError:
848 except KeyError:
849 if name[0:1].islower():
849 if name[0:1].islower():
850 indebug(self.ui, b"ignoring unknown parameter %s" % name)
850 indebug(self.ui, b"ignoring unknown parameter %s" % name)
851 else:
851 else:
852 raise error.BundleUnknownFeatureError(params=(name,))
852 raise error.BundleUnknownFeatureError(params=(name,))
853 else:
853 else:
854 handler(self, name, value)
854 handler(self, name, value)
855
855
856 def _forwardchunks(self):
856 def _forwardchunks(self):
857 """utility to transfer a bundle2 as binary
857 """utility to transfer a bundle2 as binary
858
858
859 This is made necessary by the fact that the 'getbundle' command over 'ssh'
859 This is made necessary by the fact that the 'getbundle' command over 'ssh'
860 has no way to know when the reply ends, relying on the bundle being
860 has no way to know when the reply ends, relying on the bundle being
861 interpreted to find its end. This is terrible and we are sorry, but we
861 interpreted to find its end. This is terrible and we are sorry, but we
862 needed to move forward to get general delta enabled.
862 needed to move forward to get general delta enabled.
863 """
863 """
864 yield self._magicstring
864 yield self._magicstring
865 assert 'params' not in vars(self)
865 assert 'params' not in vars(self)
866 paramssize = self._unpack(_fstreamparamsize)[0]
866 paramssize = self._unpack(_fstreamparamsize)[0]
867 if paramssize < 0:
867 if paramssize < 0:
868 raise error.BundleValueError(
868 raise error.BundleValueError(
869 b'negative bundle param size: %i' % paramssize
869 b'negative bundle param size: %i' % paramssize
870 )
870 )
871 if paramssize:
871 if paramssize:
872 params = self._readexact(paramssize)
872 params = self._readexact(paramssize)
873 self._processallparams(params)
873 self._processallparams(params)
874 # The payload itself is decompressed below, so drop
874 # The payload itself is decompressed below, so drop
875 # the compression parameter passed down to compensate.
875 # the compression parameter passed down to compensate.
876 outparams = []
876 outparams = []
877 for p in params.split(b' '):
877 for p in params.split(b' '):
878 k, v = p.split(b'=', 1)
878 k, v = p.split(b'=', 1)
879 if k.lower() != b'compression':
879 if k.lower() != b'compression':
880 outparams.append(p)
880 outparams.append(p)
881 outparams = b' '.join(outparams)
881 outparams = b' '.join(outparams)
882 yield _pack(_fstreamparamsize, len(outparams))
882 yield _pack(_fstreamparamsize, len(outparams))
883 yield outparams
883 yield outparams
884 else:
884 else:
885 yield _pack(_fstreamparamsize, paramssize)
885 yield _pack(_fstreamparamsize, paramssize)
886 # From there, payload might need to be decompressed
886 # From there, payload might need to be decompressed
887 self._fp = self._compengine.decompressorreader(self._fp)
887 self._fp = self._compengine.decompressorreader(self._fp)
888 emptycount = 0
888 emptycount = 0
889 while emptycount < 2:
889 while emptycount < 2:
890 # so we can brainlessly loop
890 # so we can brainlessly loop
891 assert _fpartheadersize == _fpayloadsize
891 assert _fpartheadersize == _fpayloadsize
892 size = self._unpack(_fpartheadersize)[0]
892 size = self._unpack(_fpartheadersize)[0]
893 yield _pack(_fpartheadersize, size)
893 yield _pack(_fpartheadersize, size)
894 if size:
894 if size:
895 emptycount = 0
895 emptycount = 0
896 else:
896 else:
897 emptycount += 1
897 emptycount += 1
898 continue
898 continue
899 if size == flaginterrupt:
899 if size == flaginterrupt:
900 continue
900 continue
901 elif size < 0:
901 elif size < 0:
902 raise error.BundleValueError(b'negative chunk size: %i')
902 raise error.BundleValueError(b'negative chunk size: %i')
903 yield self._readexact(size)
903 yield self._readexact(size)
904
904
905 def iterparts(self, seekable=False):
905 def iterparts(self, seekable=False):
906 """yield all parts contained in the stream"""
906 """yield all parts contained in the stream"""
907 cls = seekableunbundlepart if seekable else unbundlepart
907 cls = seekableunbundlepart if seekable else unbundlepart
908 # make sure params have been loaded
908 # make sure params have been loaded
909 self.params
909 self.params
910 # From there, the payload needs to be decompressed
910 # From there, the payload needs to be decompressed
911 self._fp = self._compengine.decompressorreader(self._fp)
911 self._fp = self._compengine.decompressorreader(self._fp)
912 indebug(self.ui, b'start extraction of bundle2 parts')
912 indebug(self.ui, b'start extraction of bundle2 parts')
913 headerblock = self._readpartheader()
913 headerblock = self._readpartheader()
914 while headerblock is not None:
914 while headerblock is not None:
915 part = cls(self.ui, headerblock, self._fp)
915 part = cls(self.ui, headerblock, self._fp)
916 yield part
916 yield part
917 # Ensure part is fully consumed so we can start reading the next
917 # Ensure part is fully consumed so we can start reading the next
918 # part.
918 # part.
919 part.consume()
919 part.consume()
920
920
921 headerblock = self._readpartheader()
921 headerblock = self._readpartheader()
922 indebug(self.ui, b'end of bundle2 stream')
922 indebug(self.ui, b'end of bundle2 stream')
923
923
924 def _readpartheader(self):
924 def _readpartheader(self):
925 """reads a part header size and return the bytes blob
925 """reads a part header size and return the bytes blob
926
926
927 returns None if empty"""
927 returns None if empty"""
928 headersize = self._unpack(_fpartheadersize)[0]
928 headersize = self._unpack(_fpartheadersize)[0]
929 if headersize < 0:
929 if headersize < 0:
930 raise error.BundleValueError(
930 raise error.BundleValueError(
931 b'negative part header size: %i' % headersize
931 b'negative part header size: %i' % headersize
932 )
932 )
933 indebug(self.ui, b'part header size: %i' % headersize)
933 indebug(self.ui, b'part header size: %i' % headersize)
934 if headersize:
934 if headersize:
935 return self._readexact(headersize)
935 return self._readexact(headersize)
936 return None
936 return None
937
937
938 def compressed(self):
938 def compressed(self):
939 self.params # load params
939 self.params # load params
940 return self._compressed
940 return self._compressed
941
941
942 def close(self):
942 def close(self):
943 """close underlying file"""
943 """close underlying file"""
944 if util.safehasattr(self._fp, 'close'):
944 if util.safehasattr(self._fp, 'close'):
945 return self._fp.close()
945 return self._fp.close()
946
946
947
947
948 formatmap = {b'20': unbundle20}
948 formatmap = {b'20': unbundle20}
949
949
950 b2streamparamsmap = {}
950 b2streamparamsmap = {}
951
951
952
952
953 def b2streamparamhandler(name):
953 def b2streamparamhandler(name):
954 """register a handler for a stream level parameter"""
954 """register a handler for a stream level parameter"""
955
955
956 def decorator(func):
956 def decorator(func):
957 assert name not in b2streamparamsmap
957 assert name not in b2streamparamsmap
958 b2streamparamsmap[name] = func
958 b2streamparamsmap[name] = func
959 return func
959 return func
960
960
961 return decorator
961 return decorator
962
962
963
963
964 @b2streamparamhandler(b'compression')
964 @b2streamparamhandler(b'compression')
965 def processcompression(unbundler, param, value):
965 def processcompression(unbundler, param, value):
966 """read compression parameter and install payload decompression"""
966 """read compression parameter and install payload decompression"""
967 if value not in util.compengines.supportedbundletypes:
967 if value not in util.compengines.supportedbundletypes:
968 raise error.BundleUnknownFeatureError(params=(param,), values=(value,))
968 raise error.BundleUnknownFeatureError(params=(param,), values=(value,))
969 unbundler._compengine = util.compengines.forbundletype(value)
969 unbundler._compengine = util.compengines.forbundletype(value)
970 if value is not None:
970 if value is not None:
971 unbundler._compressed = True
971 unbundler._compressed = True
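# Editor's note: registering additional stream parameters follows the same
# pattern (sketch; `experiment` and processexperiment are hypothetical names,
# not part of this module). A lower-case name makes the parameter advisory,
# so peers without the handler simply ignore it:
#
#   @b2streamparamhandler(b'experiment')
#   def processexperiment(unbundler, param, value):
#       indebug(unbundler.ui, b'experiment parameter: %s' % (value or b''))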
972
972
973
973
974 class bundlepart(object):
974 class bundlepart(object):
975 """A bundle2 part contains application level payload
975 """A bundle2 part contains application level payload
976
976
977 The part `type` is used to route the part to the application level
977 The part `type` is used to route the part to the application level
978 handler.
978 handler.
979
979
980 The part payload is contained in ``part.data``. It could be raw bytes or a
980 The part payload is contained in ``part.data``. It could be raw bytes or a
981 generator of byte chunks.
981 generator of byte chunks.
982
982
983 You can add parameters to the part using the ``addparam`` method.
983 You can add parameters to the part using the ``addparam`` method.
984 Parameters can be either mandatory (default) or advisory. Remote side
984 Parameters can be either mandatory (default) or advisory. Remote side
985 should be able to safely ignore the advisory ones.
985 should be able to safely ignore the advisory ones.
986
986
987 Neither data nor parameters can be modified after generation has begun.
987 Neither data nor parameters can be modified after generation has begun.
988 """
988 """
989
989
990 def __init__(
990 def __init__(
991 self,
991 self,
992 parttype,
992 parttype,
993 mandatoryparams=(),
993 mandatoryparams=(),
994 advisoryparams=(),
994 advisoryparams=(),
995 data=b'',
995 data=b'',
996 mandatory=True,
996 mandatory=True,
997 ):
997 ):
998 validateparttype(parttype)
998 validateparttype(parttype)
999 self.id = None
999 self.id = None
1000 self.type = parttype
1000 self.type = parttype
1001 self._data = data
1001 self._data = data
1002 self._mandatoryparams = list(mandatoryparams)
1002 self._mandatoryparams = list(mandatoryparams)
1003 self._advisoryparams = list(advisoryparams)
1003 self._advisoryparams = list(advisoryparams)
1004 # checking for duplicated entries
1004 # checking for duplicated entries
1005 self._seenparams = set()
1005 self._seenparams = set()
1006 for pname, __ in self._mandatoryparams + self._advisoryparams:
1006 for pname, __ in self._mandatoryparams + self._advisoryparams:
1007 if pname in self._seenparams:
1007 if pname in self._seenparams:
1008 raise error.ProgrammingError(b'duplicated params: %s' % pname)
1008 raise error.ProgrammingError(b'duplicated params: %s' % pname)
1009 self._seenparams.add(pname)
1009 self._seenparams.add(pname)
1010 # status of the part's generation:
1010 # status of the part's generation:
1011 # - None: not started,
1011 # - None: not started,
1012 # - False: currently generated,
1012 # - False: currently generated,
1013 # - True: generation done.
1013 # - True: generation done.
1014 self._generated = None
1014 self._generated = None
1015 self.mandatory = mandatory
1015 self.mandatory = mandatory
1016
1016
1017 def __repr__(self):
1017 def __repr__(self):
1018 cls = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
1018 cls = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
1019 return '<%s object at %x; id: %s; type: %s; mandatory: %s>' % (
1019 return '<%s object at %x; id: %s; type: %s; mandatory: %s>' % (
1020 cls,
1020 cls,
1021 id(self),
1021 id(self),
1022 self.id,
1022 self.id,
1023 self.type,
1023 self.type,
1024 self.mandatory,
1024 self.mandatory,
1025 )
1025 )
1026
1026
1027 def copy(self):
1027 def copy(self):
1028 """return a copy of the part
1028 """return a copy of the part
1029
1029
1030 The new part has the very same content but no partid assigned yet.
1030 The new part has the very same content but no partid assigned yet.
1031 Parts with generated data cannot be copied."""
1031 Parts with generated data cannot be copied."""
1032 assert not util.safehasattr(self.data, 'next')
1032 assert not util.safehasattr(self.data, 'next')
1033 return self.__class__(
1033 return self.__class__(
1034 self.type,
1034 self.type,
1035 self._mandatoryparams,
1035 self._mandatoryparams,
1036 self._advisoryparams,
1036 self._advisoryparams,
1037 self._data,
1037 self._data,
1038 self.mandatory,
1038 self.mandatory,
1039 )
1039 )
1040
1040
1041 # methods used to define the part content
1041 # methods used to define the part content
1042 @property
1042 @property
1043 def data(self):
1043 def data(self):
1044 return self._data
1044 return self._data
1045
1045
1046 @data.setter
1046 @data.setter
1047 def data(self, data):
1047 def data(self, data):
1048 if self._generated is not None:
1048 if self._generated is not None:
1049 raise error.ReadOnlyPartError(b'part is being generated')
1049 raise error.ReadOnlyPartError(b'part is being generated')
1050 self._data = data
1050 self._data = data
1051
1051
1052 @property
1052 @property
1053 def mandatoryparams(self):
1053 def mandatoryparams(self):
1054 # make it an immutable tuple to force people through ``addparam``
1054 # make it an immutable tuple to force people through ``addparam``
1055 return tuple(self._mandatoryparams)
1055 return tuple(self._mandatoryparams)
1056
1056
1057 @property
1057 @property
1058 def advisoryparams(self):
1058 def advisoryparams(self):
1059 # make it an immutable tuple to force people through ``addparam``
1059 # make it an immutable tuple to force people through ``addparam``
1060 return tuple(self._advisoryparams)
1060 return tuple(self._advisoryparams)
1061
1061
1062 def addparam(self, name, value=b'', mandatory=True):
1062 def addparam(self, name, value=b'', mandatory=True):
1063 """add a parameter to the part
1063 """add a parameter to the part
1064
1064
1065 If 'mandatory' is set to True, the remote handler must claim support
1065 If 'mandatory' is set to True, the remote handler must claim support
1066 for this parameter or the unbundling will be aborted.
1066 for this parameter or the unbundling will be aborted.
1067
1067
1068 The 'name' and 'value' cannot exceed 255 bytes each.
1068 The 'name' and 'value' cannot exceed 255 bytes each.
1069 """
1069 """
1070 if self._generated is not None:
1070 if self._generated is not None:
1071 raise error.ReadOnlyPartError(b'part is being generated')
1071 raise error.ReadOnlyPartError(b'part is being generated')
1072 if name in self._seenparams:
1072 if name in self._seenparams:
1073 raise ValueError(b'duplicated params: %s' % name)
1073 raise ValueError(b'duplicated params: %s' % name)
1074 self._seenparams.add(name)
1074 self._seenparams.add(name)
1075 params = self._advisoryparams
1075 params = self._advisoryparams
1076 if mandatory:
1076 if mandatory:
1077 params = self._mandatoryparams
1077 params = self._mandatoryparams
1078 params.append((name, value))
1078 params.append((name, value))
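# Editor's note: illustrative use of the setters above on a fresh part
# (sketch; the values mirror the 'output' reply parts built elsewhere in
# this module):
#
#   part = bundlepart(b'output', data=b'some text', mandatory=False)
#   part.addparam(b'in-reply-to', b'0', mandatory=False)
#   part.addparam(b'in-reply-to', b'1')   # raises ValueError: duplicated params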
1079
1079
1080 # methods used to generate the bundle2 stream
1080 # methods used to generate the bundle2 stream
1081 def getchunks(self, ui):
1081 def getchunks(self, ui):
1082 if self._generated is not None:
1082 if self._generated is not None:
1083 raise error.ProgrammingError(b'part can only be consumed once')
1083 raise error.ProgrammingError(b'part can only be consumed once')
1084 self._generated = False
1084 self._generated = False
1085
1085
1086 if ui.debugflag:
1086 if ui.debugflag:
1087 msg = [b'bundle2-output-part: "%s"' % self.type]
1087 msg = [b'bundle2-output-part: "%s"' % self.type]
1088 if not self.mandatory:
1088 if not self.mandatory:
1089 msg.append(b' (advisory)')
1089 msg.append(b' (advisory)')
1090 nbmp = len(self.mandatoryparams)
1090 nbmp = len(self.mandatoryparams)
1091 nbap = len(self.advisoryparams)
1091 nbap = len(self.advisoryparams)
1092 if nbmp or nbap:
1092 if nbmp or nbap:
1093 msg.append(b' (params:')
1093 msg.append(b' (params:')
1094 if nbmp:
1094 if nbmp:
1095 msg.append(b' %i mandatory' % nbmp)
1095 msg.append(b' %i mandatory' % nbmp)
1096 if nbap:
1096 if nbap:
1097 msg.append(b' %i advisory' % nbap)
1097 msg.append(b' %i advisory' % nbap)
1098 msg.append(b')')
1098 msg.append(b')')
1099 if not self.data:
1099 if not self.data:
1100 msg.append(b' empty payload')
1100 msg.append(b' empty payload')
1101 elif util.safehasattr(self.data, 'next') or util.safehasattr(
1101 elif util.safehasattr(self.data, 'next') or util.safehasattr(
1102 self.data, b'__next__'
1102 self.data, b'__next__'
1103 ):
1103 ):
1104 msg.append(b' streamed payload')
1104 msg.append(b' streamed payload')
1105 else:
1105 else:
1106 msg.append(b' %i bytes payload' % len(self.data))
1106 msg.append(b' %i bytes payload' % len(self.data))
1107 msg.append(b'\n')
1107 msg.append(b'\n')
1108 ui.debug(b''.join(msg))
1108 ui.debug(b''.join(msg))
1109
1109
1110 #### header
1110 #### header
1111 if self.mandatory:
1111 if self.mandatory:
1112 parttype = self.type.upper()
1112 parttype = self.type.upper()
1113 else:
1113 else:
1114 parttype = self.type.lower()
1114 parttype = self.type.lower()
1115 outdebug(ui, b'part %s: "%s"' % (pycompat.bytestr(self.id), parttype))
1115 outdebug(ui, b'part %s: "%s"' % (pycompat.bytestr(self.id), parttype))
1116 ## parttype
1116 ## parttype
1117 header = [
1117 header = [
1118 _pack(_fparttypesize, len(parttype)),
1118 _pack(_fparttypesize, len(parttype)),
1119 parttype,
1119 parttype,
1120 _pack(_fpartid, self.id),
1120 _pack(_fpartid, self.id),
1121 ]
1121 ]
1122 ## parameters
1122 ## parameters
1123 # count
1123 # count
1124 manpar = self.mandatoryparams
1124 manpar = self.mandatoryparams
1125 advpar = self.advisoryparams
1125 advpar = self.advisoryparams
1126 header.append(_pack(_fpartparamcount, len(manpar), len(advpar)))
1126 header.append(_pack(_fpartparamcount, len(manpar), len(advpar)))
1127 # size
1127 # size
1128 parsizes = []
1128 parsizes = []
1129 for key, value in manpar:
1129 for key, value in manpar:
1130 parsizes.append(len(key))
1130 parsizes.append(len(key))
1131 parsizes.append(len(value))
1131 parsizes.append(len(value))
1132 for key, value in advpar:
1132 for key, value in advpar:
1133 parsizes.append(len(key))
1133 parsizes.append(len(key))
1134 parsizes.append(len(value))
1134 parsizes.append(len(value))
1135 paramsizes = _pack(_makefpartparamsizes(len(parsizes) // 2), *parsizes)
1135 paramsizes = _pack(_makefpartparamsizes(len(parsizes) // 2), *parsizes)
1136 header.append(paramsizes)
1136 header.append(paramsizes)
1137 # key, value
1137 # key, value
1138 for key, value in manpar:
1138 for key, value in manpar:
1139 header.append(key)
1139 header.append(key)
1140 header.append(value)
1140 header.append(value)
1141 for key, value in advpar:
1141 for key, value in advpar:
1142 header.append(key)
1142 header.append(key)
1143 header.append(value)
1143 header.append(value)
1144 ## finalize header
1144 ## finalize header
1145 try:
1145 try:
1146 headerchunk = b''.join(header)
1146 headerchunk = b''.join(header)
1147 except TypeError:
1147 except TypeError:
1148 raise TypeError(
1148 raise TypeError(
1149 'Found a non-bytes trying to '
1149 'Found a non-bytes trying to '
1150 'build bundle part header: %r' % header
1150 'build bundle part header: %r' % header
1151 )
1151 )
1152 outdebug(ui, b'header chunk size: %i' % len(headerchunk))
1152 outdebug(ui, b'header chunk size: %i' % len(headerchunk))
1153 yield _pack(_fpartheadersize, len(headerchunk))
1153 yield _pack(_fpartheadersize, len(headerchunk))
1154 yield headerchunk
1154 yield headerchunk
1155 ## payload
1155 ## payload
1156 try:
1156 try:
1157 for chunk in self._payloadchunks():
1157 for chunk in self._payloadchunks():
1158 outdebug(ui, b'payload chunk size: %i' % len(chunk))
1158 outdebug(ui, b'payload chunk size: %i' % len(chunk))
1159 yield _pack(_fpayloadsize, len(chunk))
1159 yield _pack(_fpayloadsize, len(chunk))
1160 yield chunk
1160 yield chunk
1161 except GeneratorExit:
1161 except GeneratorExit:
1162 # GeneratorExit means that nobody is listening for our
1162 # GeneratorExit means that nobody is listening for our
1163 # results anyway, so just bail quickly rather than trying
1163 # results anyway, so just bail quickly rather than trying
1164 # to produce an error part.
1164 # to produce an error part.
1165 ui.debug(b'bundle2-generatorexit\n')
1165 ui.debug(b'bundle2-generatorexit\n')
1166 raise
1166 raise
1167 except BaseException as exc:
1167 except BaseException as exc:
1168 bexc = stringutil.forcebytestr(exc)
1168 bexc = stringutil.forcebytestr(exc)
1169 # backup exception data for later
1169 # backup exception data for later
1170 ui.debug(
1170 ui.debug(
1171 b'bundle2-input-stream-interrupt: encoding exception %s' % bexc
1171 b'bundle2-input-stream-interrupt: encoding exception %s' % bexc
1172 )
1172 )
1173 tb = sys.exc_info()[2]
1173 tb = sys.exc_info()[2]
1174 msg = b'unexpected error: %s' % bexc
1174 msg = b'unexpected error: %s' % bexc
1175 interpart = bundlepart(
1175 interpart = bundlepart(
1176 b'error:abort', [(b'message', msg)], mandatory=False
1176 b'error:abort', [(b'message', msg)], mandatory=False
1177 )
1177 )
1178 interpart.id = 0
1178 interpart.id = 0
1179 yield _pack(_fpayloadsize, -1)
1179 yield _pack(_fpayloadsize, -1)
1180 for chunk in interpart.getchunks(ui=ui):
1180 for chunk in interpart.getchunks(ui=ui):
1181 yield chunk
1181 yield chunk
1182 outdebug(ui, b'closing payload chunk')
1182 outdebug(ui, b'closing payload chunk')
1183 # abort current part payload
1183 # abort current part payload
1184 yield _pack(_fpayloadsize, 0)
1184 yield _pack(_fpayloadsize, 0)
1185 pycompat.raisewithtb(exc, tb)
1185 pycompat.raisewithtb(exc, tb)
1186 # end of payload
1186 # end of payload
1187 outdebug(ui, b'closing payload chunk')
1187 outdebug(ui, b'closing payload chunk')
1188 yield _pack(_fpayloadsize, 0)
1188 yield _pack(_fpayloadsize, 0)
1189 self._generated = True
1189 self._generated = True
1190
1190
1191 def _payloadchunks(self):
1191 def _payloadchunks(self):
1192 """yield chunks of a the part payload
1192 """yield chunks of a the part payload
1193
1193
1194 Exists to handle the different methods to provide data to a part."""
1194 Exists to handle the different methods to provide data to a part."""
1195 # we only support fixed size data now.
1195 # we only support fixed size data now.
1196 # This will be improved in the future.
1196 # This will be improved in the future.
1197 if util.safehasattr(self.data, 'next') or util.safehasattr(
1197 if util.safehasattr(self.data, 'next') or util.safehasattr(
1198 self.data, b'__next__'
1198 self.data, b'__next__'
1199 ):
1199 ):
1200 buff = util.chunkbuffer(self.data)
1200 buff = util.chunkbuffer(self.data)
1201 chunk = buff.read(preferedchunksize)
1201 chunk = buff.read(preferedchunksize)
1202 while chunk:
1202 while chunk:
1203 yield chunk
1203 yield chunk
1204 chunk = buff.read(preferedchunksize)
1204 chunk = buff.read(preferedchunksize)
1205 elif len(self.data):
1205 elif len(self.data):
1206 yield self.data
1206 yield self.data
1207
1207
1208
1208
1209 flaginterrupt = -1
1209 flaginterrupt = -1
1210
1210
1211
1211
1212 class interrupthandler(unpackermixin):
1212 class interrupthandler(unpackermixin):
1213 """read one part and process it with restricted capability
1213 """read one part and process it with restricted capability
1214
1214
1215 This allows transmitting an exception raised on the producer side during
1215 This allows transmitting an exception raised on the producer side during
1216 part iteration while the consumer is reading a part.
1216 part iteration while the consumer is reading a part.
1217
1217
1218 Parts processed in this manner only have access to a ui object."""
1218 Parts processed in this manner only have access to a ui object."""
1219
1219
1220 def __init__(self, ui, fp):
1220 def __init__(self, ui, fp):
1221 super(interrupthandler, self).__init__(fp)
1221 super(interrupthandler, self).__init__(fp)
1222 self.ui = ui
1222 self.ui = ui
1223
1223
1224 def _readpartheader(self):
1224 def _readpartheader(self):
1225 """reads a part header size and return the bytes blob
1225 """reads a part header size and return the bytes blob
1226
1226
1227 returns None if empty"""
1227 returns None if empty"""
1228 headersize = self._unpack(_fpartheadersize)[0]
1228 headersize = self._unpack(_fpartheadersize)[0]
1229 if headersize < 0:
1229 if headersize < 0:
1230 raise error.BundleValueError(
1230 raise error.BundleValueError(
1231 b'negative part header size: %i' % headersize
1231 b'negative part header size: %i' % headersize
1232 )
1232 )
1233 indebug(self.ui, b'part header size: %i\n' % headersize)
1233 indebug(self.ui, b'part header size: %i\n' % headersize)
1234 if headersize:
1234 if headersize:
1235 return self._readexact(headersize)
1235 return self._readexact(headersize)
1236 return None
1236 return None
1237
1237
1238 def __call__(self):
1238 def __call__(self):
1239
1239
1240 self.ui.debug(
1240 self.ui.debug(
1241 b'bundle2-input-stream-interrupt: opening out of band context\n'
1241 b'bundle2-input-stream-interrupt: opening out of band context\n'
1242 )
1242 )
1243 indebug(self.ui, b'bundle2 stream interruption, looking for a part.')
1243 indebug(self.ui, b'bundle2 stream interruption, looking for a part.')
1244 headerblock = self._readpartheader()
1244 headerblock = self._readpartheader()
1245 if headerblock is None:
1245 if headerblock is None:
1246 indebug(self.ui, b'no part found during interruption.')
1246 indebug(self.ui, b'no part found during interruption.')
1247 return
1247 return
1248 part = unbundlepart(self.ui, headerblock, self._fp)
1248 part = unbundlepart(self.ui, headerblock, self._fp)
1249 op = interruptoperation(self.ui)
1249 op = interruptoperation(self.ui)
1250 hardabort = False
1250 hardabort = False
1251 try:
1251 try:
1252 _processpart(op, part)
1252 _processpart(op, part)
1253 except (SystemExit, KeyboardInterrupt):
1253 except (SystemExit, KeyboardInterrupt):
1254 hardabort = True
1254 hardabort = True
1255 raise
1255 raise
1256 finally:
1256 finally:
1257 if not hardabort:
1257 if not hardabort:
1258 part.consume()
1258 part.consume()
1259 self.ui.debug(
1259 self.ui.debug(
1260 b'bundle2-input-stream-interrupt: closing out of band context\n'
1260 b'bundle2-input-stream-interrupt: closing out of band context\n'
1261 )
1261 )
1262
1262
1263
1263
1264 class interruptoperation(object):
1264 class interruptoperation(object):
1265 """A limited operation to be use by part handler during interruption
1265 """A limited operation to be use by part handler during interruption
1266
1266
1267 It only has access to a ui object.
1267 It only has access to a ui object.
1268 """
1268 """
1269
1269
1270 def __init__(self, ui):
1270 def __init__(self, ui):
1271 self.ui = ui
1271 self.ui = ui
1272 self.reply = None
1272 self.reply = None
1273 self.captureoutput = False
1273 self.captureoutput = False
1274
1274
1275 @property
1275 @property
1276 def repo(self):
1276 def repo(self):
1277 raise error.ProgrammingError(b'no repo access from stream interruption')
1277 raise error.ProgrammingError(b'no repo access from stream interruption')
1278
1278
1279 def gettransaction(self):
1279 def gettransaction(self):
1280 raise TransactionUnavailable(b'no repo access from stream interruption')
1280 raise TransactionUnavailable(b'no repo access from stream interruption')
1281
1281
1282
1282
1283 def decodepayloadchunks(ui, fh):
1283 def decodepayloadchunks(ui, fh):
1284 """Reads bundle2 part payload data into chunks.
1284 """Reads bundle2 part payload data into chunks.
1285
1285
1286 Part payload data consists of framed chunks. This function takes
1286 Part payload data consists of framed chunks. This function takes
1287 a file handle and emits those chunks.
1287 a file handle and emits those chunks.
1288 """
1288 """
1289 dolog = ui.configbool(b'devel', b'bundle2.debug')
1289 dolog = ui.configbool(b'devel', b'bundle2.debug')
1290 debug = ui.debug
1290 debug = ui.debug
1291
1291
1292 headerstruct = struct.Struct(_fpayloadsize)
1292 headerstruct = struct.Struct(_fpayloadsize)
1293 headersize = headerstruct.size
1293 headersize = headerstruct.size
1294 unpack = headerstruct.unpack
1294 unpack = headerstruct.unpack
1295
1295
1296 readexactly = changegroup.readexactly
1296 readexactly = changegroup.readexactly
1297 read = fh.read
1297 read = fh.read
1298
1298
1299 chunksize = unpack(readexactly(fh, headersize))[0]
1299 chunksize = unpack(readexactly(fh, headersize))[0]
1300 indebug(ui, b'payload chunk size: %i' % chunksize)
1300 indebug(ui, b'payload chunk size: %i' % chunksize)
1301
1301
1302 # changegroup.readexactly() is inlined below for performance.
1302 # changegroup.readexactly() is inlined below for performance.
1303 while chunksize:
1303 while chunksize:
1304 if chunksize >= 0:
1304 if chunksize >= 0:
1305 s = read(chunksize)
1305 s = read(chunksize)
1306 if len(s) < chunksize:
1306 if len(s) < chunksize:
1307 raise error.Abort(
1307 raise error.Abort(
1308 _(
1308 _(
1309 b'stream ended unexpectedly '
1309 b'stream ended unexpectedly '
1310 b' (got %d bytes, expected %d)'
1310 b' (got %d bytes, expected %d)'
1311 )
1311 )
1312 % (len(s), chunksize)
1312 % (len(s), chunksize)
1313 )
1313 )
1314
1314
1315 yield s
1315 yield s
1316 elif chunksize == flaginterrupt:
1316 elif chunksize == flaginterrupt:
1317 # Interrupt "signal" detected. The regular stream is interrupted
1317 # Interrupt "signal" detected. The regular stream is interrupted
1318 # and a bundle2 part follows. Consume it.
1318 # and a bundle2 part follows. Consume it.
1319 interrupthandler(ui, fh)()
1319 interrupthandler(ui, fh)()
1320 else:
1320 else:
1321 raise error.BundleValueError(
1321 raise error.BundleValueError(
1322 b'negative payload chunk size: %s' % chunksize
1322 b'negative payload chunk size: %s' % chunksize
1323 )
1323 )
1324
1324
1325 s = read(headersize)
1325 s = read(headersize)
1326 if len(s) < headersize:
1326 if len(s) < headersize:
1327 raise error.Abort(
1327 raise error.Abort(
1328 _(b'stream ended unexpectedly (got %d bytes, expected %d)')
1328 _(b'stream ended unexpectedly (got %d bytes, expected %d)')
1329 % (len(s), chunksize)
1329 % (len(s), chunksize)
1330 )
1330 )
1331
1331
1332 chunksize = unpack(s)[0]
1332 chunksize = unpack(s)[0]
1333
1333
1334 # indebug() inlined for performance.
1334 # indebug() inlined for performance.
1335 if dolog:
1335 if dolog:
1336 debug(b'bundle2-input: payload chunk size: %i\n' % chunksize)
1336 debug(b'bundle2-input: payload chunk size: %i\n' % chunksize)
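# Editor's note: the framing consumed above, written out as a sketch. A part
# payload is a sequence of int32-size-prefixed chunks ending with a zero
# size; a size of flaginterrupt (-1) instead means an out-of-band part
# follows and is handled by interrupthandler() before normal reading resumes:
#
#   import io
#   framed = _pack(_fpayloadsize, 5) + b'hello' + _pack(_fpayloadsize, 0)
#   assert b''.join(decodepayloadchunks(ui, io.BytesIO(framed))) == b'hello'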
1337
1337
1338
1338
1339 class unbundlepart(unpackermixin):
1339 class unbundlepart(unpackermixin):
1340 """a bundle part read from a bundle"""
1340 """a bundle part read from a bundle"""
1341
1341
1342 def __init__(self, ui, header, fp):
1342 def __init__(self, ui, header, fp):
1343 super(unbundlepart, self).__init__(fp)
1343 super(unbundlepart, self).__init__(fp)
1344 self._seekable = util.safehasattr(fp, 'seek') and util.safehasattr(
1344 self._seekable = util.safehasattr(fp, 'seek') and util.safehasattr(
1345 fp, b'tell'
1345 fp, b'tell'
1346 )
1346 )
1347 self.ui = ui
1347 self.ui = ui
1348 # unbundle state attr
1348 # unbundle state attr
1349 self._headerdata = header
1349 self._headerdata = header
1350 self._headeroffset = 0
1350 self._headeroffset = 0
1351 self._initialized = False
1351 self._initialized = False
1352 self.consumed = False
1352 self.consumed = False
1353 # part data
1353 # part data
1354 self.id = None
1354 self.id = None
1355 self.type = None
1355 self.type = None
1356 self.mandatoryparams = None
1356 self.mandatoryparams = None
1357 self.advisoryparams = None
1357 self.advisoryparams = None
1358 self.params = None
1358 self.params = None
1359 self.mandatorykeys = ()
1359 self.mandatorykeys = ()
1360 self._readheader()
1360 self._readheader()
1361 self._mandatory = None
1361 self._mandatory = None
1362 self._pos = 0
1362 self._pos = 0
1363
1363
1364 def _fromheader(self, size):
1364 def _fromheader(self, size):
1365 """return the next <size> byte from the header"""
1365 """return the next <size> byte from the header"""
1366 offset = self._headeroffset
1366 offset = self._headeroffset
1367 data = self._headerdata[offset : (offset + size)]
1367 data = self._headerdata[offset : (offset + size)]
1368 self._headeroffset = offset + size
1368 self._headeroffset = offset + size
1369 return data
1369 return data
1370
1370
1371 def _unpackheader(self, format):
1371 def _unpackheader(self, format):
1372 """read given format from header
1372 """read given format from header
1373
1373
1374 This automatically computes the size of the format to read.
1374 This automatically computes the size of the format to read.
1375 data = self._fromheader(struct.calcsize(format))
1375 data = self._fromheader(struct.calcsize(format))
1376 return _unpack(format, data)
1376 return _unpack(format, data)
1377
1377
1378 def _initparams(self, mandatoryparams, advisoryparams):
1378 def _initparams(self, mandatoryparams, advisoryparams):
1379 """internal function to setup all logic related parameters"""
1379 """internal function to setup all logic related parameters"""
1380 # make it read only to prevent people touching it by mistake.
1380 # make it read only to prevent people touching it by mistake.
1381 self.mandatoryparams = tuple(mandatoryparams)
1381 self.mandatoryparams = tuple(mandatoryparams)
1382 self.advisoryparams = tuple(advisoryparams)
1382 self.advisoryparams = tuple(advisoryparams)
1383 # user friendly UI
1383 # user friendly UI
1384 self.params = util.sortdict(self.mandatoryparams)
1384 self.params = util.sortdict(self.mandatoryparams)
1385 self.params.update(self.advisoryparams)
1385 self.params.update(self.advisoryparams)
1386 self.mandatorykeys = frozenset(p[0] for p in mandatoryparams)
1386 self.mandatorykeys = frozenset(p[0] for p in mandatoryparams)
1387
1387
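# Editorial sketch (illustration, not part of bundle2.py): how _initparams()
# above merges mandatory and advisory parameters into the read-only views kept
# on the part.  A plain dict stands in for util.sortdict, and the parameter
# values are made-up examples.
mandatory = [(b'version', b'02'), (b'nbchanges', b'3')]
advisory = [(b'targetphase', b'2')]

params = dict(mandatory)
params.update(advisory)
mandatorykeys = frozenset(k for k, _v in mandatory)

assert params[b'nbchanges'] == b'3' and params[b'targetphase'] == b'2'
assert b'version' in mandatorykeys and b'targetphase' not in mandatorykeys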
1388 def _readheader(self):
1388 def _readheader(self):
1389 """read the header and setup the object"""
1389 """read the header and setup the object"""
1390 typesize = self._unpackheader(_fparttypesize)[0]
1390 typesize = self._unpackheader(_fparttypesize)[0]
1391 self.type = self._fromheader(typesize)
1391 self.type = self._fromheader(typesize)
1392 indebug(self.ui, b'part type: "%s"' % self.type)
1392 indebug(self.ui, b'part type: "%s"' % self.type)
1393 self.id = self._unpackheader(_fpartid)[0]
1393 self.id = self._unpackheader(_fpartid)[0]
1394 indebug(self.ui, b'part id: "%s"' % pycompat.bytestr(self.id))
1394 indebug(self.ui, b'part id: "%s"' % pycompat.bytestr(self.id))
1395 # extract mandatory bit from type
1395 # extract mandatory bit from type
1396 self.mandatory = self.type != self.type.lower()
1396 self.mandatory = self.type != self.type.lower()
1397 self.type = self.type.lower()
1397 self.type = self.type.lower()
1398 ## reading parameters
1398 ## reading parameters
1399 # param count
1399 # param count
1400 mancount, advcount = self._unpackheader(_fpartparamcount)
1400 mancount, advcount = self._unpackheader(_fpartparamcount)
1401 indebug(self.ui, b'part parameters: %i' % (mancount + advcount))
1401 indebug(self.ui, b'part parameters: %i' % (mancount + advcount))
1402 # param size
1402 # param size
1403 fparamsizes = _makefpartparamsizes(mancount + advcount)
1403 fparamsizes = _makefpartparamsizes(mancount + advcount)
1404 paramsizes = self._unpackheader(fparamsizes)
1404 paramsizes = self._unpackheader(fparamsizes)
1405 # make it a list of couples again
1405 # make it a list of couples again
1406 paramsizes = list(zip(paramsizes[::2], paramsizes[1::2]))
1406 paramsizes = list(zip(paramsizes[::2], paramsizes[1::2]))
1407 # split mandatory from advisory
1407 # split mandatory from advisory
1408 mansizes = paramsizes[:mancount]
1408 mansizes = paramsizes[:mancount]
1409 advsizes = paramsizes[mancount:]
1409 advsizes = paramsizes[mancount:]
1410 # retrieve param value
1410 # retrieve param value
1411 manparams = []
1411 manparams = []
1412 for key, value in mansizes:
1412 for key, value in mansizes:
1413 manparams.append((self._fromheader(key), self._fromheader(value)))
1413 manparams.append((self._fromheader(key), self._fromheader(value)))
1414 advparams = []
1414 advparams = []
1415 for key, value in advsizes:
1415 for key, value in advsizes:
1416 advparams.append((self._fromheader(key), self._fromheader(value)))
1416 advparams.append((self._fromheader(key), self._fromheader(value)))
1417 self._initparams(manparams, advparams)
1417 self._initparams(manparams, advparams)
1418 ## part payload
1418 ## part payload
1419 self._payloadstream = util.chunkbuffer(self._payloadchunks())
1419 self._payloadstream = util.chunkbuffer(self._payloadchunks())
1420 # we read the data, tell it
1420 # we read the data, tell it
1421 self._initialized = True
1421 self._initialized = True
1422
1422
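# Editorial sketch (illustration, not part of bundle2.py): the mandatory flag
# decoded by _readheader() above is carried by the case of the advertised part
# type -- any upper-case character marks the part as mandatory, after which the
# type is normalised to lower case.
for wiretype, expect_mandatory in [(b'CHANGEGROUP', True), (b'output', False)]:
    mandatory = wiretype != wiretype.lower()
    parttype = wiretype.lower()
    assert mandatory is expect_mandatory
    assert parttype in (b'changegroup', b'output')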
1423 def _payloadchunks(self):
1423 def _payloadchunks(self):
1424 """Generator of decoded chunks in the payload."""
1424 """Generator of decoded chunks in the payload."""
1425 return decodepayloadchunks(self.ui, self._fp)
1425 return decodepayloadchunks(self.ui, self._fp)
1426
1426
1427 def consume(self):
1427 def consume(self):
1428 """Read the part payload until completion.
1428 """Read the part payload until completion.
1429
1429
1430 By consuming the part data, the underlying stream read offset will
1430 By consuming the part data, the underlying stream read offset will
1431 be advanced to the next part (or end of stream).
1431 be advanced to the next part (or end of stream).
1432 """
1432 """
1433 if self.consumed:
1433 if self.consumed:
1434 return
1434 return
1435
1435
1436 chunk = self.read(32768)
1436 chunk = self.read(32768)
1437 while chunk:
1437 while chunk:
1438 self._pos += len(chunk)
1438 self._pos += len(chunk)
1439 chunk = self.read(32768)
1439 chunk = self.read(32768)
1440
1440
1441 def read(self, size=None):
1441 def read(self, size=None):
1442 """read payload data"""
1442 """read payload data"""
1443 if not self._initialized:
1443 if not self._initialized:
1444 self._readheader()
1444 self._readheader()
1445 if size is None:
1445 if size is None:
1446 data = self._payloadstream.read()
1446 data = self._payloadstream.read()
1447 else:
1447 else:
1448 data = self._payloadstream.read(size)
1448 data = self._payloadstream.read(size)
1449 self._pos += len(data)
1449 self._pos += len(data)
1450 if size is None or len(data) < size:
1450 if size is None or len(data) < size:
1451 if not self.consumed and self._pos:
1451 if not self.consumed and self._pos:
1452 self.ui.debug(
1452 self.ui.debug(
1453 b'bundle2-input-part: total payload size %i\n' % self._pos
1453 b'bundle2-input-part: total payload size %i\n' % self._pos
1454 )
1454 )
1455 self.consumed = True
1455 self.consumed = True
1456 return data
1456 return data
1457
1457
1458
1458
1459 class seekableunbundlepart(unbundlepart):
1459 class seekableunbundlepart(unbundlepart):
1460 """A bundle2 part in a bundle that is seekable.
1460 """A bundle2 part in a bundle that is seekable.
1461
1461
1462 Regular ``unbundlepart`` instances can only be read once. This class
1462 Regular ``unbundlepart`` instances can only be read once. This class
1463 extends ``unbundlepart`` to enable bi-directional seeking within the
1463 extends ``unbundlepart`` to enable bi-directional seeking within the
1464 part.
1464 part.
1465
1465
1466 Bundle2 part data consists of framed chunks. Offsets when seeking
1466 Bundle2 part data consists of framed chunks. Offsets when seeking
1467 refer to the decoded data, not the offsets in the underlying bundle2
1467 refer to the decoded data, not the offsets in the underlying bundle2
1468 stream.
1468 stream.
1469
1469
1470 To facilitate quickly seeking within the decoded data, instances of this
1470 To facilitate quickly seeking within the decoded data, instances of this
1471 class maintain a mapping between offsets in the underlying stream and
1471 class maintain a mapping between offsets in the underlying stream and
1472 the decoded payload. This mapping will consume memory in proportion
1472 the decoded payload. This mapping will consume memory in proportion
1473 to the number of chunks within the payload (which almost certainly
1473 to the number of chunks within the payload (which almost certainly
1474 increases in proportion with the size of the part).
1474 increases in proportion with the size of the part).
1475 """
1475 """
1476
1476
1477 def __init__(self, ui, header, fp):
1477 def __init__(self, ui, header, fp):
1478 # (payload, file) offsets for chunk starts.
1478 # (payload, file) offsets for chunk starts.
1479 self._chunkindex = []
1479 self._chunkindex = []
1480
1480
1481 super(seekableunbundlepart, self).__init__(ui, header, fp)
1481 super(seekableunbundlepart, self).__init__(ui, header, fp)
1482
1482
1483 def _payloadchunks(self, chunknum=0):
1483 def _payloadchunks(self, chunknum=0):
1484 '''seek to specified chunk and start yielding data'''
1484 '''seek to specified chunk and start yielding data'''
1485 if len(self._chunkindex) == 0:
1485 if len(self._chunkindex) == 0:
1486 assert chunknum == 0, b'Must start with chunk 0'
1486 assert chunknum == 0, b'Must start with chunk 0'
1487 self._chunkindex.append((0, self._tellfp()))
1487 self._chunkindex.append((0, self._tellfp()))
1488 else:
1488 else:
1489 assert chunknum < len(self._chunkindex), (
1489 assert chunknum < len(self._chunkindex), (
1490 b'Unknown chunk %d' % chunknum
1490 b'Unknown chunk %d' % chunknum
1491 )
1491 )
1492 self._seekfp(self._chunkindex[chunknum][1])
1492 self._seekfp(self._chunkindex[chunknum][1])
1493
1493
1494 pos = self._chunkindex[chunknum][0]
1494 pos = self._chunkindex[chunknum][0]
1495
1495
1496 for chunk in decodepayloadchunks(self.ui, self._fp):
1496 for chunk in decodepayloadchunks(self.ui, self._fp):
1497 chunknum += 1
1497 chunknum += 1
1498 pos += len(chunk)
1498 pos += len(chunk)
1499 if chunknum == len(self._chunkindex):
1499 if chunknum == len(self._chunkindex):
1500 self._chunkindex.append((pos, self._tellfp()))
1500 self._chunkindex.append((pos, self._tellfp()))
1501
1501
1502 yield chunk
1502 yield chunk
1503
1503
1504 def _findchunk(self, pos):
1504 def _findchunk(self, pos):
1505 '''for a given payload position, return a chunk number and offset'''
1505 '''for a given payload position, return a chunk number and offset'''
1506 for chunk, (ppos, fpos) in enumerate(self._chunkindex):
1506 for chunk, (ppos, fpos) in enumerate(self._chunkindex):
1507 if ppos == pos:
1507 if ppos == pos:
1508 return chunk, 0
1508 return chunk, 0
1509 elif ppos > pos:
1509 elif ppos > pos:
1510 return chunk - 1, pos - self._chunkindex[chunk - 1][0]
1510 return chunk - 1, pos - self._chunkindex[chunk - 1][0]
1511 raise ValueError(b'Unknown chunk')
1511 raise ValueError(b'Unknown chunk')
1512
1512
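# Editorial sketch (illustration, not part of bundle2.py): the chunk index kept
# by seekableunbundlepart maps payload offsets to file offsets, and _findchunk()
# above turns a payload position into (chunk number, offset inside that chunk).
# The offsets below are made-up values.
chunkindex = [(0, 100), (5, 110), (12, 121)]

def findchunk(pos):
    for chunk, (ppos, _fpos) in enumerate(chunkindex):
        if ppos == pos:
            return chunk, 0
        elif ppos > pos:
            return chunk - 1, pos - chunkindex[chunk - 1][0]
    raise ValueError('Unknown chunk')

assert findchunk(5) == (1, 0)   # exactly at the start of chunk 1
assert findchunk(8) == (1, 3)   # three decoded bytes into chunk 1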
1513 def tell(self):
1513 def tell(self):
1514 return self._pos
1514 return self._pos
1515
1515
1516 def seek(self, offset, whence=os.SEEK_SET):
1516 def seek(self, offset, whence=os.SEEK_SET):
1517 if whence == os.SEEK_SET:
1517 if whence == os.SEEK_SET:
1518 newpos = offset
1518 newpos = offset
1519 elif whence == os.SEEK_CUR:
1519 elif whence == os.SEEK_CUR:
1520 newpos = self._pos + offset
1520 newpos = self._pos + offset
1521 elif whence == os.SEEK_END:
1521 elif whence == os.SEEK_END:
1522 if not self.consumed:
1522 if not self.consumed:
1523 # Can't use self.consume() here because it advances self._pos.
1523 # Can't use self.consume() here because it advances self._pos.
1524 chunk = self.read(32768)
1524 chunk = self.read(32768)
1525 while chunk:
1525 while chunk:
1526 chunk = self.read(32768)
1526 chunk = self.read(32768)
1527 newpos = self._chunkindex[-1][0] - offset
1527 newpos = self._chunkindex[-1][0] - offset
1528 else:
1528 else:
1529 raise ValueError(b'Unknown whence value: %r' % (whence,))
1529 raise ValueError(b'Unknown whence value: %r' % (whence,))
1530
1530
1531 if newpos > self._chunkindex[-1][0] and not self.consumed:
1531 if newpos > self._chunkindex[-1][0] and not self.consumed:
1532 # Can't use self.consume() here because it advances self._pos.
1532 # Can't use self.consume() here because it advances self._pos.
1533 chunk = self.read(32768)
1533 chunk = self.read(32768)
1534 while chunk:
1534 while chunk:
1535 chunk = self.read(32768)
1535 chunk = self.read(32768)
1536
1536
1537 if not 0 <= newpos <= self._chunkindex[-1][0]:
1537 if not 0 <= newpos <= self._chunkindex[-1][0]:
1538 raise ValueError(b'Offset out of range')
1538 raise ValueError(b'Offset out of range')
1539
1539
1540 if self._pos != newpos:
1540 if self._pos != newpos:
1541 chunk, internaloffset = self._findchunk(newpos)
1541 chunk, internaloffset = self._findchunk(newpos)
1542 self._payloadstream = util.chunkbuffer(self._payloadchunks(chunk))
1542 self._payloadstream = util.chunkbuffer(self._payloadchunks(chunk))
1543 adjust = self.read(internaloffset)
1543 adjust = self.read(internaloffset)
1544 if len(adjust) != internaloffset:
1544 if len(adjust) != internaloffset:
1545 raise error.Abort(_(b'Seek failed\n'))
1545 raise error.Abort(_(b'Seek failed\n'))
1546 self._pos = newpos
1546 self._pos = newpos
1547
1547
1548 def _seekfp(self, offset, whence=0):
1548 def _seekfp(self, offset, whence=0):
1549 """move the underlying file pointer
1549 """move the underlying file pointer
1550
1550
1551 This method is meant for internal usage by the bundle2 protocol only.
1551 This method is meant for internal usage by the bundle2 protocol only.
1552 It directly manipulates the low level stream, including bundle2 level
1552 It directly manipulates the low level stream, including bundle2 level
1553 instructions.
1553 instructions.
1554
1554
1555 Do not use it to implement higher-level logic or methods."""
1555 Do not use it to implement higher-level logic or methods."""
1556 if self._seekable:
1556 if self._seekable:
1557 return self._fp.seek(offset, whence)
1557 return self._fp.seek(offset, whence)
1558 else:
1558 else:
1559 raise NotImplementedError(_(b'File pointer is not seekable'))
1559 raise NotImplementedError(_(b'File pointer is not seekable'))
1560
1560
1561 def _tellfp(self):
1561 def _tellfp(self):
1562 """return the file offset, or None if file is not seekable
1562 """return the file offset, or None if file is not seekable
1563
1563
1564 This method is meant for internal usage by the bundle2 protocol only.
1564 This method is meant for internal usage by the bundle2 protocol only.
1565 It directly manipulates the low level stream, including bundle2 level
1565 It directly manipulates the low level stream, including bundle2 level
1566 instructions.
1566 instructions.
1567
1567
1568 Do not use it to implement higher-level logic or methods."""
1568 Do not use it to implement higher-level logic or methods."""
1569 if self._seekable:
1569 if self._seekable:
1570 try:
1570 try:
1571 return self._fp.tell()
1571 return self._fp.tell()
1572 except IOError as e:
1572 except IOError as e:
1573 if e.errno == errno.ESPIPE:
1573 if e.errno == errno.ESPIPE:
1574 self._seekable = False
1574 self._seekable = False
1575 else:
1575 else:
1576 raise
1576 raise
1577 return None
1577 return None
1578
1578
1579
1579
1580 # These are only the static capabilities.
1580 # These are only the static capabilities.
1581 # Check the 'getrepocaps' function for the rest.
1581 # Check the 'getrepocaps' function for the rest.
1582 capabilities = {
1582 capabilities = {
1583 b'HG20': (),
1583 b'HG20': (),
1584 b'bookmarks': (),
1584 b'bookmarks': (),
1585 b'error': (b'abort', b'unsupportedcontent', b'pushraced', b'pushkey'),
1585 b'error': (b'abort', b'unsupportedcontent', b'pushraced', b'pushkey'),
1586 b'listkeys': (),
1586 b'listkeys': (),
1587 b'pushkey': (),
1587 b'pushkey': (),
1588 b'digests': tuple(sorted(util.DIGESTS.keys())),
1588 b'digests': tuple(sorted(util.DIGESTS.keys())),
1589 b'remote-changegroup': (b'http', b'https'),
1589 b'remote-changegroup': (b'http', b'https'),
1590 b'hgtagsfnodes': (),
1590 b'hgtagsfnodes': (),
1591 b'rev-branch-cache': (),
1591 b'rev-branch-cache': (),
1592 b'phases': (b'heads',),
1592 b'phases': (b'heads',),
1593 b'stream': (b'v2',),
1593 b'stream': (b'v2',),
1594 }
1594 }
1595
1595
1596
1596
1597 def getrepocaps(repo, allowpushback=False, role=None):
1597 def getrepocaps(repo, allowpushback=False, role=None):
1598 """return the bundle2 capabilities for a given repo
1598 """return the bundle2 capabilities for a given repo
1599
1599
1600 Exists to allow extensions (like evolution) to mutate the capabilities.
1600 Exists to allow extensions (like evolution) to mutate the capabilities.
1601
1601
1602 The returned value is used for servers advertising their capabilities as
1602 The returned value is used for servers advertising their capabilities as
1603 well as clients advertising their capabilities to servers as part of
1603 well as clients advertising their capabilities to servers as part of
1604 bundle2 requests. The ``role`` argument specifies which is which.
1604 bundle2 requests. The ``role`` argument specifies which is which.
1605 """
1605 """
1606 if role not in (b'client', b'server'):
1606 if role not in (b'client', b'server'):
1607 raise error.ProgrammingError(b'role argument must be client or server')
1607 raise error.ProgrammingError(b'role argument must be client or server')
1608
1608
1609 caps = capabilities.copy()
1609 caps = capabilities.copy()
1610 caps[b'changegroup'] = tuple(
1610 caps[b'changegroup'] = tuple(
1611 sorted(changegroup.supportedincomingversions(repo))
1611 sorted(changegroup.supportedincomingversions(repo))
1612 )
1612 )
1613 if obsolete.isenabled(repo, obsolete.exchangeopt):
1613 if obsolete.isenabled(repo, obsolete.exchangeopt):
1614 supportedformat = tuple(b'V%i' % v for v in obsolete.formats)
1614 supportedformat = tuple(b'V%i' % v for v in obsolete.formats)
1615 caps[b'obsmarkers'] = supportedformat
1615 caps[b'obsmarkers'] = supportedformat
1616 if allowpushback:
1616 if allowpushback:
1617 caps[b'pushback'] = ()
1617 caps[b'pushback'] = ()
1618 cpmode = repo.ui.config(b'server', b'concurrent-push-mode')
1618 cpmode = repo.ui.config(b'server', b'concurrent-push-mode')
1619 if cpmode == b'check-related':
1619 if cpmode == b'check-related':
1620 caps[b'checkheads'] = (b'related',)
1620 caps[b'checkheads'] = (b'related',)
1621 if b'phases' in repo.ui.configlist(b'devel', b'legacy.exchange'):
1621 if b'phases' in repo.ui.configlist(b'devel', b'legacy.exchange'):
1622 caps.pop(b'phases')
1622 caps.pop(b'phases')
1623
1623
1624 # Don't advertise stream clone support in server mode if not configured.
1624 # Don't advertise stream clone support in server mode if not configured.
1625 if role == b'server':
1625 if role == b'server':
1626 streamsupported = repo.ui.configbool(
1626 streamsupported = repo.ui.configbool(
1627 b'server', b'uncompressed', untrusted=True
1627 b'server', b'uncompressed', untrusted=True
1628 )
1628 )
1629 featuresupported = repo.ui.configbool(b'server', b'bundle2.stream')
1629 featuresupported = repo.ui.configbool(b'server', b'bundle2.stream')
1630
1630
1631 if not streamsupported or not featuresupported:
1631 if not streamsupported or not featuresupported:
1632 caps.pop(b'stream')
1632 caps.pop(b'stream')
1633 # Else always advertise support on client, because payload support
1633 # Else always advertise support on client, because payload support
1634 # should always be advertised.
1634 # should always be advertised.
1635
1635
1636 return caps
1636 return caps
1637
1637
1638
1638
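# Editorial sketch (illustration, not part of bundle2.py): the role-dependent
# trimming performed by getrepocaps() above, reduced to the b'stream' entry.
# The boolean arguments stand in for the real ui.configbool() lookups of
# server.uncompressed and server.bundle2.stream.
STATIC_CAPS = {b'HG20': (), b'stream': (b'v2',)}

def repocaps(role, streamsupported, featuresupported):
    if role not in (b'client', b'server'):
        raise ValueError('role argument must be client or server')
    caps = dict(STATIC_CAPS)
    if role == b'server' and not (streamsupported and featuresupported):
        caps.pop(b'stream')
    return caps

assert b'stream' in repocaps(b'client', False, False)    # clients always advertise it
assert b'stream' not in repocaps(b'server', True, False)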
1639 def bundle2caps(remote):
1639 def bundle2caps(remote):
1640 """return the bundle capabilities of a peer as dict"""
1640 """return the bundle capabilities of a peer as dict"""
1641 raw = remote.capable(b'bundle2')
1641 raw = remote.capable(b'bundle2')
1642 if not raw and raw != b'':
1642 if not raw and raw != b'':
1643 return {}
1643 return {}
1644 capsblob = urlreq.unquote(remote.capable(b'bundle2'))
1644 capsblob = urlreq.unquote(remote.capable(b'bundle2'))
1645 return decodecaps(capsblob)
1645 return decodecaps(capsblob)
1646
1646
1647
1647
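# Editorial sketch (illustration, not part of bundle2.py): the empty-capability
# nuance handled by bundle2caps() above -- a peer that advertises b'bundle2'
# with an empty value still supports bundle2, so only a False/None answer from
# capable() yields an empty caps dict.  FakePeer and the returned dict are
# hypothetical stand-ins for the real peer object and decodecaps().
class FakePeer(object):
    def __init__(self, raw):
        self._raw = raw

    def capable(self, name):
        return self._raw

def caps_or_empty(remote):
    raw = remote.capable(b'bundle2')
    if not raw and raw != b'':
        return {}
    return {b'advertised': raw}

assert caps_or_empty(FakePeer(False)) == {}
assert caps_or_empty(FakePeer(b'')) == {b'advertised': b''}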
1648 def obsmarkersversion(caps):
1648 def obsmarkersversion(caps):
1649 """extract the list of supported obsmarkers versions from a bundle2caps dict
1649 """extract the list of supported obsmarkers versions from a bundle2caps dict
1650 """
1650 """
1651 obscaps = caps.get(b'obsmarkers', ())
1651 obscaps = caps.get(b'obsmarkers', ())
1652 return [int(c[1:]) for c in obscaps if c.startswith(b'V')]
1652 return [int(c[1:]) for c in obscaps if c.startswith(b'V')]
1653
1653
1654
1654
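# Editorial sketch (illustration, not part of bundle2.py): obsmarkersversion()
# above simply parses the b'V<N>' entries advertised under the b'obsmarkers'
# capability; an absent capability yields an empty list.
def parse_obsmarker_versions(caps):
    obscaps = caps.get(b'obsmarkers', ())
    return [int(c[1:]) for c in obscaps if c.startswith(b'V')]

assert parse_obsmarker_versions({b'obsmarkers': (b'V0', b'V1')}) == [0, 1]
assert parse_obsmarker_versions({}) == []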
1655 def writenewbundle(
1655 def writenewbundle(
1656 ui,
1656 ui,
1657 repo,
1657 repo,
1658 source,
1658 source,
1659 filename,
1659 filename,
1660 bundletype,
1660 bundletype,
1661 outgoing,
1661 outgoing,
1662 opts,
1662 opts,
1663 vfs=None,
1663 vfs=None,
1664 compression=None,
1664 compression=None,
1665 compopts=None,
1665 compopts=None,
1666 ):
1666 ):
1667 if bundletype.startswith(b'HG10'):
1667 if bundletype.startswith(b'HG10'):
1668 cg = changegroup.makechangegroup(repo, outgoing, b'01', source)
1668 cg = changegroup.makechangegroup(repo, outgoing, b'01', source)
1669 return writebundle(
1669 return writebundle(
1670 ui,
1670 ui,
1671 cg,
1671 cg,
1672 filename,
1672 filename,
1673 bundletype,
1673 bundletype,
1674 vfs=vfs,
1674 vfs=vfs,
1675 compression=compression,
1675 compression=compression,
1676 compopts=compopts,
1676 compopts=compopts,
1677 )
1677 )
1678 elif not bundletype.startswith(b'HG20'):
1678 elif not bundletype.startswith(b'HG20'):
1679 raise error.ProgrammingError(b'unknown bundle type: %s' % bundletype)
1679 raise error.ProgrammingError(b'unknown bundle type: %s' % bundletype)
1680
1680
1681 caps = {}
1681 caps = {}
1682 if b'obsolescence' in opts:
1682 if b'obsolescence' in opts:
1683 caps[b'obsmarkers'] = (b'V1',)
1683 caps[b'obsmarkers'] = (b'V1',)
1684 bundle = bundle20(ui, caps)
1684 bundle = bundle20(ui, caps)
1685 bundle.setcompression(compression, compopts)
1685 bundle.setcompression(compression, compopts)
1686 _addpartsfromopts(ui, repo, bundle, source, outgoing, opts)
1686 _addpartsfromopts(ui, repo, bundle, source, outgoing, opts)
1687 chunkiter = bundle.getchunks()
1687 chunkiter = bundle.getchunks()
1688
1688
1689 return changegroup.writechunks(ui, chunkiter, filename, vfs=vfs)
1689 return changegroup.writechunks(ui, chunkiter, filename, vfs=vfs)
1690
1690
1691
1691
1692 def _addpartsfromopts(ui, repo, bundler, source, outgoing, opts):
1692 def _addpartsfromopts(ui, repo, bundler, source, outgoing, opts):
1693 # We should eventually reconcile this logic with the one behind
1693 # We should eventually reconcile this logic with the one behind
1694 # 'exchange.getbundle2partsgenerator'.
1694 # 'exchange.getbundle2partsgenerator'.
1695 #
1695 #
1696 # The type of input from 'getbundle' and 'writenewbundle' are a bit
1696 # The type of input from 'getbundle' and 'writenewbundle' are a bit
1697 # different right now. So we keep them separated for now for the sake of
1697 # different right now. So we keep them separated for now for the sake of
1698 # simplicity.
1698 # simplicity.
1699
1699
1700 # we might not always want a changegroup in such bundle, for example in
1700 # we might not always want a changegroup in such bundle, for example in
1701 # stream bundles
1701 # stream bundles
1702 if opts.get(b'changegroup', True):
1702 if opts.get(b'changegroup', True):
1703 cgversion = opts.get(b'cg.version')
1703 cgversion = opts.get(b'cg.version')
1704 if cgversion is None:
1704 if cgversion is None:
1705 cgversion = changegroup.safeversion(repo)
1705 cgversion = changegroup.safeversion(repo)
1706 cg = changegroup.makechangegroup(repo, outgoing, cgversion, source)
1706 cg = changegroup.makechangegroup(repo, outgoing, cgversion, source)
1707 part = bundler.newpart(b'changegroup', data=cg.getchunks())
1707 part = bundler.newpart(b'changegroup', data=cg.getchunks())
1708 part.addparam(b'version', cg.version)
1708 part.addparam(b'version', cg.version)
1709 if b'clcount' in cg.extras:
1709 if b'clcount' in cg.extras:
1710 part.addparam(
1710 part.addparam(
1711 b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False
1711 b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False
1712 )
1712 )
1713 if opts.get(b'phases') and repo.revs(
1713 if opts.get(b'phases') and repo.revs(
1714 b'%ln and secret()', outgoing.missingheads
1714 b'%ln and secret()', outgoing.ancestorsof
1715 ):
1715 ):
1716 part.addparam(
1716 part.addparam(
1717 b'targetphase', b'%d' % phases.secret, mandatory=False
1717 b'targetphase', b'%d' % phases.secret, mandatory=False
1718 )
1718 )
1719 if b'exp-sidedata-flag' in repo.requirements:
1719 if b'exp-sidedata-flag' in repo.requirements:
1720 part.addparam(b'exp-sidedata', b'1')
1720 part.addparam(b'exp-sidedata', b'1')
1721
1721
1722 if opts.get(b'streamv2', False):
1722 if opts.get(b'streamv2', False):
1723 addpartbundlestream2(bundler, repo, stream=True)
1723 addpartbundlestream2(bundler, repo, stream=True)
1724
1724
1725 if opts.get(b'tagsfnodescache', True):
1725 if opts.get(b'tagsfnodescache', True):
1726 addparttagsfnodescache(repo, bundler, outgoing)
1726 addparttagsfnodescache(repo, bundler, outgoing)
1727
1727
1728 if opts.get(b'revbranchcache', True):
1728 if opts.get(b'revbranchcache', True):
1729 addpartrevbranchcache(repo, bundler, outgoing)
1729 addpartrevbranchcache(repo, bundler, outgoing)
1730
1730
1731 if opts.get(b'obsolescence', False):
1731 if opts.get(b'obsolescence', False):
1732 obsmarkers = repo.obsstore.relevantmarkers(outgoing.missing)
1732 obsmarkers = repo.obsstore.relevantmarkers(outgoing.missing)
1733 buildobsmarkerspart(bundler, obsmarkers)
1733 buildobsmarkerspart(bundler, obsmarkers)
1734
1734
1735 if opts.get(b'phases', False):
1735 if opts.get(b'phases', False):
1736 headsbyphase = phases.subsetphaseheads(repo, outgoing.missing)
1736 headsbyphase = phases.subsetphaseheads(repo, outgoing.missing)
1737 phasedata = phases.binaryencode(headsbyphase)
1737 phasedata = phases.binaryencode(headsbyphase)
1738 bundler.newpart(b'phase-heads', data=phasedata)
1738 bundler.newpart(b'phase-heads', data=phasedata)
1739
1739
1740
1740
1741 def addparttagsfnodescache(repo, bundler, outgoing):
1741 def addparttagsfnodescache(repo, bundler, outgoing):
1742 # we include the tags fnode cache for the bundle changeset
1742 # we include the tags fnode cache for the bundle changeset
1743 # (as an optional part)
1743 # (as an optional part)
1744 cache = tags.hgtagsfnodescache(repo.unfiltered())
1744 cache = tags.hgtagsfnodescache(repo.unfiltered())
1745 chunks = []
1745 chunks = []
1746
1746
1747 # .hgtags fnodes are only relevant for head changesets. While we could
1747 # .hgtags fnodes are only relevant for head changesets. While we could
1748 # transfer values for all known nodes, there will likely be little to
1748 # transfer values for all known nodes, there will likely be little to
1749 # no benefit.
1749 # no benefit.
1750 #
1750 #
1751 # We don't bother using a generator to produce output data because
1751 # We don't bother using a generator to produce output data because
1752 # a) we only have 40 bytes per head and even esoteric numbers of heads
1752 # a) we only have 40 bytes per head and even esoteric numbers of heads
1753 # consume little memory (1M heads is 40MB) b) we don't want to send the
1753 # consume little memory (1M heads is 40MB) b) we don't want to send the
1754 # part if we don't have entries and knowing if we have entries requires
1754 # part if we don't have entries and knowing if we have entries requires
1755 # cache lookups.
1755 # cache lookups.
1756 for node in outgoing.missingheads:
1756 for node in outgoing.ancestorsof:
1757 # Don't compute missing, as this may slow down serving.
1757 # Don't compute missing, as this may slow down serving.
1758 fnode = cache.getfnode(node, computemissing=False)
1758 fnode = cache.getfnode(node, computemissing=False)
1759 if fnode is not None:
1759 if fnode is not None:
1760 chunks.extend([node, fnode])
1760 chunks.extend([node, fnode])
1761
1761
1762 if chunks:
1762 if chunks:
1763 bundler.newpart(b'hgtagsfnodes', data=b''.join(chunks))
1763 bundler.newpart(b'hgtagsfnodes', data=b''.join(chunks))
1764
1764
1765
1765
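# Editorial sketch (illustration, not part of bundle2.py): the b'hgtagsfnodes'
# payload built above is a flat concatenation of (changeset node, .hgtags
# filenode) pairs, 20 bytes each, which is why a million heads costs roughly
# 40MB.  The node values below are made up.
node1, fnode1 = b'\x11' * 20, b'\xaa' * 20
node2, fnode2 = b'\x22' * 20, b'\xbb' * 20

chunks = []
for node, fnode in [(node1, fnode1), (node2, fnode2)]:
    chunks.extend([node, fnode])
payload = b''.join(chunks)

assert len(payload) == 4 * 20
# A receiver can walk the payload in fixed-size 40-byte records:
records = [(payload[i:i + 20], payload[i + 20:i + 40])
           for i in range(0, len(payload), 40)]
assert records == [(node1, fnode1), (node2, fnode2)]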
1766 def addpartrevbranchcache(repo, bundler, outgoing):
1766 def addpartrevbranchcache(repo, bundler, outgoing):
1767 # we include the rev branch cache for the bundle changeset
1767 # we include the rev branch cache for the bundle changeset
1768 # (as an optional part)
1768 # (as an optional part)
1769 cache = repo.revbranchcache()
1769 cache = repo.revbranchcache()
1770 cl = repo.unfiltered().changelog
1770 cl = repo.unfiltered().changelog
1771 branchesdata = collections.defaultdict(lambda: (set(), set()))
1771 branchesdata = collections.defaultdict(lambda: (set(), set()))
1772 for node in outgoing.missing:
1772 for node in outgoing.missing:
1773 branch, close = cache.branchinfo(cl.rev(node))
1773 branch, close = cache.branchinfo(cl.rev(node))
1774 branchesdata[branch][close].add(node)
1774 branchesdata[branch][close].add(node)
1775
1775
1776 def generate():
1776 def generate():
1777 for branch, (nodes, closed) in sorted(branchesdata.items()):
1777 for branch, (nodes, closed) in sorted(branchesdata.items()):
1778 utf8branch = encoding.fromlocal(branch)
1778 utf8branch = encoding.fromlocal(branch)
1779 yield rbcstruct.pack(len(utf8branch), len(nodes), len(closed))
1779 yield rbcstruct.pack(len(utf8branch), len(nodes), len(closed))
1780 yield utf8branch
1780 yield utf8branch
1781 for n in sorted(nodes):
1781 for n in sorted(nodes):
1782 yield n
1782 yield n
1783 for n in sorted(closed):
1783 for n in sorted(closed):
1784 yield n
1784 yield n
1785
1785
1786 bundler.newpart(b'cache:rev-branch-cache', data=generate(), mandatory=False)
1786 bundler.newpart(b'cache:rev-branch-cache', data=generate(), mandatory=False)
1787
1787
1788
1788
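# Editorial sketch (illustration, not part of bundle2.py): one record emitted by
# generate() above.  rbcstruct is assumed here to be three big-endian 32-bit
# counters (branch-name length, open-head count, closed-head count); the real
# layout comes from the rbcstruct definition elsewhere in this module, so treat
# this as a stand-in.
import struct

rbc = struct.Struct('>III')                    # stand-in for rbcstruct
utf8branch = b'default'
nodes = sorted([b'\x11' * 20, b'\x22' * 20])   # open heads on the branch
closed = []                                    # no closed heads

record = rbc.pack(len(utf8branch), len(nodes), len(closed))
record += utf8branch + b''.join(nodes) + b''.join(closed)
assert len(record) == rbc.size + len(utf8branch) + 20 * (len(nodes) + len(closed))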
1789 def _formatrequirementsspec(requirements):
1789 def _formatrequirementsspec(requirements):
1790 requirements = [req for req in requirements if req != b"shared"]
1790 requirements = [req for req in requirements if req != b"shared"]
1791 return urlreq.quote(b','.join(sorted(requirements)))
1791 return urlreq.quote(b','.join(sorted(requirements)))
1792
1792
1793
1793
1794 def _formatrequirementsparams(requirements):
1794 def _formatrequirementsparams(requirements):
1795 requirements = _formatrequirementsspec(requirements)
1795 requirements = _formatrequirementsspec(requirements)
1796 params = b"%s%s" % (urlreq.quote(b"requirements="), requirements)
1796 params = b"%s%s" % (urlreq.quote(b"requirements="), requirements)
1797 return params
1797 return params
1798
1798
1799
1799
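# Editorial sketch (illustration, not part of bundle2.py): the requirements spec
# built by the two helpers above is a sorted, comma-joined, URL-quoted list with
# the b"shared" requirement filtered out.  urllib.parse.quote stands in for
# urlreq.quote (the real helper returns bytes rather than str).
from urllib.parse import quote

def formatrequirementsspec(requirements):
    requirements = [req for req in requirements if req != b"shared"]
    return quote(b','.join(sorted(requirements)))

assert formatrequirementsspec([b'revlogv1', b'store', b'shared']) == 'revlogv1%2Cstore'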
1800 def addpartbundlestream2(bundler, repo, **kwargs):
1800 def addpartbundlestream2(bundler, repo, **kwargs):
1801 if not kwargs.get('stream', False):
1801 if not kwargs.get('stream', False):
1802 return
1802 return
1803
1803
1804 if not streamclone.allowservergeneration(repo):
1804 if not streamclone.allowservergeneration(repo):
1805 raise error.Abort(
1805 raise error.Abort(
1806 _(
1806 _(
1807 b'stream data requested but server does not allow '
1807 b'stream data requested but server does not allow '
1808 b'this feature'
1808 b'this feature'
1809 ),
1809 ),
1810 hint=_(
1810 hint=_(
1811 b'well-behaved clients should not be '
1811 b'well-behaved clients should not be '
1812 b'requesting stream data from servers not '
1812 b'requesting stream data from servers not '
1813 b'advertising it; the client may be buggy'
1813 b'advertising it; the client may be buggy'
1814 ),
1814 ),
1815 )
1815 )
1816
1816
1817 # Stream clones don't compress well. And compression undermines a
1817 # Stream clones don't compress well. And compression undermines a
1818 # goal of stream clones, which is to be fast. Communicate the desire
1818 # goal of stream clones, which is to be fast. Communicate the desire
1819 # to avoid compression to consumers of the bundle.
1819 # to avoid compression to consumers of the bundle.
1820 bundler.prefercompressed = False
1820 bundler.prefercompressed = False
1821
1821
1822 # get the includes and excludes
1822 # get the includes and excludes
1823 includepats = kwargs.get('includepats')
1823 includepats = kwargs.get('includepats')
1824 excludepats = kwargs.get('excludepats')
1824 excludepats = kwargs.get('excludepats')
1825
1825
1826 narrowstream = repo.ui.configbool(
1826 narrowstream = repo.ui.configbool(
1827 b'experimental', b'server.stream-narrow-clones'
1827 b'experimental', b'server.stream-narrow-clones'
1828 )
1828 )
1829
1829
1830 if (includepats or excludepats) and not narrowstream:
1830 if (includepats or excludepats) and not narrowstream:
1831 raise error.Abort(_(b'server does not support narrow stream clones'))
1831 raise error.Abort(_(b'server does not support narrow stream clones'))
1832
1832
1833 includeobsmarkers = False
1833 includeobsmarkers = False
1834 if repo.obsstore:
1834 if repo.obsstore:
1835 remoteversions = obsmarkersversion(bundler.capabilities)
1835 remoteversions = obsmarkersversion(bundler.capabilities)
1836 if not remoteversions:
1836 if not remoteversions:
1837 raise error.Abort(
1837 raise error.Abort(
1838 _(
1838 _(
1839 b'server has obsolescence markers, but client '
1839 b'server has obsolescence markers, but client '
1840 b'cannot receive them via stream clone'
1840 b'cannot receive them via stream clone'
1841 )
1841 )
1842 )
1842 )
1843 elif repo.obsstore._version in remoteversions:
1843 elif repo.obsstore._version in remoteversions:
1844 includeobsmarkers = True
1844 includeobsmarkers = True
1845
1845
1846 filecount, bytecount, it = streamclone.generatev2(
1846 filecount, bytecount, it = streamclone.generatev2(
1847 repo, includepats, excludepats, includeobsmarkers
1847 repo, includepats, excludepats, includeobsmarkers
1848 )
1848 )
1849 requirements = _formatrequirementsspec(repo.requirements)
1849 requirements = _formatrequirementsspec(repo.requirements)
1850 part = bundler.newpart(b'stream2', data=it)
1850 part = bundler.newpart(b'stream2', data=it)
1851 part.addparam(b'bytecount', b'%d' % bytecount, mandatory=True)
1851 part.addparam(b'bytecount', b'%d' % bytecount, mandatory=True)
1852 part.addparam(b'filecount', b'%d' % filecount, mandatory=True)
1852 part.addparam(b'filecount', b'%d' % filecount, mandatory=True)
1853 part.addparam(b'requirements', requirements, mandatory=True)
1853 part.addparam(b'requirements', requirements, mandatory=True)
1854
1854
1855
1855
1856 def buildobsmarkerspart(bundler, markers):
1856 def buildobsmarkerspart(bundler, markers):
1857 """add an obsmarker part to the bundler with <markers>
1857 """add an obsmarker part to the bundler with <markers>
1858
1858
1859 No part is created if markers is empty.
1859 No part is created if markers is empty.
1860 Raises ValueError if the bundler doesn't support any known obsmarker format.
1860 Raises ValueError if the bundler doesn't support any known obsmarker format.
1861 """
1861 """
1862 if not markers:
1862 if not markers:
1863 return None
1863 return None
1864
1864
1865 remoteversions = obsmarkersversion(bundler.capabilities)
1865 remoteversions = obsmarkersversion(bundler.capabilities)
1866 version = obsolete.commonversion(remoteversions)
1866 version = obsolete.commonversion(remoteversions)
1867 if version is None:
1867 if version is None:
1868 raise ValueError(b'bundler does not support common obsmarker format')
1868 raise ValueError(b'bundler does not support common obsmarker format')
1869 stream = obsolete.encodemarkers(markers, True, version=version)
1869 stream = obsolete.encodemarkers(markers, True, version=version)
1870 return bundler.newpart(b'obsmarkers', data=stream)
1870 return bundler.newpart(b'obsmarkers', data=stream)
1871
1871
1872
1872
1873 def writebundle(
1873 def writebundle(
1874 ui, cg, filename, bundletype, vfs=None, compression=None, compopts=None
1874 ui, cg, filename, bundletype, vfs=None, compression=None, compopts=None
1875 ):
1875 ):
1876 """Write a bundle file and return its filename.
1876 """Write a bundle file and return its filename.
1877
1877
1878 Existing files will not be overwritten.
1878 Existing files will not be overwritten.
1879 If no filename is specified, a temporary file is created.
1879 If no filename is specified, a temporary file is created.
1880 bz2 compression can be turned off.
1880 bz2 compression can be turned off.
1881 The bundle file will be deleted in case of errors.
1881 The bundle file will be deleted in case of errors.
1882 """
1882 """
1883
1883
1884 if bundletype == b"HG20":
1884 if bundletype == b"HG20":
1885 bundle = bundle20(ui)
1885 bundle = bundle20(ui)
1886 bundle.setcompression(compression, compopts)
1886 bundle.setcompression(compression, compopts)
1887 part = bundle.newpart(b'changegroup', data=cg.getchunks())
1887 part = bundle.newpart(b'changegroup', data=cg.getchunks())
1888 part.addparam(b'version', cg.version)
1888 part.addparam(b'version', cg.version)
1889 if b'clcount' in cg.extras:
1889 if b'clcount' in cg.extras:
1890 part.addparam(
1890 part.addparam(
1891 b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False
1891 b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False
1892 )
1892 )
1893 chunkiter = bundle.getchunks()
1893 chunkiter = bundle.getchunks()
1894 else:
1894 else:
1895 # compression argument is only for the bundle2 case
1895 # compression argument is only for the bundle2 case
1896 assert compression is None
1896 assert compression is None
1897 if cg.version != b'01':
1897 if cg.version != b'01':
1898 raise error.Abort(
1898 raise error.Abort(
1899 _(b'old bundle types only support v1 changegroups')
1899 _(b'old bundle types only support v1 changegroups')
1900 )
1900 )
1901 header, comp = bundletypes[bundletype]
1901 header, comp = bundletypes[bundletype]
1902 if comp not in util.compengines.supportedbundletypes:
1902 if comp not in util.compengines.supportedbundletypes:
1903 raise error.Abort(_(b'unknown stream compression type: %s') % comp)
1903 raise error.Abort(_(b'unknown stream compression type: %s') % comp)
1904 compengine = util.compengines.forbundletype(comp)
1904 compengine = util.compengines.forbundletype(comp)
1905
1905
1906 def chunkiter():
1906 def chunkiter():
1907 yield header
1907 yield header
1908 for chunk in compengine.compressstream(cg.getchunks(), compopts):
1908 for chunk in compengine.compressstream(cg.getchunks(), compopts):
1909 yield chunk
1909 yield chunk
1910
1910
1911 chunkiter = chunkiter()
1911 chunkiter = chunkiter()
1912
1912
1913 # parse the changegroup data, otherwise we will block
1913 # parse the changegroup data, otherwise we will block
1914 # in case of sshrepo because we don't know the end of the stream
1914 # in case of sshrepo because we don't know the end of the stream
1915 return changegroup.writechunks(ui, chunkiter, filename, vfs=vfs)
1915 return changegroup.writechunks(ui, chunkiter, filename, vfs=vfs)
1916
1916
1917
1917
1918 def combinechangegroupresults(op):
1918 def combinechangegroupresults(op):
1919 """logic to combine 0 or more addchangegroup results into one"""
1919 """logic to combine 0 or more addchangegroup results into one"""
1920 results = [r.get(b'return', 0) for r in op.records[b'changegroup']]
1920 results = [r.get(b'return', 0) for r in op.records[b'changegroup']]
1921 changedheads = 0
1921 changedheads = 0
1922 result = 1
1922 result = 1
1923 for ret in results:
1923 for ret in results:
1924 # If any changegroup result is 0, return 0
1924 # If any changegroup result is 0, return 0
1925 if ret == 0:
1925 if ret == 0:
1926 result = 0
1926 result = 0
1927 break
1927 break
1928 if ret < -1:
1928 if ret < -1:
1929 changedheads += ret + 1
1929 changedheads += ret + 1
1930 elif ret > 1:
1930 elif ret > 1:
1931 changedheads += ret - 1
1931 changedheads += ret - 1
1932 if changedheads > 0:
1932 if changedheads > 0:
1933 result = 1 + changedheads
1933 result = 1 + changedheads
1934 elif changedheads < 0:
1934 elif changedheads < 0:
1935 result = -1 + changedheads
1935 result = -1 + changedheads
1936 return result
1936 return result
1937
1937
1938
1938
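# Editorial sketch (illustration, not part of bundle2.py): the return-code
# folding implemented by combinechangegroupresults() above.  Each changegroup
# result roughly follows the addchangegroup convention: 0 when nothing changed,
# 1 when the head count is unchanged, 1+n for n added heads, -1-n for n removed
# heads.
def combine(results):
    changedheads = 0
    result = 1
    for ret in results:
        if ret == 0:
            result = 0
            break
        if ret < -1:
            changedheads += ret + 1
        elif ret > 1:
            changedheads += ret - 1
    if changedheads > 0:
        result = 1 + changedheads
    elif changedheads < 0:
        result = -1 + changedheads
    return result

assert combine([1, 1]) == 1      # two clean applications, no head changes
assert combine([3, 2]) == 4      # 2 + 1 added heads fold into 1 + 3
assert combine([0, 5]) == 0      # a zero result short-circuits the fold
assert combine([-2, -3]) == -4   # 1 + 2 removed heads fold into -1 - 3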
1939 @parthandler(
1939 @parthandler(
1940 b'changegroup',
1940 b'changegroup',
1941 (
1941 (
1942 b'version',
1942 b'version',
1943 b'nbchanges',
1943 b'nbchanges',
1944 b'exp-sidedata',
1944 b'exp-sidedata',
1945 b'treemanifest',
1945 b'treemanifest',
1946 b'targetphase',
1946 b'targetphase',
1947 ),
1947 ),
1948 )
1948 )
1949 def handlechangegroup(op, inpart):
1949 def handlechangegroup(op, inpart):
1950 """apply a changegroup part on the repo
1950 """apply a changegroup part on the repo
1951
1951
1952 This is a very early implementation that will be massively reworked before
1952 This is a very early implementation that will be massively reworked before
1953 being inflicted on any end-user.
1953 being inflicted on any end-user.
1954 """
1954 """
1955 from . import localrepo
1955 from . import localrepo
1956
1956
1957 tr = op.gettransaction()
1957 tr = op.gettransaction()
1958 unpackerversion = inpart.params.get(b'version', b'01')
1958 unpackerversion = inpart.params.get(b'version', b'01')
1959 # We should raise an appropriate exception here
1959 # We should raise an appropriate exception here
1960 cg = changegroup.getunbundler(unpackerversion, inpart, None)
1960 cg = changegroup.getunbundler(unpackerversion, inpart, None)
1961 # the source and url passed here are overwritten by the one contained in
1961 # the source and url passed here are overwritten by the one contained in
1962 # the transaction.hookargs argument. So 'bundle2' is a placeholder
1962 # the transaction.hookargs argument. So 'bundle2' is a placeholder
1963 nbchangesets = None
1963 nbchangesets = None
1964 if b'nbchanges' in inpart.params:
1964 if b'nbchanges' in inpart.params:
1965 nbchangesets = int(inpart.params.get(b'nbchanges'))
1965 nbchangesets = int(inpart.params.get(b'nbchanges'))
1966 if (
1966 if (
1967 b'treemanifest' in inpart.params
1967 b'treemanifest' in inpart.params
1968 and b'treemanifest' not in op.repo.requirements
1968 and b'treemanifest' not in op.repo.requirements
1969 ):
1969 ):
1970 if len(op.repo.changelog) != 0:
1970 if len(op.repo.changelog) != 0:
1971 raise error.Abort(
1971 raise error.Abort(
1972 _(
1972 _(
1973 b"bundle contains tree manifests, but local repo is "
1973 b"bundle contains tree manifests, but local repo is "
1974 b"non-empty and does not use tree manifests"
1974 b"non-empty and does not use tree manifests"
1975 )
1975 )
1976 )
1976 )
1977 op.repo.requirements.add(b'treemanifest')
1977 op.repo.requirements.add(b'treemanifest')
1978 op.repo.svfs.options = localrepo.resolvestorevfsoptions(
1978 op.repo.svfs.options = localrepo.resolvestorevfsoptions(
1979 op.repo.ui, op.repo.requirements, op.repo.features
1979 op.repo.ui, op.repo.requirements, op.repo.features
1980 )
1980 )
1981 scmutil.writereporequirements(op.repo)
1981 scmutil.writereporequirements(op.repo)
1982
1982
1983 bundlesidedata = bool(b'exp-sidedata' in inpart.params)
1983 bundlesidedata = bool(b'exp-sidedata' in inpart.params)
1984 reposidedata = bool(b'exp-sidedata-flag' in op.repo.requirements)
1984 reposidedata = bool(b'exp-sidedata-flag' in op.repo.requirements)
1985 if reposidedata and not bundlesidedata:
1985 if reposidedata and not bundlesidedata:
1986 msg = b"repository is using sidedata but the bundle source do not"
1986 msg = b"repository is using sidedata but the bundle source do not"
1987 hint = b'this is currently unsupported'
1987 hint = b'this is currently unsupported'
1988 raise error.Abort(msg, hint=hint)
1988 raise error.Abort(msg, hint=hint)
1989
1989
1990 extrakwargs = {}
1990 extrakwargs = {}
1991 targetphase = inpart.params.get(b'targetphase')
1991 targetphase = inpart.params.get(b'targetphase')
1992 if targetphase is not None:
1992 if targetphase is not None:
1993 extrakwargs['targetphase'] = int(targetphase)
1993 extrakwargs['targetphase'] = int(targetphase)
1994 ret = _processchangegroup(
1994 ret = _processchangegroup(
1995 op,
1995 op,
1996 cg,
1996 cg,
1997 tr,
1997 tr,
1998 b'bundle2',
1998 b'bundle2',
1999 b'bundle2',
1999 b'bundle2',
2000 expectedtotal=nbchangesets,
2000 expectedtotal=nbchangesets,
2001 **extrakwargs
2001 **extrakwargs
2002 )
2002 )
2003 if op.reply is not None:
2003 if op.reply is not None:
2004 # This is definitely not the final form of this
2004 # This is definitely not the final form of this
2005 # return. But one needs to start somewhere.
2005 # return. But one needs to start somewhere.
2006 part = op.reply.newpart(b'reply:changegroup', mandatory=False)
2006 part = op.reply.newpart(b'reply:changegroup', mandatory=False)
2007 part.addparam(
2007 part.addparam(
2008 b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
2008 b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
2009 )
2009 )
2010 part.addparam(b'return', b'%i' % ret, mandatory=False)
2010 part.addparam(b'return', b'%i' % ret, mandatory=False)
2011 assert not inpart.read()
2011 assert not inpart.read()
2012
2012
2013
2013
2014 _remotechangegroupparams = tuple(
2014 _remotechangegroupparams = tuple(
2015 [b'url', b'size', b'digests']
2015 [b'url', b'size', b'digests']
2016 + [b'digest:%s' % k for k in util.DIGESTS.keys()]
2016 + [b'digest:%s' % k for k in util.DIGESTS.keys()]
2017 )
2017 )
2018
2018
2019
2019
2020 @parthandler(b'remote-changegroup', _remotechangegroupparams)
2020 @parthandler(b'remote-changegroup', _remotechangegroupparams)
2021 def handleremotechangegroup(op, inpart):
2021 def handleremotechangegroup(op, inpart):
2022 """apply a bundle10 on the repo, given an url and validation information
2022 """apply a bundle10 on the repo, given an url and validation information
2023
2023
2024 All the information about the remote bundle to import is given as
2024 All the information about the remote bundle to import is given as
2025 parameters. The parameters include:
2025 parameters. The parameters include:
2026 - url: the url to the bundle10.
2026 - url: the url to the bundle10.
2027 - size: the bundle10 file size. It is used to validate what was
2027 - size: the bundle10 file size. It is used to validate what was
2028 retrieved by the client matches the server knowledge about the bundle.
2028 retrieved by the client matches the server knowledge about the bundle.
2029 - digests: a space separated list of the digest types provided as
2029 - digests: a space separated list of the digest types provided as
2030 parameters.
2030 parameters.
2031 - digest:<digest-type>: the hexadecimal representation of the digest with
2031 - digest:<digest-type>: the hexadecimal representation of the digest with
2032 that name. Like the size, it is used to validate what was retrieved by
2032 that name. Like the size, it is used to validate what was retrieved by
2033 the client matches what the server knows about the bundle.
2033 the client matches what the server knows about the bundle.
2034
2034
2035 When multiple digest types are given, all of them are checked.
2035 When multiple digest types are given, all of them are checked.
2036 """
2036 """
2037 try:
2037 try:
2038 raw_url = inpart.params[b'url']
2038 raw_url = inpart.params[b'url']
2039 except KeyError:
2039 except KeyError:
2040 raise error.Abort(_(b'remote-changegroup: missing "%s" param') % b'url')
2040 raise error.Abort(_(b'remote-changegroup: missing "%s" param') % b'url')
2041 parsed_url = util.url(raw_url)
2041 parsed_url = util.url(raw_url)
2042 if parsed_url.scheme not in capabilities[b'remote-changegroup']:
2042 if parsed_url.scheme not in capabilities[b'remote-changegroup']:
2043 raise error.Abort(
2043 raise error.Abort(
2044 _(b'remote-changegroup does not support %s urls')
2044 _(b'remote-changegroup does not support %s urls')
2045 % parsed_url.scheme
2045 % parsed_url.scheme
2046 )
2046 )
2047
2047
2048 try:
2048 try:
2049 size = int(inpart.params[b'size'])
2049 size = int(inpart.params[b'size'])
2050 except ValueError:
2050 except ValueError:
2051 raise error.Abort(
2051 raise error.Abort(
2052 _(b'remote-changegroup: invalid value for param "%s"') % b'size'
2052 _(b'remote-changegroup: invalid value for param "%s"') % b'size'
2053 )
2053 )
2054 except KeyError:
2054 except KeyError:
2055 raise error.Abort(
2055 raise error.Abort(
2056 _(b'remote-changegroup: missing "%s" param') % b'size'
2056 _(b'remote-changegroup: missing "%s" param') % b'size'
2057 )
2057 )
2058
2058
2059 digests = {}
2059 digests = {}
2060 for typ in inpart.params.get(b'digests', b'').split():
2060 for typ in inpart.params.get(b'digests', b'').split():
2061 param = b'digest:%s' % typ
2061 param = b'digest:%s' % typ
2062 try:
2062 try:
2063 value = inpart.params[param]
2063 value = inpart.params[param]
2064 except KeyError:
2064 except KeyError:
2065 raise error.Abort(
2065 raise error.Abort(
2066 _(b'remote-changegroup: missing "%s" param') % param
2066 _(b'remote-changegroup: missing "%s" param') % param
2067 )
2067 )
2068 digests[typ] = value
2068 digests[typ] = value
2069
2069
2070 real_part = util.digestchecker(url.open(op.ui, raw_url), size, digests)
2070 real_part = util.digestchecker(url.open(op.ui, raw_url), size, digests)
2071
2071
2072 tr = op.gettransaction()
2072 tr = op.gettransaction()
2073 from . import exchange
2073 from . import exchange
2074
2074
2075 cg = exchange.readbundle(op.repo.ui, real_part, raw_url)
2075 cg = exchange.readbundle(op.repo.ui, real_part, raw_url)
2076 if not isinstance(cg, changegroup.cg1unpacker):
2076 if not isinstance(cg, changegroup.cg1unpacker):
2077 raise error.Abort(
2077 raise error.Abort(
2078 _(b'%s: not a bundle version 1.0') % util.hidepassword(raw_url)
2078 _(b'%s: not a bundle version 1.0') % util.hidepassword(raw_url)
2079 )
2079 )
2080 ret = _processchangegroup(op, cg, tr, b'bundle2', b'bundle2')
2080 ret = _processchangegroup(op, cg, tr, b'bundle2', b'bundle2')
2081 if op.reply is not None:
2081 if op.reply is not None:
2082 # This is definitely not the final form of this
2082 # This is definitely not the final form of this
2083 # return. But one needs to start somewhere.
2083 # return. But one needs to start somewhere.
2084 part = op.reply.newpart(b'reply:changegroup')
2084 part = op.reply.newpart(b'reply:changegroup')
2085 part.addparam(
2085 part.addparam(
2086 b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
2086 b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
2087 )
2087 )
2088 part.addparam(b'return', b'%i' % ret, mandatory=False)
2088 part.addparam(b'return', b'%i' % ret, mandatory=False)
2089 try:
2089 try:
2090 real_part.validate()
2090 real_part.validate()
2091 except error.Abort as e:
2091 except error.Abort as e:
2092 raise error.Abort(
2092 raise error.Abort(
2093 _(b'bundle at %s is corrupted:\n%s')
2093 _(b'bundle at %s is corrupted:\n%s')
2094 % (util.hidepassword(raw_url), bytes(e))
2094 % (util.hidepassword(raw_url), bytes(e))
2095 )
2095 )
2096 assert not inpart.read()
2096 assert not inpart.read()
2097
2097
2098
2098
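# Editorial sketch (illustration, not part of bundle2.py): the size and digest
# validation that the digestchecker wrapper used above performs on the remote
# bundle10 stream, reduced to a single sha1 digest with hashlib standing in for
# util.DIGESTS.
import hashlib

def validate(payload, expected_size, expected_sha1_hex):
    if len(payload) != expected_size:
        raise ValueError('size mismatch')
    if hashlib.sha1(payload).hexdigest() != expected_sha1_hex:
        raise ValueError('sha1 mismatch')

data = b'example bundle10 payload'
validate(data, len(data), hashlib.sha1(data).hexdigest())  # passes silently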
2099 @parthandler(b'reply:changegroup', (b'return', b'in-reply-to'))
2099 @parthandler(b'reply:changegroup', (b'return', b'in-reply-to'))
2100 def handlereplychangegroup(op, inpart):
2100 def handlereplychangegroup(op, inpart):
2101 ret = int(inpart.params[b'return'])
2101 ret = int(inpart.params[b'return'])
2102 replyto = int(inpart.params[b'in-reply-to'])
2102 replyto = int(inpart.params[b'in-reply-to'])
2103 op.records.add(b'changegroup', {b'return': ret}, replyto)
2103 op.records.add(b'changegroup', {b'return': ret}, replyto)
2104
2104
2105
2105
2106 @parthandler(b'check:bookmarks')
2106 @parthandler(b'check:bookmarks')
2107 def handlecheckbookmarks(op, inpart):
2107 def handlecheckbookmarks(op, inpart):
2108 """check location of bookmarks
2108 """check location of bookmarks
2109
2109
2110 This part is to be used to detect push races regarding bookmarks; it
2110 This part is to be used to detect push races regarding bookmarks; it
2111 contains binary encoded (bookmark, node) tuples. If the local state does
2111 contains binary encoded (bookmark, node) tuples. If the local state does
2112 not match the one in the part, a PushRaced exception is raised
2112 not match the one in the part, a PushRaced exception is raised
2113 """
2113 """
2114 bookdata = bookmarks.binarydecode(inpart)
2114 bookdata = bookmarks.binarydecode(inpart)
2115
2115
2116 msgstandard = (
2116 msgstandard = (
2117 b'remote repository changed while pushing - please try again '
2117 b'remote repository changed while pushing - please try again '
2118 b'(bookmark "%s" move from %s to %s)'
2118 b'(bookmark "%s" move from %s to %s)'
2119 )
2119 )
2120 msgmissing = (
2120 msgmissing = (
2121 b'remote repository changed while pushing - please try again '
2121 b'remote repository changed while pushing - please try again '
2122 b'(bookmark "%s" is missing, expected %s)'
2122 b'(bookmark "%s" is missing, expected %s)'
2123 )
2123 )
2124 msgexist = (
2124 msgexist = (
2125 b'remote repository changed while pushing - please try again '
2125 b'remote repository changed while pushing - please try again '
2126 b'(bookmark "%s" set on %s, expected missing)'
2126 b'(bookmark "%s" set on %s, expected missing)'
2127 )
2127 )
2128 for book, node in bookdata:
2128 for book, node in bookdata:
2129 currentnode = op.repo._bookmarks.get(book)
2129 currentnode = op.repo._bookmarks.get(book)
2130 if currentnode != node:
2130 if currentnode != node:
2131 if node is None:
2131 if node is None:
2132 finalmsg = msgexist % (book, nodemod.short(currentnode))
2132 finalmsg = msgexist % (book, nodemod.short(currentnode))
2133 elif currentnode is None:
2133 elif currentnode is None:
2134 finalmsg = msgmissing % (book, nodemod.short(node))
2134 finalmsg = msgmissing % (book, nodemod.short(node))
2135 else:
2135 else:
2136 finalmsg = msgstandard % (
2136 finalmsg = msgstandard % (
2137 book,
2137 book,
2138 nodemod.short(node),
2138 nodemod.short(node),
2139 nodemod.short(currentnode),
2139 nodemod.short(currentnode),
2140 )
2140 )
2141 raise error.PushRaced(finalmsg)
2141 raise error.PushRaced(finalmsg)
2142
2142
2143
2143
2144 @parthandler(b'check:heads')
2144 @parthandler(b'check:heads')
2145 def handlecheckheads(op, inpart):
2145 def handlecheckheads(op, inpart):
2146 check that the heads of the repo did not change
2146 check that the heads of the repo did not change
2147
2147
2148 This is used to detect a push race when using unbundle.
2148 This is used to detect a push race when using unbundle.
2149 This replaces the "heads" argument of unbundle."""
2149 This replaces the "heads" argument of unbundle."""
2150 h = inpart.read(20)
2150 h = inpart.read(20)
2151 heads = []
2151 heads = []
2152 while len(h) == 20:
2152 while len(h) == 20:
2153 heads.append(h)
2153 heads.append(h)
2154 h = inpart.read(20)
2154 h = inpart.read(20)
2155 assert not h
2155 assert not h
2156 # Trigger a transaction so that we are guaranteed to have the lock now.
2156 # Trigger a transaction so that we are guaranteed to have the lock now.
2157 if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
2157 if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
2158 op.gettransaction()
2158 op.gettransaction()
2159 if sorted(heads) != sorted(op.repo.heads()):
2159 if sorted(heads) != sorted(op.repo.heads()):
2160 raise error.PushRaced(
2160 raise error.PushRaced(
2161 b'remote repository changed while pushing - please try again'
2161 b'remote repository changed while pushing - please try again'
2162 )
2162 )
2163
2163
2164
2164
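# Editorial sketch (illustration, not part of bundle2.py): handlecheckheads()
# above (and the similar check:updated-heads handler that follows) reads
# fixed-width 20-byte binary node hashes back to back until the part payload
# runs out.  The hashes below are made up.
import io

payload = b'\x01' * 20 + b'\x02' * 20
fp = io.BytesIO(payload)

heads = []
h = fp.read(20)
while len(h) == 20:
    heads.append(h)
    h = fp.read(20)

assert heads == [b'\x01' * 20, b'\x02' * 20]
assert h == b''   # nothing trailing, matching the assert in the handlers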
2165 @parthandler(b'check:updated-heads')
2165 @parthandler(b'check:updated-heads')
2166 def handlecheckupdatedheads(op, inpart):
2166 def handlecheckupdatedheads(op, inpart):
2167 """check for race on the heads touched by a push
2167 """check for race on the heads touched by a push
2168
2168
2169 This is similar to 'check:heads' but focuses on the heads actually updated
2169 This is similar to 'check:heads' but focuses on the heads actually updated
2170 during the push. If other activities happen on unrelated heads, they are
2170 during the push. If other activities happen on unrelated heads, they are
2171 ignored.
2171 ignored.
2172
2172
2173 This allows servers with high traffic to avoid push contention as long as
2173 This allows servers with high traffic to avoid push contention as long as
2174 unrelated parts of the graph are involved."""
2174 unrelated parts of the graph are involved."""
2175 h = inpart.read(20)
2175 h = inpart.read(20)
2176 heads = []
2176 heads = []
2177 while len(h) == 20:
2177 while len(h) == 20:
2178 heads.append(h)
2178 heads.append(h)
2179 h = inpart.read(20)
2179 h = inpart.read(20)
2180 assert not h
2180 assert not h
2181 # trigger a transaction so that we are guaranteed to have the lock now.
2181 # trigger a transaction so that we are guaranteed to have the lock now.
2182 if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
2182 if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
2183 op.gettransaction()
2183 op.gettransaction()
2184
2184
2185 currentheads = set()
2185 currentheads = set()
2186 for ls in op.repo.branchmap().iterheads():
2186 for ls in op.repo.branchmap().iterheads():
2187 currentheads.update(ls)
2187 currentheads.update(ls)
2188
2188
2189 for h in heads:
2189 for h in heads:
2190 if h not in currentheads:
2190 if h not in currentheads:
2191 raise error.PushRaced(
2191 raise error.PushRaced(
2192 b'remote repository changed while pushing - '
2192 b'remote repository changed while pushing - '
2193 b'please try again'
2193 b'please try again'
2194 )
2194 )
2195
2195
2196
2196
2197 @parthandler(b'check:phases')
2197 @parthandler(b'check:phases')
2198 def handlecheckphases(op, inpart):
2198 def handlecheckphases(op, inpart):
2199 """check that phase boundaries of the repository did not change
2199 """check that phase boundaries of the repository did not change
2200
2200
2201 This is used to detect a push race.
2201 This is used to detect a push race.
2202 """
2202 """
2203 phasetonodes = phases.binarydecode(inpart)
2203 phasetonodes = phases.binarydecode(inpart)
2204 unfi = op.repo.unfiltered()
2204 unfi = op.repo.unfiltered()
2205 cl = unfi.changelog
2205 cl = unfi.changelog
2206 phasecache = unfi._phasecache
2206 phasecache = unfi._phasecache
2207 msg = (
2207 msg = (
2208 b'remote repository changed while pushing - please try again '
2208 b'remote repository changed while pushing - please try again '
2209 b'(%s is %s expected %s)'
2209 b'(%s is %s expected %s)'
2210 )
2210 )
2211 for expectedphase, nodes in pycompat.iteritems(phasetonodes):
2211 for expectedphase, nodes in pycompat.iteritems(phasetonodes):
2212 for n in nodes:
2212 for n in nodes:
2213 actualphase = phasecache.phase(unfi, cl.rev(n))
2213 actualphase = phasecache.phase(unfi, cl.rev(n))
2214 if actualphase != expectedphase:
2214 if actualphase != expectedphase:
2215 finalmsg = msg % (
2215 finalmsg = msg % (
2216 nodemod.short(n),
2216 nodemod.short(n),
2217 phases.phasenames[actualphase],
2217 phases.phasenames[actualphase],
2218 phases.phasenames[expectedphase],
2218 phases.phasenames[expectedphase],
2219 )
2219 )
2220 raise error.PushRaced(finalmsg)
2220 raise error.PushRaced(finalmsg)
2221
2221
2222
2222
2223 @parthandler(b'output')
2223 @parthandler(b'output')
2224 def handleoutput(op, inpart):
2224 def handleoutput(op, inpart):
2225 """forward output captured on the server to the client"""
2225 """forward output captured on the server to the client"""
2226 for line in inpart.read().splitlines():
2226 for line in inpart.read().splitlines():
2227 op.ui.status(_(b'remote: %s\n') % line)
2227 op.ui.status(_(b'remote: %s\n') % line)
2228
2228
2229
2229
2230 @parthandler(b'replycaps')
2230 @parthandler(b'replycaps')
2231 def handlereplycaps(op, inpart):
2231 def handlereplycaps(op, inpart):
2232 """Notify that a reply bundle should be created
2232 """Notify that a reply bundle should be created
2233
2233
2234 The payload contains the capabilities information for the reply"""
2234 The payload contains the capabilities information for the reply"""
2235 caps = decodecaps(inpart.read())
2235 caps = decodecaps(inpart.read())
2236 if op.reply is None:
2236 if op.reply is None:
2237 op.reply = bundle20(op.ui, caps)
2237 op.reply = bundle20(op.ui, caps)
2238
2238
2239
2239
2240 class AbortFromPart(error.Abort):
2240 class AbortFromPart(error.Abort):
2241 """Sub-class of Abort that denotes an error from a bundle2 part."""
2241 """Sub-class of Abort that denotes an error from a bundle2 part."""
2242
2242
2243
2243
2244 @parthandler(b'error:abort', (b'message', b'hint'))
2244 @parthandler(b'error:abort', (b'message', b'hint'))
2245 def handleerrorabort(op, inpart):
2245 def handleerrorabort(op, inpart):
2246 """Used to transmit abort error over the wire"""
2246 """Used to transmit abort error over the wire"""
2247 raise AbortFromPart(
2247 raise AbortFromPart(
2248 inpart.params[b'message'], hint=inpart.params.get(b'hint')
2248 inpart.params[b'message'], hint=inpart.params.get(b'hint')
2249 )
2249 )
2250
2250
2251
2251
2252 @parthandler(
2252 @parthandler(
2253 b'error:pushkey',
2253 b'error:pushkey',
2254 (b'namespace', b'key', b'new', b'old', b'ret', b'in-reply-to'),
2254 (b'namespace', b'key', b'new', b'old', b'ret', b'in-reply-to'),
2255 )
2255 )
2256 def handleerrorpushkey(op, inpart):
2256 def handleerrorpushkey(op, inpart):
2257 """Used to transmit failure of a mandatory pushkey over the wire"""
2257 """Used to transmit failure of a mandatory pushkey over the wire"""
2258 kwargs = {}
2258 kwargs = {}
2259 for name in (b'namespace', b'key', b'new', b'old', b'ret'):
2259 for name in (b'namespace', b'key', b'new', b'old', b'ret'):
2260 value = inpart.params.get(name)
2260 value = inpart.params.get(name)
2261 if value is not None:
2261 if value is not None:
2262 kwargs[name] = value
2262 kwargs[name] = value
2263 raise error.PushkeyFailed(
2263 raise error.PushkeyFailed(
2264 inpart.params[b'in-reply-to'], **pycompat.strkwargs(kwargs)
2264 inpart.params[b'in-reply-to'], **pycompat.strkwargs(kwargs)
2265 )
2265 )
2266
2266
2267
2267
2268 @parthandler(b'error:unsupportedcontent', (b'parttype', b'params'))
2268 @parthandler(b'error:unsupportedcontent', (b'parttype', b'params'))
2269 def handleerrorunsupportedcontent(op, inpart):
2269 def handleerrorunsupportedcontent(op, inpart):
2270 """Used to transmit unknown content error over the wire"""
2270 """Used to transmit unknown content error over the wire"""
2271 kwargs = {}
2271 kwargs = {}
2272 parttype = inpart.params.get(b'parttype')
2272 parttype = inpart.params.get(b'parttype')
2273 if parttype is not None:
2273 if parttype is not None:
2274 kwargs[b'parttype'] = parttype
2274 kwargs[b'parttype'] = parttype
2275 params = inpart.params.get(b'params')
2275 params = inpart.params.get(b'params')
2276 if params is not None:
2276 if params is not None:
2277 kwargs[b'params'] = params.split(b'\0')
2277 kwargs[b'params'] = params.split(b'\0')
2278
2278
2279 raise error.BundleUnknownFeatureError(**pycompat.strkwargs(kwargs))
2279 raise error.BundleUnknownFeatureError(**pycompat.strkwargs(kwargs))
2280
2280
2281
2281
2282 @parthandler(b'error:pushraced', (b'message',))
2282 @parthandler(b'error:pushraced', (b'message',))
2283 def handleerrorpushraced(op, inpart):
2283 def handleerrorpushraced(op, inpart):
2284 """Used to transmit push race error over the wire"""
2284 """Used to transmit push race error over the wire"""
2285 raise error.ResponseError(_(b'push failed:'), inpart.params[b'message'])
2285 raise error.ResponseError(_(b'push failed:'), inpart.params[b'message'])
2286
2286
2287
2287
2288 @parthandler(b'listkeys', (b'namespace',))
2288 @parthandler(b'listkeys', (b'namespace',))
2289 def handlelistkeys(op, inpart):
2289 def handlelistkeys(op, inpart):
2290 """retrieve pushkey namespace content stored in a bundle2"""
2290 """retrieve pushkey namespace content stored in a bundle2"""
2291 namespace = inpart.params[b'namespace']
2291 namespace = inpart.params[b'namespace']
2292 r = pushkey.decodekeys(inpart.read())
2292 r = pushkey.decodekeys(inpart.read())
2293 op.records.add(b'listkeys', (namespace, r))
2293 op.records.add(b'listkeys', (namespace, r))
2294
2294
2295
2295
2296 @parthandler(b'pushkey', (b'namespace', b'key', b'old', b'new'))
2296 @parthandler(b'pushkey', (b'namespace', b'key', b'old', b'new'))
2297 def handlepushkey(op, inpart):
2297 def handlepushkey(op, inpart):
2298 """process a pushkey request"""
2298 """process a pushkey request"""
2299 dec = pushkey.decode
2299 dec = pushkey.decode
2300 namespace = dec(inpart.params[b'namespace'])
2300 namespace = dec(inpart.params[b'namespace'])
2301 key = dec(inpart.params[b'key'])
2301 key = dec(inpart.params[b'key'])
2302 old = dec(inpart.params[b'old'])
2302 old = dec(inpart.params[b'old'])
2303 new = dec(inpart.params[b'new'])
2303 new = dec(inpart.params[b'new'])
2304 # Grab the transaction to ensure that we have the lock before performing the
2304 # Grab the transaction to ensure that we have the lock before performing the
2305 # pushkey.
2305 # pushkey.
2306 if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
2306 if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
2307 op.gettransaction()
2307 op.gettransaction()
2308 ret = op.repo.pushkey(namespace, key, old, new)
2308 ret = op.repo.pushkey(namespace, key, old, new)
2309 record = {b'namespace': namespace, b'key': key, b'old': old, b'new': new}
2309 record = {b'namespace': namespace, b'key': key, b'old': old, b'new': new}
2310 op.records.add(b'pushkey', record)
2310 op.records.add(b'pushkey', record)
2311 if op.reply is not None:
2311 if op.reply is not None:
2312 rpart = op.reply.newpart(b'reply:pushkey')
2312 rpart = op.reply.newpart(b'reply:pushkey')
2313 rpart.addparam(
2313 rpart.addparam(
2314 b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
2314 b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
2315 )
2315 )
2316 rpart.addparam(b'return', b'%i' % ret, mandatory=False)
2316 rpart.addparam(b'return', b'%i' % ret, mandatory=False)
2317 if inpart.mandatory and not ret:
2317 if inpart.mandatory and not ret:
2318 kwargs = {}
2318 kwargs = {}
2319 for key in (b'namespace', b'key', b'new', b'old', b'ret'):
2319 for key in (b'namespace', b'key', b'new', b'old', b'ret'):
2320 if key in inpart.params:
2320 if key in inpart.params:
2321 kwargs[key] = inpart.params[key]
2321 kwargs[key] = inpart.params[key]
2322 raise error.PushkeyFailed(
2322 raise error.PushkeyFailed(
2323 partid=b'%d' % inpart.id, **pycompat.strkwargs(kwargs)
2323 partid=b'%d' % inpart.id, **pycompat.strkwargs(kwargs)
2324 )
2324 )
2325
2325
2326
2326
2327 @parthandler(b'bookmarks')
2327 @parthandler(b'bookmarks')
2328 def handlebookmark(op, inpart):
2328 def handlebookmark(op, inpart):
2329 """transmit bookmark information
2329 """transmit bookmark information
2330
2330
2331 The part contains binary encoded bookmark information.
2331 The part contains binary encoded bookmark information.
2332
2332
2333 The exact behavior of this part can be controlled by the 'bookmarks' mode
2333 The exact behavior of this part can be controlled by the 'bookmarks' mode
2334 on the bundle operation.
2334 on the bundle operation.
2335
2335
2336 When mode is 'apply' (the default) the bookmark information is applied as
2336 When mode is 'apply' (the default) the bookmark information is applied as
2337 is to the unbundling repository. Make sure a 'check:bookmarks' part is
2337 is to the unbundling repository. Make sure a 'check:bookmarks' part is
2338 issued earlier to check for push races in such an update. This behavior is
2338 issued earlier to check for push races in such an update. This behavior is
2339 suitable for pushing.
2339 suitable for pushing.
2340
2340
2341 When mode is 'records', the information is recorded into the 'bookmarks'
2341 When mode is 'records', the information is recorded into the 'bookmarks'
2342 records of the bundle operation. This behavior is suitable for pulling.
2342 records of the bundle operation. This behavior is suitable for pulling.
2343 """
2343 """
2344 changes = bookmarks.binarydecode(inpart)
2344 changes = bookmarks.binarydecode(inpart)
2345
2345
2346 pushkeycompat = op.repo.ui.configbool(
2346 pushkeycompat = op.repo.ui.configbool(
2347 b'server', b'bookmarks-pushkey-compat'
2347 b'server', b'bookmarks-pushkey-compat'
2348 )
2348 )
2349 bookmarksmode = op.modes.get(b'bookmarks', b'apply')
2349 bookmarksmode = op.modes.get(b'bookmarks', b'apply')
2350
2350
2351 if bookmarksmode == b'apply':
2351 if bookmarksmode == b'apply':
2352 tr = op.gettransaction()
2352 tr = op.gettransaction()
2353 bookstore = op.repo._bookmarks
2353 bookstore = op.repo._bookmarks
2354 if pushkeycompat:
2354 if pushkeycompat:
2355 allhooks = []
2355 allhooks = []
2356 for book, node in changes:
2356 for book, node in changes:
2357 hookargs = tr.hookargs.copy()
2357 hookargs = tr.hookargs.copy()
2358 hookargs[b'pushkeycompat'] = b'1'
2358 hookargs[b'pushkeycompat'] = b'1'
2359 hookargs[b'namespace'] = b'bookmarks'
2359 hookargs[b'namespace'] = b'bookmarks'
2360 hookargs[b'key'] = book
2360 hookargs[b'key'] = book
2361 hookargs[b'old'] = nodemod.hex(bookstore.get(book, b''))
2361 hookargs[b'old'] = nodemod.hex(bookstore.get(book, b''))
2362 hookargs[b'new'] = nodemod.hex(
2362 hookargs[b'new'] = nodemod.hex(
2363 node if node is not None else b''
2363 node if node is not None else b''
2364 )
2364 )
2365 allhooks.append(hookargs)
2365 allhooks.append(hookargs)
2366
2366
2367 for hookargs in allhooks:
2367 for hookargs in allhooks:
2368 op.repo.hook(
2368 op.repo.hook(
2369 b'prepushkey', throw=True, **pycompat.strkwargs(hookargs)
2369 b'prepushkey', throw=True, **pycompat.strkwargs(hookargs)
2370 )
2370 )
2371
2371
2372 for book, node in changes:
2372 for book, node in changes:
2373 if bookmarks.isdivergent(book):
2373 if bookmarks.isdivergent(book):
2374 msg = _(b'cannot accept divergent bookmark %s!') % book
2374 msg = _(b'cannot accept divergent bookmark %s!') % book
2375 raise error.Abort(msg)
2375 raise error.Abort(msg)
2376
2376
2377 bookstore.applychanges(op.repo, op.gettransaction(), changes)
2377 bookstore.applychanges(op.repo, op.gettransaction(), changes)
2378
2378
2379 if pushkeycompat:
2379 if pushkeycompat:
2380
2380
2381 def runhook(unused_success):
2381 def runhook(unused_success):
2382 for hookargs in allhooks:
2382 for hookargs in allhooks:
2383 op.repo.hook(b'pushkey', **pycompat.strkwargs(hookargs))
2383 op.repo.hook(b'pushkey', **pycompat.strkwargs(hookargs))
2384
2384
2385 op.repo._afterlock(runhook)
2385 op.repo._afterlock(runhook)
2386
2386
2387 elif bookmarksmode == b'records':
2387 elif bookmarksmode == b'records':
2388 for book, node in changes:
2388 for book, node in changes:
2389 record = {b'bookmark': book, b'node': node}
2389 record = {b'bookmark': book, b'node': node}
2390 op.records.add(b'bookmarks', record)
2390 op.records.add(b'bookmarks', record)
2391 else:
2391 else:
2392 raise error.ProgrammingError(
2392 raise error.ProgrammingError(
2393 b'unknown bookmark mode: %s' % bookmarksmode
2393 b'unknown bookmark mode: %s' % bookmarksmode
2394 )
2394 )
2395
2395
2396
2396
2397 @parthandler(b'phase-heads')
2397 @parthandler(b'phase-heads')
2398 def handlephases(op, inpart):
2398 def handlephases(op, inpart):
2399 """apply phases from bundle part to repo"""
2399 """apply phases from bundle part to repo"""
2400 headsbyphase = phases.binarydecode(inpart)
2400 headsbyphase = phases.binarydecode(inpart)
2401 phases.updatephases(op.repo.unfiltered(), op.gettransaction, headsbyphase)
2401 phases.updatephases(op.repo.unfiltered(), op.gettransaction, headsbyphase)
2402
2402
2403
2403
2404 @parthandler(b'reply:pushkey', (b'return', b'in-reply-to'))
2404 @parthandler(b'reply:pushkey', (b'return', b'in-reply-to'))
2405 def handlepushkeyreply(op, inpart):
2405 def handlepushkeyreply(op, inpart):
2406 """retrieve the result of a pushkey request"""
2406 """retrieve the result of a pushkey request"""
2407 ret = int(inpart.params[b'return'])
2407 ret = int(inpart.params[b'return'])
2408 partid = int(inpart.params[b'in-reply-to'])
2408 partid = int(inpart.params[b'in-reply-to'])
2409 op.records.add(b'pushkey', {b'return': ret}, partid)
2409 op.records.add(b'pushkey', {b'return': ret}, partid)
2410
2410
2411
2411
2412 @parthandler(b'obsmarkers')
2412 @parthandler(b'obsmarkers')
2413 def handleobsmarker(op, inpart):
2413 def handleobsmarker(op, inpart):
2414 """add a stream of obsmarkers to the repo"""
2414 """add a stream of obsmarkers to the repo"""
2415 tr = op.gettransaction()
2415 tr = op.gettransaction()
2416 markerdata = inpart.read()
2416 markerdata = inpart.read()
2417 if op.ui.config(b'experimental', b'obsmarkers-exchange-debug'):
2417 if op.ui.config(b'experimental', b'obsmarkers-exchange-debug'):
2418 op.ui.writenoi18n(
2418 op.ui.writenoi18n(
2419 b'obsmarker-exchange: %i bytes received\n' % len(markerdata)
2419 b'obsmarker-exchange: %i bytes received\n' % len(markerdata)
2420 )
2420 )
2421 # The mergemarkers call will crash if marker creation is not enabled.
2421 # The mergemarkers call will crash if marker creation is not enabled.
2422 # we want to avoid this if the part is advisory.
2422 # we want to avoid this if the part is advisory.
2423 if not inpart.mandatory and op.repo.obsstore.readonly:
2423 if not inpart.mandatory and op.repo.obsstore.readonly:
2424 op.repo.ui.debug(
2424 op.repo.ui.debug(
2425 b'ignoring obsolescence markers, feature not enabled\n'
2425 b'ignoring obsolescence markers, feature not enabled\n'
2426 )
2426 )
2427 return
2427 return
2428 new = op.repo.obsstore.mergemarkers(tr, markerdata)
2428 new = op.repo.obsstore.mergemarkers(tr, markerdata)
2429 op.repo.invalidatevolatilesets()
2429 op.repo.invalidatevolatilesets()
2430 op.records.add(b'obsmarkers', {b'new': new})
2430 op.records.add(b'obsmarkers', {b'new': new})
2431 if op.reply is not None:
2431 if op.reply is not None:
2432 rpart = op.reply.newpart(b'reply:obsmarkers')
2432 rpart = op.reply.newpart(b'reply:obsmarkers')
2433 rpart.addparam(
2433 rpart.addparam(
2434 b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
2434 b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
2435 )
2435 )
2436 rpart.addparam(b'new', b'%i' % new, mandatory=False)
2436 rpart.addparam(b'new', b'%i' % new, mandatory=False)
2437
2437
2438
2438
2439 @parthandler(b'reply:obsmarkers', (b'new', b'in-reply-to'))
2439 @parthandler(b'reply:obsmarkers', (b'new', b'in-reply-to'))
2440 def handleobsmarkerreply(op, inpart):
2440 def handleobsmarkerreply(op, inpart):
2441 """retrieve the result of a pushkey request"""
2441 """retrieve the result of a pushkey request"""
2442 ret = int(inpart.params[b'new'])
2442 ret = int(inpart.params[b'new'])
2443 partid = int(inpart.params[b'in-reply-to'])
2443 partid = int(inpart.params[b'in-reply-to'])
2444 op.records.add(b'obsmarkers', {b'new': ret}, partid)
2444 op.records.add(b'obsmarkers', {b'new': ret}, partid)
2445
2445
2446
2446
2447 @parthandler(b'hgtagsfnodes')
2447 @parthandler(b'hgtagsfnodes')
2448 def handlehgtagsfnodes(op, inpart):
2448 def handlehgtagsfnodes(op, inpart):
2449 """Applies .hgtags fnodes cache entries to the local repo.
2449 """Applies .hgtags fnodes cache entries to the local repo.
2450
2450
2451 Payload is pairs of 20 byte changeset nodes and filenodes.
2451 Payload is pairs of 20 byte changeset nodes and filenodes.
2452 """
2452 """
2453 # Grab the transaction so we ensure that we have the lock at this point.
2453 # Grab the transaction so we ensure that we have the lock at this point.
2454 if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
2454 if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
2455 op.gettransaction()
2455 op.gettransaction()
2456 cache = tags.hgtagsfnodescache(op.repo.unfiltered())
2456 cache = tags.hgtagsfnodescache(op.repo.unfiltered())
2457
2457
2458 count = 0
2458 count = 0
2459 while True:
2459 while True:
2460 node = inpart.read(20)
2460 node = inpart.read(20)
2461 fnode = inpart.read(20)
2461 fnode = inpart.read(20)
2462 if len(node) < 20 or len(fnode) < 20:
2462 if len(node) < 20 or len(fnode) < 20:
2463 op.ui.debug(b'ignoring incomplete received .hgtags fnodes data\n')
2463 op.ui.debug(b'ignoring incomplete received .hgtags fnodes data\n')
2464 break
2464 break
2465 cache.setfnode(node, fnode)
2465 cache.setfnode(node, fnode)
2466 count += 1
2466 count += 1
2467
2467
2468 cache.write()
2468 cache.write()
2469 op.ui.debug(b'applied %i hgtags fnodes cache entries\n' % count)
2469 op.ui.debug(b'applied %i hgtags fnodes cache entries\n' % count)
2470
2470
2471
2471
2472 rbcstruct = struct.Struct(b'>III')
2472 rbcstruct = struct.Struct(b'>III')
2473
2473
2474
2474
2475 @parthandler(b'cache:rev-branch-cache')
2475 @parthandler(b'cache:rev-branch-cache')
2476 def handlerbc(op, inpart):
2476 def handlerbc(op, inpart):
2477 """receive a rev-branch-cache payload and update the local cache
2477 """receive a rev-branch-cache payload and update the local cache
2478
2478
2479 The payload is a series of records, one per branch, each consisting of:
2479 The payload is a series of records, one per branch, each consisting of:
2480
2480
2481 1) branch name length
2481 1) branch name length
2482 2) number of open heads
2482 2) number of open heads
2483 3) number of closed heads
2483 3) number of closed heads
2484 4) open heads nodes
2484 4) open heads nodes
2485 5) closed heads nodes
2485 5) closed heads nodes
2486 """
2486 """
2487 total = 0
2487 total = 0
2488 rawheader = inpart.read(rbcstruct.size)
2488 rawheader = inpart.read(rbcstruct.size)
2489 cache = op.repo.revbranchcache()
2489 cache = op.repo.revbranchcache()
2490 cl = op.repo.unfiltered().changelog
2490 cl = op.repo.unfiltered().changelog
2491 while rawheader:
2491 while rawheader:
2492 header = rbcstruct.unpack(rawheader)
2492 header = rbcstruct.unpack(rawheader)
2493 total += header[1] + header[2]
2493 total += header[1] + header[2]
2494 utf8branch = inpart.read(header[0])
2494 utf8branch = inpart.read(header[0])
2495 branch = encoding.tolocal(utf8branch)
2495 branch = encoding.tolocal(utf8branch)
2496 for x in pycompat.xrange(header[1]):
2496 for x in pycompat.xrange(header[1]):
2497 node = inpart.read(20)
2497 node = inpart.read(20)
2498 rev = cl.rev(node)
2498 rev = cl.rev(node)
2499 cache.setdata(branch, rev, node, False)
2499 cache.setdata(branch, rev, node, False)
2500 for x in pycompat.xrange(header[2]):
2500 for x in pycompat.xrange(header[2]):
2501 node = inpart.read(20)
2501 node = inpart.read(20)
2502 rev = cl.rev(node)
2502 rev = cl.rev(node)
2503 cache.setdata(branch, rev, node, True)
2503 cache.setdata(branch, rev, node, True)
2504 rawheader = inpart.read(rbcstruct.size)
2504 rawheader = inpart.read(rbcstruct.size)
2505 cache.write()
2505 cache.write()
2506
2506
2507
2507
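As a worked example of the payload layout listed in the docstring above, one branch record is a '>III' header (branch name length, open head count, closed head count) followed by the UTF-8 branch name and the raw 20-byte head nodes. The encoder below is only a sketch for this note; the real producer lives on the bundle-generation side of Mercurial:

    import struct

    rbcstruct = struct.Struct(b'>III')

    def encode_branch_record(branch_utf8, open_heads, closed_heads):
        # open_heads / closed_heads: lists of 20-byte binary nodes
        header = rbcstruct.pack(len(branch_utf8), len(open_heads), len(closed_heads))
        return header + branch_utf8 + b''.join(open_heads) + b''.join(closed_heads)

    # e.g. a 'default' branch with one open head and no closed heads:
    payload = encode_branch_record(b'default', [b'\x11' * 20], [])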
2508 @parthandler(b'pushvars')
2508 @parthandler(b'pushvars')
2509 def bundle2getvars(op, part):
2509 def bundle2getvars(op, part):
2510 '''unbundle a bundle2 containing shellvars on the server'''
2510 '''unbundle a bundle2 containing shellvars on the server'''
2511 # An option to disable unbundling on server-side for security reasons
2511 # An option to disable unbundling on server-side for security reasons
2512 if op.ui.configbool(b'push', b'pushvars.server'):
2512 if op.ui.configbool(b'push', b'pushvars.server'):
2513 hookargs = {}
2513 hookargs = {}
2514 for key, value in part.advisoryparams:
2514 for key, value in part.advisoryparams:
2515 key = key.upper()
2515 key = key.upper()
2516 # We want pushed variables to have USERVAR_ prepended so we know
2516 # We want pushed variables to have USERVAR_ prepended so we know
2517 # they came from the --pushvar flag.
2517 # they came from the --pushvar flag.
2518 key = b"USERVAR_" + key
2518 key = b"USERVAR_" + key
2519 hookargs[key] = value
2519 hookargs[key] = value
2520 op.addhookargs(hookargs)
2520 op.addhookargs(hookargs)
2521
2521
2522
2522
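As a usage note, this part is what carries `hg push --pushvar KEY=VALUE` data to the server; each advisory param becomes a hook argument named USERVAR_KEY, which shell hooks then see with the usual HG_ prefix (e.g. HG_USERVAR_KEY) -- stated here as my understanding of the feature rather than a guarantee. The key transformation performed above, in isolation:

    def pushvars_to_hookargs(advisoryparams):
        # advisoryparams: iterable of (key, value) byte-string pairs
        hookargs = {}
        for key, value in advisoryparams:
            hookargs[b'USERVAR_' + key.upper()] = value
        return hookargs

    assert pushvars_to_hookargs([(b'debug', b'1')]) == {b'USERVAR_DEBUG': b'1'}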
2523 @parthandler(b'stream2', (b'requirements', b'filecount', b'bytecount'))
2523 @parthandler(b'stream2', (b'requirements', b'filecount', b'bytecount'))
2524 def handlestreamv2bundle(op, part):
2524 def handlestreamv2bundle(op, part):
2525
2525
2526 requirements = urlreq.unquote(part.params[b'requirements']).split(b',')
2526 requirements = urlreq.unquote(part.params[b'requirements']).split(b',')
2527 filecount = int(part.params[b'filecount'])
2527 filecount = int(part.params[b'filecount'])
2528 bytecount = int(part.params[b'bytecount'])
2528 bytecount = int(part.params[b'bytecount'])
2529
2529
2530 repo = op.repo
2530 repo = op.repo
2531 if len(repo):
2531 if len(repo):
2532 msg = _(b'cannot apply stream clone to non empty repository')
2532 msg = _(b'cannot apply stream clone to non empty repository')
2533 raise error.Abort(msg)
2533 raise error.Abort(msg)
2534
2534
2535 repo.ui.debug(b'applying stream bundle\n')
2535 repo.ui.debug(b'applying stream bundle\n')
2536 streamclone.applybundlev2(repo, part, filecount, bytecount, requirements)
2536 streamclone.applybundlev2(repo, part, filecount, bytecount, requirements)
2537
2537
2538
2538
2539 def widen_bundle(
2539 def widen_bundle(
2540 bundler, repo, oldmatcher, newmatcher, common, known, cgversion, ellipses
2540 bundler, repo, oldmatcher, newmatcher, common, known, cgversion, ellipses
2541 ):
2541 ):
2542 """generates bundle2 for widening a narrow clone
2542 """generates bundle2 for widening a narrow clone
2543
2543
2544 bundler is the bundle to which data should be added
2544 bundler is the bundle to which data should be added
2545 repo is the localrepository instance
2545 repo is the localrepository instance
2546 oldmatcher matches what the client already has
2546 oldmatcher matches what the client already has
2547 newmatcher matches what the client needs (including what it already has)
2547 newmatcher matches what the client needs (including what it already has)
2548 common is set of common heads between server and client
2548 common is set of common heads between server and client
2549 known is a set of revs known on the client side (used in ellipses)
2549 known is a set of revs known on the client side (used in ellipses)
2550 cgversion is the changegroup version to send
2550 cgversion is the changegroup version to send
2551 ellipses is a boolean value telling whether to send ellipses data or not
2551 ellipses is a boolean value telling whether to send ellipses data or not
2552
2552
2553 returns bundle2 of the data required for extending
2553 returns bundle2 of the data required for extending
2554 """
2554 """
2555 commonnodes = set()
2555 commonnodes = set()
2556 cl = repo.changelog
2556 cl = repo.changelog
2557 for r in repo.revs(b"::%ln", common):
2557 for r in repo.revs(b"::%ln", common):
2558 commonnodes.add(cl.node(r))
2558 commonnodes.add(cl.node(r))
2559 if commonnodes:
2559 if commonnodes:
2560 # XXX: we should only send the filelogs (and treemanifest). user
2560 # XXX: we should only send the filelogs (and treemanifest). user
2561 # already has the changelog and manifest
2561 # already has the changelog and manifest
2562 packer = changegroup.getbundler(
2562 packer = changegroup.getbundler(
2563 cgversion,
2563 cgversion,
2564 repo,
2564 repo,
2565 oldmatcher=oldmatcher,
2565 oldmatcher=oldmatcher,
2566 matcher=newmatcher,
2566 matcher=newmatcher,
2567 fullnodes=commonnodes,
2567 fullnodes=commonnodes,
2568 )
2568 )
2569 cgdata = packer.generate(
2569 cgdata = packer.generate(
2570 {nodemod.nullid},
2570 {nodemod.nullid},
2571 list(commonnodes),
2571 list(commonnodes),
2572 False,
2572 False,
2573 b'narrow_widen',
2573 b'narrow_widen',
2574 changelog=False,
2574 changelog=False,
2575 )
2575 )
2576
2576
2577 part = bundler.newpart(b'changegroup', data=cgdata)
2577 part = bundler.newpart(b'changegroup', data=cgdata)
2578 part.addparam(b'version', cgversion)
2578 part.addparam(b'version', cgversion)
2579 if b'treemanifest' in repo.requirements:
2579 if b'treemanifest' in repo.requirements:
2580 part.addparam(b'treemanifest', b'1')
2580 part.addparam(b'treemanifest', b'1')
2581 if b'exp-sidedata-flag' in repo.requirements:
2581 if b'exp-sidedata-flag' in repo.requirements:
2582 part.addparam(b'exp-sidedata', b'1')
2582 part.addparam(b'exp-sidedata', b'1')
2583
2583
2584 return bundler
2584 return bundler
@@ -1,1689 +1,1689 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import weakref
12 import weakref
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 hex,
16 hex,
17 nullid,
17 nullid,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21 from .pycompat import open
21 from .pycompat import open
22
22
23 from . import (
23 from . import (
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 mdiff,
26 mdiff,
27 phases,
27 phases,
28 pycompat,
28 pycompat,
29 util,
29 util,
30 )
30 )
31
31
32 from .interfaces import repository
32 from .interfaces import repository
33
33
34 _CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s")
34 _CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s")
35 _CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s")
35 _CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s")
36 _CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH")
36 _CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH")
37
37
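The three struct definitions above pin down the leading fixed-size fields of every delta chunk: version 01 carries four 20-byte hashes, version 02 adds an explicit delta base, and version 03 appends a 16-bit flags field (hence its '>' byte-order prefix). A small illustration of unpacking a made-up version-02 header:

    import struct

    header_v2 = struct.Struct(b"20s20s20s20s20s")   # same layout as _CHANGEGROUPV2_DELTA_HEADER
    raw = b'N' * 20 + b'P' * 20 + b'Q' * 20 + b'B' * 20 + b'C' * 20
    node, p1, p2, deltabase, cs = header_v2.unpack(raw)
    assert header_v2.size == 100 and deltabase == b'B' * 20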
38 LFS_REQUIREMENT = b'lfs'
38 LFS_REQUIREMENT = b'lfs'
39
39
40 readexactly = util.readexactly
40 readexactly = util.readexactly
41
41
42
42
43 def getchunk(stream):
43 def getchunk(stream):
44 """return the next chunk from stream as a string"""
44 """return the next chunk from stream as a string"""
45 d = readexactly(stream, 4)
45 d = readexactly(stream, 4)
46 l = struct.unpack(b">l", d)[0]
46 l = struct.unpack(b">l", d)[0]
47 if l <= 4:
47 if l <= 4:
48 if l:
48 if l:
49 raise error.Abort(_(b"invalid chunk length %d") % l)
49 raise error.Abort(_(b"invalid chunk length %d") % l)
50 return b""
50 return b""
51 return readexactly(stream, l - 4)
51 return readexactly(stream, l - 4)
52
52
53
53
54 def chunkheader(length):
54 def chunkheader(length):
55 """return a changegroup chunk header (string)"""
55 """return a changegroup chunk header (string)"""
56 return struct.pack(b">l", length + 4)
56 return struct.pack(b">l", length + 4)
57
57
58
58
59 def closechunk():
59 def closechunk():
60 """return a changegroup chunk header (string) for a zero-length chunk"""
60 """return a changegroup chunk header (string) for a zero-length chunk"""
61 return struct.pack(b">l", 0)
61 return struct.pack(b">l", 0)
62
62
63
63
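To make the framing above concrete: every chunk carries a 4-byte big-endian length prefix that counts the prefix itself, so a payload of n bytes is framed with n + 4 and the zero-length terminator is simply an encoded 0. A self-contained round trip under those rules (sketch only, not Mercurial code):

    import io
    import struct

    def frame(payload):
        return struct.pack(b">l", len(payload) + 4) + payload

    def unframe(stream):
        l = struct.unpack(b">l", stream.read(4))[0]
        if l <= 4:
            return b""               # the zero-length closing chunk
        return stream.read(l - 4)

    stream = io.BytesIO(frame(b"hello") + struct.pack(b">l", 0))
    assert unframe(stream) == b"hello"
    assert unframe(stream) == b""    # end of the chunk sequence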
64 def _fileheader(path):
64 def _fileheader(path):
65 """Obtain a changegroup chunk header for a named path."""
65 """Obtain a changegroup chunk header for a named path."""
66 return chunkheader(len(path)) + path
66 return chunkheader(len(path)) + path
67
67
68
68
69 def writechunks(ui, chunks, filename, vfs=None):
69 def writechunks(ui, chunks, filename, vfs=None):
70 """Write chunks to a file and return its filename.
70 """Write chunks to a file and return its filename.
71
71
72 The stream is assumed to be a bundle file.
72 The stream is assumed to be a bundle file.
73 Existing files will not be overwritten.
73 Existing files will not be overwritten.
74 If no filename is specified, a temporary file is created.
74 If no filename is specified, a temporary file is created.
75 """
75 """
76 fh = None
76 fh = None
77 cleanup = None
77 cleanup = None
78 try:
78 try:
79 if filename:
79 if filename:
80 if vfs:
80 if vfs:
81 fh = vfs.open(filename, b"wb")
81 fh = vfs.open(filename, b"wb")
82 else:
82 else:
83 # Increase default buffer size because default is usually
83 # Increase default buffer size because default is usually
84 # small (4k is common on Linux).
84 # small (4k is common on Linux).
85 fh = open(filename, b"wb", 131072)
85 fh = open(filename, b"wb", 131072)
86 else:
86 else:
87 fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
87 fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
88 fh = os.fdopen(fd, "wb")
88 fh = os.fdopen(fd, "wb")
89 cleanup = filename
89 cleanup = filename
90 for c in chunks:
90 for c in chunks:
91 fh.write(c)
91 fh.write(c)
92 cleanup = None
92 cleanup = None
93 return filename
93 return filename
94 finally:
94 finally:
95 if fh is not None:
95 if fh is not None:
96 fh.close()
96 fh.close()
97 if cleanup is not None:
97 if cleanup is not None:
98 if filename and vfs:
98 if filename and vfs:
99 vfs.unlink(cleanup)
99 vfs.unlink(cleanup)
100 else:
100 else:
101 os.unlink(cleanup)
101 os.unlink(cleanup)
102
102
103
103
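A hedged usage sketch: callers typically hand writechunks() the chunk iterator of an unpacker to spool an incoming changegroup to disk before doing anything else with it. The `ui` and `cg` objects below are assumed to exist in the caller's context:

    # cg is a cg1unpacker (or subclass) instance, ui a ui object
    tmpname = writechunks(ui, cg.getchunks(), None)   # None -> temporary hg-bundle-*.hg file
    try:
        ...                        # hand tmpname to whatever needs a bundle file on disk
    finally:
        os.unlink(tmpname)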
104 class cg1unpacker(object):
104 class cg1unpacker(object):
105 """Unpacker for cg1 changegroup streams.
105 """Unpacker for cg1 changegroup streams.
106
106
107 A changegroup unpacker handles the framing of the revision data in
107 A changegroup unpacker handles the framing of the revision data in
108 the wire format. Most consumers will want to use the apply()
108 the wire format. Most consumers will want to use the apply()
109 method to add the changes from the changegroup to a repository.
109 method to add the changes from the changegroup to a repository.
110
110
111 If you're forwarding a changegroup unmodified to another consumer,
111 If you're forwarding a changegroup unmodified to another consumer,
112 use getchunks(), which returns an iterator of changegroup
112 use getchunks(), which returns an iterator of changegroup
113 chunks. This is mostly useful for cases where you need to know the
113 chunks. This is mostly useful for cases where you need to know the
114 data stream has ended by observing the end of the changegroup.
114 data stream has ended by observing the end of the changegroup.
115
115
116 deltachunk() is useful only if you're applying delta data. Most
116 deltachunk() is useful only if you're applying delta data. Most
117 consumers should prefer apply() instead.
117 consumers should prefer apply() instead.
118
118
119 A few other public methods exist. Those are used only for
119 A few other public methods exist. Those are used only for
120 bundlerepo and some debug commands - their use is discouraged.
120 bundlerepo and some debug commands - their use is discouraged.
121 """
121 """
122
122
123 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
123 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
124 deltaheadersize = deltaheader.size
124 deltaheadersize = deltaheader.size
125 version = b'01'
125 version = b'01'
126 _grouplistcount = 1 # One list of files after the manifests
126 _grouplistcount = 1 # One list of files after the manifests
127
127
128 def __init__(self, fh, alg, extras=None):
128 def __init__(self, fh, alg, extras=None):
129 if alg is None:
129 if alg is None:
130 alg = b'UN'
130 alg = b'UN'
131 if alg not in util.compengines.supportedbundletypes:
131 if alg not in util.compengines.supportedbundletypes:
132 raise error.Abort(_(b'unknown stream compression type: %s') % alg)
132 raise error.Abort(_(b'unknown stream compression type: %s') % alg)
133 if alg == b'BZ':
133 if alg == b'BZ':
134 alg = b'_truncatedBZ'
134 alg = b'_truncatedBZ'
135
135
136 compengine = util.compengines.forbundletype(alg)
136 compengine = util.compengines.forbundletype(alg)
137 self._stream = compengine.decompressorreader(fh)
137 self._stream = compengine.decompressorreader(fh)
138 self._type = alg
138 self._type = alg
139 self.extras = extras or {}
139 self.extras = extras or {}
140 self.callback = None
140 self.callback = None
141
141
142 # These methods (compressed, read, seek, tell) all appear to only
142 # These methods (compressed, read, seek, tell) all appear to only
143 # be used by bundlerepo, but it's a little hard to tell.
143 # be used by bundlerepo, but it's a little hard to tell.
144 def compressed(self):
144 def compressed(self):
145 return self._type is not None and self._type != b'UN'
145 return self._type is not None and self._type != b'UN'
146
146
147 def read(self, l):
147 def read(self, l):
148 return self._stream.read(l)
148 return self._stream.read(l)
149
149
150 def seek(self, pos):
150 def seek(self, pos):
151 return self._stream.seek(pos)
151 return self._stream.seek(pos)
152
152
153 def tell(self):
153 def tell(self):
154 return self._stream.tell()
154 return self._stream.tell()
155
155
156 def close(self):
156 def close(self):
157 return self._stream.close()
157 return self._stream.close()
158
158
159 def _chunklength(self):
159 def _chunklength(self):
160 d = readexactly(self._stream, 4)
160 d = readexactly(self._stream, 4)
161 l = struct.unpack(b">l", d)[0]
161 l = struct.unpack(b">l", d)[0]
162 if l <= 4:
162 if l <= 4:
163 if l:
163 if l:
164 raise error.Abort(_(b"invalid chunk length %d") % l)
164 raise error.Abort(_(b"invalid chunk length %d") % l)
165 return 0
165 return 0
166 if self.callback:
166 if self.callback:
167 self.callback()
167 self.callback()
168 return l - 4
168 return l - 4
169
169
170 def changelogheader(self):
170 def changelogheader(self):
171 """v10 does not have a changelog header chunk"""
171 """v10 does not have a changelog header chunk"""
172 return {}
172 return {}
173
173
174 def manifestheader(self):
174 def manifestheader(self):
175 """v10 does not have a manifest header chunk"""
175 """v10 does not have a manifest header chunk"""
176 return {}
176 return {}
177
177
178 def filelogheader(self):
178 def filelogheader(self):
179 """return the header of the filelogs chunk, v10 only has the filename"""
179 """return the header of the filelogs chunk, v10 only has the filename"""
180 l = self._chunklength()
180 l = self._chunklength()
181 if not l:
181 if not l:
182 return {}
182 return {}
183 fname = readexactly(self._stream, l)
183 fname = readexactly(self._stream, l)
184 return {b'filename': fname}
184 return {b'filename': fname}
185
185
186 def _deltaheader(self, headertuple, prevnode):
186 def _deltaheader(self, headertuple, prevnode):
187 node, p1, p2, cs = headertuple
187 node, p1, p2, cs = headertuple
188 if prevnode is None:
188 if prevnode is None:
189 deltabase = p1
189 deltabase = p1
190 else:
190 else:
191 deltabase = prevnode
191 deltabase = prevnode
192 flags = 0
192 flags = 0
193 return node, p1, p2, deltabase, cs, flags
193 return node, p1, p2, deltabase, cs, flags
194
194
195 def deltachunk(self, prevnode):
195 def deltachunk(self, prevnode):
196 l = self._chunklength()
196 l = self._chunklength()
197 if not l:
197 if not l:
198 return {}
198 return {}
199 headerdata = readexactly(self._stream, self.deltaheadersize)
199 headerdata = readexactly(self._stream, self.deltaheadersize)
200 header = self.deltaheader.unpack(headerdata)
200 header = self.deltaheader.unpack(headerdata)
201 delta = readexactly(self._stream, l - self.deltaheadersize)
201 delta = readexactly(self._stream, l - self.deltaheadersize)
202 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
202 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
203 return (node, p1, p2, cs, deltabase, delta, flags)
203 return (node, p1, p2, cs, deltabase, delta, flags)
204
204
205 def getchunks(self):
205 def getchunks(self):
206 """returns all the chunks contains in the bundle
206 """returns all the chunks contains in the bundle
207
207
208 Used when you need to forward the binary stream to a file or another
208 Used when you need to forward the binary stream to a file or another
209 network API. To do so, it parses the changegroup data; otherwise it would
209 network API. To do so, it parses the changegroup data; otherwise it would
210 block in the case of sshrepo because it doesn't know the end of the stream.
210 block in the case of sshrepo because it doesn't know the end of the stream.
211 """
211 """
212 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
212 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
213 # and a list of filelogs. For changegroup 3, we expect 4 parts:
213 # and a list of filelogs. For changegroup 3, we expect 4 parts:
214 # changelog, manifestlog, a list of tree manifestlogs, and a list of
214 # changelog, manifestlog, a list of tree manifestlogs, and a list of
215 # filelogs.
215 # filelogs.
216 #
216 #
217 # Changelog and manifestlog parts are terminated with empty chunks. The
217 # Changelog and manifestlog parts are terminated with empty chunks. The
218 # tree and file parts are a list of entry sections. Each entry section
218 # tree and file parts are a list of entry sections. Each entry section
219 # is a series of chunks terminating in an empty chunk. The list of these
219 # is a series of chunks terminating in an empty chunk. The list of these
220 # entry sections is terminated in yet another empty chunk, so we know
220 # entry sections is terminated in yet another empty chunk, so we know
221 # we've reached the end of the tree/file list when we reach an empty
221 # we've reached the end of the tree/file list when we reach an empty
222 # chunk that was preceded by no non-empty chunks.
222 # chunk that was preceded by no non-empty chunks.
223
223
224 parts = 0
224 parts = 0
225 while parts < 2 + self._grouplistcount:
225 while parts < 2 + self._grouplistcount:
226 noentries = True
226 noentries = True
227 while True:
227 while True:
228 chunk = getchunk(self)
228 chunk = getchunk(self)
229 if not chunk:
229 if not chunk:
230 # The first two empty chunks represent the end of the
230 # The first two empty chunks represent the end of the
231 # changelog and the manifestlog portions. The remaining
231 # changelog and the manifestlog portions. The remaining
232 # empty chunks represent either A) the end of individual
232 # empty chunks represent either A) the end of individual
233 # tree or file entries in the file list, or B) the end of
233 # tree or file entries in the file list, or B) the end of
234 # the entire list. It's the end of the entire list if there
234 # the entire list. It's the end of the entire list if there
235 # were no entries (i.e. noentries is True).
235 # were no entries (i.e. noentries is True).
236 if parts < 2:
236 if parts < 2:
237 parts += 1
237 parts += 1
238 elif noentries:
238 elif noentries:
239 parts += 1
239 parts += 1
240 break
240 break
241 noentries = False
241 noentries = False
242 yield chunkheader(len(chunk))
242 yield chunkheader(len(chunk))
243 pos = 0
243 pos = 0
244 while pos < len(chunk):
244 while pos < len(chunk):
245 next = pos + 2 ** 20
245 next = pos + 2 ** 20
246 yield chunk[pos:next]
246 yield chunk[pos:next]
247 pos = next
247 pos = next
248 yield closechunk()
248 yield closechunk()
249
249
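Restating the termination rule from the comments above as a tiny scanner: the first two empty chunks always close the changelog and manifest sections; after that, an empty chunk closes one file (or tree) entry, and an empty chunk that follows no entry closes the whole list. A standalone sketch over a list of chunk sizes (0 meaning an empty chunk), assuming a well-formed cg1/cg2 stream with a single trailing file list:

    def chunks_in_stream(chunk_sizes, grouplistcount=1):
        """Return how many chunks make up one changegroup stream."""
        parts = 0
        consumed = 0
        it = iter(chunk_sizes)
        while parts < 2 + grouplistcount:
            noentries = True
            for size in it:
                consumed += 1
                if size == 0:
                    if parts < 2 or noentries:
                        parts += 1
                    break
                noentries = False
            else:
                raise ValueError("truncated stream")
        return consumed

    # changelog, end, manifests, end, one filelog entry, end, end-of-list
    assert chunks_in_stream([5, 7, 0, 6, 0, 4, 4, 0, 0]) == 9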
250 def _unpackmanifests(self, repo, revmap, trp, prog):
250 def _unpackmanifests(self, repo, revmap, trp, prog):
251 self.callback = prog.increment
251 self.callback = prog.increment
252 # no need to check for empty manifest group here:
252 # no need to check for empty manifest group here:
253 # if the result of the merge of 1 and 2 is the same in 3 and 4,
253 # if the result of the merge of 1 and 2 is the same in 3 and 4,
254 # no new manifest will be created and the manifest group will
254 # no new manifest will be created and the manifest group will
255 # be empty during the pull
255 # be empty during the pull
256 self.manifestheader()
256 self.manifestheader()
257 deltas = self.deltaiter()
257 deltas = self.deltaiter()
258 repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp)
258 repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp)
259 prog.complete()
259 prog.complete()
260 self.callback = None
260 self.callback = None
261
261
262 def apply(
262 def apply(
263 self,
263 self,
264 repo,
264 repo,
265 tr,
265 tr,
266 srctype,
266 srctype,
267 url,
267 url,
268 targetphase=phases.draft,
268 targetphase=phases.draft,
269 expectedtotal=None,
269 expectedtotal=None,
270 ):
270 ):
271 """Add the changegroup returned by source.read() to this repo.
271 """Add the changegroup returned by source.read() to this repo.
272 srctype is a string like 'push', 'pull', or 'unbundle'. url is
272 srctype is a string like 'push', 'pull', or 'unbundle'. url is
273 the URL of the repo where this changegroup is coming from.
273 the URL of the repo where this changegroup is coming from.
274
274
275 Return an integer summarizing the change to this repo:
275 Return an integer summarizing the change to this repo:
276 - nothing changed or no source: 0
276 - nothing changed or no source: 0
277 - more heads than before: 1+added heads (2..n)
277 - more heads than before: 1+added heads (2..n)
278 - fewer heads than before: -1-removed heads (-2..-n)
278 - fewer heads than before: -1-removed heads (-2..-n)
279 - number of heads stays the same: 1
279 - number of heads stays the same: 1
280 """
280 """
281 repo = repo.unfiltered()
281 repo = repo.unfiltered()
282
282
283 def csmap(x):
283 def csmap(x):
284 repo.ui.debug(b"add changeset %s\n" % short(x))
284 repo.ui.debug(b"add changeset %s\n" % short(x))
285 return len(cl)
285 return len(cl)
286
286
287 def revmap(x):
287 def revmap(x):
288 return cl.rev(x)
288 return cl.rev(x)
289
289
290 try:
290 try:
291 # The transaction may already carry source information. In this
291 # The transaction may already carry source information. In this
292 # case we use the top level data. We overwrite the argument
292 # case we use the top level data. We overwrite the argument
293 # because we need to use the top level value (if they exist)
293 # because we need to use the top level value (if they exist)
294 # in this function.
294 # in this function.
295 srctype = tr.hookargs.setdefault(b'source', srctype)
295 srctype = tr.hookargs.setdefault(b'source', srctype)
296 tr.hookargs.setdefault(b'url', url)
296 tr.hookargs.setdefault(b'url', url)
297 repo.hook(
297 repo.hook(
298 b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs)
298 b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs)
299 )
299 )
300
300
301 # write changelog data to temp files so concurrent readers
301 # write changelog data to temp files so concurrent readers
302 # will not see an inconsistent view
302 # will not see an inconsistent view
303 cl = repo.changelog
303 cl = repo.changelog
304 cl.delayupdate(tr)
304 cl.delayupdate(tr)
305 oldheads = set(cl.heads())
305 oldheads = set(cl.heads())
306
306
307 trp = weakref.proxy(tr)
307 trp = weakref.proxy(tr)
308 # pull off the changeset group
308 # pull off the changeset group
309 repo.ui.status(_(b"adding changesets\n"))
309 repo.ui.status(_(b"adding changesets\n"))
310 clstart = len(cl)
310 clstart = len(cl)
311 progress = repo.ui.makeprogress(
311 progress = repo.ui.makeprogress(
312 _(b'changesets'), unit=_(b'chunks'), total=expectedtotal
312 _(b'changesets'), unit=_(b'chunks'), total=expectedtotal
313 )
313 )
314 self.callback = progress.increment
314 self.callback = progress.increment
315
315
316 efilesset = set()
316 efilesset = set()
317
317
318 def onchangelog(cl, node):
318 def onchangelog(cl, node):
319 efilesset.update(cl.readfiles(node))
319 efilesset.update(cl.readfiles(node))
320
320
321 self.changelogheader()
321 self.changelogheader()
322 deltas = self.deltaiter()
322 deltas = self.deltaiter()
323 cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
323 cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
324 efiles = len(efilesset)
324 efiles = len(efilesset)
325
325
326 if not cgnodes:
326 if not cgnodes:
327 repo.ui.develwarn(
327 repo.ui.develwarn(
328 b'applied empty changelog from changegroup',
328 b'applied empty changelog from changegroup',
329 config=b'warn-empty-changegroup',
329 config=b'warn-empty-changegroup',
330 )
330 )
331 clend = len(cl)
331 clend = len(cl)
332 changesets = clend - clstart
332 changesets = clend - clstart
333 progress.complete()
333 progress.complete()
334 self.callback = None
334 self.callback = None
335
335
336 # pull off the manifest group
336 # pull off the manifest group
337 repo.ui.status(_(b"adding manifests\n"))
337 repo.ui.status(_(b"adding manifests\n"))
338 # We know that we'll never have more manifests than we had
338 # We know that we'll never have more manifests than we had
339 # changesets.
339 # changesets.
340 progress = repo.ui.makeprogress(
340 progress = repo.ui.makeprogress(
341 _(b'manifests'), unit=_(b'chunks'), total=changesets
341 _(b'manifests'), unit=_(b'chunks'), total=changesets
342 )
342 )
343 self._unpackmanifests(repo, revmap, trp, progress)
343 self._unpackmanifests(repo, revmap, trp, progress)
344
344
345 needfiles = {}
345 needfiles = {}
346 if repo.ui.configbool(b'server', b'validate'):
346 if repo.ui.configbool(b'server', b'validate'):
347 cl = repo.changelog
347 cl = repo.changelog
348 ml = repo.manifestlog
348 ml = repo.manifestlog
349 # validate incoming csets have their manifests
349 # validate incoming csets have their manifests
350 for cset in pycompat.xrange(clstart, clend):
350 for cset in pycompat.xrange(clstart, clend):
351 mfnode = cl.changelogrevision(cset).manifest
351 mfnode = cl.changelogrevision(cset).manifest
352 mfest = ml[mfnode].readdelta()
352 mfest = ml[mfnode].readdelta()
353 # store file cgnodes we must see
353 # store file cgnodes we must see
354 for f, n in pycompat.iteritems(mfest):
354 for f, n in pycompat.iteritems(mfest):
355 needfiles.setdefault(f, set()).add(n)
355 needfiles.setdefault(f, set()).add(n)
356
356
357 # process the files
357 # process the files
358 repo.ui.status(_(b"adding file changes\n"))
358 repo.ui.status(_(b"adding file changes\n"))
359 newrevs, newfiles = _addchangegroupfiles(
359 newrevs, newfiles = _addchangegroupfiles(
360 repo, self, revmap, trp, efiles, needfiles
360 repo, self, revmap, trp, efiles, needfiles
361 )
361 )
362
362
363 # making sure the value exists
363 # making sure the value exists
364 tr.changes.setdefault(b'changegroup-count-changesets', 0)
364 tr.changes.setdefault(b'changegroup-count-changesets', 0)
365 tr.changes.setdefault(b'changegroup-count-revisions', 0)
365 tr.changes.setdefault(b'changegroup-count-revisions', 0)
366 tr.changes.setdefault(b'changegroup-count-files', 0)
366 tr.changes.setdefault(b'changegroup-count-files', 0)
367 tr.changes.setdefault(b'changegroup-count-heads', 0)
367 tr.changes.setdefault(b'changegroup-count-heads', 0)
368
368
369 # Some code uses bundle operations for internal purposes. They usually
369 # Some code uses bundle operations for internal purposes. They usually
370 # set `ui.quiet` to do this outside of the user's sight. Since the report
370 # set `ui.quiet` to do this outside of the user's sight. Since the report
371 # of such operations now happens at the end of the transaction,
371 # of such operations now happens at the end of the transaction,
372 # ui.quiet has no direct effect on the output.
372 # ui.quiet has no direct effect on the output.
373 #
373 #
374 # To preserve this intent we use an inelegant hack: we fail to report
374 # To preserve this intent we use an inelegant hack: we fail to report
375 # the change if `quiet` is set. We should probably move to
375 # the change if `quiet` is set. We should probably move to
376 # something better, but this is a good first step to allow the "end
376 # something better, but this is a good first step to allow the "end
377 # of transaction report" to pass tests.
377 # of transaction report" to pass tests.
378 if not repo.ui.quiet:
378 if not repo.ui.quiet:
379 tr.changes[b'changegroup-count-changesets'] += changesets
379 tr.changes[b'changegroup-count-changesets'] += changesets
380 tr.changes[b'changegroup-count-revisions'] += newrevs
380 tr.changes[b'changegroup-count-revisions'] += newrevs
381 tr.changes[b'changegroup-count-files'] += newfiles
381 tr.changes[b'changegroup-count-files'] += newfiles
382
382
383 deltaheads = 0
383 deltaheads = 0
384 if oldheads:
384 if oldheads:
385 heads = cl.heads()
385 heads = cl.heads()
386 deltaheads += len(heads) - len(oldheads)
386 deltaheads += len(heads) - len(oldheads)
387 for h in heads:
387 for h in heads:
388 if h not in oldheads and repo[h].closesbranch():
388 if h not in oldheads and repo[h].closesbranch():
389 deltaheads -= 1
389 deltaheads -= 1
390
390
391 # see previous comment about checking ui.quiet
391 # see previous comment about checking ui.quiet
392 if not repo.ui.quiet:
392 if not repo.ui.quiet:
393 tr.changes[b'changegroup-count-heads'] += deltaheads
393 tr.changes[b'changegroup-count-heads'] += deltaheads
394 repo.invalidatevolatilesets()
394 repo.invalidatevolatilesets()
395
395
396 if changesets > 0:
396 if changesets > 0:
397 if b'node' not in tr.hookargs:
397 if b'node' not in tr.hookargs:
398 tr.hookargs[b'node'] = hex(cl.node(clstart))
398 tr.hookargs[b'node'] = hex(cl.node(clstart))
399 tr.hookargs[b'node_last'] = hex(cl.node(clend - 1))
399 tr.hookargs[b'node_last'] = hex(cl.node(clend - 1))
400 hookargs = dict(tr.hookargs)
400 hookargs = dict(tr.hookargs)
401 else:
401 else:
402 hookargs = dict(tr.hookargs)
402 hookargs = dict(tr.hookargs)
403 hookargs[b'node'] = hex(cl.node(clstart))
403 hookargs[b'node'] = hex(cl.node(clstart))
404 hookargs[b'node_last'] = hex(cl.node(clend - 1))
404 hookargs[b'node_last'] = hex(cl.node(clend - 1))
405 repo.hook(
405 repo.hook(
406 b'pretxnchangegroup',
406 b'pretxnchangegroup',
407 throw=True,
407 throw=True,
408 **pycompat.strkwargs(hookargs)
408 **pycompat.strkwargs(hookargs)
409 )
409 )
410
410
411 added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
411 added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
412 phaseall = None
412 phaseall = None
413 if srctype in (b'push', b'serve'):
413 if srctype in (b'push', b'serve'):
414 # Old servers can not push the boundary themselves.
414 # Old servers can not push the boundary themselves.
415 # New servers won't push the boundary if changeset already
415 # New servers won't push the boundary if changeset already
416 # exists locally as secret
416 # exists locally as secret
417 #
417 #
418 # We should not use added here but the list of all changes in
418 # We should not use added here but the list of all changes in
419 # the bundle
419 # the bundle
420 if repo.publishing():
420 if repo.publishing():
421 targetphase = phaseall = phases.public
421 targetphase = phaseall = phases.public
422 else:
422 else:
423 # closer target phase computation
423 # closer target phase computation
424
424
425 # Those changesets have been pushed from the
425 # Those changesets have been pushed from the
426 # outside, their phases are going to be pushed
426 # outside, their phases are going to be pushed
427 # alongside. Therefore `targetphase` is
427 # alongside. Therefore `targetphase` is
428 # ignored.
428 # ignored.
429 targetphase = phaseall = phases.draft
429 targetphase = phaseall = phases.draft
430 if added:
430 if added:
431 phases.registernew(repo, tr, targetphase, added)
431 phases.registernew(repo, tr, targetphase, added)
432 if phaseall is not None:
432 if phaseall is not None:
433 phases.advanceboundary(repo, tr, phaseall, cgnodes)
433 phases.advanceboundary(repo, tr, phaseall, cgnodes)
434
434
435 if changesets > 0:
435 if changesets > 0:
436
436
437 def runhooks(unused_success):
437 def runhooks(unused_success):
438 # These hooks run when the lock releases, not when the
438 # These hooks run when the lock releases, not when the
439 # transaction closes. So it's possible for the changelog
439 # transaction closes. So it's possible for the changelog
440 # to have changed since we last saw it.
440 # to have changed since we last saw it.
441 if clstart >= len(repo):
441 if clstart >= len(repo):
442 return
442 return
443
443
444 repo.hook(b"changegroup", **pycompat.strkwargs(hookargs))
444 repo.hook(b"changegroup", **pycompat.strkwargs(hookargs))
445
445
446 for n in added:
446 for n in added:
447 args = hookargs.copy()
447 args = hookargs.copy()
448 args[b'node'] = hex(n)
448 args[b'node'] = hex(n)
449 del args[b'node_last']
449 del args[b'node_last']
450 repo.hook(b"incoming", **pycompat.strkwargs(args))
450 repo.hook(b"incoming", **pycompat.strkwargs(args))
451
451
452 newheads = [h for h in repo.heads() if h not in oldheads]
452 newheads = [h for h in repo.heads() if h not in oldheads]
453 repo.ui.log(
453 repo.ui.log(
454 b"incoming",
454 b"incoming",
455 b"%d incoming changes - new heads: %s\n",
455 b"%d incoming changes - new heads: %s\n",
456 len(added),
456 len(added),
457 b', '.join([hex(c[:6]) for c in newheads]),
457 b', '.join([hex(c[:6]) for c in newheads]),
458 )
458 )
459
459
460 tr.addpostclose(
460 tr.addpostclose(
461 b'changegroup-runhooks-%020i' % clstart,
461 b'changegroup-runhooks-%020i' % clstart,
462 lambda tr: repo._afterlock(runhooks),
462 lambda tr: repo._afterlock(runhooks),
463 )
463 )
464 finally:
464 finally:
465 repo.ui.flush()
465 repo.ui.flush()
466 # never return 0 here:
466 # never return 0 here:
467 if deltaheads < 0:
467 if deltaheads < 0:
468 ret = deltaheads - 1
468 ret = deltaheads - 1
469 else:
469 else:
470 ret = deltaheads + 1
470 ret = deltaheads + 1
471 return ret
471 return ret
472
472
473 def deltaiter(self):
473 def deltaiter(self):
474 """
474 """
475 returns an iterator of the deltas in this changegroup
475 returns an iterator of the deltas in this changegroup
476
476
477 Useful for passing to the underlying storage system to be stored.
477 Useful for passing to the underlying storage system to be stored.
478 """
478 """
479 chain = None
479 chain = None
480 for chunkdata in iter(lambda: self.deltachunk(chain), {}):
480 for chunkdata in iter(lambda: self.deltachunk(chain), {}):
481 # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
481 # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
482 yield chunkdata
482 yield chunkdata
483 chain = chunkdata[0]
483 chain = chunkdata[0]
484
484
485
485
486 class cg2unpacker(cg1unpacker):
486 class cg2unpacker(cg1unpacker):
487 """Unpacker for cg2 streams.
487 """Unpacker for cg2 streams.
488
488
489 cg2 streams add support for generaldelta, so the delta header
489 cg2 streams add support for generaldelta, so the delta header
490 format is slightly different. All other features about the data
490 format is slightly different. All other features about the data
491 remain the same.
491 remain the same.
492 """
492 """
493
493
494 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
494 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
495 deltaheadersize = deltaheader.size
495 deltaheadersize = deltaheader.size
496 version = b'02'
496 version = b'02'
497
497
498 def _deltaheader(self, headertuple, prevnode):
498 def _deltaheader(self, headertuple, prevnode):
499 node, p1, p2, deltabase, cs = headertuple
499 node, p1, p2, deltabase, cs = headertuple
500 flags = 0
500 flags = 0
501 return node, p1, p2, deltabase, cs, flags
501 return node, p1, p2, deltabase, cs, flags
502
502
503
503
504 class cg3unpacker(cg2unpacker):
504 class cg3unpacker(cg2unpacker):
505 """Unpacker for cg3 streams.
505 """Unpacker for cg3 streams.
506
506
507 cg3 streams add support for exchanging treemanifests and revlog
507 cg3 streams add support for exchanging treemanifests and revlog
508 flags. It adds the revlog flags to the delta header and an empty chunk
508 flags. It adds the revlog flags to the delta header and an empty chunk
509 separating manifests and files.
509 separating manifests and files.
510 """
510 """
511
511
512 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
512 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
513 deltaheadersize = deltaheader.size
513 deltaheadersize = deltaheader.size
514 version = b'03'
514 version = b'03'
515 _grouplistcount = 2 # One list of manifests and one list of files
515 _grouplistcount = 2 # One list of manifests and one list of files
516
516
517 def _deltaheader(self, headertuple, prevnode):
517 def _deltaheader(self, headertuple, prevnode):
518 node, p1, p2, deltabase, cs, flags = headertuple
518 node, p1, p2, deltabase, cs, flags = headertuple
519 return node, p1, p2, deltabase, cs, flags
519 return node, p1, p2, deltabase, cs, flags
520
520
521 def _unpackmanifests(self, repo, revmap, trp, prog):
521 def _unpackmanifests(self, repo, revmap, trp, prog):
522 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
522 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
523 for chunkdata in iter(self.filelogheader, {}):
523 for chunkdata in iter(self.filelogheader, {}):
524 # If we get here, there are directory manifests in the changegroup
524 # If we get here, there are directory manifests in the changegroup
525 d = chunkdata[b"filename"]
525 d = chunkdata[b"filename"]
526 repo.ui.debug(b"adding %s revisions\n" % d)
526 repo.ui.debug(b"adding %s revisions\n" % d)
527 deltas = self.deltaiter()
527 deltas = self.deltaiter()
528 if not repo.manifestlog.getstorage(d).addgroup(deltas, revmap, trp):
528 if not repo.manifestlog.getstorage(d).addgroup(deltas, revmap, trp):
529 raise error.Abort(_(b"received dir revlog group is empty"))
529 raise error.Abort(_(b"received dir revlog group is empty"))
530
530
531
531
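# Editorial note (illustrative sketch, not part of this module): the three
# unpackers differ mainly in their delta header layout, mirroring the pack()
# calls in the _makecg*packer helpers further down: cg1 carries
# (node, p1, p2, linknode), cg2 inserts an explicit deltabase before the
# linknode for generaldelta, and cg3 appends the revlog flags.  The structs
# below assume 20-byte binary nodes and a 16-bit big-endian flags field; that
# assumption is spelled out here purely for illustration.
def _example_delta_header_layouts():
    import struct
    v1 = struct.Struct(b"20s20s20s20s")        # node p1 p2 linknode
    v2 = struct.Struct(b"20s20s20s20s20s")     # node p1 p2 deltabase linknode
    v3 = struct.Struct(b">20s20s20s20s20sH")   # ... plus 16-bit flags (assumed width)
    fakenode = b'\x11' * 20
    packed = v2.pack(fakenode, fakenode, fakenode, fakenode, fakenode)
    assert len(packed) == v2.size == 100
    return v1.size, v2.size, v3.size           # (80, 100, 102)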
532 class headerlessfixup(object):
532 class headerlessfixup(object):
533 def __init__(self, fh, h):
533 def __init__(self, fh, h):
534 self._h = h
534 self._h = h
535 self._fh = fh
535 self._fh = fh
536
536
537 def read(self, n):
537 def read(self, n):
538 if self._h:
538 if self._h:
539 d, self._h = self._h[:n], self._h[n:]
539 d, self._h = self._h[:n], self._h[n:]
540 if len(d) < n:
540 if len(d) < n:
541 d += readexactly(self._fh, n - len(d))
541 d += readexactly(self._fh, n - len(d))
542 return d
542 return d
543 return readexactly(self._fh, n)
543 return readexactly(self._fh, n)
544
544
545
545
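# Editorial note (illustrative sketch, not part of this module): headerlessfixup
# above re-attaches header bytes that were already consumed while sniffing the
# stream, so that downstream readers see a complete stream again.  The
# self-contained analogue below uses io.BytesIO instead of a real file handle
# and a plain read() instead of readexactly(); every name is hypothetical.
def _example_headerless_roundtrip():
    import io

    class _PushbackReader(object):
        def __init__(self, fh, consumed):
            self._fh = fh
            self._consumed = consumed

        def read(self, n):
            if self._consumed:
                d, self._consumed = self._consumed[:n], self._consumed[n:]
                if len(d) < n:
                    d += self._fh.read(n - len(d))
                return d
            return self._fh.read(n)

    fh = io.BytesIO(b'HG20rest-of-bundle')
    header = fh.read(4)                      # bytes consumed while sniffing
    reader = _PushbackReader(fh, header)     # glue them back on
    return reader.read(6)                    # b'HG20re'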
546 def _revisiondeltatochunks(delta, headerfn):
546 def _revisiondeltatochunks(delta, headerfn):
547 """Serialize a revisiondelta to changegroup chunks."""
547 """Serialize a revisiondelta to changegroup chunks."""
548
548
549 # The captured revision delta may be encoded as a delta against
549 # The captured revision delta may be encoded as a delta against
550 # a base revision or as a full revision. The changegroup format
550 # a base revision or as a full revision. The changegroup format
551 # requires that everything on the wire be deltas. So for full
551 # requires that everything on the wire be deltas. So for full
552 # revisions, we need to invent a header that says to rewrite
552 # revisions, we need to invent a header that says to rewrite
553 # data.
553 # data.
554
554
555 if delta.delta is not None:
555 if delta.delta is not None:
556 prefix, data = b'', delta.delta
556 prefix, data = b'', delta.delta
557 elif delta.basenode == nullid:
557 elif delta.basenode == nullid:
558 data = delta.revision
558 data = delta.revision
559 prefix = mdiff.trivialdiffheader(len(data))
559 prefix = mdiff.trivialdiffheader(len(data))
560 else:
560 else:
561 data = delta.revision
561 data = delta.revision
562 prefix = mdiff.replacediffheader(delta.baserevisionsize, len(data))
562 prefix = mdiff.replacediffheader(delta.baserevisionsize, len(data))
563
563
564 meta = headerfn(delta)
564 meta = headerfn(delta)
565
565
566 yield chunkheader(len(meta) + len(prefix) + len(data))
566 yield chunkheader(len(meta) + len(prefix) + len(data))
567 yield meta
567 yield meta
568 if prefix:
568 if prefix:
569 yield prefix
569 yield prefix
570 yield data
570 yield data
571
571
572
572
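# Editorial note (illustrative sketch, not part of this module): each chunk
# emitted above is length-prefix framed - a 4-byte big-endian size followed by
# the payload, with a zero-size chunk acting as the group terminator
# (closechunk()).  The sketch below assumes the size field counts the 4-byte
# prefix itself, an assumption about chunkheader()'s convention made only for
# illustration; it is written from scratch and is not the real wire code.
def _example_chunk_framing(payloads):
    import struct
    stream = b''
    for payload in payloads:
        stream += struct.pack(">l", len(payload) + 4) + payload
    stream += struct.pack(">l", 0)           # terminator, like closechunk()

    out, pos = [], 0
    while True:
        (size,) = struct.unpack(">l", stream[pos:pos + 4])
        pos += 4
        if size <= 4:                        # empty chunk ends the group
            break
        out.append(stream[pos:pos + size - 4])
        pos += size - 4
    return out

# _example_chunk_framing([b'meta', b'delta-data']) == [b'meta', b'delta-data']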
573 def _sortnodesellipsis(store, nodes, cl, lookup):
573 def _sortnodesellipsis(store, nodes, cl, lookup):
574 """Sort nodes for changegroup generation."""
574 """Sort nodes for changegroup generation."""
575 # Ellipses serving mode.
575 # Ellipses serving mode.
576 #
576 #
577 # In a perfect world, we'd generate better ellipsis-ified graphs
577 # In a perfect world, we'd generate better ellipsis-ified graphs
578 # for non-changelog revlogs. In practice, we haven't started doing
578 # for non-changelog revlogs. In practice, we haven't started doing
579 # that yet, so the resulting DAGs for the manifestlog and filelogs
579 # that yet, so the resulting DAGs for the manifestlog and filelogs
580 # are actually full of bogus parentage on all the ellipsis
580 # are actually full of bogus parentage on all the ellipsis
581 # nodes. This has the side effect that, while the contents are
581 # nodes. This has the side effect that, while the contents are
582 # correct, the individual DAGs might be completely out of whack in
582 # correct, the individual DAGs might be completely out of whack in
583 # a case like 882681bc3166 and its ancestors (back about 10
583 # a case like 882681bc3166 and its ancestors (back about 10
584 # revisions or so) in the main hg repo.
584 # revisions or so) in the main hg repo.
585 #
585 #
586 # The one invariant we *know* holds is that the new (potentially
586 # The one invariant we *know* holds is that the new (potentially
587 # bogus) DAG shape will be valid if we order the nodes in the
587 # bogus) DAG shape will be valid if we order the nodes in the
588 # order that they're introduced in dramatis personae by the
588 # order that they're introduced in dramatis personae by the
589 # changelog, so what we do is we sort the non-changelog histories
589 # changelog, so what we do is we sort the non-changelog histories
590 # by the order in which they are used by the changelog.
590 # by the order in which they are used by the changelog.
591 key = lambda n: cl.rev(lookup(n))
591 key = lambda n: cl.rev(lookup(n))
592 return sorted(nodes, key=key)
592 return sorted(nodes, key=key)
593
593
594
594
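# Editorial note (illustrative sketch, not part of this module): the sort above
# orders manifest/filelog nodes by the changelog revision that introduced them.
# With plain dicts standing in for cl.rev() and the lookup callback, the same
# idea looks like this (all names hypothetical):
def _example_sort_by_changelog_order():
    clrev = {b'cl-x': 0, b'cl-y': 1, b'cl-z': 2}              # fake cl.rev()
    introducedby = {b'f1': b'cl-z', b'f2': b'cl-x', b'f3': b'cl-y'}
    nodes = [b'f1', b'f2', b'f3']
    return sorted(nodes, key=lambda n: clrev[introducedby[n]])

# -> [b'f2', b'f3', b'f1']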
595 def _resolvenarrowrevisioninfo(
595 def _resolvenarrowrevisioninfo(
596 cl,
596 cl,
597 store,
597 store,
598 ischangelog,
598 ischangelog,
599 rev,
599 rev,
600 linkrev,
600 linkrev,
601 linknode,
601 linknode,
602 clrevtolocalrev,
602 clrevtolocalrev,
603 fullclnodes,
603 fullclnodes,
604 precomputedellipsis,
604 precomputedellipsis,
605 ):
605 ):
606 linkparents = precomputedellipsis[linkrev]
606 linkparents = precomputedellipsis[linkrev]
607
607
608 def local(clrev):
608 def local(clrev):
609 """Turn a changelog revnum into a local revnum.
609 """Turn a changelog revnum into a local revnum.
610
610
611 The ellipsis dag is stored as revnums on the changelog,
611 The ellipsis dag is stored as revnums on the changelog,
612 but when we're producing ellipsis entries for
612 but when we're producing ellipsis entries for
613 non-changelog revlogs, we need to turn those numbers into
613 non-changelog revlogs, we need to turn those numbers into
614 something local. This does that for us, and during the
614 something local. This does that for us, and during the
615 changelog sending phase will also expand the stored
615 changelog sending phase will also expand the stored
616 mappings as needed.
616 mappings as needed.
617 """
617 """
618 if clrev == nullrev:
618 if clrev == nullrev:
619 return nullrev
619 return nullrev
620
620
621 if ischangelog:
621 if ischangelog:
622 return clrev
622 return clrev
623
623
624 # Walk the ellipsis-ized changelog breadth-first looking for a
624 # Walk the ellipsis-ized changelog breadth-first looking for a
625 # change that has been linked from the current revlog.
625 # change that has been linked from the current revlog.
626 #
626 #
627 # For a flat manifest revlog only a single step should be necessary
627 # For a flat manifest revlog only a single step should be necessary
628 # as all relevant changelog entries are relevant to the flat
628 # as all relevant changelog entries are relevant to the flat
629 # manifest.
629 # manifest.
630 #
630 #
631 # For a filelog or tree manifest dirlog however not every changelog
631 # For a filelog or tree manifest dirlog however not every changelog
632 # entry will have been relevant, so we need to skip some changelog
632 # entry will have been relevant, so we need to skip some changelog
633 # nodes even after ellipsis-izing.
633 # nodes even after ellipsis-izing.
634 walk = [clrev]
634 walk = [clrev]
635 while walk:
635 while walk:
636 p = walk[0]
636 p = walk[0]
637 walk = walk[1:]
637 walk = walk[1:]
638 if p in clrevtolocalrev:
638 if p in clrevtolocalrev:
639 return clrevtolocalrev[p]
639 return clrevtolocalrev[p]
640 elif p in fullclnodes:
640 elif p in fullclnodes:
641 walk.extend([pp for pp in cl.parentrevs(p) if pp != nullrev])
641 walk.extend([pp for pp in cl.parentrevs(p) if pp != nullrev])
642 elif p in precomputedellipsis:
642 elif p in precomputedellipsis:
643 walk.extend(
643 walk.extend(
644 [pp for pp in precomputedellipsis[p] if pp != nullrev]
644 [pp for pp in precomputedellipsis[p] if pp != nullrev]
645 )
645 )
646 else:
646 else:
647 # In this case, we've got an ellipsis with parents
647 # In this case, we've got an ellipsis with parents
648 # outside the current bundle (likely an
648 # outside the current bundle (likely an
649 # incremental pull). We "know" that we can use the
649 # incremental pull). We "know" that we can use the
650 # value of this same revlog at whatever revision
650 # value of this same revlog at whatever revision
651 # is pointed to by linknode. "Know" is in scare
651 # is pointed to by linknode. "Know" is in scare
652 # quotes because I haven't done enough examination
652 # quotes because I haven't done enough examination
653 # of edge cases to convince myself this is really
653 # of edge cases to convince myself this is really
654 # a fact - it works for all the (admittedly
654 # a fact - it works for all the (admittedly
655 # thorough) cases in our testsuite, but I would be
655 # thorough) cases in our testsuite, but I would be
656 # somewhat unsurprised to find a case in the wild
656 # somewhat unsurprised to find a case in the wild
657 # where this breaks down a bit. That said, I don't
657 # where this breaks down a bit. That said, I don't
658 # know if it would hurt anything.
658 # know if it would hurt anything.
659 for i in pycompat.xrange(rev, 0, -1):
659 for i in pycompat.xrange(rev, 0, -1):
660 if store.linkrev(i) == clrev:
660 if store.linkrev(i) == clrev:
661 return i
661 return i
662 # We failed to resolve a parent for this node, so
662 # We failed to resolve a parent for this node, so
663 # we crash the changegroup construction.
663 # we crash the changegroup construction.
664 raise error.Abort(
664 raise error.Abort(
665 b'unable to resolve parent while packing %r %r'
665 b'unable to resolve parent while packing %r %r'
666 b' for changeset %r' % (store.indexfile, rev, clrev)
666 b' for changeset %r' % (store.indexfile, rev, clrev)
667 )
667 )
668
668
669 return nullrev
669 return nullrev
670
670
671 if not linkparents or (store.parentrevs(rev) == (nullrev, nullrev)):
671 if not linkparents or (store.parentrevs(rev) == (nullrev, nullrev)):
672 p1, p2 = nullrev, nullrev
672 p1, p2 = nullrev, nullrev
673 elif len(linkparents) == 1:
673 elif len(linkparents) == 1:
674 (p1,) = sorted(local(p) for p in linkparents)
674 (p1,) = sorted(local(p) for p in linkparents)
675 p2 = nullrev
675 p2 = nullrev
676 else:
676 else:
677 p1, p2 = sorted(local(p) for p in linkparents)
677 p1, p2 = sorted(local(p) for p in linkparents)
678
678
679 p1node, p2node = store.node(p1), store.node(p2)
679 p1node, p2node = store.node(p1), store.node(p2)
680
680
681 return p1node, p2node, linknode
681 return p1node, p2node, linknode
682
682
683
683
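# Editorial note (illustrative sketch, not part of this module): the local()
# helper above does a breadth-first walk over changelog revisions, stopping at
# the first revision that already has a local mapping and otherwise expanding
# through full nodes or precomputed ellipsis parents.  The toy walk below
# captures that shape with plain dicts; every name is hypothetical.
def _example_walk_to_known_ancestor(start, parents, known):
    # parents: rev -> list of parent revs; known: rev -> local rev
    walk = [start]
    while walk:
        rev = walk.pop(0)
        if rev in known:
            return known[rev]
        walk.extend(p for p in parents.get(rev, []) if p is not None)
    return None

# _example_walk_to_known_ancestor(5, {5: [3, 4], 4: [2], 3: [1]}, {2: 0}) == 0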
684 def deltagroup(
684 def deltagroup(
685 repo,
685 repo,
686 store,
686 store,
687 nodes,
687 nodes,
688 ischangelog,
688 ischangelog,
689 lookup,
689 lookup,
690 forcedeltaparentprev,
690 forcedeltaparentprev,
691 topic=None,
691 topic=None,
692 ellipses=False,
692 ellipses=False,
693 clrevtolocalrev=None,
693 clrevtolocalrev=None,
694 fullclnodes=None,
694 fullclnodes=None,
695 precomputedellipsis=None,
695 precomputedellipsis=None,
696 ):
696 ):
697 """Calculate deltas for a set of revisions.
697 """Calculate deltas for a set of revisions.
698
698
699 Is a generator of ``revisiondelta`` instances.
699 Is a generator of ``revisiondelta`` instances.
700
700
701 If topic is not None, progress detail will be generated using this
701 If topic is not None, progress detail will be generated using this
702 topic name (e.g. changesets, manifests, etc).
702 topic name (e.g. changesets, manifests, etc).
703 """
703 """
704 if not nodes:
704 if not nodes:
705 return
705 return
706
706
707 cl = repo.changelog
707 cl = repo.changelog
708
708
709 if ischangelog:
709 if ischangelog:
710 # `hg log` shows changesets in storage order. To preserve order
710 # `hg log` shows changesets in storage order. To preserve order
711 # across clones, send out changesets in storage order.
711 # across clones, send out changesets in storage order.
712 nodesorder = b'storage'
712 nodesorder = b'storage'
713 elif ellipses:
713 elif ellipses:
714 nodes = _sortnodesellipsis(store, nodes, cl, lookup)
714 nodes = _sortnodesellipsis(store, nodes, cl, lookup)
715 nodesorder = b'nodes'
715 nodesorder = b'nodes'
716 else:
716 else:
717 nodesorder = None
717 nodesorder = None
718
718
719 # Perform ellipses filtering and revision massaging. We do this before
719 # Perform ellipses filtering and revision massaging. We do this before
720 # emitrevisions() because a) filtering out revisions creates less work
720 # emitrevisions() because a) filtering out revisions creates less work
721 # for emitrevisions() b) dropping revisions would break emitrevisions()'s
721 # for emitrevisions() b) dropping revisions would break emitrevisions()'s
722 # assumptions about delta choices and we would possibly send a delta
722 # assumptions about delta choices and we would possibly send a delta
723 # referencing a missing base revision.
723 # referencing a missing base revision.
724 #
724 #
725 # Also, calling lookup() has side-effects with regards to populating
725 # Also, calling lookup() has side-effects with regards to populating
726 # data structures. If we don't call lookup() for each node or if we call
726 # data structures. If we don't call lookup() for each node or if we call
727 # lookup() after the first pass through each node, things can break -
727 # lookup() after the first pass through each node, things can break -
728 # possibly intermittently depending on the python hash seed! For that
728 # possibly intermittently depending on the python hash seed! For that
729 # reason, we store a mapping of all linknodes during the initial node
729 # reason, we store a mapping of all linknodes during the initial node
730 # pass rather than use lookup() on the output side.
730 # pass rather than use lookup() on the output side.
731 if ellipses:
731 if ellipses:
732 filtered = []
732 filtered = []
733 adjustedparents = {}
733 adjustedparents = {}
734 linknodes = {}
734 linknodes = {}
735
735
736 for node in nodes:
736 for node in nodes:
737 rev = store.rev(node)
737 rev = store.rev(node)
738 linknode = lookup(node)
738 linknode = lookup(node)
739 linkrev = cl.rev(linknode)
739 linkrev = cl.rev(linknode)
740 clrevtolocalrev[linkrev] = rev
740 clrevtolocalrev[linkrev] = rev
741
741
742 # If linknode is in fullclnodes, it means the corresponding
742 # If linknode is in fullclnodes, it means the corresponding
743 # changeset was a full changeset and is being sent unaltered.
743 # changeset was a full changeset and is being sent unaltered.
744 if linknode in fullclnodes:
744 if linknode in fullclnodes:
745 linknodes[node] = linknode
745 linknodes[node] = linknode
746
746
747 # If the corresponding changeset wasn't in the set computed
747 # If the corresponding changeset wasn't in the set computed
748 # as relevant to us, it should be dropped outright.
748 # as relevant to us, it should be dropped outright.
749 elif linkrev not in precomputedellipsis:
749 elif linkrev not in precomputedellipsis:
750 continue
750 continue
751
751
752 else:
752 else:
753 # We could probably do this later and avoid the dict
753 # We could probably do this later and avoid the dict
754 # holding state. But it likely doesn't matter.
754 # holding state. But it likely doesn't matter.
755 p1node, p2node, linknode = _resolvenarrowrevisioninfo(
755 p1node, p2node, linknode = _resolvenarrowrevisioninfo(
756 cl,
756 cl,
757 store,
757 store,
758 ischangelog,
758 ischangelog,
759 rev,
759 rev,
760 linkrev,
760 linkrev,
761 linknode,
761 linknode,
762 clrevtolocalrev,
762 clrevtolocalrev,
763 fullclnodes,
763 fullclnodes,
764 precomputedellipsis,
764 precomputedellipsis,
765 )
765 )
766
766
767 adjustedparents[node] = (p1node, p2node)
767 adjustedparents[node] = (p1node, p2node)
768 linknodes[node] = linknode
768 linknodes[node] = linknode
769
769
770 filtered.append(node)
770 filtered.append(node)
771
771
772 nodes = filtered
772 nodes = filtered
773
773
774 # We expect the first pass to be fast, so we only engage the progress
774 # We expect the first pass to be fast, so we only engage the progress
775 # meter for constructing the revision deltas.
775 # meter for constructing the revision deltas.
776 progress = None
776 progress = None
777 if topic is not None:
777 if topic is not None:
778 progress = repo.ui.makeprogress(
778 progress = repo.ui.makeprogress(
779 topic, unit=_(b'chunks'), total=len(nodes)
779 topic, unit=_(b'chunks'), total=len(nodes)
780 )
780 )
781
781
782 configtarget = repo.ui.config(b'devel', b'bundle.delta')
782 configtarget = repo.ui.config(b'devel', b'bundle.delta')
783 if configtarget not in (b'', b'p1', b'full'):
783 if configtarget not in (b'', b'p1', b'full'):
784 msg = _("""config "devel.bundle.delta" has unknown value: %s""")
784 msg = _("""config "devel.bundle.delta" has unknown value: %s""")
785 repo.ui.warn(msg % configtarget)
785 repo.ui.warn(msg % configtarget)
786
786
787 deltamode = repository.CG_DELTAMODE_STD
787 deltamode = repository.CG_DELTAMODE_STD
788 if forcedeltaparentprev:
788 if forcedeltaparentprev:
789 deltamode = repository.CG_DELTAMODE_PREV
789 deltamode = repository.CG_DELTAMODE_PREV
790 elif configtarget == b'p1':
790 elif configtarget == b'p1':
791 deltamode = repository.CG_DELTAMODE_P1
791 deltamode = repository.CG_DELTAMODE_P1
792 elif configtarget == b'full':
792 elif configtarget == b'full':
793 deltamode = repository.CG_DELTAMODE_FULL
793 deltamode = repository.CG_DELTAMODE_FULL
794
794
795 revisions = store.emitrevisions(
795 revisions = store.emitrevisions(
796 nodes,
796 nodes,
797 nodesorder=nodesorder,
797 nodesorder=nodesorder,
798 revisiondata=True,
798 revisiondata=True,
799 assumehaveparentrevisions=not ellipses,
799 assumehaveparentrevisions=not ellipses,
800 deltamode=deltamode,
800 deltamode=deltamode,
801 )
801 )
802
802
803 for i, revision in enumerate(revisions):
803 for i, revision in enumerate(revisions):
804 if progress:
804 if progress:
805 progress.update(i + 1)
805 progress.update(i + 1)
806
806
807 if ellipses:
807 if ellipses:
808 linknode = linknodes[revision.node]
808 linknode = linknodes[revision.node]
809
809
810 if revision.node in adjustedparents:
810 if revision.node in adjustedparents:
811 p1node, p2node = adjustedparents[revision.node]
811 p1node, p2node = adjustedparents[revision.node]
812 revision.p1node = p1node
812 revision.p1node = p1node
813 revision.p2node = p2node
813 revision.p2node = p2node
814 revision.flags |= repository.REVISION_FLAG_ELLIPSIS
814 revision.flags |= repository.REVISION_FLAG_ELLIPSIS
815
815
816 else:
816 else:
817 linknode = lookup(revision.node)
817 linknode = lookup(revision.node)
818
818
819 revision.linknode = linknode
819 revision.linknode = linknode
820 yield revision
820 yield revision
821
821
822 if progress:
822 if progress:
823 progress.complete()
823 progress.complete()
824
824
825
825
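# Editorial note (illustrative sketch, not part of this module): the delta-mode
# selection above boils down to a small precedence rule - an explicit
# forcedeltaparentprev wins, otherwise the devel.bundle.delta config may force
# p1 or full deltas, and anything else falls back to the standard mode.  The
# stand-alone helper below restates that rule with plain strings instead of the
# repository.CG_DELTAMODE_* constants; it is not the function used above.
def _example_pick_deltamode(forcedeltaparentprev, configtarget):
    if configtarget not in (b'', b'p1', b'full'):
        configtarget = b''                   # unknown values only trigger a warning
    if forcedeltaparentprev:
        return b'prev'
    if configtarget == b'p1':
        return b'p1'
    if configtarget == b'full':
        return b'full'
    return b'std'

# _example_pick_deltamode(False, b'p1') == b'p1'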
826 class cgpacker(object):
826 class cgpacker(object):
827 def __init__(
827 def __init__(
828 self,
828 self,
829 repo,
829 repo,
830 oldmatcher,
830 oldmatcher,
831 matcher,
831 matcher,
832 version,
832 version,
833 builddeltaheader,
833 builddeltaheader,
834 manifestsend,
834 manifestsend,
835 forcedeltaparentprev=False,
835 forcedeltaparentprev=False,
836 bundlecaps=None,
836 bundlecaps=None,
837 ellipses=False,
837 ellipses=False,
838 shallow=False,
838 shallow=False,
839 ellipsisroots=None,
839 ellipsisroots=None,
840 fullnodes=None,
840 fullnodes=None,
841 ):
841 ):
842 """Given a source repo, construct a bundler.
842 """Given a source repo, construct a bundler.
843
843
844 oldmatcher is a matcher that matches on files the client already has.
844 oldmatcher is a matcher that matches on files the client already has.
845 These will not be included in the changegroup.
845 These will not be included in the changegroup.
846
846
847 matcher is a matcher that matches on files to include in the
847 matcher is a matcher that matches on files to include in the
848 changegroup. Used to facilitate sparse changegroups.
848 changegroup. Used to facilitate sparse changegroups.
849
849
850 forcedeltaparentprev indicates whether delta parents must be against
850 forcedeltaparentprev indicates whether delta parents must be against
851 the previous revision in a delta group. This should only be used for
851 the previous revision in a delta group. This should only be used for
852 compatibility with changegroup version 1.
852 compatibility with changegroup version 1.
853
853
854 builddeltaheader is a callable that constructs the header for a group
854 builddeltaheader is a callable that constructs the header for a group
855 delta.
855 delta.
856
856
857 manifestsend is a chunk to send after manifests have been fully emitted.
857 manifestsend is a chunk to send after manifests have been fully emitted.
858
858
859 ellipses indicates whether ellipsis serving mode is enabled.
859 ellipses indicates whether ellipsis serving mode is enabled.
860
860
861 bundlecaps is optional and can be used to specify the set of
861 bundlecaps is optional and can be used to specify the set of
862 capabilities which can be used to build the bundle. While bundlecaps is
862 capabilities which can be used to build the bundle. While bundlecaps is
863 unused in core Mercurial, extensions rely on this feature to communicate
863 unused in core Mercurial, extensions rely on this feature to communicate
864 capabilities to customize the changegroup packer.
864 capabilities to customize the changegroup packer.
865
865
866 shallow indicates whether shallow data might be sent. The packer may
866 shallow indicates whether shallow data might be sent. The packer may
867 need to pack file contents not introduced by the changes being packed.
867 need to pack file contents not introduced by the changes being packed.
868
868
869 fullnodes is the set of changelog nodes which should not be ellipsis
869 fullnodes is the set of changelog nodes which should not be ellipsis
870 nodes. We store this rather than the set of nodes that should be
870 nodes. We store this rather than the set of nodes that should be
871 ellipsis because for very large histories we expect this to be
871 ellipsis because for very large histories we expect this to be
872 significantly smaller.
872 significantly smaller.
873 """
873 """
874 assert oldmatcher
874 assert oldmatcher
875 assert matcher
875 assert matcher
876 self._oldmatcher = oldmatcher
876 self._oldmatcher = oldmatcher
877 self._matcher = matcher
877 self._matcher = matcher
878
878
879 self.version = version
879 self.version = version
880 self._forcedeltaparentprev = forcedeltaparentprev
880 self._forcedeltaparentprev = forcedeltaparentprev
881 self._builddeltaheader = builddeltaheader
881 self._builddeltaheader = builddeltaheader
882 self._manifestsend = manifestsend
882 self._manifestsend = manifestsend
883 self._ellipses = ellipses
883 self._ellipses = ellipses
884
884
885 # Set of capabilities we can use to build the bundle.
885 # Set of capabilities we can use to build the bundle.
886 if bundlecaps is None:
886 if bundlecaps is None:
887 bundlecaps = set()
887 bundlecaps = set()
888 self._bundlecaps = bundlecaps
888 self._bundlecaps = bundlecaps
889 self._isshallow = shallow
889 self._isshallow = shallow
890 self._fullclnodes = fullnodes
890 self._fullclnodes = fullnodes
891
891
892 # Maps ellipsis revs to their roots at the changelog level.
892 # Maps ellipsis revs to their roots at the changelog level.
893 self._precomputedellipsis = ellipsisroots
893 self._precomputedellipsis = ellipsisroots
894
894
895 self._repo = repo
895 self._repo = repo
896
896
897 if self._repo.ui.verbose and not self._repo.ui.debugflag:
897 if self._repo.ui.verbose and not self._repo.ui.debugflag:
898 self._verbosenote = self._repo.ui.note
898 self._verbosenote = self._repo.ui.note
899 else:
899 else:
900 self._verbosenote = lambda s: None
900 self._verbosenote = lambda s: None
901
901
902 def generate(
902 def generate(
903 self, commonrevs, clnodes, fastpathlinkrev, source, changelog=True
903 self, commonrevs, clnodes, fastpathlinkrev, source, changelog=True
904 ):
904 ):
905 """Yield a sequence of changegroup byte chunks.
905 """Yield a sequence of changegroup byte chunks.
906 If changelog is False, changelog data won't be added to changegroup
906 If changelog is False, changelog data won't be added to changegroup
907 """
907 """
908
908
909 repo = self._repo
909 repo = self._repo
910 cl = repo.changelog
910 cl = repo.changelog
911
911
912 self._verbosenote(_(b'uncompressed size of bundle content:\n'))
912 self._verbosenote(_(b'uncompressed size of bundle content:\n'))
913 size = 0
913 size = 0
914
914
915 clstate, deltas = self._generatechangelog(
915 clstate, deltas = self._generatechangelog(
916 cl, clnodes, generate=changelog
916 cl, clnodes, generate=changelog
917 )
917 )
918 for delta in deltas:
918 for delta in deltas:
919 for chunk in _revisiondeltatochunks(delta, self._builddeltaheader):
919 for chunk in _revisiondeltatochunks(delta, self._builddeltaheader):
920 size += len(chunk)
920 size += len(chunk)
921 yield chunk
921 yield chunk
922
922
923 close = closechunk()
923 close = closechunk()
924 size += len(close)
924 size += len(close)
925 yield closechunk()
925 yield closechunk()
926
926
927 self._verbosenote(_(b'%8.i (changelog)\n') % size)
927 self._verbosenote(_(b'%8.i (changelog)\n') % size)
928
928
929 clrevorder = clstate[b'clrevorder']
929 clrevorder = clstate[b'clrevorder']
930 manifests = clstate[b'manifests']
930 manifests = clstate[b'manifests']
931 changedfiles = clstate[b'changedfiles']
931 changedfiles = clstate[b'changedfiles']
932
932
933 # We need to make sure that the linkrev in the changegroup refers to
933 # We need to make sure that the linkrev in the changegroup refers to
934 # the first changeset that introduced the manifest or file revision.
934 # the first changeset that introduced the manifest or file revision.
935 # The fastpath is usually safer than the slowpath, because the filelogs
935 # The fastpath is usually safer than the slowpath, because the filelogs
936 # are walked in revlog order.
936 # are walked in revlog order.
937 #
937 #
938 # When taking the slowpath when the manifest revlog uses generaldelta,
938 # When taking the slowpath when the manifest revlog uses generaldelta,
939 # the manifest may be walked in the "wrong" order. Without 'clrevorder',
939 # the manifest may be walked in the "wrong" order. Without 'clrevorder',
940 # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
940 # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
941 #
941 #
942 # When taking the fastpath, we are only vulnerable to reordering
942 # When taking the fastpath, we are only vulnerable to reordering
943 # of the changelog itself. The changelog never uses generaldelta and is
943 # of the changelog itself. The changelog never uses generaldelta and is
944 # never reordered. To handle this case, we simply take the slowpath,
944 # never reordered. To handle this case, we simply take the slowpath,
945 # which already has the 'clrevorder' logic. This was also fixed in
945 # which already has the 'clrevorder' logic. This was also fixed in
946 # cc0ff93d0c0c.
946 # cc0ff93d0c0c.
947
947
948 # Treemanifests don't work correctly with fastpathlinkrev
948 # Treemanifests don't work correctly with fastpathlinkrev
949 # either, because we don't discover which directory nodes to
949 # either, because we don't discover which directory nodes to
950 # send along with files. This could probably be fixed.
950 # send along with files. This could probably be fixed.
951 fastpathlinkrev = fastpathlinkrev and (
951 fastpathlinkrev = fastpathlinkrev and (
952 b'treemanifest' not in repo.requirements
952 b'treemanifest' not in repo.requirements
953 )
953 )
954
954
955 fnodes = {} # needed file nodes
955 fnodes = {} # needed file nodes
956
956
957 size = 0
957 size = 0
958 it = self.generatemanifests(
958 it = self.generatemanifests(
959 commonrevs,
959 commonrevs,
960 clrevorder,
960 clrevorder,
961 fastpathlinkrev,
961 fastpathlinkrev,
962 manifests,
962 manifests,
963 fnodes,
963 fnodes,
964 source,
964 source,
965 clstate[b'clrevtomanifestrev'],
965 clstate[b'clrevtomanifestrev'],
966 )
966 )
967
967
968 for tree, deltas in it:
968 for tree, deltas in it:
969 if tree:
969 if tree:
970 assert self.version == b'03'
970 assert self.version == b'03'
971 chunk = _fileheader(tree)
971 chunk = _fileheader(tree)
972 size += len(chunk)
972 size += len(chunk)
973 yield chunk
973 yield chunk
974
974
975 for delta in deltas:
975 for delta in deltas:
976 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
976 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
977 for chunk in chunks:
977 for chunk in chunks:
978 size += len(chunk)
978 size += len(chunk)
979 yield chunk
979 yield chunk
980
980
981 close = closechunk()
981 close = closechunk()
982 size += len(close)
982 size += len(close)
983 yield close
983 yield close
984
984
985 self._verbosenote(_(b'%8.i (manifests)\n') % size)
985 self._verbosenote(_(b'%8.i (manifests)\n') % size)
986 yield self._manifestsend
986 yield self._manifestsend
987
987
988 mfdicts = None
988 mfdicts = None
989 if self._ellipses and self._isshallow:
989 if self._ellipses and self._isshallow:
990 mfdicts = [
990 mfdicts = [
991 (self._repo.manifestlog[n].read(), lr)
991 (self._repo.manifestlog[n].read(), lr)
992 for (n, lr) in pycompat.iteritems(manifests)
992 for (n, lr) in pycompat.iteritems(manifests)
993 ]
993 ]
994
994
995 manifests.clear()
995 manifests.clear()
996 clrevs = {cl.rev(x) for x in clnodes}
996 clrevs = {cl.rev(x) for x in clnodes}
997
997
998 it = self.generatefiles(
998 it = self.generatefiles(
999 changedfiles,
999 changedfiles,
1000 commonrevs,
1000 commonrevs,
1001 source,
1001 source,
1002 mfdicts,
1002 mfdicts,
1003 fastpathlinkrev,
1003 fastpathlinkrev,
1004 fnodes,
1004 fnodes,
1005 clrevs,
1005 clrevs,
1006 )
1006 )
1007
1007
1008 for path, deltas in it:
1008 for path, deltas in it:
1009 h = _fileheader(path)
1009 h = _fileheader(path)
1010 size = len(h)
1010 size = len(h)
1011 yield h
1011 yield h
1012
1012
1013 for delta in deltas:
1013 for delta in deltas:
1014 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
1014 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
1015 for chunk in chunks:
1015 for chunk in chunks:
1016 size += len(chunk)
1016 size += len(chunk)
1017 yield chunk
1017 yield chunk
1018
1018
1019 close = closechunk()
1019 close = closechunk()
1020 size += len(close)
1020 size += len(close)
1021 yield close
1021 yield close
1022
1022
1023 self._verbosenote(_(b'%8.i %s\n') % (size, path))
1023 self._verbosenote(_(b'%8.i %s\n') % (size, path))
1024
1024
1025 yield closechunk()
1025 yield closechunk()
1026
1026
1027 if clnodes:
1027 if clnodes:
1028 repo.hook(b'outgoing', node=hex(clnodes[0]), source=source)
1028 repo.hook(b'outgoing', node=hex(clnodes[0]), source=source)
1029
1029
1030 def _generatechangelog(self, cl, nodes, generate=True):
1030 def _generatechangelog(self, cl, nodes, generate=True):
1031 """Generate data for changelog chunks.
1031 """Generate data for changelog chunks.
1032
1032
1033 Returns a 2-tuple of a dict containing state and an iterable of
1033 Returns a 2-tuple of a dict containing state and an iterable of
1034 ``revisiondelta`` instances. The state will not be fully populated
1034 ``revisiondelta`` instances. The state will not be fully populated
1035 until the delta stream has been fully consumed.
1035 until the delta stream has been fully consumed.
1036
1036
1037 If generate is False, the state will be fully populated and no delta
1037 If generate is False, the state will be fully populated and no delta
1038 stream will be yielded.
1038 stream will be yielded.
1039 """
1039 """
1040 clrevorder = {}
1040 clrevorder = {}
1041 manifests = {}
1041 manifests = {}
1042 mfl = self._repo.manifestlog
1042 mfl = self._repo.manifestlog
1043 changedfiles = set()
1043 changedfiles = set()
1044 clrevtomanifestrev = {}
1044 clrevtomanifestrev = {}
1045
1045
1046 state = {
1046 state = {
1047 b'clrevorder': clrevorder,
1047 b'clrevorder': clrevorder,
1048 b'manifests': manifests,
1048 b'manifests': manifests,
1049 b'changedfiles': changedfiles,
1049 b'changedfiles': changedfiles,
1050 b'clrevtomanifestrev': clrevtomanifestrev,
1050 b'clrevtomanifestrev': clrevtomanifestrev,
1051 }
1051 }
1052
1052
1053 if not (generate or self._ellipses):
1053 if not (generate or self._ellipses):
1054 # sort the nodes in storage order
1054 # sort the nodes in storage order
1055 nodes = sorted(nodes, key=cl.rev)
1055 nodes = sorted(nodes, key=cl.rev)
1056 for node in nodes:
1056 for node in nodes:
1057 c = cl.changelogrevision(node)
1057 c = cl.changelogrevision(node)
1058 clrevorder[node] = len(clrevorder)
1058 clrevorder[node] = len(clrevorder)
1059 # record the first changeset introducing this manifest version
1059 # record the first changeset introducing this manifest version
1060 manifests.setdefault(c.manifest, node)
1060 manifests.setdefault(c.manifest, node)
1061 # Record a complete list of potentially-changed files in
1061 # Record a complete list of potentially-changed files in
1062 # this manifest.
1062 # this manifest.
1063 changedfiles.update(c.files)
1063 changedfiles.update(c.files)
1064
1064
1065 return state, ()
1065 return state, ()
1066
1066
1067 # Callback for the changelog, used to collect changed files and
1067 # Callback for the changelog, used to collect changed files and
1068 # manifest nodes.
1068 # manifest nodes.
1069 # Returns the linkrev node (identity in the changelog case).
1069 # Returns the linkrev node (identity in the changelog case).
1070 def lookupcl(x):
1070 def lookupcl(x):
1071 c = cl.changelogrevision(x)
1071 c = cl.changelogrevision(x)
1072 clrevorder[x] = len(clrevorder)
1072 clrevorder[x] = len(clrevorder)
1073
1073
1074 if self._ellipses:
1074 if self._ellipses:
1075 # Only update manifests if x is going to be sent. Otherwise we
1075 # Only update manifests if x is going to be sent. Otherwise we
1076 # end up with bogus linkrevs specified for manifests and
1076 # end up with bogus linkrevs specified for manifests and
1077 # we skip some manifest nodes that we should otherwise
1077 # we skip some manifest nodes that we should otherwise
1078 # have sent.
1078 # have sent.
1079 if (
1079 if (
1080 x in self._fullclnodes
1080 x in self._fullclnodes
1081 or cl.rev(x) in self._precomputedellipsis
1081 or cl.rev(x) in self._precomputedellipsis
1082 ):
1082 ):
1083
1083
1084 manifestnode = c.manifest
1084 manifestnode = c.manifest
1085 # Record the first changeset introducing this manifest
1085 # Record the first changeset introducing this manifest
1086 # version.
1086 # version.
1087 manifests.setdefault(manifestnode, x)
1087 manifests.setdefault(manifestnode, x)
1088 # Set this narrow-specific dict so we have the lowest
1088 # Set this narrow-specific dict so we have the lowest
1089 # manifest revnum to look up for this cl revnum. (Part of
1089 # manifest revnum to look up for this cl revnum. (Part of
1090 # mapping changelog ellipsis parents to manifest ellipsis
1090 # mapping changelog ellipsis parents to manifest ellipsis
1091 # parents)
1091 # parents)
1092 clrevtomanifestrev.setdefault(
1092 clrevtomanifestrev.setdefault(
1093 cl.rev(x), mfl.rev(manifestnode)
1093 cl.rev(x), mfl.rev(manifestnode)
1094 )
1094 )
1095 # We can't trust the changed files list in the changeset if the
1095 # We can't trust the changed files list in the changeset if the
1096 # client requested a shallow clone.
1096 # client requested a shallow clone.
1097 if self._isshallow:
1097 if self._isshallow:
1098 changedfiles.update(mfl[c.manifest].read().keys())
1098 changedfiles.update(mfl[c.manifest].read().keys())
1099 else:
1099 else:
1100 changedfiles.update(c.files)
1100 changedfiles.update(c.files)
1101 else:
1101 else:
1102 # record the first changeset introducing this manifest version
1102 # record the first changeset introducing this manifest version
1103 manifests.setdefault(c.manifest, x)
1103 manifests.setdefault(c.manifest, x)
1104 # Record a complete list of potentially-changed files in
1104 # Record a complete list of potentially-changed files in
1105 # this manifest.
1105 # this manifest.
1106 changedfiles.update(c.files)
1106 changedfiles.update(c.files)
1107
1107
1108 return x
1108 return x
1109
1109
1110 gen = deltagroup(
1110 gen = deltagroup(
1111 self._repo,
1111 self._repo,
1112 cl,
1112 cl,
1113 nodes,
1113 nodes,
1114 True,
1114 True,
1115 lookupcl,
1115 lookupcl,
1116 self._forcedeltaparentprev,
1116 self._forcedeltaparentprev,
1117 ellipses=self._ellipses,
1117 ellipses=self._ellipses,
1118 topic=_(b'changesets'),
1118 topic=_(b'changesets'),
1119 clrevtolocalrev={},
1119 clrevtolocalrev={},
1120 fullclnodes=self._fullclnodes,
1120 fullclnodes=self._fullclnodes,
1121 precomputedellipsis=self._precomputedellipsis,
1121 precomputedellipsis=self._precomputedellipsis,
1122 )
1122 )
1123
1123
1124 return state, gen
1124 return state, gen
1125
1125
1126 def generatemanifests(
1126 def generatemanifests(
1127 self,
1127 self,
1128 commonrevs,
1128 commonrevs,
1129 clrevorder,
1129 clrevorder,
1130 fastpathlinkrev,
1130 fastpathlinkrev,
1131 manifests,
1131 manifests,
1132 fnodes,
1132 fnodes,
1133 source,
1133 source,
1134 clrevtolocalrev,
1134 clrevtolocalrev,
1135 ):
1135 ):
1136 """Returns an iterator of changegroup chunks containing manifests.
1136 """Returns an iterator of changegroup chunks containing manifests.
1137
1137
1138 `source` is unused here, but is used by extensions like remotefilelog to
1138 `source` is unused here, but is used by extensions like remotefilelog to
1139 change what is sent based on pulls vs pushes, etc.
1139 change what is sent based on pulls vs pushes, etc.
1140 """
1140 """
1141 repo = self._repo
1141 repo = self._repo
1142 mfl = repo.manifestlog
1142 mfl = repo.manifestlog
1143 tmfnodes = {b'': manifests}
1143 tmfnodes = {b'': manifests}
1144
1144
1145 # Callback for the manifest, used to collect linkrevs for filelog
1145 # Callback for the manifest, used to collect linkrevs for filelog
1146 # revisions.
1146 # revisions.
1147 # Returns the linkrev node (collected in lookupcl).
1147 # Returns the linkrev node (collected in lookupcl).
1148 def makelookupmflinknode(tree, nodes):
1148 def makelookupmflinknode(tree, nodes):
1149 if fastpathlinkrev:
1149 if fastpathlinkrev:
1150 assert not tree
1150 assert not tree
1151 return (
1151 return (
1152 manifests.__getitem__
1152 manifests.__getitem__
1153 ) # pytype: disable=unsupported-operands
1153 ) # pytype: disable=unsupported-operands
1154
1154
1155 def lookupmflinknode(x):
1155 def lookupmflinknode(x):
1156 """Callback for looking up the linknode for manifests.
1156 """Callback for looking up the linknode for manifests.
1157
1157
1158 Returns the linkrev node for the specified manifest.
1158 Returns the linkrev node for the specified manifest.
1159
1159
1160 SIDE EFFECT:
1160 SIDE EFFECT:
1161
1161
1162 1) fclnodes gets populated with the list of relevant
1162 1) fclnodes gets populated with the list of relevant
1163 file nodes if we're not using fastpathlinkrev
1163 file nodes if we're not using fastpathlinkrev
1164 2) When treemanifests are in use, collects treemanifest nodes
1164 2) When treemanifests are in use, collects treemanifest nodes
1165 to send
1165 to send
1166
1166
1167 Note that this means manifests must be completely sent to
1167 Note that this means manifests must be completely sent to
1168 the client before you can trust the list of files and
1168 the client before you can trust the list of files and
1169 treemanifests to send.
1169 treemanifests to send.
1170 """
1170 """
1171 clnode = nodes[x]
1171 clnode = nodes[x]
1172 mdata = mfl.get(tree, x).readfast(shallow=True)
1172 mdata = mfl.get(tree, x).readfast(shallow=True)
1173 for p, n, fl in mdata.iterentries():
1173 for p, n, fl in mdata.iterentries():
1174 if fl == b't': # subdirectory manifest
1174 if fl == b't': # subdirectory manifest
1175 subtree = tree + p + b'/'
1175 subtree = tree + p + b'/'
1176 tmfclnodes = tmfnodes.setdefault(subtree, {})
1176 tmfclnodes = tmfnodes.setdefault(subtree, {})
1177 tmfclnode = tmfclnodes.setdefault(n, clnode)
1177 tmfclnode = tmfclnodes.setdefault(n, clnode)
1178 if clrevorder[clnode] < clrevorder[tmfclnode]:
1178 if clrevorder[clnode] < clrevorder[tmfclnode]:
1179 tmfclnodes[n] = clnode
1179 tmfclnodes[n] = clnode
1180 else:
1180 else:
1181 f = tree + p
1181 f = tree + p
1182 fclnodes = fnodes.setdefault(f, {})
1182 fclnodes = fnodes.setdefault(f, {})
1183 fclnode = fclnodes.setdefault(n, clnode)
1183 fclnode = fclnodes.setdefault(n, clnode)
1184 if clrevorder[clnode] < clrevorder[fclnode]:
1184 if clrevorder[clnode] < clrevorder[fclnode]:
1185 fclnodes[n] = clnode
1185 fclnodes[n] = clnode
1186 return clnode
1186 return clnode
1187
1187
1188 return lookupmflinknode
1188 return lookupmflinknode
1189
1189
1190 while tmfnodes:
1190 while tmfnodes:
1191 tree, nodes = tmfnodes.popitem()
1191 tree, nodes = tmfnodes.popitem()
1192
1192
1193 should_visit = self._matcher.visitdir(tree[:-1])
1193 should_visit = self._matcher.visitdir(tree[:-1])
1194 if tree and not should_visit:
1194 if tree and not should_visit:
1195 continue
1195 continue
1196
1196
1197 store = mfl.getstorage(tree)
1197 store = mfl.getstorage(tree)
1198
1198
1199 if not should_visit:
1199 if not should_visit:
1200 # No nodes to send because this directory is out of
1200 # No nodes to send because this directory is out of
1201 # the client's view of the repository (probably
1201 # the client's view of the repository (probably
1202 # because of narrow clones). Do this even for the root
1202 # because of narrow clones). Do this even for the root
1203 # directory (tree=='')
1203 # directory (tree=='')
1204 prunednodes = []
1204 prunednodes = []
1205 else:
1205 else:
1206 # Avoid sending any manifest nodes we can prove the
1206 # Avoid sending any manifest nodes we can prove the
1207 # client already has by checking linkrevs. See the
1207 # client already has by checking linkrevs. See the
1208 # related comment in generatefiles().
1208 # related comment in generatefiles().
1209 prunednodes = self._prunemanifests(store, nodes, commonrevs)
1209 prunednodes = self._prunemanifests(store, nodes, commonrevs)
1210
1210
1211 if tree and not prunednodes:
1211 if tree and not prunednodes:
1212 continue
1212 continue
1213
1213
1214 lookupfn = makelookupmflinknode(tree, nodes)
1214 lookupfn = makelookupmflinknode(tree, nodes)
1215
1215
1216 deltas = deltagroup(
1216 deltas = deltagroup(
1217 self._repo,
1217 self._repo,
1218 store,
1218 store,
1219 prunednodes,
1219 prunednodes,
1220 False,
1220 False,
1221 lookupfn,
1221 lookupfn,
1222 self._forcedeltaparentprev,
1222 self._forcedeltaparentprev,
1223 ellipses=self._ellipses,
1223 ellipses=self._ellipses,
1224 topic=_(b'manifests'),
1224 topic=_(b'manifests'),
1225 clrevtolocalrev=clrevtolocalrev,
1225 clrevtolocalrev=clrevtolocalrev,
1226 fullclnodes=self._fullclnodes,
1226 fullclnodes=self._fullclnodes,
1227 precomputedellipsis=self._precomputedellipsis,
1227 precomputedellipsis=self._precomputedellipsis,
1228 )
1228 )
1229
1229
1230 if not self._oldmatcher.visitdir(store.tree[:-1]):
1230 if not self._oldmatcher.visitdir(store.tree[:-1]):
1231 yield tree, deltas
1231 yield tree, deltas
1232 else:
1232 else:
1233 # 'deltas' is a generator and we need to consume it even if
1233 # 'deltas' is a generator and we need to consume it even if
1234 # we are not going to send it because a side-effect is that
1234 # we are not going to send it because a side-effect is that
1235 # it updates tmfnodes (via lookupfn)
1235 # it updates tmfnodes (via lookupfn)
1236 for d in deltas:
1236 for d in deltas:
1237 pass
1237 pass
1238 if not tree:
1238 if not tree:
1239 yield tree, []
1239 yield tree, []
1240
1240
1241 def _prunemanifests(self, store, nodes, commonrevs):
1241 def _prunemanifests(self, store, nodes, commonrevs):
1242 if not self._ellipses:
1242 if not self._ellipses:
1243 # In the non-ellipses case, and for large repositories, it is better
1243 # In the non-ellipses case, and for large repositories, it is better
1244 # to avoid calling store.rev and store.linkrev on a lot of nodes
1244 # to avoid calling store.rev and store.linkrev on a lot of nodes
1245 # than to save sending some extra data
1245 # than to save sending some extra data
1246 return nodes.copy()
1246 return nodes.copy()
1247 # This is split out as a separate method to allow filtering
1247 # This is split out as a separate method to allow filtering
1248 # commonrevs in extension code.
1248 # commonrevs in extension code.
1249 #
1249 #
1250 # TODO(augie): this shouldn't be required, instead we should
1250 # TODO(augie): this shouldn't be required, instead we should
1251 # make filtering of revisions to send delegated to the store
1251 # make filtering of revisions to send delegated to the store
1252 # layer.
1252 # layer.
1253 frev, flr = store.rev, store.linkrev
1253 frev, flr = store.rev, store.linkrev
1254 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1254 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1255
1255
1256 # The 'source' parameter is useful for extensions
1256 # The 'source' parameter is useful for extensions
1257 def generatefiles(
1257 def generatefiles(
1258 self,
1258 self,
1259 changedfiles,
1259 changedfiles,
1260 commonrevs,
1260 commonrevs,
1261 source,
1261 source,
1262 mfdicts,
1262 mfdicts,
1263 fastpathlinkrev,
1263 fastpathlinkrev,
1264 fnodes,
1264 fnodes,
1265 clrevs,
1265 clrevs,
1266 ):
1266 ):
1267 changedfiles = [
1267 changedfiles = [
1268 f
1268 f
1269 for f in changedfiles
1269 for f in changedfiles
1270 if self._matcher(f) and not self._oldmatcher(f)
1270 if self._matcher(f) and not self._oldmatcher(f)
1271 ]
1271 ]
1272
1272
1273 if not fastpathlinkrev:
1273 if not fastpathlinkrev:
1274
1274
1275 def normallinknodes(unused, fname):
1275 def normallinknodes(unused, fname):
1276 return fnodes.get(fname, {})
1276 return fnodes.get(fname, {})
1277
1277
1278 else:
1278 else:
1279 cln = self._repo.changelog.node
1279 cln = self._repo.changelog.node
1280
1280
1281 def normallinknodes(store, fname):
1281 def normallinknodes(store, fname):
1282 flinkrev = store.linkrev
1282 flinkrev = store.linkrev
1283 fnode = store.node
1283 fnode = store.node
1284 revs = ((r, flinkrev(r)) for r in store)
1284 revs = ((r, flinkrev(r)) for r in store)
1285 return {fnode(r): cln(lr) for r, lr in revs if lr in clrevs}
1285 return {fnode(r): cln(lr) for r, lr in revs if lr in clrevs}
1286
1286
1287 clrevtolocalrev = {}
1287 clrevtolocalrev = {}
1288
1288
1289 if self._isshallow:
1289 if self._isshallow:
1290 # In a shallow clone, the linknodes callback needs to also include
1290 # In a shallow clone, the linknodes callback needs to also include
1291 # those file nodes that are in the manifests we sent but weren't
1291 # those file nodes that are in the manifests we sent but weren't
1292 # introduced by those manifests.
1292 # introduced by those manifests.
1293 commonctxs = [self._repo[c] for c in commonrevs]
1293 commonctxs = [self._repo[c] for c in commonrevs]
1294 clrev = self._repo.changelog.rev
1294 clrev = self._repo.changelog.rev
1295
1295
1296 def linknodes(flog, fname):
1296 def linknodes(flog, fname):
1297 for c in commonctxs:
1297 for c in commonctxs:
1298 try:
1298 try:
1299 fnode = c.filenode(fname)
1299 fnode = c.filenode(fname)
1300 clrevtolocalrev[c.rev()] = flog.rev(fnode)
1300 clrevtolocalrev[c.rev()] = flog.rev(fnode)
1301 except error.ManifestLookupError:
1301 except error.ManifestLookupError:
1302 pass
1302 pass
1303 links = normallinknodes(flog, fname)
1303 links = normallinknodes(flog, fname)
1304 if len(links) != len(mfdicts):
1304 if len(links) != len(mfdicts):
1305 for mf, lr in mfdicts:
1305 for mf, lr in mfdicts:
1306 fnode = mf.get(fname, None)
1306 fnode = mf.get(fname, None)
1307 if fnode in links:
1307 if fnode in links:
1308 links[fnode] = min(links[fnode], lr, key=clrev)
1308 links[fnode] = min(links[fnode], lr, key=clrev)
1309 elif fnode:
1309 elif fnode:
1310 links[fnode] = lr
1310 links[fnode] = lr
1311 return links
1311 return links
1312
1312
1313 else:
1313 else:
1314 linknodes = normallinknodes
1314 linknodes = normallinknodes
1315
1315
1316 repo = self._repo
1316 repo = self._repo
1317 progress = repo.ui.makeprogress(
1317 progress = repo.ui.makeprogress(
1318 _(b'files'), unit=_(b'files'), total=len(changedfiles)
1318 _(b'files'), unit=_(b'files'), total=len(changedfiles)
1319 )
1319 )
1320 for i, fname in enumerate(sorted(changedfiles)):
1320 for i, fname in enumerate(sorted(changedfiles)):
1321 filerevlog = repo.file(fname)
1321 filerevlog = repo.file(fname)
1322 if not filerevlog:
1322 if not filerevlog:
1323 raise error.Abort(
1323 raise error.Abort(
1324 _(b"empty or missing file data for %s") % fname
1324 _(b"empty or missing file data for %s") % fname
1325 )
1325 )
1326
1326
1327 clrevtolocalrev.clear()
1327 clrevtolocalrev.clear()
1328
1328
1329 linkrevnodes = linknodes(filerevlog, fname)
1329 linkrevnodes = linknodes(filerevlog, fname)
1330 # Lookup for filenodes, we collected the linkrev nodes above in the
1330 # Lookup for filenodes, we collected the linkrev nodes above in the
1331 # fastpath case and with lookupmf in the slowpath case.
1331 # fastpath case and with lookupmf in the slowpath case.
1332 def lookupfilelog(x):
1332 def lookupfilelog(x):
1333 return linkrevnodes[x]
1333 return linkrevnodes[x]
1334
1334
1335 frev, flr = filerevlog.rev, filerevlog.linkrev
1335 frev, flr = filerevlog.rev, filerevlog.linkrev
1336 # Skip sending any filenode we know the client already
1336 # Skip sending any filenode we know the client already
1337 # has. This avoids over-sending files relatively
1337 # has. This avoids over-sending files relatively
1338 # inexpensively, so it's not a problem if we under-filter
1338 # inexpensively, so it's not a problem if we under-filter
1339 # here.
1339 # here.
1340 filenodes = [
1340 filenodes = [
1341 n for n in linkrevnodes if flr(frev(n)) not in commonrevs
1341 n for n in linkrevnodes if flr(frev(n)) not in commonrevs
1342 ]
1342 ]
1343
1343
1344 if not filenodes:
1344 if not filenodes:
1345 continue
1345 continue
1346
1346
1347 progress.update(i + 1, item=fname)
1347 progress.update(i + 1, item=fname)
1348
1348
1349 deltas = deltagroup(
1349 deltas = deltagroup(
1350 self._repo,
1350 self._repo,
1351 filerevlog,
1351 filerevlog,
1352 filenodes,
1352 filenodes,
1353 False,
1353 False,
1354 lookupfilelog,
1354 lookupfilelog,
1355 self._forcedeltaparentprev,
1355 self._forcedeltaparentprev,
1356 ellipses=self._ellipses,
1356 ellipses=self._ellipses,
1357 clrevtolocalrev=clrevtolocalrev,
1357 clrevtolocalrev=clrevtolocalrev,
1358 fullclnodes=self._fullclnodes,
1358 fullclnodes=self._fullclnodes,
1359 precomputedellipsis=self._precomputedellipsis,
1359 precomputedellipsis=self._precomputedellipsis,
1360 )
1360 )
1361
1361
1362 yield fname, deltas
1362 yield fname, deltas
1363
1363
1364 progress.complete()
1364 progress.complete()
1365
1365
1366
1366
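# Editorial note (illustrative sketch, not part of this module): generate()
# above emits one long stream in a fixed order - changelog group, one group
# per manifest tree (named trees only exist for version 03), the manifestsend
# marker, one group per file, and a final close chunk.  The toy assembler
# below only names those segments so the overall shape is easy to see; it does
# not produce real changegroup bytes, and all names are hypothetical.
def _example_stream_skeleton(trees, files):
    segments = [b'<changelog deltas>', b'<close>']
    for tree in trees:
        if tree:
            segments.append(b'<fileheader:' + tree + b'>')
        segments.extend([b'<manifest deltas>', b'<close>'])
    segments.append(b'<manifestsend>')
    for fname in files:
        segments.extend([b'<fileheader:' + fname + b'>', b'<file deltas>', b'<close>'])
    segments.append(b'<close>')
    return segments

# _example_stream_skeleton([b'', b'dir/'], [b'a.txt']) lists the segment order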
1367 def _makecg1packer(
1367 def _makecg1packer(
1368 repo,
1368 repo,
1369 oldmatcher,
1369 oldmatcher,
1370 matcher,
1370 matcher,
1371 bundlecaps,
1371 bundlecaps,
1372 ellipses=False,
1372 ellipses=False,
1373 shallow=False,
1373 shallow=False,
1374 ellipsisroots=None,
1374 ellipsisroots=None,
1375 fullnodes=None,
1375 fullnodes=None,
1376 ):
1376 ):
1377 builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
1377 builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
1378 d.node, d.p1node, d.p2node, d.linknode
1378 d.node, d.p1node, d.p2node, d.linknode
1379 )
1379 )
1380
1380
1381 return cgpacker(
1381 return cgpacker(
1382 repo,
1382 repo,
1383 oldmatcher,
1383 oldmatcher,
1384 matcher,
1384 matcher,
1385 b'01',
1385 b'01',
1386 builddeltaheader=builddeltaheader,
1386 builddeltaheader=builddeltaheader,
1387 manifestsend=b'',
1387 manifestsend=b'',
1388 forcedeltaparentprev=True,
1388 forcedeltaparentprev=True,
1389 bundlecaps=bundlecaps,
1389 bundlecaps=bundlecaps,
1390 ellipses=ellipses,
1390 ellipses=ellipses,
1391 shallow=shallow,
1391 shallow=shallow,
1392 ellipsisroots=ellipsisroots,
1392 ellipsisroots=ellipsisroots,
1393 fullnodes=fullnodes,
1393 fullnodes=fullnodes,
1394 )
1394 )
1395
1395
1396
1396
1397 def _makecg2packer(
1397 def _makecg2packer(
1398 repo,
1398 repo,
1399 oldmatcher,
1399 oldmatcher,
1400 matcher,
1400 matcher,
1401 bundlecaps,
1401 bundlecaps,
1402 ellipses=False,
1402 ellipses=False,
1403 shallow=False,
1403 shallow=False,
1404 ellipsisroots=None,
1404 ellipsisroots=None,
1405 fullnodes=None,
1405 fullnodes=None,
1406 ):
1406 ):
1407 builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
1407 builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
1408 d.node, d.p1node, d.p2node, d.basenode, d.linknode
1408 d.node, d.p1node, d.p2node, d.basenode, d.linknode
1409 )
1409 )
1410
1410
1411 return cgpacker(
1411 return cgpacker(
1412 repo,
1412 repo,
1413 oldmatcher,
1413 oldmatcher,
1414 matcher,
1414 matcher,
1415 b'02',
1415 b'02',
1416 builddeltaheader=builddeltaheader,
1416 builddeltaheader=builddeltaheader,
1417 manifestsend=b'',
1417 manifestsend=b'',
1418 bundlecaps=bundlecaps,
1418 bundlecaps=bundlecaps,
1419 ellipses=ellipses,
1419 ellipses=ellipses,
1420 shallow=shallow,
1420 shallow=shallow,
1421 ellipsisroots=ellipsisroots,
1421 ellipsisroots=ellipsisroots,
1422 fullnodes=fullnodes,
1422 fullnodes=fullnodes,
1423 )
1423 )
1424
1424
1425
1425
1426 def _makecg3packer(
1426 def _makecg3packer(
1427 repo,
1427 repo,
1428 oldmatcher,
1428 oldmatcher,
1429 matcher,
1429 matcher,
1430 bundlecaps,
1430 bundlecaps,
1431 ellipses=False,
1431 ellipses=False,
1432 shallow=False,
1432 shallow=False,
1433 ellipsisroots=None,
1433 ellipsisroots=None,
1434 fullnodes=None,
1434 fullnodes=None,
1435 ):
1435 ):
1436 builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
1436 builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
1437 d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
1437 d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
1438 )
1438 )
1439
1439
1440 return cgpacker(
1440 return cgpacker(
1441 repo,
1441 repo,
1442 oldmatcher,
1442 oldmatcher,
1443 matcher,
1443 matcher,
1444 b'03',
1444 b'03',
1445 builddeltaheader=builddeltaheader,
1445 builddeltaheader=builddeltaheader,
1446 manifestsend=closechunk(),
1446 manifestsend=closechunk(),
1447 bundlecaps=bundlecaps,
1447 bundlecaps=bundlecaps,
1448 ellipses=ellipses,
1448 ellipses=ellipses,
1449 shallow=shallow,
1449 shallow=shallow,
1450 ellipsisroots=ellipsisroots,
1450 ellipsisroots=ellipsisroots,
1451 fullnodes=fullnodes,
1451 fullnodes=fullnodes,
1452 )
1452 )
1453
1453
1454
1454
1455 _packermap = {
1455 _packermap = {
1456 b'01': (_makecg1packer, cg1unpacker),
1456 b'01': (_makecg1packer, cg1unpacker),
1457 # cg2 adds support for exchanging generaldelta
1457 # cg2 adds support for exchanging generaldelta
1458 b'02': (_makecg2packer, cg2unpacker),
1458 b'02': (_makecg2packer, cg2unpacker),
1459 # cg3 adds support for exchanging revlog flags and treemanifests
1459 # cg3 adds support for exchanging revlog flags and treemanifests
1460 b'03': (_makecg3packer, cg3unpacker),
1460 b'03': (_makecg3packer, cg3unpacker),
1461 }
1461 }
1462
1462
1463
1463
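As a rough, self-contained sketch (plain Python, illustrative names only, not the real factories) of the dispatch pattern _packermap implements above: each changegroup version maps to a (packer factory, unpacker) pair, and callers select an entry by version string.

registry = {
    b'01': ('cg1 packer factory', 'cg1 unpacker'),
    b'02': ('cg2 packer factory', 'cg2 unpacker'),
    b'03': ('cg3 packer factory', 'cg3 unpacker'),
}

def pick(version):
    # getbundler()/getunbundler() below do the same lookup, after checking
    # that the version is actually supported by the repository
    return registry[version]

assert pick(b'02') == ('cg2 packer factory', 'cg2 unpacker')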
1464 def allsupportedversions(repo):
1464 def allsupportedversions(repo):
1465 versions = set(_packermap.keys())
1465 versions = set(_packermap.keys())
1466 needv03 = False
1466 needv03 = False
1467 if (
1467 if (
1468 repo.ui.configbool(b'experimental', b'changegroup3')
1468 repo.ui.configbool(b'experimental', b'changegroup3')
1469 or repo.ui.configbool(b'experimental', b'treemanifest')
1469 or repo.ui.configbool(b'experimental', b'treemanifest')
1470 or b'treemanifest' in repo.requirements
1470 or b'treemanifest' in repo.requirements
1471 ):
1471 ):
1472 # we keep version 03 because we need to exchange treemanifest data
1472 # we keep version 03 because we need to exchange treemanifest data
1473 #
1473 #
1474 # we also keep versions 01 and 02, because it is possible for a repo to
1474 # we also keep versions 01 and 02, because it is possible for a repo to
1475 # contain both normal and tree manifests at the same time, so using an
1475 # contain both normal and tree manifests at the same time, so using an
1476 # older version to pull data is viable
1476 # older version to pull data is viable
1477 #
1477 #
1478 # (or even to push subset of history)
1478 # (or even to push subset of history)
1479 needv03 = True
1479 needv03 = True
1480 if b'exp-sidedata-flag' in repo.requirements:
1480 if b'exp-sidedata-flag' in repo.requirements:
1481 needv03 = True
1481 needv03 = True
1482 # don't attempt to use 01/02 until we do sidedata cleaning
1482 # don't attempt to use 01/02 until we do sidedata cleaning
1483 versions.discard(b'01')
1483 versions.discard(b'01')
1484 versions.discard(b'02')
1484 versions.discard(b'02')
1485 if not needv03:
1485 if not needv03:
1486 versions.discard(b'03')
1486 versions.discard(b'03')
1487 return versions
1487 return versions
1488
1488
1489
1489
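A minimal standalone sketch of the filtering performed by allsupportedversions() above; the boolean parameters are hypothetical stand-ins for the config and requirement checks, not real Mercurial options.

def filter_versions(needs_cg3, has_sidedata):
    # start from every known version and discard what the repo rules out
    versions = {b'01', b'02', b'03'}
    needv03 = needs_cg3            # e.g. treemanifest usage
    if has_sidedata:               # exp-sidedata-flag requirement
        needv03 = True
        versions.discard(b'01')    # 01/02 cannot carry sidedata yet
        versions.discard(b'02')
    if not needv03:
        versions.discard(b'03')
    return versions

assert filter_versions(False, False) == {b'01', b'02'}
assert filter_versions(True, True) == {b'03'}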
1490 # Changegroup versions that can be applied to the repo
1490 # Changegroup versions that can be applied to the repo
1491 def supportedincomingversions(repo):
1491 def supportedincomingversions(repo):
1492 return allsupportedversions(repo)
1492 return allsupportedversions(repo)
1493
1493
1494
1494
1495 # Changegroup versions that can be created from the repo
1495 # Changegroup versions that can be created from the repo
1496 def supportedoutgoingversions(repo):
1496 def supportedoutgoingversions(repo):
1497 versions = allsupportedversions(repo)
1497 versions = allsupportedversions(repo)
1498 if b'treemanifest' in repo.requirements:
1498 if b'treemanifest' in repo.requirements:
1499 # Versions 01 and 02 support only flat manifests and it's just too
1499 # Versions 01 and 02 support only flat manifests and it's just too
1500 # expensive to convert between the flat manifest and tree manifest on
1500 # expensive to convert between the flat manifest and tree manifest on
1501 # the fly. Since tree manifests are hashed differently, all of history
1501 # the fly. Since tree manifests are hashed differently, all of history
1502 # would have to be converted. Instead, we simply don't even pretend to
1502 # would have to be converted. Instead, we simply don't even pretend to
1503 # support versions 01 and 02.
1503 # support versions 01 and 02.
1504 versions.discard(b'01')
1504 versions.discard(b'01')
1505 versions.discard(b'02')
1505 versions.discard(b'02')
1506 if repository.NARROW_REQUIREMENT in repo.requirements:
1506 if repository.NARROW_REQUIREMENT in repo.requirements:
1507 # Versions 01 and 02 don't support revlog flags, and we need to
1507 # Versions 01 and 02 don't support revlog flags, and we need to
1508 # support that for stripping and unbundling to work.
1508 # support that for stripping and unbundling to work.
1509 versions.discard(b'01')
1509 versions.discard(b'01')
1510 versions.discard(b'02')
1510 versions.discard(b'02')
1511 if LFS_REQUIREMENT in repo.requirements:
1511 if LFS_REQUIREMENT in repo.requirements:
1512 # Versions 01 and 02 don't support revlog flags, and we need to
1512 # Versions 01 and 02 don't support revlog flags, and we need to
1513 # mark LFS entries with REVIDX_EXTSTORED.
1513 # mark LFS entries with REVIDX_EXTSTORED.
1514 versions.discard(b'01')
1514 versions.discard(b'01')
1515 versions.discard(b'02')
1515 versions.discard(b'02')
1516
1516
1517 return versions
1517 return versions
1518
1518
1519
1519
1520 def localversion(repo):
1520 def localversion(repo):
1521 # Finds the best version to use for bundles that are meant to be used
1521 # Finds the best version to use for bundles that are meant to be used
1522 # locally, such as those from strip and shelve, and temporary bundles.
1522 # locally, such as those from strip and shelve, and temporary bundles.
1523 return max(supportedoutgoingversions(repo))
1523 return max(supportedoutgoingversions(repo))
1524
1524
1525
1525
1526 def safeversion(repo):
1526 def safeversion(repo):
1527 # Finds the smallest version that it's safe to assume clients of the repo
1527 # Finds the smallest version that it's safe to assume clients of the repo
1528 # will support. For example, all hg versions that support generaldelta also
1528 # will support. For example, all hg versions that support generaldelta also
1529 # support changegroup 02.
1529 # support changegroup 02.
1530 versions = supportedoutgoingversions(repo)
1530 versions = supportedoutgoingversions(repo)
1531 if b'generaldelta' in repo.requirements:
1531 if b'generaldelta' in repo.requirements:
1532 versions.discard(b'01')
1532 versions.discard(b'01')
1533 assert versions
1533 assert versions
1534 return min(versions)
1534 return min(versions)
1535
1535
1536
1536
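For contrast, a tiny standalone sketch of localversion() versus safeversion() above: local bundles (strip, shelve) take the richest supported version via max(), while safeversion() drops '01' when generaldelta is required and then takes the most broadly readable version via min(). The inputs here are hypothetical.

def local_version(supported):
    # best format for bundles that never leave this repository
    return max(supported)

def safe_version(supported, generaldelta):
    # smallest version every expected client can read
    versions = set(supported)
    if generaldelta:
        versions.discard(b'01')
    assert versions
    return min(versions)

supported = {b'01', b'02', b'03'}
assert local_version(supported) == b'03'
assert safe_version(supported, generaldelta=True) == b'02'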
1537 def getbundler(
1537 def getbundler(
1538 version,
1538 version,
1539 repo,
1539 repo,
1540 bundlecaps=None,
1540 bundlecaps=None,
1541 oldmatcher=None,
1541 oldmatcher=None,
1542 matcher=None,
1542 matcher=None,
1543 ellipses=False,
1543 ellipses=False,
1544 shallow=False,
1544 shallow=False,
1545 ellipsisroots=None,
1545 ellipsisroots=None,
1546 fullnodes=None,
1546 fullnodes=None,
1547 ):
1547 ):
1548 assert version in supportedoutgoingversions(repo)
1548 assert version in supportedoutgoingversions(repo)
1549
1549
1550 if matcher is None:
1550 if matcher is None:
1551 matcher = matchmod.always()
1551 matcher = matchmod.always()
1552 if oldmatcher is None:
1552 if oldmatcher is None:
1553 oldmatcher = matchmod.never()
1553 oldmatcher = matchmod.never()
1554
1554
1555 if version == b'01' and not matcher.always():
1555 if version == b'01' and not matcher.always():
1556 raise error.ProgrammingError(
1556 raise error.ProgrammingError(
1557 b'version 01 changegroups do not support sparse file matchers'
1557 b'version 01 changegroups do not support sparse file matchers'
1558 )
1558 )
1559
1559
1560 if ellipses and version in (b'01', b'02'):
1560 if ellipses and version in (b'01', b'02'):
1561 raise error.Abort(
1561 raise error.Abort(
1562 _(
1562 _(
1563 b'ellipsis nodes require at least cg3 on client and server, '
1563 b'ellipsis nodes require at least cg3 on client and server, '
1564 b'but negotiated version %s'
1564 b'but negotiated version %s'
1565 )
1565 )
1566 % version
1566 % version
1567 )
1567 )
1568
1568
1569 # Requested files could include files not in the local store. So
1569 # Requested files could include files not in the local store. So
1570 # filter those out.
1570 # filter those out.
1571 matcher = repo.narrowmatch(matcher)
1571 matcher = repo.narrowmatch(matcher)
1572
1572
1573 fn = _packermap[version][0]
1573 fn = _packermap[version][0]
1574 return fn(
1574 return fn(
1575 repo,
1575 repo,
1576 oldmatcher,
1576 oldmatcher,
1577 matcher,
1577 matcher,
1578 bundlecaps,
1578 bundlecaps,
1579 ellipses=ellipses,
1579 ellipses=ellipses,
1580 shallow=shallow,
1580 shallow=shallow,
1581 ellipsisroots=ellipsisroots,
1581 ellipsisroots=ellipsisroots,
1582 fullnodes=fullnodes,
1582 fullnodes=fullnodes,
1583 )
1583 )
1584
1584
1585
1585
1586 def getunbundler(version, fh, alg, extras=None):
1586 def getunbundler(version, fh, alg, extras=None):
1587 return _packermap[version][1](fh, alg, extras=extras)
1587 return _packermap[version][1](fh, alg, extras=extras)
1588
1588
1589
1589
1590 def _changegroupinfo(repo, nodes, source):
1590 def _changegroupinfo(repo, nodes, source):
1591 if repo.ui.verbose or source == b'bundle':
1591 if repo.ui.verbose or source == b'bundle':
1592 repo.ui.status(_(b"%d changesets found\n") % len(nodes))
1592 repo.ui.status(_(b"%d changesets found\n") % len(nodes))
1593 if repo.ui.debugflag:
1593 if repo.ui.debugflag:
1594 repo.ui.debug(b"list of changesets:\n")
1594 repo.ui.debug(b"list of changesets:\n")
1595 for node in nodes:
1595 for node in nodes:
1596 repo.ui.debug(b"%s\n" % hex(node))
1596 repo.ui.debug(b"%s\n" % hex(node))
1597
1597
1598
1598
1599 def makechangegroup(
1599 def makechangegroup(
1600 repo, outgoing, version, source, fastpath=False, bundlecaps=None
1600 repo, outgoing, version, source, fastpath=False, bundlecaps=None
1601 ):
1601 ):
1602 cgstream = makestream(
1602 cgstream = makestream(
1603 repo,
1603 repo,
1604 outgoing,
1604 outgoing,
1605 version,
1605 version,
1606 source,
1606 source,
1607 fastpath=fastpath,
1607 fastpath=fastpath,
1608 bundlecaps=bundlecaps,
1608 bundlecaps=bundlecaps,
1609 )
1609 )
1610 return getunbundler(
1610 return getunbundler(
1611 version,
1611 version,
1612 util.chunkbuffer(cgstream),
1612 util.chunkbuffer(cgstream),
1613 None,
1613 None,
1614 {b'clcount': len(outgoing.missing)},
1614 {b'clcount': len(outgoing.missing)},
1615 )
1615 )
1616
1616
1617
1617
1618 def makestream(
1618 def makestream(
1619 repo,
1619 repo,
1620 outgoing,
1620 outgoing,
1621 version,
1621 version,
1622 source,
1622 source,
1623 fastpath=False,
1623 fastpath=False,
1624 bundlecaps=None,
1624 bundlecaps=None,
1625 matcher=None,
1625 matcher=None,
1626 ):
1626 ):
1627 bundler = getbundler(version, repo, bundlecaps=bundlecaps, matcher=matcher)
1627 bundler = getbundler(version, repo, bundlecaps=bundlecaps, matcher=matcher)
1628
1628
1629 repo = repo.unfiltered()
1629 repo = repo.unfiltered()
1630 commonrevs = outgoing.common
1630 commonrevs = outgoing.common
1631 csets = outgoing.missing
1631 csets = outgoing.missing
1632 heads = outgoing.missingheads
1632 heads = outgoing.ancestorsof
1633 # We go through the fast path if we get told to, or if all (unfiltered)
1633 # We go through the fast path if we get told to, or if all (unfiltered)
1634 # heads have been requested (since we then know that all linkrevs will
1634 # heads have been requested (since we then know that all linkrevs will
1635 # be pulled by the client).
1635 # be pulled by the client).
1636 heads.sort()
1636 heads.sort()
1637 fastpathlinkrev = fastpath or (
1637 fastpathlinkrev = fastpath or (
1638 repo.filtername is None and heads == sorted(repo.heads())
1638 repo.filtername is None and heads == sorted(repo.heads())
1639 )
1639 )
1640
1640
1641 repo.hook(b'preoutgoing', throw=True, source=source)
1641 repo.hook(b'preoutgoing', throw=True, source=source)
1642 _changegroupinfo(repo, csets, source)
1642 _changegroupinfo(repo, csets, source)
1643 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
1643 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
1644
1644
1645
1645
1646 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
1646 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
1647 revisions = 0
1647 revisions = 0
1648 files = 0
1648 files = 0
1649 progress = repo.ui.makeprogress(
1649 progress = repo.ui.makeprogress(
1650 _(b'files'), unit=_(b'files'), total=expectedfiles
1650 _(b'files'), unit=_(b'files'), total=expectedfiles
1651 )
1651 )
1652 for chunkdata in iter(source.filelogheader, {}):
1652 for chunkdata in iter(source.filelogheader, {}):
1653 files += 1
1653 files += 1
1654 f = chunkdata[b"filename"]
1654 f = chunkdata[b"filename"]
1655 repo.ui.debug(b"adding %s revisions\n" % f)
1655 repo.ui.debug(b"adding %s revisions\n" % f)
1656 progress.increment()
1656 progress.increment()
1657 fl = repo.file(f)
1657 fl = repo.file(f)
1658 o = len(fl)
1658 o = len(fl)
1659 try:
1659 try:
1660 deltas = source.deltaiter()
1660 deltas = source.deltaiter()
1661 if not fl.addgroup(deltas, revmap, trp):
1661 if not fl.addgroup(deltas, revmap, trp):
1662 raise error.Abort(_(b"received file revlog group is empty"))
1662 raise error.Abort(_(b"received file revlog group is empty"))
1663 except error.CensoredBaseError as e:
1663 except error.CensoredBaseError as e:
1664 raise error.Abort(_(b"received delta base is censored: %s") % e)
1664 raise error.Abort(_(b"received delta base is censored: %s") % e)
1665 revisions += len(fl) - o
1665 revisions += len(fl) - o
1666 if f in needfiles:
1666 if f in needfiles:
1667 needs = needfiles[f]
1667 needs = needfiles[f]
1668 for new in pycompat.xrange(o, len(fl)):
1668 for new in pycompat.xrange(o, len(fl)):
1669 n = fl.node(new)
1669 n = fl.node(new)
1670 if n in needs:
1670 if n in needs:
1671 needs.remove(n)
1671 needs.remove(n)
1672 else:
1672 else:
1673 raise error.Abort(_(b"received spurious file revlog entry"))
1673 raise error.Abort(_(b"received spurious file revlog entry"))
1674 if not needs:
1674 if not needs:
1675 del needfiles[f]
1675 del needfiles[f]
1676 progress.complete()
1676 progress.complete()
1677
1677
1678 for f, needs in pycompat.iteritems(needfiles):
1678 for f, needs in pycompat.iteritems(needfiles):
1679 fl = repo.file(f)
1679 fl = repo.file(f)
1680 for n in needs:
1680 for n in needs:
1681 try:
1681 try:
1682 fl.rev(n)
1682 fl.rev(n)
1683 except error.LookupError:
1683 except error.LookupError:
1684 raise error.Abort(
1684 raise error.Abort(
1685 _(b'missing file data for %s:%s - run hg verify')
1685 _(b'missing file data for %s:%s - run hg verify')
1686 % (f, hex(n))
1686 % (f, hex(n))
1687 )
1687 )
1688
1688
1689 return revisions, files
1689 return revisions, files
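A short standalone sketch (toy data, not real nodes) of the bookkeeping _addchangegroupfiles() performs above: expected file nodes are checked off as revisions arrive, an unexpected node for a tracked file is an error, and anything still outstanding at the end means data is missing.

def apply_filelogs(received, needfiles):
    # received: {filename: [nodes actually added]}
    # needfiles: {filename: set of nodes we expected to receive}
    for f, nodes in received.items():
        needs = needfiles.get(f, set())
        for n in nodes:
            if n in needs:
                needs.remove(n)
            elif f in needfiles:
                raise ValueError('received spurious file revlog entry')
        if f in needfiles and not needs:
            del needfiles[f]
    if needfiles:
        raise ValueError('missing file data - run hg verify')

apply_filelogs({'a.txt': ['n1', 'n2']}, {'a.txt': {'n1', 'n2'}})  # completes cleanly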
@@ -1,616 +1,620 b''
1 # discovery.py - protocol changeset discovery functions
1 # discovery.py - protocol changeset discovery functions
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import (
13 from .node import (
14 hex,
14 hex,
15 nullid,
15 nullid,
16 short,
16 short,
17 )
17 )
18
18
19 from . import (
19 from . import (
20 bookmarks,
20 bookmarks,
21 branchmap,
21 branchmap,
22 error,
22 error,
23 phases,
23 phases,
24 pycompat,
24 pycompat,
25 scmutil,
25 scmutil,
26 setdiscovery,
26 setdiscovery,
27 treediscovery,
27 treediscovery,
28 util,
28 util,
29 )
29 )
30
30
31
31
32 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
32 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
33 """Return a tuple (common, anyincoming, heads) used to identify the common
33 """Return a tuple (common, anyincoming, heads) used to identify the common
34 subset of nodes between repo and remote.
34 subset of nodes between repo and remote.
35
35
36 "common" is a list of (at least) the heads of the common subset.
36 "common" is a list of (at least) the heads of the common subset.
37 "anyincoming" is testable as a boolean indicating if any nodes are missing
37 "anyincoming" is testable as a boolean indicating if any nodes are missing
38 locally. If remote does not support getbundle, this actually is a list of
38 locally. If remote does not support getbundle, this actually is a list of
39 roots of the nodes that would be incoming, to be supplied to
39 roots of the nodes that would be incoming, to be supplied to
40 changegroupsubset. No code except for pull should be relying on this fact
40 changegroupsubset. No code except for pull should be relying on this fact
41 any longer.
41 any longer.
42 "heads" is either the supplied heads, or else the remote's heads.
42 "heads" is either the supplied heads, or else the remote's heads.
43 "ancestorsof" if not None, restrict the discovery to a subset defined by
43 "ancestorsof" if not None, restrict the discovery to a subset defined by
44 these nodes. Changeset outside of this set won't be considered (but may
44 these nodes. Changeset outside of this set won't be considered (but may
45 still appear in "common").
45 still appear in "common").
46
46
47 If you pass heads and they are all known locally, the response lists just
47 If you pass heads and they are all known locally, the response lists just
48 these heads in "common" and in "heads".
48 these heads in "common" and in "heads".
49
49
50 Please use findcommonoutgoing to compute the set of outgoing nodes to give
50 Please use findcommonoutgoing to compute the set of outgoing nodes to give
51 extensions a good hook into outgoing.
51 extensions a good hook into outgoing.
52 """
52 """
53
53
54 if not remote.capable(b'getbundle'):
54 if not remote.capable(b'getbundle'):
55 return treediscovery.findcommonincoming(repo, remote, heads, force)
55 return treediscovery.findcommonincoming(repo, remote, heads, force)
56
56
57 if heads:
57 if heads:
58 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
58 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
59 if all(knownnode(h) for h in heads):
59 if all(knownnode(h) for h in heads):
60 return (heads, False, heads)
60 return (heads, False, heads)
61
61
62 res = setdiscovery.findcommonheads(
62 res = setdiscovery.findcommonheads(
63 repo.ui,
63 repo.ui,
64 repo,
64 repo,
65 remote,
65 remote,
66 abortwhenunrelated=not force,
66 abortwhenunrelated=not force,
67 ancestorsof=ancestorsof,
67 ancestorsof=ancestorsof,
68 )
68 )
69 common, anyinc, srvheads = res
69 common, anyinc, srvheads = res
70 if heads and not anyinc:
70 if heads and not anyinc:
71 # server could be lying on the advertised heads
71 # server could be lying on the advertised heads
72 has_node = repo.changelog.hasnode
72 has_node = repo.changelog.hasnode
73 anyinc = any(not has_node(n) for n in heads)
73 anyinc = any(not has_node(n) for n in heads)
74 return (list(common), anyinc, heads or list(srvheads))
74 return (list(common), anyinc, heads or list(srvheads))
75
75
76
76
77 class outgoing(object):
77 class outgoing(object):
78 '''Represents the result of a findcommonoutgoing() call.
78 '''Represents the result of a findcommonoutgoing() call.
79
79
80 Members:
80 Members:
81
81
82 ancestorsof is a list of the nodes whose ancestors are included in the
82 ancestorsof is a list of the nodes whose ancestors are included in the
83 outgoing operation.
83 outgoing operation.
84
84
85 missing is a list of those ancestors of ancestorsof that are present in
85 missing is a list of those ancestors of ancestorsof that are present in
86 local but not in remote.
86 local but not in remote.
87
87
88 common is a set containing revs common between the local and the remote
88 common is a set containing revs common between the local and the remote
89 repository (at least all of those that are ancestors of ancestorsof).
89 repository (at least all of those that are ancestors of ancestorsof).
90
90
91 commonheads is the list of heads of common.
91 commonheads is the list of heads of common.
92
92
93 excluded is the list of missing changesets that shouldn't be sent
93 excluded is the list of missing changesets that shouldn't be sent
94 remotely.
94 remotely.
95
95
96 missingheads is an alias to ancestorsof, but the name is wrong and it
97 will be removed
98
99 Some members are computed on demand from the heads, unless provided upfront
96 Some members are computed on demand from the heads, unless provided upfront
100 by discovery.'''
97 by discovery.'''
101
98
102 def __init__(
99 def __init__(
103 self, repo, commonheads=None, missingheads=None, missingroots=None
100 self, repo, commonheads=None, ancestorsof=None, missingroots=None
104 ):
101 ):
105 # at least one of them must not be set
102 # at least one of them must not be set
106 assert None in (commonheads, missingroots)
103 assert None in (commonheads, missingroots)
107 cl = repo.changelog
104 cl = repo.changelog
108 if missingheads is None:
105 if ancestorsof is None:
109 missingheads = cl.heads()
106 ancestorsof = cl.heads()
110 if missingroots:
107 if missingroots:
111 discbases = []
108 discbases = []
112 for n in missingroots:
109 for n in missingroots:
113 discbases.extend([p for p in cl.parents(n) if p != nullid])
110 discbases.extend([p for p in cl.parents(n) if p != nullid])
114 # TODO remove call to nodesbetween.
111 # TODO remove call to nodesbetween.
115 # TODO populate attributes on outgoing instance instead of setting
112 # TODO populate attributes on outgoing instance instead of setting
116 # discbases.
113 # discbases.
117 csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
114 csets, roots, heads = cl.nodesbetween(missingroots, ancestorsof)
118 included = set(csets)
115 included = set(csets)
119 missingheads = heads
116 ancestorsof = heads
120 commonheads = [n for n in discbases if n not in included]
117 commonheads = [n for n in discbases if n not in included]
121 elif not commonheads:
118 elif not commonheads:
122 commonheads = [nullid]
119 commonheads = [nullid]
123 self.commonheads = commonheads
120 self.commonheads = commonheads
124 self.missingheads = missingheads
121 self.ancestorsof = ancestorsof
125 self._revlog = cl
122 self._revlog = cl
126 self._common = None
123 self._common = None
127 self._missing = None
124 self._missing = None
128 self.excluded = []
125 self.excluded = []
129
126
130 def _computecommonmissing(self):
127 def _computecommonmissing(self):
131 sets = self._revlog.findcommonmissing(
128 sets = self._revlog.findcommonmissing(
132 self.commonheads, self.missingheads
129 self.commonheads, self.ancestorsof
133 )
130 )
134 self._common, self._missing = sets
131 self._common, self._missing = sets
135
132
136 @util.propertycache
133 @util.propertycache
137 def common(self):
134 def common(self):
138 if self._common is None:
135 if self._common is None:
139 self._computecommonmissing()
136 self._computecommonmissing()
140 return self._common
137 return self._common
141
138
142 @util.propertycache
139 @util.propertycache
143 def missing(self):
140 def missing(self):
144 if self._missing is None:
141 if self._missing is None:
145 self._computecommonmissing()
142 self._computecommonmissing()
146 return self._missing
143 return self._missing
147
144
148 @property
145 @property
149 def ancestorsof(self):
146 def missingheads(self):
150 return self.missingheads
147 util.nouideprecwarn(
148 b'outgoing.missingheads never contained what the name suggests and '
149 b'was renamed to outgoing.ancestorsof. check your code for '
150 b'correctness.',
151 b'5.5',
152 stacklevel=2,
153 )
154 return self.ancestorsof
151
155
152
156
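A self-contained sketch of the rename-with-deprecation pattern the outgoing class above now uses, written with the standard warnings module instead of Mercurial's util.nouideprecwarn: the data lives in ancestorsof, and missingheads survives only as a forwarding property that warns.

import warnings

class Outgoing(object):
    def __init__(self, ancestorsof):
        self.ancestorsof = ancestorsof

    @property
    def missingheads(self):
        # old, misleading name: warn and forward to the new attribute
        warnings.warn(
            'missingheads was renamed to ancestorsof; update callers',
            DeprecationWarning,
            stacklevel=2,
        )
        return self.ancestorsof

og = Outgoing([b'deadbeef'])
assert og.missingheads == og.ancestorsof  # old name still works, but warns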
153 def findcommonoutgoing(
157 def findcommonoutgoing(
154 repo, other, onlyheads=None, force=False, commoninc=None, portable=False
158 repo, other, onlyheads=None, force=False, commoninc=None, portable=False
155 ):
159 ):
156 '''Return an outgoing instance to identify the nodes present in repo but
160 '''Return an outgoing instance to identify the nodes present in repo but
157 not in other.
161 not in other.
158
162
159 If onlyheads is given, only nodes ancestral to nodes in onlyheads
163 If onlyheads is given, only nodes ancestral to nodes in onlyheads
160 (inclusive) are included. If you already know the local repo's heads,
164 (inclusive) are included. If you already know the local repo's heads,
161 passing them in onlyheads is faster than letting them be recomputed here.
165 passing them in onlyheads is faster than letting them be recomputed here.
162
166
163 If commoninc is given, it must be the result of a prior call to
167 If commoninc is given, it must be the result of a prior call to
164 findcommonincoming(repo, other, force) to avoid recomputing it here.
168 findcommonincoming(repo, other, force) to avoid recomputing it here.
165
169
166 If portable is given, compute more conservative common and missingheads,
170 If portable is given, compute more conservative common and ancestorsof,
167 to make bundles created from the instance more portable.'''
171 to make bundles created from the instance more portable.'''
168 # declare an empty outgoing object to be filled later
172 # declare an empty outgoing object to be filled later
169 og = outgoing(repo, None, None)
173 og = outgoing(repo, None, None)
170
174
171 # get common set if not provided
175 # get common set if not provided
172 if commoninc is None:
176 if commoninc is None:
173 commoninc = findcommonincoming(
177 commoninc = findcommonincoming(
174 repo, other, force=force, ancestorsof=onlyheads
178 repo, other, force=force, ancestorsof=onlyheads
175 )
179 )
176 og.commonheads, _any, _hds = commoninc
180 og.commonheads, _any, _hds = commoninc
177
181
178 # compute outgoing
182 # compute outgoing
179 mayexclude = repo._phasecache.phaseroots[phases.secret] or repo.obsstore
183 mayexclude = repo._phasecache.phaseroots[phases.secret] or repo.obsstore
180 if not mayexclude:
184 if not mayexclude:
181 og.missingheads = onlyheads or repo.heads()
185 og.ancestorsof = onlyheads or repo.heads()
182 elif onlyheads is None:
186 elif onlyheads is None:
183 # use visible heads as it should be cached
187 # use visible heads as it should be cached
184 og.missingheads = repo.filtered(b"served").heads()
188 og.ancestorsof = repo.filtered(b"served").heads()
185 og.excluded = [ctx.node() for ctx in repo.set(b'secret() or extinct()')]
189 og.excluded = [ctx.node() for ctx in repo.set(b'secret() or extinct()')]
186 else:
190 else:
187 # compute common, missing and exclude secret stuff
191 # compute common, missing and exclude secret stuff
188 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
192 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
189 og._common, allmissing = sets
193 og._common, allmissing = sets
190 og._missing = missing = []
194 og._missing = missing = []
191 og.excluded = excluded = []
195 og.excluded = excluded = []
192 for node in allmissing:
196 for node in allmissing:
193 ctx = repo[node]
197 ctx = repo[node]
194 if ctx.phase() >= phases.secret or ctx.extinct():
198 if ctx.phase() >= phases.secret or ctx.extinct():
195 excluded.append(node)
199 excluded.append(node)
196 else:
200 else:
197 missing.append(node)
201 missing.append(node)
198 if len(missing) == len(allmissing):
202 if len(missing) == len(allmissing):
199 missingheads = onlyheads
203 ancestorsof = onlyheads
200 else: # update missing heads
204 else: # update missing heads
201 missingheads = phases.newheads(repo, onlyheads, excluded)
205 ancestorsof = phases.newheads(repo, onlyheads, excluded)
202 og.missingheads = missingheads
206 og.ancestorsof = ancestorsof
203 if portable:
207 if portable:
204 # recompute common and missingheads as if -r<rev> had been given for
208 # recompute common and ancestorsof as if -r<rev> had been given for
205 # each head of missing, and --base <rev> for each head of the proper
209 # each head of missing, and --base <rev> for each head of the proper
206 # ancestors of missing
210 # ancestors of missing
207 og._computecommonmissing()
211 og._computecommonmissing()
208 cl = repo.changelog
212 cl = repo.changelog
209 missingrevs = {cl.rev(n) for n in og._missing}
213 missingrevs = {cl.rev(n) for n in og._missing}
210 og._common = set(cl.ancestors(missingrevs)) - missingrevs
214 og._common = set(cl.ancestors(missingrevs)) - missingrevs
211 commonheads = set(og.commonheads)
215 commonheads = set(og.commonheads)
212 og.missingheads = [h for h in og.missingheads if h not in commonheads]
216 og.ancestorsof = [h for h in og.ancestorsof if h not in commonheads]
213
217
214 return og
218 return og
215
219
216
220
217 def _headssummary(pushop):
221 def _headssummary(pushop):
218 """compute a summary of branch and heads status before and after push
222 """compute a summary of branch and heads status before and after push
219
223
220 return {'branch': ([remoteheads], [newheads],
224 return {'branch': ([remoteheads], [newheads],
221 [unsyncedheads], [discardedheads])} mapping
225 [unsyncedheads], [discardedheads])} mapping
222
226
223 - branch: the branch name,
227 - branch: the branch name,
224 - remoteheads: the list of remote heads known locally
228 - remoteheads: the list of remote heads known locally
225 None if the branch is new,
229 None if the branch is new,
226 - newheads: the new remote heads (known locally) with outgoing pushed,
230 - newheads: the new remote heads (known locally) with outgoing pushed,
227 - unsyncedheads: the list of remote heads unknown locally,
231 - unsyncedheads: the list of remote heads unknown locally,
228 - discardedheads: the list of heads made obsolete by the push.
232 - discardedheads: the list of heads made obsolete by the push.
229 """
233 """
230 repo = pushop.repo.unfiltered()
234 repo = pushop.repo.unfiltered()
231 remote = pushop.remote
235 remote = pushop.remote
232 outgoing = pushop.outgoing
236 outgoing = pushop.outgoing
233 cl = repo.changelog
237 cl = repo.changelog
234 headssum = {}
238 headssum = {}
235 missingctx = set()
239 missingctx = set()
236 # A. Create set of branches involved in the push.
240 # A. Create set of branches involved in the push.
237 branches = set()
241 branches = set()
238 for n in outgoing.missing:
242 for n in outgoing.missing:
239 ctx = repo[n]
243 ctx = repo[n]
240 missingctx.add(ctx)
244 missingctx.add(ctx)
241 branches.add(ctx.branch())
245 branches.add(ctx.branch())
242
246
243 with remote.commandexecutor() as e:
247 with remote.commandexecutor() as e:
244 remotemap = e.callcommand(b'branchmap', {}).result()
248 remotemap = e.callcommand(b'branchmap', {}).result()
245
249
246 knownnode = cl.hasnode # do not use nodemap until it is filtered
250 knownnode = cl.hasnode # do not use nodemap until it is filtered
247 # B. register remote heads of branches which are in outgoing set
251 # B. register remote heads of branches which are in outgoing set
248 for branch, heads in pycompat.iteritems(remotemap):
252 for branch, heads in pycompat.iteritems(remotemap):
249 # don't add head info about branches which we don't have locally
253 # don't add head info about branches which we don't have locally
250 if branch not in branches:
254 if branch not in branches:
251 continue
255 continue
252 known = []
256 known = []
253 unsynced = []
257 unsynced = []
254 for h in heads:
258 for h in heads:
255 if knownnode(h):
259 if knownnode(h):
256 known.append(h)
260 known.append(h)
257 else:
261 else:
258 unsynced.append(h)
262 unsynced.append(h)
259 headssum[branch] = (known, list(known), unsynced)
263 headssum[branch] = (known, list(known), unsynced)
260
264
261 # C. add new branch data
265 # C. add new branch data
262 for branch in branches:
266 for branch in branches:
263 if branch not in headssum:
267 if branch not in headssum:
264 headssum[branch] = (None, [], [])
268 headssum[branch] = (None, [], [])
265
269
266 # D. Update newmap with outgoing changes.
270 # D. Update newmap with outgoing changes.
267 # This will possibly add new heads and remove existing ones.
271 # This will possibly add new heads and remove existing ones.
268 newmap = branchmap.remotebranchcache(
272 newmap = branchmap.remotebranchcache(
269 (branch, heads[1])
273 (branch, heads[1])
270 for branch, heads in pycompat.iteritems(headssum)
274 for branch, heads in pycompat.iteritems(headssum)
271 if heads[0] is not None
275 if heads[0] is not None
272 )
276 )
273 newmap.update(repo, (ctx.rev() for ctx in missingctx))
277 newmap.update(repo, (ctx.rev() for ctx in missingctx))
274 for branch, newheads in pycompat.iteritems(newmap):
278 for branch, newheads in pycompat.iteritems(newmap):
275 headssum[branch][1][:] = newheads
279 headssum[branch][1][:] = newheads
276 for branch, items in pycompat.iteritems(headssum):
280 for branch, items in pycompat.iteritems(headssum):
277 for l in items:
281 for l in items:
278 if l is not None:
282 if l is not None:
279 l.sort()
283 l.sort()
280 headssum[branch] = items + ([],)
284 headssum[branch] = items + ([],)
281
285
282 # If there is no obsstore, no post processing is needed.
286 # If there is no obsstore, no post processing is needed.
283 if repo.obsstore:
287 if repo.obsstore:
284 torev = repo.changelog.rev
288 torev = repo.changelog.rev
285 futureheads = {torev(h) for h in outgoing.missingheads}
289 futureheads = {torev(h) for h in outgoing.ancestorsof}
286 futureheads |= {torev(h) for h in outgoing.commonheads}
290 futureheads |= {torev(h) for h in outgoing.commonheads}
287 allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
291 allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
288 for branch, heads in sorted(pycompat.iteritems(headssum)):
292 for branch, heads in sorted(pycompat.iteritems(headssum)):
289 remoteheads, newheads, unsyncedheads, placeholder = heads
293 remoteheads, newheads, unsyncedheads, placeholder = heads
290 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
294 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
291 headssum[branch] = (
295 headssum[branch] = (
292 remoteheads,
296 remoteheads,
293 sorted(result[0]),
297 sorted(result[0]),
294 unsyncedheads,
298 unsyncedheads,
295 sorted(result[1]),
299 sorted(result[1]),
296 )
300 )
297 return headssum
301 return headssum
298
302
299
303
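To make the headssum shape described in the docstring above concrete, here is a hypothetical value (placeholder strings rather than real node hashes) for a push that adds a head on an existing branch and creates one new branch:

headssum = {
    # branch: (remoteheads, newheads, unsyncedheads, discardedheads)
    b'default': ([b'<old-head>'], [b'<old-head>', b'<new-head>'], [], []),
    b'feature': (None, [b'<feature-head>'], [], []),  # branch new to the remote
}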
300 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
304 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
301 """Compute branchmapsummary for repo without branchmap support"""
305 """Compute branchmapsummary for repo without branchmap support"""
302
306
303 # 1-4b. old servers: Check for new topological heads.
307 # 1-4b. old servers: Check for new topological heads.
304 # Construct {old,new}map with branch = None (topological branch).
308 # Construct {old,new}map with branch = None (topological branch).
305 # (code based on update)
309 # (code based on update)
306 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
310 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
307 oldheads = sorted(h for h in remoteheads if knownnode(h))
311 oldheads = sorted(h for h in remoteheads if knownnode(h))
308 # all nodes in outgoing.missing are children of either:
312 # all nodes in outgoing.missing are children of either:
309 # - an element of oldheads
313 # - an element of oldheads
310 # - another element of outgoing.missing
314 # - another element of outgoing.missing
311 # - nullrev
315 # - nullrev
312 # This explains why the new heads are very simple to compute.
316 # This explains why the new heads are very simple to compute.
313 r = repo.set(b'heads(%ln + %ln)', oldheads, outgoing.missing)
317 r = repo.set(b'heads(%ln + %ln)', oldheads, outgoing.missing)
314 newheads = sorted(c.node() for c in r)
318 newheads = sorted(c.node() for c in r)
315 # set some unsynced head to issue the "unsynced changes" warning
319 # set some unsynced head to issue the "unsynced changes" warning
316 if inc:
320 if inc:
317 unsynced = [None]
321 unsynced = [None]
318 else:
322 else:
319 unsynced = []
323 unsynced = []
320 return {None: (oldheads, newheads, unsynced, [])}
324 return {None: (oldheads, newheads, unsynced, [])}
321
325
322
326
323 def _nowarnheads(pushop):
327 def _nowarnheads(pushop):
324 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
328 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
325 repo = pushop.repo.unfiltered()
329 repo = pushop.repo.unfiltered()
326 remote = pushop.remote
330 remote = pushop.remote
327 localbookmarks = repo._bookmarks
331 localbookmarks = repo._bookmarks
328
332
329 with remote.commandexecutor() as e:
333 with remote.commandexecutor() as e:
330 remotebookmarks = e.callcommand(
334 remotebookmarks = e.callcommand(
331 b'listkeys', {b'namespace': b'bookmarks',}
335 b'listkeys', {b'namespace': b'bookmarks',}
332 ).result()
336 ).result()
333
337
334 bookmarkedheads = set()
338 bookmarkedheads = set()
335
339
336 # internal config: bookmarks.pushing
340 # internal config: bookmarks.pushing
337 newbookmarks = [
341 newbookmarks = [
338 localbookmarks.expandname(b)
342 localbookmarks.expandname(b)
339 for b in pushop.ui.configlist(b'bookmarks', b'pushing')
343 for b in pushop.ui.configlist(b'bookmarks', b'pushing')
340 ]
344 ]
341
345
342 for bm in localbookmarks:
346 for bm in localbookmarks:
343 rnode = remotebookmarks.get(bm)
347 rnode = remotebookmarks.get(bm)
344 if rnode and rnode in repo:
348 if rnode and rnode in repo:
345 lctx, rctx = repo[localbookmarks[bm]], repo[rnode]
349 lctx, rctx = repo[localbookmarks[bm]], repo[rnode]
346 if bookmarks.validdest(repo, rctx, lctx):
350 if bookmarks.validdest(repo, rctx, lctx):
347 bookmarkedheads.add(lctx.node())
351 bookmarkedheads.add(lctx.node())
348 else:
352 else:
349 if bm in newbookmarks and bm not in remotebookmarks:
353 if bm in newbookmarks and bm not in remotebookmarks:
350 bookmarkedheads.add(localbookmarks[bm])
354 bookmarkedheads.add(localbookmarks[bm])
351
355
352 return bookmarkedheads
356 return bookmarkedheads
353
357
354
358
355 def checkheads(pushop):
359 def checkheads(pushop):
356 """Check that a push won't add any outgoing head
360 """Check that a push won't add any outgoing head
357
361
358 raise an Abort error and display a ui message as needed.
362 raise an Abort error and display a ui message as needed.
359 """
363 """
360
364
361 repo = pushop.repo.unfiltered()
365 repo = pushop.repo.unfiltered()
362 remote = pushop.remote
366 remote = pushop.remote
363 outgoing = pushop.outgoing
367 outgoing = pushop.outgoing
364 remoteheads = pushop.remoteheads
368 remoteheads = pushop.remoteheads
365 newbranch = pushop.newbranch
369 newbranch = pushop.newbranch
366 inc = bool(pushop.incoming)
370 inc = bool(pushop.incoming)
367
371
368 # Check for each named branch if we're creating new remote heads.
372 # Check for each named branch if we're creating new remote heads.
369 # To be a remote head after push, node must be either:
373 # To be a remote head after push, node must be either:
370 # - unknown locally
374 # - unknown locally
371 # - a local outgoing head descended from update
375 # - a local outgoing head descended from update
372 # - a remote head that's known locally and not
376 # - a remote head that's known locally and not
373 # ancestral to an outgoing head
377 # ancestral to an outgoing head
374 if remoteheads == [nullid]:
378 if remoteheads == [nullid]:
375 # remote is empty, nothing to check.
379 # remote is empty, nothing to check.
376 return
380 return
377
381
378 if remote.capable(b'branchmap'):
382 if remote.capable(b'branchmap'):
379 headssum = _headssummary(pushop)
383 headssum = _headssummary(pushop)
380 else:
384 else:
381 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
385 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
382 pushop.pushbranchmap = headssum
386 pushop.pushbranchmap = headssum
383 newbranches = [
387 newbranches = [
384 branch
388 branch
385 for branch, heads in pycompat.iteritems(headssum)
389 for branch, heads in pycompat.iteritems(headssum)
386 if heads[0] is None
390 if heads[0] is None
387 ]
391 ]
388 # 1. Check for new branches on the remote.
392 # 1. Check for new branches on the remote.
389 if newbranches and not newbranch: # new branch requires --new-branch
393 if newbranches and not newbranch: # new branch requires --new-branch
390 branchnames = b', '.join(sorted(newbranches))
394 branchnames = b', '.join(sorted(newbranches))
391 # Calculate how many of the new branches are closed branches
395 # Calculate how many of the new branches are closed branches
392 closedbranches = set()
396 closedbranches = set()
393 for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
397 for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
394 if isclosed:
398 if isclosed:
395 closedbranches.add(tag)
399 closedbranches.add(tag)
396 closedbranches = closedbranches & set(newbranches)
400 closedbranches = closedbranches & set(newbranches)
397 if closedbranches:
401 if closedbranches:
398 errmsg = _(b"push creates new remote branches: %s (%d closed)!") % (
402 errmsg = _(b"push creates new remote branches: %s (%d closed)!") % (
399 branchnames,
403 branchnames,
400 len(closedbranches),
404 len(closedbranches),
401 )
405 )
402 else:
406 else:
403 errmsg = _(b"push creates new remote branches: %s!") % branchnames
407 errmsg = _(b"push creates new remote branches: %s!") % branchnames
404 hint = _(b"use 'hg push --new-branch' to create new remote branches")
408 hint = _(b"use 'hg push --new-branch' to create new remote branches")
405 raise error.Abort(errmsg, hint=hint)
409 raise error.Abort(errmsg, hint=hint)
406
410
407 # 2. Find heads that we need not warn about
411 # 2. Find heads that we need not warn about
408 nowarnheads = _nowarnheads(pushop)
412 nowarnheads = _nowarnheads(pushop)
409
413
410 # 3. Check for new heads.
414 # 3. Check for new heads.
411 # If there are more heads after the push than before, a suitable
415 # If there are more heads after the push than before, a suitable
412 # error message, depending on unsynced status, is displayed.
416 # error message, depending on unsynced status, is displayed.
413 errormsg = None
417 errormsg = None
414 for branch, heads in sorted(pycompat.iteritems(headssum)):
418 for branch, heads in sorted(pycompat.iteritems(headssum)):
415 remoteheads, newheads, unsyncedheads, discardedheads = heads
419 remoteheads, newheads, unsyncedheads, discardedheads = heads
416 # add unsynced data
420 # add unsynced data
417 if remoteheads is None:
421 if remoteheads is None:
418 oldhs = set()
422 oldhs = set()
419 else:
423 else:
420 oldhs = set(remoteheads)
424 oldhs = set(remoteheads)
421 oldhs.update(unsyncedheads)
425 oldhs.update(unsyncedheads)
422 dhs = None # delta heads, the new heads on branch
426 dhs = None # delta heads, the new heads on branch
423 newhs = set(newheads)
427 newhs = set(newheads)
424 newhs.update(unsyncedheads)
428 newhs.update(unsyncedheads)
425 if unsyncedheads:
429 if unsyncedheads:
426 if None in unsyncedheads:
430 if None in unsyncedheads:
427 # old remote, no heads data
431 # old remote, no heads data
428 heads = None
432 heads = None
429 else:
433 else:
430 heads = scmutil.nodesummaries(repo, unsyncedheads)
434 heads = scmutil.nodesummaries(repo, unsyncedheads)
431 if heads is None:
435 if heads is None:
432 repo.ui.status(
436 repo.ui.status(
433 _(b"remote has heads that are not known locally\n")
437 _(b"remote has heads that are not known locally\n")
434 )
438 )
435 elif branch is None:
439 elif branch is None:
436 repo.ui.status(
440 repo.ui.status(
437 _(b"remote has heads that are not known locally: %s\n")
441 _(b"remote has heads that are not known locally: %s\n")
438 % heads
442 % heads
439 )
443 )
440 else:
444 else:
441 repo.ui.status(
445 repo.ui.status(
442 _(
446 _(
443 b"remote has heads on branch '%s' that are "
447 b"remote has heads on branch '%s' that are "
444 b"not known locally: %s\n"
448 b"not known locally: %s\n"
445 )
449 )
446 % (branch, heads)
450 % (branch, heads)
447 )
451 )
448 if remoteheads is None:
452 if remoteheads is None:
449 if len(newhs) > 1:
453 if len(newhs) > 1:
450 dhs = list(newhs)
454 dhs = list(newhs)
451 if errormsg is None:
455 if errormsg is None:
452 errormsg = (
456 errormsg = (
453 _(b"push creates new branch '%s' with multiple heads")
457 _(b"push creates new branch '%s' with multiple heads")
454 % branch
458 % branch
455 )
459 )
456 hint = _(
460 hint = _(
457 b"merge or"
461 b"merge or"
458 b" see 'hg help push' for details about"
462 b" see 'hg help push' for details about"
459 b" pushing new heads"
463 b" pushing new heads"
460 )
464 )
461 elif len(newhs) > len(oldhs):
465 elif len(newhs) > len(oldhs):
462 # remove bookmarked or existing remote heads from the new heads list
466 # remove bookmarked or existing remote heads from the new heads list
463 dhs = sorted(newhs - nowarnheads - oldhs)
467 dhs = sorted(newhs - nowarnheads - oldhs)
464 if dhs:
468 if dhs:
465 if errormsg is None:
469 if errormsg is None:
466 if branch not in (b'default', None):
470 if branch not in (b'default', None):
467 errormsg = _(
471 errormsg = _(
468 b"push creates new remote head %s on branch '%s'!"
472 b"push creates new remote head %s on branch '%s'!"
469 ) % (short(dhs[0]), branch,)
473 ) % (short(dhs[0]), branch,)
470 elif repo[dhs[0]].bookmarks():
474 elif repo[dhs[0]].bookmarks():
471 errormsg = _(
475 errormsg = _(
472 b"push creates new remote head %s "
476 b"push creates new remote head %s "
473 b"with bookmark '%s'!"
477 b"with bookmark '%s'!"
474 ) % (short(dhs[0]), repo[dhs[0]].bookmarks()[0])
478 ) % (short(dhs[0]), repo[dhs[0]].bookmarks()[0])
475 else:
479 else:
476 errormsg = _(b"push creates new remote head %s!") % short(
480 errormsg = _(b"push creates new remote head %s!") % short(
477 dhs[0]
481 dhs[0]
478 )
482 )
479 if unsyncedheads:
483 if unsyncedheads:
480 hint = _(
484 hint = _(
481 b"pull and merge or"
485 b"pull and merge or"
482 b" see 'hg help push' for details about"
486 b" see 'hg help push' for details about"
483 b" pushing new heads"
487 b" pushing new heads"
484 )
488 )
485 else:
489 else:
486 hint = _(
490 hint = _(
487 b"merge or"
491 b"merge or"
488 b" see 'hg help push' for details about"
492 b" see 'hg help push' for details about"
489 b" pushing new heads"
493 b" pushing new heads"
490 )
494 )
491 if branch is None:
495 if branch is None:
492 repo.ui.note(_(b"new remote heads:\n"))
496 repo.ui.note(_(b"new remote heads:\n"))
493 else:
497 else:
494 repo.ui.note(_(b"new remote heads on branch '%s':\n") % branch)
498 repo.ui.note(_(b"new remote heads on branch '%s':\n") % branch)
495 for h in dhs:
499 for h in dhs:
496 repo.ui.note(b" %s\n" % short(h))
500 repo.ui.note(b" %s\n" % short(h))
497 if errormsg:
501 if errormsg:
498 raise error.Abort(errormsg, hint=hint)
502 raise error.Abort(errormsg, hint=hint)
499
503
500
504
501 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
505 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
502 """post process the list of new heads with obsolescence information
506 """post process the list of new heads with obsolescence information
503
507
504 Exists as a sub-function to contain the complexity and allow extensions to
508 Exists as a sub-function to contain the complexity and allow extensions to
505 experiment with smarter logic.
509 experiment with smarter logic.
506
510
507 Returns (newheads, discarded_heads) tuple
511 Returns (newheads, discarded_heads) tuple
508 """
512 """
509 # known issues
513 # known issues
510 #
514 #
511 # * We "silently" skip processing on all changesets unknown locally
515 # * We "silently" skip processing on all changesets unknown locally
512 #
516 #
513 # * if <nh> is public on the remote, it won't be affected by obsolescence
517 # * if <nh> is public on the remote, it won't be affected by obsolescence
514 # markers and a new head is created
518 # markers and a new head is created
515
519
516 # define various utilities and containers
520 # define various utilities and containers
517 repo = pushop.repo
521 repo = pushop.repo
518 unfi = repo.unfiltered()
522 unfi = repo.unfiltered()
519 torev = unfi.changelog.index.get_rev
523 torev = unfi.changelog.index.get_rev
520 public = phases.public
524 public = phases.public
521 getphase = unfi._phasecache.phase
525 getphase = unfi._phasecache.phase
522 ispublic = lambda r: getphase(unfi, r) == public
526 ispublic = lambda r: getphase(unfi, r) == public
523 ispushed = lambda n: torev(n) in futurecommon
527 ispushed = lambda n: torev(n) in futurecommon
524 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
528 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
525 successorsmarkers = unfi.obsstore.successors
529 successorsmarkers = unfi.obsstore.successors
526 newhs = set() # final set of new heads
530 newhs = set() # final set of new heads
527 discarded = set() # new heads of fully replaced branches
531 discarded = set() # new heads of fully replaced branches
528
532
529 localcandidate = set() # candidate heads known locally
533 localcandidate = set() # candidate heads known locally
530 unknownheads = set() # candidate heads unknown locally
534 unknownheads = set() # candidate heads unknown locally
531 for h in candidate_newhs:
535 for h in candidate_newhs:
532 if h in unfi:
536 if h in unfi:
533 localcandidate.add(h)
537 localcandidate.add(h)
534 else:
538 else:
535 if successorsmarkers.get(h) is not None:
539 if successorsmarkers.get(h) is not None:
536 msg = (
540 msg = (
537 b'checkheads: remote head unknown locally has'
541 b'checkheads: remote head unknown locally has'
538 b' local marker: %s\n'
542 b' local marker: %s\n'
539 )
543 )
540 repo.ui.debug(msg % hex(h))
544 repo.ui.debug(msg % hex(h))
541 unknownheads.add(h)
545 unknownheads.add(h)
542
546
543 # fast path the simple case
547 # fast path the simple case
544 if len(localcandidate) == 1:
548 if len(localcandidate) == 1:
545 return unknownheads | set(candidate_newhs), set()
549 return unknownheads | set(candidate_newhs), set()
546
550
547 # actually process branch replacement
551 # actually process branch replacement
548 while localcandidate:
552 while localcandidate:
549 nh = localcandidate.pop()
553 nh = localcandidate.pop()
550 current_branch = unfi[nh].branch()
554 current_branch = unfi[nh].branch()
551 # run this check early to skip the evaluation of the whole branch
555 # run this check early to skip the evaluation of the whole branch
552 if torev(nh) in futurecommon or ispublic(torev(nh)):
556 if torev(nh) in futurecommon or ispublic(torev(nh)):
553 newhs.add(nh)
557 newhs.add(nh)
554 continue
558 continue
555
559
556 # Get all revs/nodes on the branch exclusive to this head
560 # Get all revs/nodes on the branch exclusive to this head
557 # (already filtered heads are "ignored")
561 # (already filtered heads are "ignored")
558 branchrevs = unfi.revs(
562 branchrevs = unfi.revs(
559 b'only(%n, (%ln+%ln))', nh, localcandidate, newhs
563 b'only(%n, (%ln+%ln))', nh, localcandidate, newhs
560 )
564 )
561
565
562 branchnodes = []
566 branchnodes = []
563 for r in branchrevs:
567 for r in branchrevs:
564 c = unfi[r]
568 c = unfi[r]
565 if c.branch() == current_branch:
569 if c.branch() == current_branch:
566 branchnodes.append(c.node())
570 branchnodes.append(c.node())
567
571
568 # The branch won't be hidden on the remote if
572 # The branch won't be hidden on the remote if
569 # * any part of it is public,
573 # * any part of it is public,
570 # * any part of it is considered part of the result by previous logic,
574 # * any part of it is considered part of the result by previous logic,
571 # * if we have no markers to push to obsolete it.
575 # * if we have no markers to push to obsolete it.
572 if (
576 if (
573 any(ispublic(r) for r in branchrevs)
577 any(ispublic(r) for r in branchrevs)
574 or any(torev(n) in futurecommon for n in branchnodes)
578 or any(torev(n) in futurecommon for n in branchnodes)
575 or any(not hasoutmarker(n) for n in branchnodes)
579 or any(not hasoutmarker(n) for n in branchnodes)
576 ):
580 ):
577 newhs.add(nh)
581 newhs.add(nh)
578 else:
582 else:
579 # note: there is a corner case if there is a merge in the branch.
583 # note: there is a corner case if there is a merge in the branch.
580 # we might end up with -more- heads. However, these heads are not
584 # we might end up with -more- heads. However, these heads are not
581 # "added" by the push, but more by the "removal" on the remote so I
585 # "added" by the push, but more by the "removal" on the remote so I
582 # think is a okay to ignore them,
586 # think is a okay to ignore them,
583 discarded.add(nh)
587 discarded.add(nh)
584 newhs |= unknownheads
588 newhs |= unknownheads
585 return newhs, discarded
589 return newhs, discarded
586
590
587
591
588 def pushingmarkerfor(obsstore, ispushed, node):
592 def pushingmarkerfor(obsstore, ispushed, node):
589 """true if some markers are to be pushed for node
593 """true if some markers are to be pushed for node
590
594
591 We cannot just look into the pushed obsmarkers from the pushop because
595 We cannot just look into the pushed obsmarkers from the pushop because
592 discovery might have filtered relevant markers. In addition, listing all
596 discovery might have filtered relevant markers. In addition, listing all
593 markers relevant to all changesets in the pushed set would be too expensive
597 markers relevant to all changesets in the pushed set would be too expensive
594 (O(len(repo)))
598 (O(len(repo)))
595
599
596 (note: there are caching opportunities in this function, but it would require
600 (note: there are caching opportunities in this function, but it would require
597 a two-dimensional stack.)
601 a two-dimensional stack.)
598 """
602 """
599 successorsmarkers = obsstore.successors
603 successorsmarkers = obsstore.successors
600 stack = [node]
604 stack = [node]
601 seen = set(stack)
605 seen = set(stack)
602 while stack:
606 while stack:
603 current = stack.pop()
607 current = stack.pop()
604 if ispushed(current):
608 if ispushed(current):
605 return True
609 return True
606 markers = successorsmarkers.get(current, ())
610 markers = successorsmarkers.get(current, ())
607 # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
611 # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
608 for m in markers:
612 for m in markers:
609 nexts = m[1] # successors
613 nexts = m[1] # successors
610 if not nexts: # this is a prune marker
614 if not nexts: # this is a prune marker
611 nexts = m[5] or () # parents
615 nexts = m[5] or () # parents
612 for n in nexts:
616 for n in nexts:
613 if n not in seen:
617 if n not in seen:
614 seen.add(n)
618 seen.add(n)
615 stack.append(n)
619 stack.append(n)
616 return False
620 return False
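A standalone sketch of the traversal pushingmarkerfor() implements above: walk successors (or, for prune markers, parents) from the starting node and return True as soon as a node that is being pushed is reached. The marker map below is toy data standing in for obsstore.successors, which really stores full marker tuples.

def pushing_marker_for(successors, ispushed, node):
    stack = [node]
    seen = set(stack)
    while stack:
        current = stack.pop()
        if ispushed(current):
            return True
        for n in successors.get(current, ()):
            if n not in seen:
                seen.add(n)
                stack.append(n)
    return False

markers = {b'a': [b'b'], b'b': [b'c']}
assert pushing_marker_for(markers, lambda n: n == b'c', b'a')
assert not pushing_marker_for(markers, lambda n: n == b'z', b'a')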
@@ -1,3157 +1,3157 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import weakref
11 import weakref
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 hex,
15 hex,
16 nullid,
16 nullid,
17 nullrev,
17 nullrev,
18 )
18 )
19 from .thirdparty import attr
19 from .thirdparty import attr
20 from . import (
20 from . import (
21 bookmarks as bookmod,
21 bookmarks as bookmod,
22 bundle2,
22 bundle2,
23 changegroup,
23 changegroup,
24 discovery,
24 discovery,
25 error,
25 error,
26 exchangev2,
26 exchangev2,
27 lock as lockmod,
27 lock as lockmod,
28 logexchange,
28 logexchange,
29 narrowspec,
29 narrowspec,
30 obsolete,
30 obsolete,
31 obsutil,
31 obsutil,
32 phases,
32 phases,
33 pushkey,
33 pushkey,
34 pycompat,
34 pycompat,
35 scmutil,
35 scmutil,
36 sslutil,
36 sslutil,
37 streamclone,
37 streamclone,
38 url as urlmod,
38 url as urlmod,
39 util,
39 util,
40 wireprototypes,
40 wireprototypes,
41 )
41 )
42 from .interfaces import repository
42 from .interfaces import repository
43 from .utils import (
43 from .utils import (
44 hashutil,
44 hashutil,
45 stringutil,
45 stringutil,
46 )
46 )
47
47
48 urlerr = util.urlerr
48 urlerr = util.urlerr
49 urlreq = util.urlreq
49 urlreq = util.urlreq
50
50
51 _NARROWACL_SECTION = b'narrowacl'
51 _NARROWACL_SECTION = b'narrowacl'
52
52
53 # Maps bundle version human names to changegroup versions.
53 # Maps bundle version human names to changegroup versions.
54 _bundlespeccgversions = {
54 _bundlespeccgversions = {
55 b'v1': b'01',
55 b'v1': b'01',
56 b'v2': b'02',
56 b'v2': b'02',
57 b'packed1': b's1',
57 b'packed1': b's1',
58 b'bundle2': b'02', # legacy
58 b'bundle2': b'02', # legacy
59 }
59 }
60
60
61 # Maps bundle version with content opts to choose which part to bundle
61 # Maps bundle version with content opts to choose which part to bundle
62 _bundlespeccontentopts = {
62 _bundlespeccontentopts = {
63 b'v1': {
63 b'v1': {
64 b'changegroup': True,
64 b'changegroup': True,
65 b'cg.version': b'01',
65 b'cg.version': b'01',
66 b'obsolescence': False,
66 b'obsolescence': False,
67 b'phases': False,
67 b'phases': False,
68 b'tagsfnodescache': False,
68 b'tagsfnodescache': False,
69 b'revbranchcache': False,
69 b'revbranchcache': False,
70 },
70 },
71 b'v2': {
71 b'v2': {
72 b'changegroup': True,
72 b'changegroup': True,
73 b'cg.version': b'02',
73 b'cg.version': b'02',
74 b'obsolescence': False,
74 b'obsolescence': False,
75 b'phases': False,
75 b'phases': False,
76 b'tagsfnodescache': True,
76 b'tagsfnodescache': True,
77 b'revbranchcache': True,
77 b'revbranchcache': True,
78 },
78 },
79 b'packed1': {b'cg.version': b's1'},
79 b'packed1': {b'cg.version': b's1'},
80 }
80 }
81 _bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']
81 _bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']
82
82
83 _bundlespecvariants = {
83 _bundlespecvariants = {
84 b"streamv2": {
84 b"streamv2": {
85 b"changegroup": False,
85 b"changegroup": False,
86 b"streamv2": True,
86 b"streamv2": True,
87 b"tagsfnodescache": False,
87 b"tagsfnodescache": False,
88 b"revbranchcache": False,
88 b"revbranchcache": False,
89 }
89 }
90 }
90 }
91
91
92 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
92 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
93 _bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
93 _bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
94
94
95
95
96 @attr.s
96 @attr.s
97 class bundlespec(object):
97 class bundlespec(object):
98 compression = attr.ib()
98 compression = attr.ib()
99 wirecompression = attr.ib()
99 wirecompression = attr.ib()
100 version = attr.ib()
100 version = attr.ib()
101 wireversion = attr.ib()
101 wireversion = attr.ib()
102 params = attr.ib()
102 params = attr.ib()
103 contentopts = attr.ib()
103 contentopts = attr.ib()
104
104
105
105
106 def parsebundlespec(repo, spec, strict=True):
106 def parsebundlespec(repo, spec, strict=True):
107 """Parse a bundle string specification into parts.
107 """Parse a bundle string specification into parts.
108
108
109 Bundle specifications denote a well-defined bundle/exchange format.
109 Bundle specifications denote a well-defined bundle/exchange format.
110 The content of a given specification should not change over time in
110 The content of a given specification should not change over time in
111 order to ensure that bundles produced by a newer version of Mercurial are
111 order to ensure that bundles produced by a newer version of Mercurial are
112 readable from an older version.
112 readable from an older version.
113
113
114 The string currently has the form:
114 The string currently has the form:
115
115
116 <compression>-<type>[;<parameter0>[;<parameter1>]]
116 <compression>-<type>[;<parameter0>[;<parameter1>]]
117
117
118 Where <compression> is one of the supported compression formats
118 Where <compression> is one of the supported compression formats
119 and <type> is (currently) a version string. A ";" can follow the type and
119 and <type> is (currently) a version string. A ";" can follow the type and
120 all text afterwards is interpreted as URI encoded, ";" delimited key=value
120 all text afterwards is interpreted as URI encoded, ";" delimited key=value
121 pairs.
121 pairs.
122
122
123 If ``strict`` is True (the default) <compression> is required. Otherwise,
123 If ``strict`` is True (the default) <compression> is required. Otherwise,
124 it is optional.
124 it is optional.
125
125
126 Returns a bundlespec object of (compression, version, parameters).
126 Returns a bundlespec object of (compression, version, parameters).
127 Compression will be ``None`` if not in strict mode and a compression isn't
127 Compression will be ``None`` if not in strict mode and a compression isn't
128 defined.
128 defined.
129
129
130 An ``InvalidBundleSpecification`` is raised when the specification is
130 An ``InvalidBundleSpecification`` is raised when the specification is
131 not syntactically well formed.
131 not syntactically well formed.
132
132
133 An ``UnsupportedBundleSpecification`` is raised when the compression or
133 An ``UnsupportedBundleSpecification`` is raised when the compression or
134 bundle type/version is not recognized.
134 bundle type/version is not recognized.
135
135
136 Note: this function will likely eventually return a more complex data
136 Note: this function will likely eventually return a more complex data
137 structure, including bundle2 part information.
137 structure, including bundle2 part information.
138 """
138 """
139
139
140 def parseparams(s):
140 def parseparams(s):
141 if b';' not in s:
141 if b';' not in s:
142 return s, {}
142 return s, {}
143
143
144 params = {}
144 params = {}
145 version, paramstr = s.split(b';', 1)
145 version, paramstr = s.split(b';', 1)
146
146
147 for p in paramstr.split(b';'):
147 for p in paramstr.split(b';'):
148 if b'=' not in p:
148 if b'=' not in p:
149 raise error.InvalidBundleSpecification(
149 raise error.InvalidBundleSpecification(
150 _(
150 _(
151 b'invalid bundle specification: '
151 b'invalid bundle specification: '
152 b'missing "=" in parameter: %s'
152 b'missing "=" in parameter: %s'
153 )
153 )
154 % p
154 % p
155 )
155 )
156
156
157 key, value = p.split(b'=', 1)
157 key, value = p.split(b'=', 1)
158 key = urlreq.unquote(key)
158 key = urlreq.unquote(key)
159 value = urlreq.unquote(value)
159 value = urlreq.unquote(value)
160 params[key] = value
160 params[key] = value
161
161
162 return version, params
162 return version, params
163
163
164 if strict and b'-' not in spec:
164 if strict and b'-' not in spec:
165 raise error.InvalidBundleSpecification(
165 raise error.InvalidBundleSpecification(
166 _(
166 _(
167 b'invalid bundle specification; '
167 b'invalid bundle specification; '
168 b'must be prefixed with compression: %s'
168 b'must be prefixed with compression: %s'
169 )
169 )
170 % spec
170 % spec
171 )
171 )
172
172
173 if b'-' in spec:
173 if b'-' in spec:
174 compression, version = spec.split(b'-', 1)
174 compression, version = spec.split(b'-', 1)
175
175
176 if compression not in util.compengines.supportedbundlenames:
176 if compression not in util.compengines.supportedbundlenames:
177 raise error.UnsupportedBundleSpecification(
177 raise error.UnsupportedBundleSpecification(
178 _(b'%s compression is not supported') % compression
178 _(b'%s compression is not supported') % compression
179 )
179 )
180
180
181 version, params = parseparams(version)
181 version, params = parseparams(version)
182
182
183 if version not in _bundlespeccgversions:
183 if version not in _bundlespeccgversions:
184 raise error.UnsupportedBundleSpecification(
184 raise error.UnsupportedBundleSpecification(
185 _(b'%s is not a recognized bundle version') % version
185 _(b'%s is not a recognized bundle version') % version
186 )
186 )
187 else:
187 else:
188 # Value could be just the compression or just the version, in which
188 # Value could be just the compression or just the version, in which
189 # case some defaults are assumed (but only when not in strict mode).
189 # case some defaults are assumed (but only when not in strict mode).
190 assert not strict
190 assert not strict
191
191
192 spec, params = parseparams(spec)
192 spec, params = parseparams(spec)
193
193
194 if spec in util.compengines.supportedbundlenames:
194 if spec in util.compengines.supportedbundlenames:
195 compression = spec
195 compression = spec
196 version = b'v1'
196 version = b'v1'
197 # Generaldelta repos require v2.
197 # Generaldelta repos require v2.
198 if b'generaldelta' in repo.requirements:
198 if b'generaldelta' in repo.requirements:
199 version = b'v2'
199 version = b'v2'
200 # Modern compression engines require v2.
200 # Modern compression engines require v2.
201 if compression not in _bundlespecv1compengines:
201 if compression not in _bundlespecv1compengines:
202 version = b'v2'
202 version = b'v2'
203 elif spec in _bundlespeccgversions:
203 elif spec in _bundlespeccgversions:
204 if spec == b'packed1':
204 if spec == b'packed1':
205 compression = b'none'
205 compression = b'none'
206 else:
206 else:
207 compression = b'bzip2'
207 compression = b'bzip2'
208 version = spec
208 version = spec
209 else:
209 else:
210 raise error.UnsupportedBundleSpecification(
210 raise error.UnsupportedBundleSpecification(
211 _(b'%s is not a recognized bundle specification') % spec
211 _(b'%s is not a recognized bundle specification') % spec
212 )
212 )
213
213
214 # Bundle version 1 only supports a known set of compression engines.
214 # Bundle version 1 only supports a known set of compression engines.
215 if version == b'v1' and compression not in _bundlespecv1compengines:
215 if version == b'v1' and compression not in _bundlespecv1compengines:
216 raise error.UnsupportedBundleSpecification(
216 raise error.UnsupportedBundleSpecification(
217 _(b'compression engine %s is not supported on v1 bundles')
217 _(b'compression engine %s is not supported on v1 bundles')
218 % compression
218 % compression
219 )
219 )
220
220
221 # The specification for packed1 can optionally declare the data formats
221 # The specification for packed1 can optionally declare the data formats
222 # required to apply it. If we see this metadata, compare against what the
222 # required to apply it. If we see this metadata, compare against what the
223 # repo supports and error if the bundle isn't compatible.
223 # repo supports and error if the bundle isn't compatible.
224 if version == b'packed1' and b'requirements' in params:
224 if version == b'packed1' and b'requirements' in params:
225 requirements = set(params[b'requirements'].split(b','))
225 requirements = set(params[b'requirements'].split(b','))
226 missingreqs = requirements - repo.supportedformats
226 missingreqs = requirements - repo.supportedformats
227 if missingreqs:
227 if missingreqs:
228 raise error.UnsupportedBundleSpecification(
228 raise error.UnsupportedBundleSpecification(
229 _(b'missing support for repository features: %s')
229 _(b'missing support for repository features: %s')
230 % b', '.join(sorted(missingreqs))
230 % b', '.join(sorted(missingreqs))
231 )
231 )
232
232
233 # Compute contentopts based on the version
233 # Compute contentopts based on the version
234 contentopts = _bundlespeccontentopts.get(version, {}).copy()
234 contentopts = _bundlespeccontentopts.get(version, {}).copy()
235
235
236 # Process the variants
236 # Process the variants
237 if b"stream" in params and params[b"stream"] == b"v2":
237 if b"stream" in params and params[b"stream"] == b"v2":
238 variant = _bundlespecvariants[b"streamv2"]
238 variant = _bundlespecvariants[b"streamv2"]
239 contentopts.update(variant)
239 contentopts.update(variant)
240
240
241 engine = util.compengines.forbundlename(compression)
241 engine = util.compengines.forbundlename(compression)
242 compression, wirecompression = engine.bundletype()
242 compression, wirecompression = engine.bundletype()
243 wireversion = _bundlespeccgversions[version]
243 wireversion = _bundlespeccgversions[version]
244
244
245 return bundlespec(
245 return bundlespec(
246 compression, wirecompression, version, wireversion, params, contentopts
246 compression, wirecompression, version, wireversion, params, contentopts
247 )
247 )
248
248
249
249
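# Illustration (assumption: not part of exchange.py): a minimal sketch of how
# a bundle specification string decomposes, mirroring parseparams() above.
# The spec value is a made-up example and URL-unquoting is omitted.
example_spec = b'gzip-v2;obsolescence=true;phases=true'
example_compression, _rest = example_spec.split(b'-', 1)
example_version, _paramstr = _rest.split(b';', 1)
example_params = dict(p.split(b'=', 1) for p in _paramstr.split(b';'))
# example_compression == b'gzip', example_version == b'v2'
# example_params == {b'obsolescence': b'true', b'phases': b'true'}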
250 def readbundle(ui, fh, fname, vfs=None):
250 def readbundle(ui, fh, fname, vfs=None):
251 header = changegroup.readexactly(fh, 4)
251 header = changegroup.readexactly(fh, 4)
252
252
253 alg = None
253 alg = None
254 if not fname:
254 if not fname:
255 fname = b"stream"
255 fname = b"stream"
256 if not header.startswith(b'HG') and header.startswith(b'\0'):
256 if not header.startswith(b'HG') and header.startswith(b'\0'):
257 fh = changegroup.headerlessfixup(fh, header)
257 fh = changegroup.headerlessfixup(fh, header)
258 header = b"HG10"
258 header = b"HG10"
259 alg = b'UN'
259 alg = b'UN'
260 elif vfs:
260 elif vfs:
261 fname = vfs.join(fname)
261 fname = vfs.join(fname)
262
262
263 magic, version = header[0:2], header[2:4]
263 magic, version = header[0:2], header[2:4]
264
264
265 if magic != b'HG':
265 if magic != b'HG':
266 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
266 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
267 if version == b'10':
267 if version == b'10':
268 if alg is None:
268 if alg is None:
269 alg = changegroup.readexactly(fh, 2)
269 alg = changegroup.readexactly(fh, 2)
270 return changegroup.cg1unpacker(fh, alg)
270 return changegroup.cg1unpacker(fh, alg)
271 elif version.startswith(b'2'):
271 elif version.startswith(b'2'):
272 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
272 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
273 elif version == b'S1':
273 elif version == b'S1':
274 return streamclone.streamcloneapplier(fh)
274 return streamclone.streamcloneapplier(fh)
275 else:
275 else:
276 raise error.Abort(
276 raise error.Abort(
277 _(b'%s: unknown bundle version %s') % (fname, version)
277 _(b'%s: unknown bundle version %s') % (fname, version)
278 )
278 )
279
279
280
280
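# Recap of the branches above (illustrative only): the first four bytes of the
# stream select the unpacker:
#   b'HG10'        -> changegroup.cg1unpacker (compression algorithm follows)
#   b'HG2' + ...   -> bundle2.getunbundler
#   b'HGS1'        -> streamclone.streamcloneapplier
# A stream that does not start with b'HG' but begins with b'\0' is wrapped by
# headerlessfixup() and handled as an uncompressed HG10 changegroup.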
281 def getbundlespec(ui, fh):
281 def getbundlespec(ui, fh):
282 """Infer the bundlespec from a bundle file handle.
282 """Infer the bundlespec from a bundle file handle.
283
283
284 The input file handle is seeked and the original seek position is not
284 The input file handle is seeked and the original seek position is not
285 restored.
285 restored.
286 """
286 """
287
287
288 def speccompression(alg):
288 def speccompression(alg):
289 try:
289 try:
290 return util.compengines.forbundletype(alg).bundletype()[0]
290 return util.compengines.forbundletype(alg).bundletype()[0]
291 except KeyError:
291 except KeyError:
292 return None
292 return None
293
293
294 b = readbundle(ui, fh, None)
294 b = readbundle(ui, fh, None)
295 if isinstance(b, changegroup.cg1unpacker):
295 if isinstance(b, changegroup.cg1unpacker):
296 alg = b._type
296 alg = b._type
297 if alg == b'_truncatedBZ':
297 if alg == b'_truncatedBZ':
298 alg = b'BZ'
298 alg = b'BZ'
299 comp = speccompression(alg)
299 comp = speccompression(alg)
300 if not comp:
300 if not comp:
301 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
301 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
302 return b'%s-v1' % comp
302 return b'%s-v1' % comp
303 elif isinstance(b, bundle2.unbundle20):
303 elif isinstance(b, bundle2.unbundle20):
304 if b'Compression' in b.params:
304 if b'Compression' in b.params:
305 comp = speccompression(b.params[b'Compression'])
305 comp = speccompression(b.params[b'Compression'])
306 if not comp:
306 if not comp:
307 raise error.Abort(
307 raise error.Abort(
308 _(b'unknown compression algorithm: %s') % comp
308 _(b'unknown compression algorithm: %s') % comp
309 )
309 )
310 else:
310 else:
311 comp = b'none'
311 comp = b'none'
312
312
313 version = None
313 version = None
314 for part in b.iterparts():
314 for part in b.iterparts():
315 if part.type == b'changegroup':
315 if part.type == b'changegroup':
316 version = part.params[b'version']
316 version = part.params[b'version']
317 if version in (b'01', b'02'):
317 if version in (b'01', b'02'):
318 version = b'v2'
318 version = b'v2'
319 else:
319 else:
320 raise error.Abort(
320 raise error.Abort(
321 _(
321 _(
322 b'changegroup version %s does not have '
322 b'changegroup version %s does not have '
323 b'a known bundlespec'
323 b'a known bundlespec'
324 )
324 )
325 % version,
325 % version,
326 hint=_(b'try upgrading your Mercurial client'),
326 hint=_(b'try upgrading your Mercurial client'),
327 )
327 )
328 elif part.type == b'stream2' and version is None:
328 elif part.type == b'stream2' and version is None:
329 # A stream2 part requires to be part of a v2 bundle
329 # A stream2 part requires to be part of a v2 bundle
330 requirements = urlreq.unquote(part.params[b'requirements'])
330 requirements = urlreq.unquote(part.params[b'requirements'])
331 splitted = requirements.split()
331 splitted = requirements.split()
332 params = bundle2._formatrequirementsparams(splitted)
332 params = bundle2._formatrequirementsparams(splitted)
333 return b'none-v2;stream=v2;%s' % params
333 return b'none-v2;stream=v2;%s' % params
334
334
335 if not version:
335 if not version:
336 raise error.Abort(
336 raise error.Abort(
337 _(b'could not identify changegroup version in bundle')
337 _(b'could not identify changegroup version in bundle')
338 )
338 )
339
339
340 return b'%s-%s' % (comp, version)
340 return b'%s-%s' % (comp, version)
341 elif isinstance(b, streamclone.streamcloneapplier):
341 elif isinstance(b, streamclone.streamcloneapplier):
342 requirements = streamclone.readbundle1header(fh)[2]
342 requirements = streamclone.readbundle1header(fh)[2]
343 formatted = bundle2._formatrequirementsparams(requirements)
343 formatted = bundle2._formatrequirementsparams(requirements)
344 return b'none-packed1;%s' % formatted
344 return b'none-packed1;%s' % formatted
345 else:
345 else:
346 raise error.Abort(_(b'unknown bundle type: %s') % b)
346 raise error.Abort(_(b'unknown bundle type: %s') % b)
347
347
348
348
349 def _computeoutgoing(repo, heads, common):
349 def _computeoutgoing(repo, heads, common):
350 """Computes which revs are outgoing given a set of common
350 """Computes which revs are outgoing given a set of common
351 and a set of heads.
351 and a set of heads.
352
352
353 This is a separate function so extensions can have access to
353 This is a separate function so extensions can have access to
354 the logic.
354 the logic.
355
355
356 Returns a discovery.outgoing object.
356 Returns a discovery.outgoing object.
357 """
357 """
358 cl = repo.changelog
358 cl = repo.changelog
359 if common:
359 if common:
360 hasnode = cl.hasnode
360 hasnode = cl.hasnode
361 common = [n for n in common if hasnode(n)]
361 common = [n for n in common if hasnode(n)]
362 else:
362 else:
363 common = [nullid]
363 common = [nullid]
364 if not heads:
364 if not heads:
365 heads = cl.heads()
365 heads = cl.heads()
366 return discovery.outgoing(repo, common, heads)
366 return discovery.outgoing(repo, common, heads)
367
367
368
368
369 def _checkpublish(pushop):
369 def _checkpublish(pushop):
370 repo = pushop.repo
370 repo = pushop.repo
371 ui = repo.ui
371 ui = repo.ui
372 behavior = ui.config(b'experimental', b'auto-publish')
372 behavior = ui.config(b'experimental', b'auto-publish')
373 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
373 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
374 return
374 return
375 remotephases = listkeys(pushop.remote, b'phases')
375 remotephases = listkeys(pushop.remote, b'phases')
376 if not remotephases.get(b'publishing', False):
376 if not remotephases.get(b'publishing', False):
377 return
377 return
378
378
379 if pushop.revs is None:
379 if pushop.revs is None:
380 published = repo.filtered(b'served').revs(b'not public()')
380 published = repo.filtered(b'served').revs(b'not public()')
381 else:
381 else:
382 published = repo.revs(b'::%ln - public()', pushop.revs)
382 published = repo.revs(b'::%ln - public()', pushop.revs)
383 if published:
383 if published:
384 if behavior == b'warn':
384 if behavior == b'warn':
385 ui.warn(
385 ui.warn(
386 _(b'%i changesets about to be published\n') % len(published)
386 _(b'%i changesets about to be published\n') % len(published)
387 )
387 )
388 elif behavior == b'confirm':
388 elif behavior == b'confirm':
389 if ui.promptchoice(
389 if ui.promptchoice(
390 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
390 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
391 % len(published)
391 % len(published)
392 ):
392 ):
393 raise error.Abort(_(b'user quit'))
393 raise error.Abort(_(b'user quit'))
394 elif behavior == b'abort':
394 elif behavior == b'abort':
395 msg = _(b'push would publish %i changesets') % len(published)
395 msg = _(b'push would publish %i changesets') % len(published)
396 hint = _(
396 hint = _(
397 b"use --publish or adjust 'experimental.auto-publish'"
397 b"use --publish or adjust 'experimental.auto-publish'"
398 b" config"
398 b" config"
399 )
399 )
400 raise error.Abort(msg, hint=hint)
400 raise error.Abort(msg, hint=hint)
401
401
402
402
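# Illustration (not part of this changeset): the behaviour checked above is
# driven by the user's configuration, e.g. in an hgrc file:
#
#   [experimental]
#   # one of: warn, confirm, abort
#   auto-publish = confirm
#
# 'warn' only reports how many changesets would be published, 'confirm'
# prompts before publishing, and 'abort' refuses the push with the hint above.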
403 def _forcebundle1(op):
403 def _forcebundle1(op):
404 """return true if a pull/push must use bundle1
404 """return true if a pull/push must use bundle1
405
405
406 This function is used to allow testing of the older bundle version"""
406 This function is used to allow testing of the older bundle version"""
407 ui = op.repo.ui
407 ui = op.repo.ui
408 # The goal of this config is to allow developers to choose the bundle
408 # The goal of this config is to allow developers to choose the bundle
409 # version used during exchange. This is especially handy during tests.
409 # version used during exchange. This is especially handy during tests.
410 # Value is a list of bundle versions to be picked from; the highest
410 # Value is a list of bundle versions to be picked from; the highest
411 # version should be used.
411 # version should be used.
412 #
412 #
413 # developer config: devel.legacy.exchange
413 # developer config: devel.legacy.exchange
414 exchange = ui.configlist(b'devel', b'legacy.exchange')
414 exchange = ui.configlist(b'devel', b'legacy.exchange')
415 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
415 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
416 return forcebundle1 or not op.remote.capable(b'bundle2')
416 return forcebundle1 or not op.remote.capable(b'bundle2')
417
417
418
418
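# Illustration (not part of this changeset): _forcebundle1() reads the
# developer configuration below; listing 'bundle1' without 'bundle2' forces
# the old format, and a remote lacking the 'bundle2' capability has the same
# effect:
#
#   [devel]
#   legacy.exchange = bundle1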
419 class pushoperation(object):
419 class pushoperation(object):
420 """A object that represent a single push operation
420 """A object that represent a single push operation
421
421
422 Its purpose is to carry push related state and very common operations.
422 Its purpose is to carry push related state and very common operations.
423
423
424 A new pushoperation should be created at the beginning of each push and
424 A new pushoperation should be created at the beginning of each push and
425 discarded afterward.
425 discarded afterward.
426 """
426 """
427
427
428 def __init__(
428 def __init__(
429 self,
429 self,
430 repo,
430 repo,
431 remote,
431 remote,
432 force=False,
432 force=False,
433 revs=None,
433 revs=None,
434 newbranch=False,
434 newbranch=False,
435 bookmarks=(),
435 bookmarks=(),
436 publish=False,
436 publish=False,
437 pushvars=None,
437 pushvars=None,
438 ):
438 ):
439 # repo we push from
439 # repo we push from
440 self.repo = repo
440 self.repo = repo
441 self.ui = repo.ui
441 self.ui = repo.ui
442 # repo we push to
442 # repo we push to
443 self.remote = remote
443 self.remote = remote
444 # force option provided
444 # force option provided
445 self.force = force
445 self.force = force
446 # revs to be pushed (None is "all")
446 # revs to be pushed (None is "all")
447 self.revs = revs
447 self.revs = revs
448 # bookmarks explicitly pushed
448 # bookmarks explicitly pushed
449 self.bookmarks = bookmarks
449 self.bookmarks = bookmarks
450 # allow push of new branch
450 # allow push of new branch
451 self.newbranch = newbranch
451 self.newbranch = newbranch
452 # steps already performed
452 # steps already performed
453 # (used to check what steps have already been performed through bundle2)
453 # (used to check what steps have already been performed through bundle2)
454 self.stepsdone = set()
454 self.stepsdone = set()
455 # Integer version of the changegroup push result
455 # Integer version of the changegroup push result
456 # - None means nothing to push
456 # - None means nothing to push
457 # - 0 means HTTP error
457 # - 0 means HTTP error
458 # - 1 means we pushed and remote head count is unchanged *or*
458 # - 1 means we pushed and remote head count is unchanged *or*
459 # we have outgoing changesets but refused to push
459 # we have outgoing changesets but refused to push
460 # - other values as described by addchangegroup()
460 # - other values as described by addchangegroup()
461 self.cgresult = None
461 self.cgresult = None
462 # Boolean value for the bookmark push
462 # Boolean value for the bookmark push
463 self.bkresult = None
463 self.bkresult = None
464 # discover.outgoing object (contains common and outgoing data)
464 # discover.outgoing object (contains common and outgoing data)
465 self.outgoing = None
465 self.outgoing = None
466 # all remote topological heads before the push
466 # all remote topological heads before the push
467 self.remoteheads = None
467 self.remoteheads = None
468 # Details of the remote branch pre and post push
468 # Details of the remote branch pre and post push
469 #
469 #
470 # mapping: {'branch': ([remoteheads],
470 # mapping: {'branch': ([remoteheads],
471 # [newheads],
471 # [newheads],
472 # [unsyncedheads],
472 # [unsyncedheads],
473 # [discardedheads])}
473 # [discardedheads])}
474 # - branch: the branch name
474 # - branch: the branch name
475 # - remoteheads: the list of remote heads known locally
475 # - remoteheads: the list of remote heads known locally
476 # None if the branch is new
476 # None if the branch is new
477 # - newheads: the new remote heads (known locally) with outgoing pushed
477 # - newheads: the new remote heads (known locally) with outgoing pushed
478 # - unsyncedheads: the list of remote heads unknown locally.
478 # - unsyncedheads: the list of remote heads unknown locally.
479 # - discardedheads: the list of remote heads made obsolete by the push
479 # - discardedheads: the list of remote heads made obsolete by the push
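# hypothetical example of the mapping described above (editorial
# illustration, node values shortened for readability):
#   {b'default': ([b'aaa'],          # remoteheads known locally
#                 [b'aaa', b'bbb'],  # newheads after the push
#                 [],                # unsyncedheads (unknown locally)
#                 [])}               # discardedheads (obsoleted by the push)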
480 self.pushbranchmap = None
480 self.pushbranchmap = None
481 # testable as a boolean indicating if any nodes are missing locally.
481 # testable as a boolean indicating if any nodes are missing locally.
482 self.incoming = None
482 self.incoming = None
483 # summary of the remote phase situation
483 # summary of the remote phase situation
484 self.remotephases = None
484 self.remotephases = None
485 # phase changes that must be pushed alongside the changesets
485 # phase changes that must be pushed alongside the changesets
486 self.outdatedphases = None
486 self.outdatedphases = None
487 # phase changes that must be pushed if the changeset push fails
487 # phase changes that must be pushed if the changeset push fails
488 self.fallbackoutdatedphases = None
488 self.fallbackoutdatedphases = None
489 # outgoing obsmarkers
489 # outgoing obsmarkers
490 self.outobsmarkers = set()
490 self.outobsmarkers = set()
491 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
491 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
492 self.outbookmarks = []
492 self.outbookmarks = []
493 # transaction manager
493 # transaction manager
494 self.trmanager = None
494 self.trmanager = None
495 # map { pushkey partid -> callback handling failure}
495 # map { pushkey partid -> callback handling failure}
496 # used to handle exception from mandatory pushkey part failure
496 # used to handle exception from mandatory pushkey part failure
497 self.pkfailcb = {}
497 self.pkfailcb = {}
498 # an iterable of pushvars or None
498 # an iterable of pushvars or None
499 self.pushvars = pushvars
499 self.pushvars = pushvars
500 # publish pushed changesets
500 # publish pushed changesets
501 self.publish = publish
501 self.publish = publish
502
502
503 @util.propertycache
503 @util.propertycache
504 def futureheads(self):
504 def futureheads(self):
505 """future remote heads if the changeset push succeeds"""
505 """future remote heads if the changeset push succeeds"""
506 return self.outgoing.missingheads
506 return self.outgoing.ancestorsof
507
507
508 @util.propertycache
508 @util.propertycache
509 def fallbackheads(self):
509 def fallbackheads(self):
510 """future remote heads if the changeset push fails"""
510 """future remote heads if the changeset push fails"""
511 if self.revs is None:
511 if self.revs is None:
512 # no target to push, all common heads are relevant
512 # no target to push, all common heads are relevant
513 return self.outgoing.commonheads
513 return self.outgoing.commonheads
514 unfi = self.repo.unfiltered()
514 unfi = self.repo.unfiltered()
515 # I want cheads = heads(::missingheads and ::commonheads)
515 # I want cheads = heads(::ancestorsof and ::commonheads)
516 # (missingheads is revs with secret changeset filtered out)
516 # (ancestorsof is revs with secret changeset filtered out)
517 #
517 #
518 # This can be expressed as:
518 # This can be expressed as:
519 # cheads = ( (missingheads and ::commonheads)
519 # cheads = ( (ancestorsof and ::commonheads)
520 # + (commonheads and ::missingheads))"
520 # + (commonheads and ::ancestorsof))"
521 # )
521 # )
522 #
522 #
523 # while trying to push we already computed the following:
523 # while trying to push we already computed the following:
524 # common = (::commonheads)
524 # common = (::commonheads)
525 # missing = ((commonheads::missingheads) - commonheads)
525 # missing = ((commonheads::ancestorsof) - commonheads)
526 #
526 #
527 # We can pick:
527 # We can pick:
528 # * missingheads part of common (::commonheads)
528 # * ancestorsof part of common (::commonheads)
529 common = self.outgoing.common
529 common = self.outgoing.common
530 rev = self.repo.changelog.index.rev
530 rev = self.repo.changelog.index.rev
531 cheads = [node for node in self.revs if rev(node) in common]
531 cheads = [node for node in self.revs if rev(node) in common]
532 # and
532 # and
533 # * commonheads parents on missing
533 # * commonheads parents on missing
534 revset = unfi.set(
534 revset = unfi.set(
535 b'%ln and parents(roots(%ln))',
535 b'%ln and parents(roots(%ln))',
536 self.outgoing.commonheads,
536 self.outgoing.commonheads,
537 self.outgoing.missing,
537 self.outgoing.missing,
538 )
538 )
539 cheads.extend(c.node() for c in revset)
539 cheads.extend(c.node() for c in revset)
540 return cheads
540 return cheads
541
541
542 @property
542 @property
543 def commonheads(self):
543 def commonheads(self):
544 """set of all common heads after changeset bundle push"""
544 """set of all common heads after changeset bundle push"""
545 if self.cgresult:
545 if self.cgresult:
546 return self.futureheads
546 return self.futureheads
547 else:
547 else:
548 return self.fallbackheads
548 return self.fallbackheads
549
549
550
550
551 # mapping of message used when pushing bookmark
551 # mapping of message used when pushing bookmark
552 bookmsgmap = {
552 bookmsgmap = {
553 b'update': (
553 b'update': (
554 _(b"updating bookmark %s\n"),
554 _(b"updating bookmark %s\n"),
555 _(b'updating bookmark %s failed!\n'),
555 _(b'updating bookmark %s failed!\n'),
556 ),
556 ),
557 b'export': (
557 b'export': (
558 _(b"exporting bookmark %s\n"),
558 _(b"exporting bookmark %s\n"),
559 _(b'exporting bookmark %s failed!\n'),
559 _(b'exporting bookmark %s failed!\n'),
560 ),
560 ),
561 b'delete': (
561 b'delete': (
562 _(b"deleting remote bookmark %s\n"),
562 _(b"deleting remote bookmark %s\n"),
563 _(b'deleting remote bookmark %s failed!\n'),
563 _(b'deleting remote bookmark %s failed!\n'),
564 ),
564 ),
565 }
565 }
566
566
567
567
568 def push(
568 def push(
569 repo,
569 repo,
570 remote,
570 remote,
571 force=False,
571 force=False,
572 revs=None,
572 revs=None,
573 newbranch=False,
573 newbranch=False,
574 bookmarks=(),
574 bookmarks=(),
575 publish=False,
575 publish=False,
576 opargs=None,
576 opargs=None,
577 ):
577 ):
578 '''Push outgoing changesets (limited by revs) from a local
578 '''Push outgoing changesets (limited by revs) from a local
579 repository to remote. Return an integer:
579 repository to remote. Return an integer:
580 - None means nothing to push
580 - None means nothing to push
581 - 0 means HTTP error
581 - 0 means HTTP error
582 - 1 means we pushed and remote head count is unchanged *or*
582 - 1 means we pushed and remote head count is unchanged *or*
583 we have outgoing changesets but refused to push
583 we have outgoing changesets but refused to push
584 - other values as described by addchangegroup()
584 - other values as described by addchangegroup()
585 '''
585 '''
586 if opargs is None:
586 if opargs is None:
587 opargs = {}
587 opargs = {}
588 pushop = pushoperation(
588 pushop = pushoperation(
589 repo,
589 repo,
590 remote,
590 remote,
591 force,
591 force,
592 revs,
592 revs,
593 newbranch,
593 newbranch,
594 bookmarks,
594 bookmarks,
595 publish,
595 publish,
596 **pycompat.strkwargs(opargs)
596 **pycompat.strkwargs(opargs)
597 )
597 )
598 if pushop.remote.local():
598 if pushop.remote.local():
599 missing = (
599 missing = (
600 set(pushop.repo.requirements) - pushop.remote.local().supported
600 set(pushop.repo.requirements) - pushop.remote.local().supported
601 )
601 )
602 if missing:
602 if missing:
603 msg = _(
603 msg = _(
604 b"required features are not"
604 b"required features are not"
605 b" supported in the destination:"
605 b" supported in the destination:"
606 b" %s"
606 b" %s"
607 ) % (b', '.join(sorted(missing)))
607 ) % (b', '.join(sorted(missing)))
608 raise error.Abort(msg)
608 raise error.Abort(msg)
609
609
610 if not pushop.remote.canpush():
610 if not pushop.remote.canpush():
611 raise error.Abort(_(b"destination does not support push"))
611 raise error.Abort(_(b"destination does not support push"))
612
612
613 if not pushop.remote.capable(b'unbundle'):
613 if not pushop.remote.capable(b'unbundle'):
614 raise error.Abort(
614 raise error.Abort(
615 _(
615 _(
616 b'cannot push: destination does not support the '
616 b'cannot push: destination does not support the '
617 b'unbundle wire protocol command'
617 b'unbundle wire protocol command'
618 )
618 )
619 )
619 )
620
620
621 # get lock as we might write phase data
621 # get lock as we might write phase data
622 wlock = lock = None
622 wlock = lock = None
623 try:
623 try:
624 # bundle2 push may receive a reply bundle touching bookmarks
624 # bundle2 push may receive a reply bundle touching bookmarks
625 # requiring the wlock. Take it now to ensure proper ordering.
625 # requiring the wlock. Take it now to ensure proper ordering.
626 maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
626 maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
627 if (
627 if (
628 (not _forcebundle1(pushop))
628 (not _forcebundle1(pushop))
629 and maypushback
629 and maypushback
630 and not bookmod.bookmarksinstore(repo)
630 and not bookmod.bookmarksinstore(repo)
631 ):
631 ):
632 wlock = pushop.repo.wlock()
632 wlock = pushop.repo.wlock()
633 lock = pushop.repo.lock()
633 lock = pushop.repo.lock()
634 pushop.trmanager = transactionmanager(
634 pushop.trmanager = transactionmanager(
635 pushop.repo, b'push-response', pushop.remote.url()
635 pushop.repo, b'push-response', pushop.remote.url()
636 )
636 )
637 except error.LockUnavailable as err:
637 except error.LockUnavailable as err:
638 # source repo cannot be locked.
638 # source repo cannot be locked.
639 # We do not abort the push, but just disable the local phase
639 # We do not abort the push, but just disable the local phase
640 # synchronisation.
640 # synchronisation.
641 msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
641 msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
642 err
642 err
643 )
643 )
644 pushop.ui.debug(msg)
644 pushop.ui.debug(msg)
645
645
646 with wlock or util.nullcontextmanager():
646 with wlock or util.nullcontextmanager():
647 with lock or util.nullcontextmanager():
647 with lock or util.nullcontextmanager():
648 with pushop.trmanager or util.nullcontextmanager():
648 with pushop.trmanager or util.nullcontextmanager():
649 pushop.repo.checkpush(pushop)
649 pushop.repo.checkpush(pushop)
650 _checkpublish(pushop)
650 _checkpublish(pushop)
651 _pushdiscovery(pushop)
651 _pushdiscovery(pushop)
652 if not pushop.force:
652 if not pushop.force:
653 _checksubrepostate(pushop)
653 _checksubrepostate(pushop)
654 if not _forcebundle1(pushop):
654 if not _forcebundle1(pushop):
655 _pushbundle2(pushop)
655 _pushbundle2(pushop)
656 _pushchangeset(pushop)
656 _pushchangeset(pushop)
657 _pushsyncphase(pushop)
657 _pushsyncphase(pushop)
658 _pushobsolete(pushop)
658 _pushobsolete(pushop)
659 _pushbookmark(pushop)
659 _pushbookmark(pushop)
660
660
661 if repo.ui.configbool(b'experimental', b'remotenames'):
661 if repo.ui.configbool(b'experimental', b'remotenames'):
662 logexchange.pullremotenames(repo, remote)
662 logexchange.pullremotenames(repo, remote)
663
663
664 return pushop
664 return pushop
665
665
666
666
667 # list of steps to perform discovery before push
667 # list of steps to perform discovery before push
668 pushdiscoveryorder = []
668 pushdiscoveryorder = []
669
669
670 # Mapping between step name and function
670 # Mapping between step name and function
671 #
671 #
672 # This exists to help extensions wrap steps if necessary
672 # This exists to help extensions wrap steps if necessary
673 pushdiscoverymapping = {}
673 pushdiscoverymapping = {}
674
674
675
675
676 def pushdiscovery(stepname):
676 def pushdiscovery(stepname):
677 """decorator for function performing discovery before push
677 """decorator for function performing discovery before push
678
678
679 The function is added to the step -> function mapping and appended to the
679 The function is added to the step -> function mapping and appended to the
680 list of steps. Beware that decorated functions will be added in order (this
680 list of steps. Beware that decorated functions will be added in order (this
681 may matter).
681 may matter).
682
682
683 You can only use this decorator for a new step; if you want to wrap a step
683 You can only use this decorator for a new step; if you want to wrap a step
684 from an extension, change the pushdiscoverymapping dictionary directly."""
684 from an extension, change the pushdiscoverymapping dictionary directly."""
685
685
686 def dec(func):
686 def dec(func):
687 assert stepname not in pushdiscoverymapping
687 assert stepname not in pushdiscoverymapping
688 pushdiscoverymapping[stepname] = func
688 pushdiscoverymapping[stepname] = func
689 pushdiscoveryorder.append(stepname)
689 pushdiscoveryorder.append(stepname)
690 return func
690 return func
691
691
692 return dec
692 return dec
693
693
694
694
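# Illustration (hypothetical, not part of exchange.py): how an extension
# would register an extra discovery step with the decorator above. The step
# name and body are invented for this example.
@pushdiscovery(b'example-extension-step')
def _pushdiscoveryexample(pushop):
    # a real step would inspect pushop and record its findings on it
    pushop.ui.debug(b'example discovery step ran\n')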
695 def _pushdiscovery(pushop):
695 def _pushdiscovery(pushop):
696 """Run all discovery steps"""
696 """Run all discovery steps"""
697 for stepname in pushdiscoveryorder:
697 for stepname in pushdiscoveryorder:
698 step = pushdiscoverymapping[stepname]
698 step = pushdiscoverymapping[stepname]
699 step(pushop)
699 step(pushop)
700
700
701
701
702 def _checksubrepostate(pushop):
702 def _checksubrepostate(pushop):
703 """Ensure all outgoing referenced subrepo revisions are present locally"""
703 """Ensure all outgoing referenced subrepo revisions are present locally"""
704 for n in pushop.outgoing.missing:
704 for n in pushop.outgoing.missing:
705 ctx = pushop.repo[n]
705 ctx = pushop.repo[n]
706
706
707 if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
707 if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
708 for subpath in sorted(ctx.substate):
708 for subpath in sorted(ctx.substate):
709 sub = ctx.sub(subpath)
709 sub = ctx.sub(subpath)
710 sub.verify(onpush=True)
710 sub.verify(onpush=True)
711
711
712
712
713 @pushdiscovery(b'changeset')
713 @pushdiscovery(b'changeset')
714 def _pushdiscoverychangeset(pushop):
714 def _pushdiscoverychangeset(pushop):
715 """discover the changeset that need to be pushed"""
715 """discover the changeset that need to be pushed"""
716 fci = discovery.findcommonincoming
716 fci = discovery.findcommonincoming
717 if pushop.revs:
717 if pushop.revs:
718 commoninc = fci(
718 commoninc = fci(
719 pushop.repo,
719 pushop.repo,
720 pushop.remote,
720 pushop.remote,
721 force=pushop.force,
721 force=pushop.force,
722 ancestorsof=pushop.revs,
722 ancestorsof=pushop.revs,
723 )
723 )
724 else:
724 else:
725 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
725 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
726 common, inc, remoteheads = commoninc
726 common, inc, remoteheads = commoninc
727 fco = discovery.findcommonoutgoing
727 fco = discovery.findcommonoutgoing
728 outgoing = fco(
728 outgoing = fco(
729 pushop.repo,
729 pushop.repo,
730 pushop.remote,
730 pushop.remote,
731 onlyheads=pushop.revs,
731 onlyheads=pushop.revs,
732 commoninc=commoninc,
732 commoninc=commoninc,
733 force=pushop.force,
733 force=pushop.force,
734 )
734 )
735 pushop.outgoing = outgoing
735 pushop.outgoing = outgoing
736 pushop.remoteheads = remoteheads
736 pushop.remoteheads = remoteheads
737 pushop.incoming = inc
737 pushop.incoming = inc
738
738
739
739
740 @pushdiscovery(b'phase')
740 @pushdiscovery(b'phase')
741 def _pushdiscoveryphase(pushop):
741 def _pushdiscoveryphase(pushop):
742 """discover the phase that needs to be pushed
742 """discover the phase that needs to be pushed
743
743
744 (computed for both success and failure case for changesets push)"""
744 (computed for both success and failure case for changesets push)"""
745 outgoing = pushop.outgoing
745 outgoing = pushop.outgoing
746 unfi = pushop.repo.unfiltered()
746 unfi = pushop.repo.unfiltered()
747 remotephases = listkeys(pushop.remote, b'phases')
747 remotephases = listkeys(pushop.remote, b'phases')
748
748
749 if (
749 if (
750 pushop.ui.configbool(b'ui', b'_usedassubrepo')
750 pushop.ui.configbool(b'ui', b'_usedassubrepo')
751 and remotephases # server supports phases
751 and remotephases # server supports phases
752 and not pushop.outgoing.missing # no changesets to be pushed
752 and not pushop.outgoing.missing # no changesets to be pushed
753 and remotephases.get(b'publishing', False)
753 and remotephases.get(b'publishing', False)
754 ):
754 ):
755 # When:
755 # When:
756 # - this is a subrepo push
756 # - this is a subrepo push
757 # - and the remote supports phases
757 # - and the remote supports phases
758 # - and no changesets are to be pushed
758 # - and no changesets are to be pushed
759 # - and remote is publishing
759 # - and remote is publishing
760 # We may be in issue 3781 case!
760 # We may be in issue 3781 case!
761 # We skip the courtesy phase synchronisation, which could otherwise
761 # We skip the courtesy phase synchronisation, which could otherwise
762 # publish changesets that are present on the remote but still
762 # publish changesets that are present on the remote but still
763 # draft locally.
763 # draft locally.
764 pushop.outdatedphases = []
764 pushop.outdatedphases = []
765 pushop.fallbackoutdatedphases = []
765 pushop.fallbackoutdatedphases = []
766 return
766 return
767
767
768 pushop.remotephases = phases.remotephasessummary(
768 pushop.remotephases = phases.remotephasessummary(
769 pushop.repo, pushop.fallbackheads, remotephases
769 pushop.repo, pushop.fallbackheads, remotephases
770 )
770 )
771 droots = pushop.remotephases.draftroots
771 droots = pushop.remotephases.draftroots
772
772
773 extracond = b''
773 extracond = b''
774 if not pushop.remotephases.publishing:
774 if not pushop.remotephases.publishing:
775 extracond = b' and public()'
775 extracond = b' and public()'
776 revset = b'heads((%%ln::%%ln) %s)' % extracond
776 revset = b'heads((%%ln::%%ln) %s)' % extracond
777 # Get the list of all revs draft on remote by public here.
777 # Get the list of all revs draft on remote by public here.
778 # XXX Beware that this revset breaks if droots is not strictly
778 # XXX Beware that this revset breaks if droots is not strictly
779 # XXX roots; we may want to ensure it is, but that is costly
779 # XXX roots; we may want to ensure it is, but that is costly
780 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
780 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
781 if not pushop.remotephases.publishing and pushop.publish:
781 if not pushop.remotephases.publishing and pushop.publish:
782 future = list(
782 future = list(
783 unfi.set(
783 unfi.set(
784 b'%ln and (not public() or %ln::)', pushop.futureheads, droots
784 b'%ln and (not public() or %ln::)', pushop.futureheads, droots
785 )
785 )
786 )
786 )
787 elif not outgoing.missing:
787 elif not outgoing.missing:
788 future = fallback
788 future = fallback
789 else:
789 else:
790 # add the changesets we are going to push as draft
790 # add the changesets we are going to push as draft
791 #
791 #
792 # this should not be necessary for a publishing server, but because of an
792 # this should not be necessary for a publishing server, but because of an
793 # issue fixed in xxxxx we have to do it anyway.
793 # issue fixed in xxxxx we have to do it anyway.
794 fdroots = list(
794 fdroots = list(
795 unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
795 unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
796 )
796 )
797 fdroots = [f.node() for f in fdroots]
797 fdroots = [f.node() for f in fdroots]
798 future = list(unfi.set(revset, fdroots, pushop.futureheads))
798 future = list(unfi.set(revset, fdroots, pushop.futureheads))
799 pushop.outdatedphases = future
799 pushop.outdatedphases = future
800 pushop.fallbackoutdatedphases = fallback
800 pushop.fallbackoutdatedphases = fallback
801
801
802
802
803 @pushdiscovery(b'obsmarker')
803 @pushdiscovery(b'obsmarker')
804 def _pushdiscoveryobsmarkers(pushop):
804 def _pushdiscoveryobsmarkers(pushop):
805 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
805 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
806 return
806 return
807
807
808 if not pushop.repo.obsstore:
808 if not pushop.repo.obsstore:
809 return
809 return
810
810
811 if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
811 if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
812 return
812 return
813
813
814 repo = pushop.repo
814 repo = pushop.repo
815 # very naive computation that can be quite expensive on a big repo.
815 # very naive computation that can be quite expensive on a big repo.
816 # However: evolution is currently slow on them anyway.
816 # However: evolution is currently slow on them anyway.
817 nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
817 nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
818 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
818 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
819
819
820
820
821 @pushdiscovery(b'bookmarks')
821 @pushdiscovery(b'bookmarks')
822 def _pushdiscoverybookmarks(pushop):
822 def _pushdiscoverybookmarks(pushop):
823 ui = pushop.ui
823 ui = pushop.ui
824 repo = pushop.repo.unfiltered()
824 repo = pushop.repo.unfiltered()
825 remote = pushop.remote
825 remote = pushop.remote
826 ui.debug(b"checking for updated bookmarks\n")
826 ui.debug(b"checking for updated bookmarks\n")
827 ancestors = ()
827 ancestors = ()
828 if pushop.revs:
828 if pushop.revs:
829 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
829 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
830 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
830 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
831
831
832 remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))
832 remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))
833
833
834 explicit = {
834 explicit = {
835 repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
835 repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
836 }
836 }
837
837
838 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
838 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
839 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
839 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
840
840
841
841
842 def _processcompared(pushop, pushed, explicit, remotebms, comp):
842 def _processcompared(pushop, pushed, explicit, remotebms, comp):
843 """take decision on bookmarks to push to the remote repo
843 """take decision on bookmarks to push to the remote repo
844
844
845 Exists to help extensions alter this behavior.
845 Exists to help extensions alter this behavior.
846 """
846 """
847 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
847 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
848
848
849 repo = pushop.repo
849 repo = pushop.repo
850
850
851 for b, scid, dcid in advsrc:
851 for b, scid, dcid in advsrc:
852 if b in explicit:
852 if b in explicit:
853 explicit.remove(b)
853 explicit.remove(b)
854 if not pushed or repo[scid].rev() in pushed:
854 if not pushed or repo[scid].rev() in pushed:
855 pushop.outbookmarks.append((b, dcid, scid))
855 pushop.outbookmarks.append((b, dcid, scid))
856 # search added bookmark
856 # search added bookmark
857 for b, scid, dcid in addsrc:
857 for b, scid, dcid in addsrc:
858 if b in explicit:
858 if b in explicit:
859 explicit.remove(b)
859 explicit.remove(b)
860 if bookmod.isdivergent(b):
860 if bookmod.isdivergent(b):
861 pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b)
861 pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b)
862 pushop.bkresult = 2
862 pushop.bkresult = 2
863 else:
863 else:
864 pushop.outbookmarks.append((b, b'', scid))
864 pushop.outbookmarks.append((b, b'', scid))
865 # search for overwritten bookmark
865 # search for overwritten bookmark
866 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
866 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
867 if b in explicit:
867 if b in explicit:
868 explicit.remove(b)
868 explicit.remove(b)
869 pushop.outbookmarks.append((b, dcid, scid))
869 pushop.outbookmarks.append((b, dcid, scid))
870 # search for bookmark to delete
870 # search for bookmark to delete
871 for b, scid, dcid in adddst:
871 for b, scid, dcid in adddst:
872 if b in explicit:
872 if b in explicit:
873 explicit.remove(b)
873 explicit.remove(b)
874 # treat as "deleted locally"
874 # treat as "deleted locally"
875 pushop.outbookmarks.append((b, dcid, b''))
875 pushop.outbookmarks.append((b, dcid, b''))
876 # identical bookmarks shouldn't get reported
876 # identical bookmarks shouldn't get reported
877 for b, scid, dcid in same:
877 for b, scid, dcid in same:
878 if b in explicit:
878 if b in explicit:
879 explicit.remove(b)
879 explicit.remove(b)
880
880
881 if explicit:
881 if explicit:
882 explicit = sorted(explicit)
882 explicit = sorted(explicit)
883 # we should probably list all of them
883 # we should probably list all of them
884 pushop.ui.warn(
884 pushop.ui.warn(
885 _(
885 _(
886 b'bookmark %s does not exist on the local '
886 b'bookmark %s does not exist on the local '
887 b'or remote repository!\n'
887 b'or remote repository!\n'
888 )
888 )
889 % explicit[0]
889 % explicit[0]
890 )
890 )
891 pushop.bkresult = 2
891 pushop.bkresult = 2
892
892
893 pushop.outbookmarks.sort()
893 pushop.outbookmarks.sort()
894
894
895
895
896 def _pushcheckoutgoing(pushop):
896 def _pushcheckoutgoing(pushop):
897 outgoing = pushop.outgoing
897 outgoing = pushop.outgoing
898 unfi = pushop.repo.unfiltered()
898 unfi = pushop.repo.unfiltered()
899 if not outgoing.missing:
899 if not outgoing.missing:
900 # nothing to push
900 # nothing to push
901 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
901 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
902 return False
902 return False
903 # something to push
903 # something to push
904 if not pushop.force:
904 if not pushop.force:
905 # if repo.obsstore == False --> no obsolete
905 # if repo.obsstore == False --> no obsolete
906 # then, save the iteration
906 # then, save the iteration
907 if unfi.obsstore:
907 if unfi.obsstore:
908 # these messages are here for 80-char-limit reasons
908 # these messages are here for 80-char-limit reasons
909 mso = _(b"push includes obsolete changeset: %s!")
909 mso = _(b"push includes obsolete changeset: %s!")
910 mspd = _(b"push includes phase-divergent changeset: %s!")
910 mspd = _(b"push includes phase-divergent changeset: %s!")
911 mscd = _(b"push includes content-divergent changeset: %s!")
911 mscd = _(b"push includes content-divergent changeset: %s!")
912 mst = {
912 mst = {
913 b"orphan": _(b"push includes orphan changeset: %s!"),
913 b"orphan": _(b"push includes orphan changeset: %s!"),
914 b"phase-divergent": mspd,
914 b"phase-divergent": mspd,
915 b"content-divergent": mscd,
915 b"content-divergent": mscd,
916 }
916 }
917 # If we are about to push and there is at least one
917 # If we are about to push and there is at least one
918 # obsolete or unstable changeset in missing, at
918 # obsolete or unstable changeset in missing, at
919 # least one of the missing heads will be obsolete or
919 # least one of the missing heads will be obsolete or
920 # unstable. So checking heads only is ok
920 # unstable. So checking heads only is ok
921 for node in outgoing.missingheads:
921 for node in outgoing.ancestorsof:
922 ctx = unfi[node]
922 ctx = unfi[node]
923 if ctx.obsolete():
923 if ctx.obsolete():
924 raise error.Abort(mso % ctx)
924 raise error.Abort(mso % ctx)
925 elif ctx.isunstable():
925 elif ctx.isunstable():
926 # TODO print more than one instability in the abort
926 # TODO print more than one instability in the abort
927 # message
927 # message
928 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
928 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
929
929
930 discovery.checkheads(pushop)
930 discovery.checkheads(pushop)
931 return True
931 return True
932
932
933
933
934 # List of names of steps to perform for an outgoing bundle2, order matters.
934 # List of names of steps to perform for an outgoing bundle2, order matters.
935 b2partsgenorder = []
935 b2partsgenorder = []
936
936
937 # Mapping between step name and function
937 # Mapping between step name and function
938 #
938 #
939 # This exists to help extensions wrap steps if necessary
939 # This exists to help extensions wrap steps if necessary
940 b2partsgenmapping = {}
940 b2partsgenmapping = {}
941
941
942
942
943 def b2partsgenerator(stepname, idx=None):
943 def b2partsgenerator(stepname, idx=None):
944 """decorator for function generating bundle2 part
944 """decorator for function generating bundle2 part
945
945
946 The function is added to the step -> function mapping and appended to the
946 The function is added to the step -> function mapping and appended to the
947 list of steps. Beware that decorated functions will be added in order
947 list of steps. Beware that decorated functions will be added in order
948 (this may matter).
948 (this may matter).
949
949
950 You can only use this decorator for new steps; if you want to wrap a step
950 You can only use this decorator for new steps; if you want to wrap a step
951 from an extension, change the b2partsgenmapping dictionary directly."""
951 from an extension, change the b2partsgenmapping dictionary directly."""
952
952
953 def dec(func):
953 def dec(func):
954 assert stepname not in b2partsgenmapping
954 assert stepname not in b2partsgenmapping
955 b2partsgenmapping[stepname] = func
955 b2partsgenmapping[stepname] = func
956 if idx is None:
956 if idx is None:
957 b2partsgenorder.append(stepname)
957 b2partsgenorder.append(stepname)
958 else:
958 else:
959 b2partsgenorder.insert(idx, stepname)
959 b2partsgenorder.insert(idx, stepname)
960 return func
960 return func
961
961
962 return dec
962 return dec
963
963
964
964
965 def _pushb2ctxcheckheads(pushop, bundler):
965 def _pushb2ctxcheckheads(pushop, bundler):
966 """Generate race condition checking parts
966 """Generate race condition checking parts
967
967
968 Exists as an independent function to aid extensions
968 Exists as an independent function to aid extensions
969 """
969 """
970 # * 'force' does not check for push races,
970 # * 'force' does not check for push races,
971 # * if we don't push anything, there is nothing to check.
971 # * if we don't push anything, there is nothing to check.
972 if not pushop.force and pushop.outgoing.missingheads:
972 if not pushop.force and pushop.outgoing.ancestorsof:
973 allowunrelated = b'related' in bundler.capabilities.get(
973 allowunrelated = b'related' in bundler.capabilities.get(
974 b'checkheads', ()
974 b'checkheads', ()
975 )
975 )
976 emptyremote = pushop.pushbranchmap is None
976 emptyremote = pushop.pushbranchmap is None
977 if not allowunrelated or emptyremote:
977 if not allowunrelated or emptyremote:
978 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
978 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
979 else:
979 else:
980 affected = set()
980 affected = set()
981 for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
981 for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
982 remoteheads, newheads, unsyncedheads, discardedheads = heads
982 remoteheads, newheads, unsyncedheads, discardedheads = heads
983 if remoteheads is not None:
983 if remoteheads is not None:
984 remote = set(remoteheads)
984 remote = set(remoteheads)
985 affected |= set(discardedheads) & remote
985 affected |= set(discardedheads) & remote
986 affected |= remote - set(newheads)
986 affected |= remote - set(newheads)
987 if affected:
987 if affected:
988 data = iter(sorted(affected))
988 data = iter(sorted(affected))
989 bundler.newpart(b'check:updated-heads', data=data)
989 bundler.newpart(b'check:updated-heads', data=data)
990
990
991
991
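A small worked example of the 'affected' computation above, using hypothetical one-byte stand-ins for the 20-byte nodes: the superseded remote head ends up in the part, so the server can detect that it moved in the meantime and reject the racing push.

remoteheads, newheads, discardedheads = [b'A'], [b'B'], []
affected = set()
remote = set(remoteheads)
affected |= set(discardedheads) & remote
affected |= remote - set(newheads)
assert affected == {b'A'}  # the old remote head gets re-checked server-side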
992 def _pushing(pushop):
992 def _pushing(pushop):
993 """return True if we are pushing anything"""
993 """return True if we are pushing anything"""
994 return bool(
994 return bool(
995 pushop.outgoing.missing
995 pushop.outgoing.missing
996 or pushop.outdatedphases
996 or pushop.outdatedphases
997 or pushop.outobsmarkers
997 or pushop.outobsmarkers
998 or pushop.outbookmarks
998 or pushop.outbookmarks
999 )
999 )
1000
1000
1001
1001
1002 @b2partsgenerator(b'check-bookmarks')
1002 @b2partsgenerator(b'check-bookmarks')
1003 def _pushb2checkbookmarks(pushop, bundler):
1003 def _pushb2checkbookmarks(pushop, bundler):
1004 """insert bookmark move checking"""
1004 """insert bookmark move checking"""
1005 if not _pushing(pushop) or pushop.force:
1005 if not _pushing(pushop) or pushop.force:
1006 return
1006 return
1007 b2caps = bundle2.bundle2caps(pushop.remote)
1007 b2caps = bundle2.bundle2caps(pushop.remote)
1008 hasbookmarkcheck = b'bookmarks' in b2caps
1008 hasbookmarkcheck = b'bookmarks' in b2caps
1009 if not (pushop.outbookmarks and hasbookmarkcheck):
1009 if not (pushop.outbookmarks and hasbookmarkcheck):
1010 return
1010 return
1011 data = []
1011 data = []
1012 for book, old, new in pushop.outbookmarks:
1012 for book, old, new in pushop.outbookmarks:
1013 data.append((book, old))
1013 data.append((book, old))
1014 checkdata = bookmod.binaryencode(data)
1014 checkdata = bookmod.binaryencode(data)
1015 bundler.newpart(b'check:bookmarks', data=checkdata)
1015 bundler.newpart(b'check:bookmarks', data=checkdata)
1016
1016
1017
1017
1018 @b2partsgenerator(b'check-phases')
1018 @b2partsgenerator(b'check-phases')
1019 def _pushb2checkphases(pushop, bundler):
1019 def _pushb2checkphases(pushop, bundler):
1020 """insert phase move checking"""
1020 """insert phase move checking"""
1021 if not _pushing(pushop) or pushop.force:
1021 if not _pushing(pushop) or pushop.force:
1022 return
1022 return
1023 b2caps = bundle2.bundle2caps(pushop.remote)
1023 b2caps = bundle2.bundle2caps(pushop.remote)
1024 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1024 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1025 if pushop.remotephases is not None and hasphaseheads:
1025 if pushop.remotephases is not None and hasphaseheads:
1026 # check that the remote phase has not changed
1026 # check that the remote phase has not changed
1027 checks = {p: [] for p in phases.allphases}
1027 checks = {p: [] for p in phases.allphases}
1028 checks[phases.public].extend(pushop.remotephases.publicheads)
1028 checks[phases.public].extend(pushop.remotephases.publicheads)
1029 checks[phases.draft].extend(pushop.remotephases.draftroots)
1029 checks[phases.draft].extend(pushop.remotephases.draftroots)
1030 if any(pycompat.itervalues(checks)):
1030 if any(pycompat.itervalues(checks)):
1031 for phase in checks:
1031 for phase in checks:
1032 checks[phase].sort()
1032 checks[phase].sort()
1033 checkdata = phases.binaryencode(checks)
1033 checkdata = phases.binaryencode(checks)
1034 bundler.newpart(b'check:phases', data=checkdata)
1034 bundler.newpart(b'check:phases', data=checkdata)
1035
1035
1036
1036
1037 @b2partsgenerator(b'changeset')
1037 @b2partsgenerator(b'changeset')
1038 def _pushb2ctx(pushop, bundler):
1038 def _pushb2ctx(pushop, bundler):
1039 """handle changegroup push through bundle2
1039 """handle changegroup push through bundle2
1040
1040
1041 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
1041 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
1042 """
1042 """
1043 if b'changesets' in pushop.stepsdone:
1043 if b'changesets' in pushop.stepsdone:
1044 return
1044 return
1045 pushop.stepsdone.add(b'changesets')
1045 pushop.stepsdone.add(b'changesets')
1046 # Send known heads to the server for race detection.
1046 # Send known heads to the server for race detection.
1047 if not _pushcheckoutgoing(pushop):
1047 if not _pushcheckoutgoing(pushop):
1048 return
1048 return
1049 pushop.repo.prepushoutgoinghooks(pushop)
1049 pushop.repo.prepushoutgoinghooks(pushop)
1050
1050
1051 _pushb2ctxcheckheads(pushop, bundler)
1051 _pushb2ctxcheckheads(pushop, bundler)
1052
1052
1053 b2caps = bundle2.bundle2caps(pushop.remote)
1053 b2caps = bundle2.bundle2caps(pushop.remote)
1054 version = b'01'
1054 version = b'01'
1055 cgversions = b2caps.get(b'changegroup')
1055 cgversions = b2caps.get(b'changegroup')
1056 if cgversions: # 3.1 and 3.2 ship with an empty value
1056 if cgversions: # 3.1 and 3.2 ship with an empty value
1057 cgversions = [
1057 cgversions = [
1058 v
1058 v
1059 for v in cgversions
1059 for v in cgversions
1060 if v in changegroup.supportedoutgoingversions(pushop.repo)
1060 if v in changegroup.supportedoutgoingversions(pushop.repo)
1061 ]
1061 ]
1062 if not cgversions:
1062 if not cgversions:
1063 raise error.Abort(_(b'no common changegroup version'))
1063 raise error.Abort(_(b'no common changegroup version'))
1064 version = max(cgversions)
1064 version = max(cgversions)
1065 cgstream = changegroup.makestream(
1065 cgstream = changegroup.makestream(
1066 pushop.repo, pushop.outgoing, version, b'push'
1066 pushop.repo, pushop.outgoing, version, b'push'
1067 )
1067 )
1068 cgpart = bundler.newpart(b'changegroup', data=cgstream)
1068 cgpart = bundler.newpart(b'changegroup', data=cgstream)
1069 if cgversions:
1069 if cgversions:
1070 cgpart.addparam(b'version', version)
1070 cgpart.addparam(b'version', version)
1071 if b'treemanifest' in pushop.repo.requirements:
1071 if b'treemanifest' in pushop.repo.requirements:
1072 cgpart.addparam(b'treemanifest', b'1')
1072 cgpart.addparam(b'treemanifest', b'1')
1073 if b'exp-sidedata-flag' in pushop.repo.requirements:
1073 if b'exp-sidedata-flag' in pushop.repo.requirements:
1074 cgpart.addparam(b'exp-sidedata', b'1')
1074 cgpart.addparam(b'exp-sidedata', b'1')
1075
1075
1076 def handlereply(op):
1076 def handlereply(op):
1077 """extract addchangegroup returns from server reply"""
1077 """extract addchangegroup returns from server reply"""
1078 cgreplies = op.records.getreplies(cgpart.id)
1078 cgreplies = op.records.getreplies(cgpart.id)
1079 assert len(cgreplies[b'changegroup']) == 1
1079 assert len(cgreplies[b'changegroup']) == 1
1080 pushop.cgresult = cgreplies[b'changegroup'][0][b'return']
1080 pushop.cgresult = cgreplies[b'changegroup'][0][b'return']
1081
1081
1082 return handlereply
1082 return handlereply
1083
1083
1084
1084
1085 @b2partsgenerator(b'phase')
1085 @b2partsgenerator(b'phase')
1086 def _pushb2phases(pushop, bundler):
1086 def _pushb2phases(pushop, bundler):
1087 """handle phase push through bundle2"""
1087 """handle phase push through bundle2"""
1088 if b'phases' in pushop.stepsdone:
1088 if b'phases' in pushop.stepsdone:
1089 return
1089 return
1090 b2caps = bundle2.bundle2caps(pushop.remote)
1090 b2caps = bundle2.bundle2caps(pushop.remote)
1091 ui = pushop.repo.ui
1091 ui = pushop.repo.ui
1092
1092
1093 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1093 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1094 haspushkey = b'pushkey' in b2caps
1094 haspushkey = b'pushkey' in b2caps
1095 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1095 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1096
1096
1097 if hasphaseheads and not legacyphase:
1097 if hasphaseheads and not legacyphase:
1098 return _pushb2phaseheads(pushop, bundler)
1098 return _pushb2phaseheads(pushop, bundler)
1099 elif haspushkey:
1099 elif haspushkey:
1100 return _pushb2phasespushkey(pushop, bundler)
1100 return _pushb2phasespushkey(pushop, bundler)
1101
1101
1102
1102
1103 def _pushb2phaseheads(pushop, bundler):
1103 def _pushb2phaseheads(pushop, bundler):
1104 """push phase information through a bundle2 - binary part"""
1104 """push phase information through a bundle2 - binary part"""
1105 pushop.stepsdone.add(b'phases')
1105 pushop.stepsdone.add(b'phases')
1106 if pushop.outdatedphases:
1106 if pushop.outdatedphases:
1107 updates = {p: [] for p in phases.allphases}
1107 updates = {p: [] for p in phases.allphases}
1108 updates[0].extend(h.node() for h in pushop.outdatedphases)
1108 updates[0].extend(h.node() for h in pushop.outdatedphases)
1109 phasedata = phases.binaryencode(updates)
1109 phasedata = phases.binaryencode(updates)
1110 bundler.newpart(b'phase-heads', data=phasedata)
1110 bundler.newpart(b'phase-heads', data=phasedata)
1111
1111
1112
1112
1113 def _pushb2phasespushkey(pushop, bundler):
1113 def _pushb2phasespushkey(pushop, bundler):
1114 """push phase information through a bundle2 - pushkey part"""
1114 """push phase information through a bundle2 - pushkey part"""
1115 pushop.stepsdone.add(b'phases')
1115 pushop.stepsdone.add(b'phases')
1116 part2node = []
1116 part2node = []
1117
1117
1118 def handlefailure(pushop, exc):
1118 def handlefailure(pushop, exc):
1119 targetid = int(exc.partid)
1119 targetid = int(exc.partid)
1120 for partid, node in part2node:
1120 for partid, node in part2node:
1121 if partid == targetid:
1121 if partid == targetid:
1122 raise error.Abort(_(b'updating %s to public failed') % node)
1122 raise error.Abort(_(b'updating %s to public failed') % node)
1123
1123
1124 enc = pushkey.encode
1124 enc = pushkey.encode
1125 for newremotehead in pushop.outdatedphases:
1125 for newremotehead in pushop.outdatedphases:
1126 part = bundler.newpart(b'pushkey')
1126 part = bundler.newpart(b'pushkey')
1127 part.addparam(b'namespace', enc(b'phases'))
1127 part.addparam(b'namespace', enc(b'phases'))
1128 part.addparam(b'key', enc(newremotehead.hex()))
1128 part.addparam(b'key', enc(newremotehead.hex()))
1129 part.addparam(b'old', enc(b'%d' % phases.draft))
1129 part.addparam(b'old', enc(b'%d' % phases.draft))
1130 part.addparam(b'new', enc(b'%d' % phases.public))
1130 part.addparam(b'new', enc(b'%d' % phases.public))
1131 part2node.append((part.id, newremotehead))
1131 part2node.append((part.id, newremotehead))
1132 pushop.pkfailcb[part.id] = handlefailure
1132 pushop.pkfailcb[part.id] = handlefailure
1133
1133
1134 def handlereply(op):
1134 def handlereply(op):
1135 for partid, node in part2node:
1135 for partid, node in part2node:
1136 partrep = op.records.getreplies(partid)
1136 partrep = op.records.getreplies(partid)
1137 results = partrep[b'pushkey']
1137 results = partrep[b'pushkey']
1138 assert len(results) <= 1
1138 assert len(results) <= 1
1139 msg = None
1139 msg = None
1140 if not results:
1140 if not results:
1141 msg = _(b'server ignored update of %s to public!\n') % node
1141 msg = _(b'server ignored update of %s to public!\n') % node
1142 elif not int(results[0][b'return']):
1142 elif not int(results[0][b'return']):
1143 msg = _(b'updating %s to public failed!\n') % node
1143 msg = _(b'updating %s to public failed!\n') % node
1144 if msg is not None:
1144 if msg is not None:
1145 pushop.ui.warn(msg)
1145 pushop.ui.warn(msg)
1146
1146
1147 return handlereply
1147 return handlereply
1148
1148
1149
1149
1150 @b2partsgenerator(b'obsmarkers')
1150 @b2partsgenerator(b'obsmarkers')
1151 def _pushb2obsmarkers(pushop, bundler):
1151 def _pushb2obsmarkers(pushop, bundler):
1152 if b'obsmarkers' in pushop.stepsdone:
1152 if b'obsmarkers' in pushop.stepsdone:
1153 return
1153 return
1154 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
1154 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
1155 if obsolete.commonversion(remoteversions) is None:
1155 if obsolete.commonversion(remoteversions) is None:
1156 return
1156 return
1157 pushop.stepsdone.add(b'obsmarkers')
1157 pushop.stepsdone.add(b'obsmarkers')
1158 if pushop.outobsmarkers:
1158 if pushop.outobsmarkers:
1159 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1159 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1160 bundle2.buildobsmarkerspart(bundler, markers)
1160 bundle2.buildobsmarkerspart(bundler, markers)
1161
1161
1162
1162
1163 @b2partsgenerator(b'bookmarks')
1163 @b2partsgenerator(b'bookmarks')
1164 def _pushb2bookmarks(pushop, bundler):
1164 def _pushb2bookmarks(pushop, bundler):
1165 """handle bookmark push through bundle2"""
1165 """handle bookmark push through bundle2"""
1166 if b'bookmarks' in pushop.stepsdone:
1166 if b'bookmarks' in pushop.stepsdone:
1167 return
1167 return
1168 b2caps = bundle2.bundle2caps(pushop.remote)
1168 b2caps = bundle2.bundle2caps(pushop.remote)
1169
1169
1170 legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
1170 legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
1171 legacybooks = b'bookmarks' in legacy
1171 legacybooks = b'bookmarks' in legacy
1172
1172
1173 if not legacybooks and b'bookmarks' in b2caps:
1173 if not legacybooks and b'bookmarks' in b2caps:
1174 return _pushb2bookmarkspart(pushop, bundler)
1174 return _pushb2bookmarkspart(pushop, bundler)
1175 elif b'pushkey' in b2caps:
1175 elif b'pushkey' in b2caps:
1176 return _pushb2bookmarkspushkey(pushop, bundler)
1176 return _pushb2bookmarkspushkey(pushop, bundler)
1177
1177
1178
1178
1179 def _bmaction(old, new):
1179 def _bmaction(old, new):
1180 """small utility for bookmark pushing"""
1180 """small utility for bookmark pushing"""
1181 if not old:
1181 if not old:
1182 return b'export'
1182 return b'export'
1183 elif not new:
1183 elif not new:
1184 return b'delete'
1184 return b'delete'
1185 return b'update'
1185 return b'update'
1186
1186
1187
1187
1188 def _abortonsecretctx(pushop, node, b):
1188 def _abortonsecretctx(pushop, node, b):
1189 """abort if a given bookmark points to a secret changeset"""
1189 """abort if a given bookmark points to a secret changeset"""
1190 if node and pushop.repo[node].phase() == phases.secret:
1190 if node and pushop.repo[node].phase() == phases.secret:
1191 raise error.Abort(
1191 raise error.Abort(
1192 _(b'cannot push bookmark %s as it points to a secret changeset') % b
1192 _(b'cannot push bookmark %s as it points to a secret changeset') % b
1193 )
1193 )
1194
1194
1195
1195
1196 def _pushb2bookmarkspart(pushop, bundler):
1196 def _pushb2bookmarkspart(pushop, bundler):
1197 pushop.stepsdone.add(b'bookmarks')
1197 pushop.stepsdone.add(b'bookmarks')
1198 if not pushop.outbookmarks:
1198 if not pushop.outbookmarks:
1199 return
1199 return
1200
1200
1201 allactions = []
1201 allactions = []
1202 data = []
1202 data = []
1203 for book, old, new in pushop.outbookmarks:
1203 for book, old, new in pushop.outbookmarks:
1204 _abortonsecretctx(pushop, new, book)
1204 _abortonsecretctx(pushop, new, book)
1205 data.append((book, new))
1205 data.append((book, new))
1206 allactions.append((book, _bmaction(old, new)))
1206 allactions.append((book, _bmaction(old, new)))
1207 checkdata = bookmod.binaryencode(data)
1207 checkdata = bookmod.binaryencode(data)
1208 bundler.newpart(b'bookmarks', data=checkdata)
1208 bundler.newpart(b'bookmarks', data=checkdata)
1209
1209
1210 def handlereply(op):
1210 def handlereply(op):
1211 ui = pushop.ui
1211 ui = pushop.ui
1212 # if success
1212 # if success
1213 for book, action in allactions:
1213 for book, action in allactions:
1214 ui.status(bookmsgmap[action][0] % book)
1214 ui.status(bookmsgmap[action][0] % book)
1215
1215
1216 return handlereply
1216 return handlereply
1217
1217
1218
1218
1219 def _pushb2bookmarkspushkey(pushop, bundler):
1219 def _pushb2bookmarkspushkey(pushop, bundler):
1220 pushop.stepsdone.add(b'bookmarks')
1220 pushop.stepsdone.add(b'bookmarks')
1221 part2book = []
1221 part2book = []
1222 enc = pushkey.encode
1222 enc = pushkey.encode
1223
1223
1224 def handlefailure(pushop, exc):
1224 def handlefailure(pushop, exc):
1225 targetid = int(exc.partid)
1225 targetid = int(exc.partid)
1226 for partid, book, action in part2book:
1226 for partid, book, action in part2book:
1227 if partid == targetid:
1227 if partid == targetid:
1228 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1228 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1229 # we should not be called for parts we did not generate
1229 # we should not be called for parts we did not generate
1230 assert False
1230 assert False
1231
1231
1232 for book, old, new in pushop.outbookmarks:
1232 for book, old, new in pushop.outbookmarks:
1233 _abortonsecretctx(pushop, new, book)
1233 _abortonsecretctx(pushop, new, book)
1234 part = bundler.newpart(b'pushkey')
1234 part = bundler.newpart(b'pushkey')
1235 part.addparam(b'namespace', enc(b'bookmarks'))
1235 part.addparam(b'namespace', enc(b'bookmarks'))
1236 part.addparam(b'key', enc(book))
1236 part.addparam(b'key', enc(book))
1237 part.addparam(b'old', enc(hex(old)))
1237 part.addparam(b'old', enc(hex(old)))
1238 part.addparam(b'new', enc(hex(new)))
1238 part.addparam(b'new', enc(hex(new)))
1239 action = b'update'
1239 action = b'update'
1240 if not old:
1240 if not old:
1241 action = b'export'
1241 action = b'export'
1242 elif not new:
1242 elif not new:
1243 action = b'delete'
1243 action = b'delete'
1244 part2book.append((part.id, book, action))
1244 part2book.append((part.id, book, action))
1245 pushop.pkfailcb[part.id] = handlefailure
1245 pushop.pkfailcb[part.id] = handlefailure
1246
1246
1247 def handlereply(op):
1247 def handlereply(op):
1248 ui = pushop.ui
1248 ui = pushop.ui
1249 for partid, book, action in part2book:
1249 for partid, book, action in part2book:
1250 partrep = op.records.getreplies(partid)
1250 partrep = op.records.getreplies(partid)
1251 results = partrep[b'pushkey']
1251 results = partrep[b'pushkey']
1252 assert len(results) <= 1
1252 assert len(results) <= 1
1253 if not results:
1253 if not results:
1254 pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
1254 pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
1255 else:
1255 else:
1256 ret = int(results[0][b'return'])
1256 ret = int(results[0][b'return'])
1257 if ret:
1257 if ret:
1258 ui.status(bookmsgmap[action][0] % book)
1258 ui.status(bookmsgmap[action][0] % book)
1259 else:
1259 else:
1260 ui.warn(bookmsgmap[action][1] % book)
1260 ui.warn(bookmsgmap[action][1] % book)
1261 if pushop.bkresult is not None:
1261 if pushop.bkresult is not None:
1262 pushop.bkresult = 1
1262 pushop.bkresult = 1
1263
1263
1264 return handlereply
1264 return handlereply
1265
1265
1266
1266
1267 @b2partsgenerator(b'pushvars', idx=0)
1267 @b2partsgenerator(b'pushvars', idx=0)
1268 def _getbundlesendvars(pushop, bundler):
1268 def _getbundlesendvars(pushop, bundler):
1269 '''send shellvars via bundle2'''
1269 '''send shellvars via bundle2'''
1270 pushvars = pushop.pushvars
1270 pushvars = pushop.pushvars
1271 if pushvars:
1271 if pushvars:
1272 shellvars = {}
1272 shellvars = {}
1273 for raw in pushvars:
1273 for raw in pushvars:
1274 if b'=' not in raw:
1274 if b'=' not in raw:
1275 msg = (
1275 msg = (
1276 b"unable to parse variable '%s', should follow "
1276 b"unable to parse variable '%s', should follow "
1277 b"'KEY=VALUE' or 'KEY=' format"
1277 b"'KEY=VALUE' or 'KEY=' format"
1278 )
1278 )
1279 raise error.Abort(msg % raw)
1279 raise error.Abort(msg % raw)
1280 k, v = raw.split(b'=', 1)
1280 k, v = raw.split(b'=', 1)
1281 shellvars[k] = v
1281 shellvars[k] = v
1282
1282
1283 part = bundler.newpart(b'pushvars')
1283 part = bundler.newpart(b'pushvars')
1284
1284
1285 for key, value in pycompat.iteritems(shellvars):
1285 for key, value in pycompat.iteritems(shellvars):
1286 part.addparam(key, value, mandatory=False)
1286 part.addparam(key, value, mandatory=False)
1287
1287
1288
1288
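For context, a hedged sketch of how these variables typically reach this code path; the server-side knob and the hook variable prefix below are recalled from the pushvars feature and should be verified, not taken from this file.

$ hg push --pushvars "CI_SKIP=1" --pushvars "REASON=hotfix"

# server-side repository hgrc, opting in to receive the variables
[push]
pushvars.server = true

# server hooks can then read them as HG_USERVAR_CI_SKIP / HG_USERVAR_REASON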
1289 def _pushbundle2(pushop):
1289 def _pushbundle2(pushop):
1290 """push data to the remote using bundle2
1290 """push data to the remote using bundle2
1291
1291
1292 The only currently supported type of data is changegroup but this will
1292 The only currently supported type of data is changegroup but this will
1293 evolve in the future."""
1293 evolve in the future."""
1294 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1294 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1295 pushback = pushop.trmanager and pushop.ui.configbool(
1295 pushback = pushop.trmanager and pushop.ui.configbool(
1296 b'experimental', b'bundle2.pushback'
1296 b'experimental', b'bundle2.pushback'
1297 )
1297 )
1298
1298
1299 # create reply capability
1299 # create reply capability
1300 capsblob = bundle2.encodecaps(
1300 capsblob = bundle2.encodecaps(
1301 bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
1301 bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
1302 )
1302 )
1303 bundler.newpart(b'replycaps', data=capsblob)
1303 bundler.newpart(b'replycaps', data=capsblob)
1304 replyhandlers = []
1304 replyhandlers = []
1305 for partgenname in b2partsgenorder:
1305 for partgenname in b2partsgenorder:
1306 partgen = b2partsgenmapping[partgenname]
1306 partgen = b2partsgenmapping[partgenname]
1307 ret = partgen(pushop, bundler)
1307 ret = partgen(pushop, bundler)
1308 if callable(ret):
1308 if callable(ret):
1309 replyhandlers.append(ret)
1309 replyhandlers.append(ret)
1310 # do not push if nothing to push
1310 # do not push if nothing to push
1311 if bundler.nbparts <= 1:
1311 if bundler.nbparts <= 1:
1312 return
1312 return
1313 stream = util.chunkbuffer(bundler.getchunks())
1313 stream = util.chunkbuffer(bundler.getchunks())
1314 try:
1314 try:
1315 try:
1315 try:
1316 with pushop.remote.commandexecutor() as e:
1316 with pushop.remote.commandexecutor() as e:
1317 reply = e.callcommand(
1317 reply = e.callcommand(
1318 b'unbundle',
1318 b'unbundle',
1319 {
1319 {
1320 b'bundle': stream,
1320 b'bundle': stream,
1321 b'heads': [b'force'],
1321 b'heads': [b'force'],
1322 b'url': pushop.remote.url(),
1322 b'url': pushop.remote.url(),
1323 },
1323 },
1324 ).result()
1324 ).result()
1325 except error.BundleValueError as exc:
1325 except error.BundleValueError as exc:
1326 raise error.Abort(_(b'missing support for %s') % exc)
1326 raise error.Abort(_(b'missing support for %s') % exc)
1327 try:
1327 try:
1328 trgetter = None
1328 trgetter = None
1329 if pushback:
1329 if pushback:
1330 trgetter = pushop.trmanager.transaction
1330 trgetter = pushop.trmanager.transaction
1331 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1331 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1332 except error.BundleValueError as exc:
1332 except error.BundleValueError as exc:
1333 raise error.Abort(_(b'missing support for %s') % exc)
1333 raise error.Abort(_(b'missing support for %s') % exc)
1334 except bundle2.AbortFromPart as exc:
1334 except bundle2.AbortFromPart as exc:
1335 pushop.ui.status(_(b'remote: %s\n') % exc)
1335 pushop.ui.status(_(b'remote: %s\n') % exc)
1336 if exc.hint is not None:
1336 if exc.hint is not None:
1337 pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
1337 pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
1338 raise error.Abort(_(b'push failed on remote'))
1338 raise error.Abort(_(b'push failed on remote'))
1339 except error.PushkeyFailed as exc:
1339 except error.PushkeyFailed as exc:
1340 partid = int(exc.partid)
1340 partid = int(exc.partid)
1341 if partid not in pushop.pkfailcb:
1341 if partid not in pushop.pkfailcb:
1342 raise
1342 raise
1343 pushop.pkfailcb[partid](pushop, exc)
1343 pushop.pkfailcb[partid](pushop, exc)
1344 for rephand in replyhandlers:
1344 for rephand in replyhandlers:
1345 rephand(op)
1345 rephand(op)
1346
1346
1347
1347
1348 def _pushchangeset(pushop):
1348 def _pushchangeset(pushop):
1349 """Make the actual push of changeset bundle to remote repo"""
1349 """Make the actual push of changeset bundle to remote repo"""
1350 if b'changesets' in pushop.stepsdone:
1350 if b'changesets' in pushop.stepsdone:
1351 return
1351 return
1352 pushop.stepsdone.add(b'changesets')
1352 pushop.stepsdone.add(b'changesets')
1353 if not _pushcheckoutgoing(pushop):
1353 if not _pushcheckoutgoing(pushop):
1354 return
1354 return
1355
1355
1356 # Should have verified this in push().
1356 # Should have verified this in push().
1357 assert pushop.remote.capable(b'unbundle')
1357 assert pushop.remote.capable(b'unbundle')
1358
1358
1359 pushop.repo.prepushoutgoinghooks(pushop)
1359 pushop.repo.prepushoutgoinghooks(pushop)
1360 outgoing = pushop.outgoing
1360 outgoing = pushop.outgoing
1361 # TODO: get bundlecaps from remote
1361 # TODO: get bundlecaps from remote
1362 bundlecaps = None
1362 bundlecaps = None
1363 # create a changegroup from local
1363 # create a changegroup from local
1364 if pushop.revs is None and not (
1364 if pushop.revs is None and not (
1365 outgoing.excluded or pushop.repo.changelog.filteredrevs
1365 outgoing.excluded or pushop.repo.changelog.filteredrevs
1366 ):
1366 ):
1367 # push everything,
1367 # push everything,
1368 # use the fast path, no race possible on push
1368 # use the fast path, no race possible on push
1369 cg = changegroup.makechangegroup(
1369 cg = changegroup.makechangegroup(
1370 pushop.repo,
1370 pushop.repo,
1371 outgoing,
1371 outgoing,
1372 b'01',
1372 b'01',
1373 b'push',
1373 b'push',
1374 fastpath=True,
1374 fastpath=True,
1375 bundlecaps=bundlecaps,
1375 bundlecaps=bundlecaps,
1376 )
1376 )
1377 else:
1377 else:
1378 cg = changegroup.makechangegroup(
1378 cg = changegroup.makechangegroup(
1379 pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
1379 pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
1380 )
1380 )
1381
1381
1382 # apply changegroup to remote
1382 # apply changegroup to remote
1383 # local repo finds heads on server, finds out what
1383 # local repo finds heads on server, finds out what
1384 # revs it must push. once revs transferred, if server
1384 # revs it must push. once revs transferred, if server
1385 # finds it has different heads (someone else won
1385 # finds it has different heads (someone else won
1386 # commit/push race), server aborts.
1386 # commit/push race), server aborts.
1387 if pushop.force:
1387 if pushop.force:
1388 remoteheads = [b'force']
1388 remoteheads = [b'force']
1389 else:
1389 else:
1390 remoteheads = pushop.remoteheads
1390 remoteheads = pushop.remoteheads
1391 # ssh: return remote's addchangegroup()
1391 # ssh: return remote's addchangegroup()
1392 # http: return remote's addchangegroup() or 0 for error
1392 # http: return remote's addchangegroup() or 0 for error
1393 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1393 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1394
1394
1395
1395
1396 def _pushsyncphase(pushop):
1396 def _pushsyncphase(pushop):
1397 """synchronise phase information locally and remotely"""
1397 """synchronise phase information locally and remotely"""
1398 cheads = pushop.commonheads
1398 cheads = pushop.commonheads
1399 # even when we don't push, exchanging phase data is useful
1399 # even when we don't push, exchanging phase data is useful
1400 remotephases = listkeys(pushop.remote, b'phases')
1400 remotephases = listkeys(pushop.remote, b'phases')
1401 if (
1401 if (
1402 pushop.ui.configbool(b'ui', b'_usedassubrepo')
1402 pushop.ui.configbool(b'ui', b'_usedassubrepo')
1403 and remotephases # server supports phases
1403 and remotephases # server supports phases
1404 and pushop.cgresult is None # nothing was pushed
1404 and pushop.cgresult is None # nothing was pushed
1405 and remotephases.get(b'publishing', False)
1405 and remotephases.get(b'publishing', False)
1406 ):
1406 ):
1407 # When:
1407 # When:
1408 # - this is a subrepo push
1408 # - this is a subrepo push
1409 # - and remote support phase
1409 # - and remote support phase
1410 # - and no changeset was pushed
1410 # - and no changeset was pushed
1411 # - and remote is publishing
1411 # - and remote is publishing
1412 # We may be in issue 3871 case!
1412 # We may be in issue 3871 case!
1413 # We drop the possible phase synchronisation done by
1413 # We drop the possible phase synchronisation done by
1414 # courtesy to publish changesets possibly locally draft
1414 # courtesy to publish changesets possibly locally draft
1415 # on the remote.
1415 # on the remote.
1416 remotephases = {b'publishing': b'True'}
1416 remotephases = {b'publishing': b'True'}
1417 if not remotephases: # old server or public only reply from non-publishing
1417 if not remotephases: # old server or public only reply from non-publishing
1418 _localphasemove(pushop, cheads)
1418 _localphasemove(pushop, cheads)
1419 # don't push any phase data as there is nothing to push
1419 # don't push any phase data as there is nothing to push
1420 else:
1420 else:
1421 ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
1421 ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
1422 pheads, droots = ana
1422 pheads, droots = ana
1423 ### Apply remote phase on local
1423 ### Apply remote phase on local
1424 if remotephases.get(b'publishing', False):
1424 if remotephases.get(b'publishing', False):
1425 _localphasemove(pushop, cheads)
1425 _localphasemove(pushop, cheads)
1426 else: # publish = False
1426 else: # publish = False
1427 _localphasemove(pushop, pheads)
1427 _localphasemove(pushop, pheads)
1428 _localphasemove(pushop, cheads, phases.draft)
1428 _localphasemove(pushop, cheads, phases.draft)
1429 ### Apply local phase on remote
1429 ### Apply local phase on remote
1430
1430
1431 if pushop.cgresult:
1431 if pushop.cgresult:
1432 if b'phases' in pushop.stepsdone:
1432 if b'phases' in pushop.stepsdone:
1433 # phases already pushed though bundle2
1433 # phases already pushed though bundle2
1434 return
1434 return
1435 outdated = pushop.outdatedphases
1435 outdated = pushop.outdatedphases
1436 else:
1436 else:
1437 outdated = pushop.fallbackoutdatedphases
1437 outdated = pushop.fallbackoutdatedphases
1438
1438
1439 pushop.stepsdone.add(b'phases')
1439 pushop.stepsdone.add(b'phases')
1440
1440
1441 # filter heads already turned public by the push
1441 # filter heads already turned public by the push
1442 outdated = [c for c in outdated if c.node() not in pheads]
1442 outdated = [c for c in outdated if c.node() not in pheads]
1443 # fallback to independent pushkey command
1443 # fallback to independent pushkey command
1444 for newremotehead in outdated:
1444 for newremotehead in outdated:
1445 with pushop.remote.commandexecutor() as e:
1445 with pushop.remote.commandexecutor() as e:
1446 r = e.callcommand(
1446 r = e.callcommand(
1447 b'pushkey',
1447 b'pushkey',
1448 {
1448 {
1449 b'namespace': b'phases',
1449 b'namespace': b'phases',
1450 b'key': newremotehead.hex(),
1450 b'key': newremotehead.hex(),
1451 b'old': b'%d' % phases.draft,
1451 b'old': b'%d' % phases.draft,
1452 b'new': b'%d' % phases.public,
1452 b'new': b'%d' % phases.public,
1453 },
1453 },
1454 ).result()
1454 ).result()
1455
1455
1456 if not r:
1456 if not r:
1457 pushop.ui.warn(
1457 pushop.ui.warn(
1458 _(b'updating %s to public failed!\n') % newremotehead
1458 _(b'updating %s to public failed!\n') % newremotehead
1459 )
1459 )
1460
1460
1461
1461
1462 def _localphasemove(pushop, nodes, phase=phases.public):
1462 def _localphasemove(pushop, nodes, phase=phases.public):
1463 """move <nodes> to <phase> in the local source repo"""
1463 """move <nodes> to <phase> in the local source repo"""
1464 if pushop.trmanager:
1464 if pushop.trmanager:
1465 phases.advanceboundary(
1465 phases.advanceboundary(
1466 pushop.repo, pushop.trmanager.transaction(), phase, nodes
1466 pushop.repo, pushop.trmanager.transaction(), phase, nodes
1467 )
1467 )
1468 else:
1468 else:
1469 # repo is not locked, do not change any phases!
1469 # repo is not locked, do not change any phases!
1470 # Informs the user that phases should have been moved when
1470 # Informs the user that phases should have been moved when
1471 # applicable.
1471 # applicable.
1472 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1472 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1473 phasestr = phases.phasenames[phase]
1473 phasestr = phases.phasenames[phase]
1474 if actualmoves:
1474 if actualmoves:
1475 pushop.ui.status(
1475 pushop.ui.status(
1476 _(
1476 _(
1477 b'cannot lock source repo, skipping '
1477 b'cannot lock source repo, skipping '
1478 b'local %s phase update\n'
1478 b'local %s phase update\n'
1479 )
1479 )
1480 % phasestr
1480 % phasestr
1481 )
1481 )
1482
1482
1483
1483
1484 def _pushobsolete(pushop):
1484 def _pushobsolete(pushop):
1485 """utility function to push obsolete markers to a remote"""
1485 """utility function to push obsolete markers to a remote"""
1486 if b'obsmarkers' in pushop.stepsdone:
1486 if b'obsmarkers' in pushop.stepsdone:
1487 return
1487 return
1488 repo = pushop.repo
1488 repo = pushop.repo
1489 remote = pushop.remote
1489 remote = pushop.remote
1490 pushop.stepsdone.add(b'obsmarkers')
1490 pushop.stepsdone.add(b'obsmarkers')
1491 if pushop.outobsmarkers:
1491 if pushop.outobsmarkers:
1492 pushop.ui.debug(b'try to push obsolete markers to remote\n')
1492 pushop.ui.debug(b'try to push obsolete markers to remote\n')
1493 rslts = []
1493 rslts = []
1494 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1494 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1495 remotedata = obsolete._pushkeyescape(markers)
1495 remotedata = obsolete._pushkeyescape(markers)
1496 for key in sorted(remotedata, reverse=True):
1496 for key in sorted(remotedata, reverse=True):
1497 # reverse sort to ensure we end with dump0
1497 # reverse sort to ensure we end with dump0
1498 data = remotedata[key]
1498 data = remotedata[key]
1499 rslts.append(remote.pushkey(b'obsolete', key, b'', data))
1499 rslts.append(remote.pushkey(b'obsolete', key, b'', data))
1500 if [r for r in rslts if not r]:
1500 if [r for r in rslts if not r]:
1501 msg = _(b'failed to push some obsolete markers!\n')
1501 msg = _(b'failed to push some obsolete markers!\n')
1502 repo.ui.warn(msg)
1502 repo.ui.warn(msg)
1503
1503
1504
1504
1505 def _pushbookmark(pushop):
1505 def _pushbookmark(pushop):
1506 """Update bookmark position on remote"""
1506 """Update bookmark position on remote"""
1507 if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
1507 if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
1508 return
1508 return
1509 pushop.stepsdone.add(b'bookmarks')
1509 pushop.stepsdone.add(b'bookmarks')
1510 ui = pushop.ui
1510 ui = pushop.ui
1511 remote = pushop.remote
1511 remote = pushop.remote
1512
1512
1513 for b, old, new in pushop.outbookmarks:
1513 for b, old, new in pushop.outbookmarks:
1514 action = b'update'
1514 action = b'update'
1515 if not old:
1515 if not old:
1516 action = b'export'
1516 action = b'export'
1517 elif not new:
1517 elif not new:
1518 action = b'delete'
1518 action = b'delete'
1519
1519
1520 with remote.commandexecutor() as e:
1520 with remote.commandexecutor() as e:
1521 r = e.callcommand(
1521 r = e.callcommand(
1522 b'pushkey',
1522 b'pushkey',
1523 {
1523 {
1524 b'namespace': b'bookmarks',
1524 b'namespace': b'bookmarks',
1525 b'key': b,
1525 b'key': b,
1526 b'old': hex(old),
1526 b'old': hex(old),
1527 b'new': hex(new),
1527 b'new': hex(new),
1528 },
1528 },
1529 ).result()
1529 ).result()
1530
1530
1531 if r:
1531 if r:
1532 ui.status(bookmsgmap[action][0] % b)
1532 ui.status(bookmsgmap[action][0] % b)
1533 else:
1533 else:
1534 ui.warn(bookmsgmap[action][1] % b)
1534 ui.warn(bookmsgmap[action][1] % b)
1535 # discovery can have set the value from an invalid entry
1535 # discovery can have set the value from an invalid entry
1536 if pushop.bkresult is not None:
1536 if pushop.bkresult is not None:
1537 pushop.bkresult = 1
1537 pushop.bkresult = 1
1538
1538
1539
1539
1540 class pulloperation(object):
1540 class pulloperation(object):
1541 """A object that represent a single pull operation
1541 """A object that represent a single pull operation
1542
1542
1543 Its purpose is to carry pull-related state and very common operations.
1543 Its purpose is to carry pull-related state and very common operations.
1544
1544
1545 A new one should be created at the beginning of each pull and discarded
1545 A new one should be created at the beginning of each pull and discarded
1546 afterward.
1546 afterward.
1547 """
1547 """
1548
1548
1549 def __init__(
1549 def __init__(
1550 self,
1550 self,
1551 repo,
1551 repo,
1552 remote,
1552 remote,
1553 heads=None,
1553 heads=None,
1554 force=False,
1554 force=False,
1555 bookmarks=(),
1555 bookmarks=(),
1556 remotebookmarks=None,
1556 remotebookmarks=None,
1557 streamclonerequested=None,
1557 streamclonerequested=None,
1558 includepats=None,
1558 includepats=None,
1559 excludepats=None,
1559 excludepats=None,
1560 depth=None,
1560 depth=None,
1561 ):
1561 ):
1562 # repo we pull into
1562 # repo we pull into
1563 self.repo = repo
1563 self.repo = repo
1564 # repo we pull from
1564 # repo we pull from
1565 self.remote = remote
1565 self.remote = remote
1566 # revision we try to pull (None is "all")
1566 # revision we try to pull (None is "all")
1567 self.heads = heads
1567 self.heads = heads
1568 # bookmarks pulled explicitly
1568 # bookmarks pulled explicitly
1569 self.explicitbookmarks = [
1569 self.explicitbookmarks = [
1570 repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
1570 repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
1571 ]
1571 ]
1572 # do we force pull?
1572 # do we force pull?
1573 self.force = force
1573 self.force = force
1574 # whether a streaming clone was requested
1574 # whether a streaming clone was requested
1575 self.streamclonerequested = streamclonerequested
1575 self.streamclonerequested = streamclonerequested
1576 # transaction manager
1576 # transaction manager
1577 self.trmanager = None
1577 self.trmanager = None
1578 # set of common changesets between local and remote before pull
1578 # set of common changesets between local and remote before pull
1579 self.common = None
1579 self.common = None
1580 # set of pulled heads
1580 # set of pulled heads
1581 self.rheads = None
1581 self.rheads = None
1582 # list of missing changesets to fetch remotely
1582 # list of missing changesets to fetch remotely
1583 self.fetch = None
1583 self.fetch = None
1584 # remote bookmarks data
1584 # remote bookmarks data
1585 self.remotebookmarks = remotebookmarks
1585 self.remotebookmarks = remotebookmarks
1586 # result of changegroup pulling (used as return code by pull)
1586 # result of changegroup pulling (used as return code by pull)
1587 self.cgresult = None
1587 self.cgresult = None
1588 # list of steps already done
1588 # list of steps already done
1589 self.stepsdone = set()
1589 self.stepsdone = set()
1590 # Whether we attempted a clone from pre-generated bundles.
1590 # Whether we attempted a clone from pre-generated bundles.
1591 self.clonebundleattempted = False
1591 self.clonebundleattempted = False
1592 # Set of file patterns to include.
1592 # Set of file patterns to include.
1593 self.includepats = includepats
1593 self.includepats = includepats
1594 # Set of file patterns to exclude.
1594 # Set of file patterns to exclude.
1595 self.excludepats = excludepats
1595 self.excludepats = excludepats
1596 # Number of ancestor changesets to pull from each pulled head.
1596 # Number of ancestor changesets to pull from each pulled head.
1597 self.depth = depth
1597 self.depth = depth
1598
1598
1599 @util.propertycache
1599 @util.propertycache
1600 def pulledsubset(self):
1600 def pulledsubset(self):
1601 """heads of the set of changeset target by the pull"""
1601 """heads of the set of changeset target by the pull"""
1602 # compute target subset
1602 # compute target subset
1603 if self.heads is None:
1603 if self.heads is None:
1604 # We pulled everything possible
1604 # We pulled everything possible
1605 # sync on everything common
1605 # sync on everything common
1606 c = set(self.common)
1606 c = set(self.common)
1607 ret = list(self.common)
1607 ret = list(self.common)
1608 for n in self.rheads:
1608 for n in self.rheads:
1609 if n not in c:
1609 if n not in c:
1610 ret.append(n)
1610 ret.append(n)
1611 return ret
1611 return ret
1612 else:
1612 else:
1613 # We pulled a specific subset
1613 # We pulled a specific subset
1614 # sync on this subset
1614 # sync on this subset
1615 return self.heads
1615 return self.heads
1616
1616
1617 @util.propertycache
1617 @util.propertycache
1618 def canusebundle2(self):
1618 def canusebundle2(self):
1619 return not _forcebundle1(self)
1619 return not _forcebundle1(self)
1620
1620
1621 @util.propertycache
1621 @util.propertycache
1622 def remotebundle2caps(self):
1622 def remotebundle2caps(self):
1623 return bundle2.bundle2caps(self.remote)
1623 return bundle2.bundle2caps(self.remote)
1624
1624
1625 def gettransaction(self):
1625 def gettransaction(self):
1626 # deprecated; talk to trmanager directly
1626 # deprecated; talk to trmanager directly
1627 return self.trmanager.transaction()
1627 return self.trmanager.transaction()
1628
1628
1629
1629
1630 class transactionmanager(util.transactional):
1630 class transactionmanager(util.transactional):
1631 """An object to manage the life cycle of a transaction
1631 """An object to manage the life cycle of a transaction
1632
1632
1633 It creates the transaction on demand and calls the appropriate hooks when
1633 It creates the transaction on demand and calls the appropriate hooks when
1634 closing the transaction."""
1634 closing the transaction."""
1635
1635
1636 def __init__(self, repo, source, url):
1636 def __init__(self, repo, source, url):
1637 self.repo = repo
1637 self.repo = repo
1638 self.source = source
1638 self.source = source
1639 self.url = url
1639 self.url = url
1640 self._tr = None
1640 self._tr = None
1641
1641
1642 def transaction(self):
1642 def transaction(self):
1643 """Return an open transaction object, constructing if necessary"""
1643 """Return an open transaction object, constructing if necessary"""
1644 if not self._tr:
1644 if not self._tr:
1645 trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
1645 trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
1646 self._tr = self.repo.transaction(trname)
1646 self._tr = self.repo.transaction(trname)
1647 self._tr.hookargs[b'source'] = self.source
1647 self._tr.hookargs[b'source'] = self.source
1648 self._tr.hookargs[b'url'] = self.url
1648 self._tr.hookargs[b'url'] = self.url
1649 return self._tr
1649 return self._tr
1650
1650
1651 def close(self):
1651 def close(self):
1652 """close transaction if created"""
1652 """close transaction if created"""
1653 if self._tr is not None:
1653 if self._tr is not None:
1654 self._tr.close()
1654 self._tr.close()
1655
1655
1656 def release(self):
1656 def release(self):
1657 """release transaction if created"""
1657 """release transaction if created"""
1658 if self._tr is not None:
1658 if self._tr is not None:
1659 self._tr.release()
1659 self._tr.release()
1660
1660
1661
1661
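A minimal usage sketch of the class above, mirroring how pull() uses it further down; repo stands for a local repository and remote for a peer, both assumed to exist.

trmanager = transactionmanager(repo, b'pull', remote.url())
with repo.lock(), trmanager:
    tr = trmanager.transaction()  # the transaction is created lazily, on demand
    # ... apply incoming data under `tr` ...
# on normal exit of the with-block the transaction is closed; release() always runs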
1662 def listkeys(remote, namespace):
1662 def listkeys(remote, namespace):
1663 with remote.commandexecutor() as e:
1663 with remote.commandexecutor() as e:
1664 return e.callcommand(b'listkeys', {b'namespace': namespace}).result()
1664 return e.callcommand(b'listkeys', {b'namespace': namespace}).result()
1665
1665
1666
1666
1667 def _fullpullbundle2(repo, pullop):
1667 def _fullpullbundle2(repo, pullop):
1668 # The server may send a partial reply, i.e. when inlining
1668 # The server may send a partial reply, i.e. when inlining
1669 # pre-computed bundles. In that case, update the common
1669 # pre-computed bundles. In that case, update the common
1670 # set based on the results and pull another bundle.
1670 # set based on the results and pull another bundle.
1671 #
1671 #
1672 # There are two indicators that the process is finished:
1672 # There are two indicators that the process is finished:
1673 # - no changeset has been added, or
1673 # - no changeset has been added, or
1674 # - all remote heads are known locally.
1674 # - all remote heads are known locally.
1675 # The head check must use the unfiltered view as obsoletion
1675 # The head check must use the unfiltered view as obsoletion
1676 # markers can hide heads.
1676 # markers can hide heads.
1677 unfi = repo.unfiltered()
1677 unfi = repo.unfiltered()
1678 unficl = unfi.changelog
1678 unficl = unfi.changelog
1679
1679
1680 def headsofdiff(h1, h2):
1680 def headsofdiff(h1, h2):
1681 """Returns heads(h1 % h2)"""
1681 """Returns heads(h1 % h2)"""
1682 res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
1682 res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
1683 return {ctx.node() for ctx in res}
1683 return {ctx.node() for ctx in res}
1684
1684
1685 def headsofunion(h1, h2):
1685 def headsofunion(h1, h2):
1686 """Returns heads((h1 + h2) - null)"""
1686 """Returns heads((h1 + h2) - null)"""
1687 res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
1687 res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
1688 return {ctx.node() for ctx in res}
1688 return {ctx.node() for ctx in res}
1689
1689
1690 while True:
1690 while True:
1691 old_heads = unficl.heads()
1691 old_heads = unficl.heads()
1692 clstart = len(unficl)
1692 clstart = len(unficl)
1693 _pullbundle2(pullop)
1693 _pullbundle2(pullop)
1694 if repository.NARROW_REQUIREMENT in repo.requirements:
1694 if repository.NARROW_REQUIREMENT in repo.requirements:
1695 # XXX narrow clones filter the heads on the server side during
1695 # XXX narrow clones filter the heads on the server side during
1696 # XXX getbundle and result in partial replies as well.
1696 # XXX getbundle and result in partial replies as well.
1697 # XXX Disable pull bundles in this case as band aid to avoid
1697 # XXX Disable pull bundles in this case as band aid to avoid
1698 # XXX extra round trips.
1698 # XXX extra round trips.
1699 break
1699 break
1700 if clstart == len(unficl):
1700 if clstart == len(unficl):
1701 break
1701 break
1702 if all(unficl.hasnode(n) for n in pullop.rheads):
1702 if all(unficl.hasnode(n) for n in pullop.rheads):
1703 break
1703 break
1704 new_heads = headsofdiff(unficl.heads(), old_heads)
1704 new_heads = headsofdiff(unficl.heads(), old_heads)
1705 pullop.common = headsofunion(new_heads, pullop.common)
1705 pullop.common = headsofunion(new_heads, pullop.common)
1706 pullop.rheads = set(pullop.rheads) - pullop.common
1706 pullop.rheads = set(pullop.rheads) - pullop.common
1707
1707
1708
1708
1709 def add_confirm_callback(repo, pullop):
1709 def add_confirm_callback(repo, pullop):
1710 """ adds a finalize callback to transaction which can be used to show stats
1710 """ adds a finalize callback to transaction which can be used to show stats
1711 to user and confirm the pull before committing transaction """
1711 to user and confirm the pull before committing transaction """
1712
1712
1713 tr = pullop.trmanager.transaction()
1713 tr = pullop.trmanager.transaction()
1714 scmutil.registersummarycallback(
1714 scmutil.registersummarycallback(
1715 repo, tr, txnname=b'pull', as_validator=True
1715 repo, tr, txnname=b'pull', as_validator=True
1716 )
1716 )
1717 reporef = weakref.ref(repo.unfiltered())
1717 reporef = weakref.ref(repo.unfiltered())
1718
1718
1719 def prompt(tr):
1719 def prompt(tr):
1720 repo = reporef()
1720 repo = reporef()
1721 cm = _(b'accept incoming changes (yn)?$$ &Yes $$ &No')
1721 cm = _(b'accept incoming changes (yn)?$$ &Yes $$ &No')
1722 if repo.ui.promptchoice(cm):
1722 if repo.ui.promptchoice(cm):
1723 raise error.Abort("user aborted")
1723 raise error.Abort("user aborted")
1724
1724
1725 tr.addvalidator(b'900-pull-prompt', prompt)
1725 tr.addvalidator(b'900-pull-prompt', prompt)
1726
1726
1727
1727
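For reference, the prompt above is reached either when pull() is called with confirm=True or when the following is configured and plain mode is off (see the configbool check inside pull() below):

[pull]
confirm = yes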
1728 def pull(
1728 def pull(
1729 repo,
1729 repo,
1730 remote,
1730 remote,
1731 heads=None,
1731 heads=None,
1732 force=False,
1732 force=False,
1733 bookmarks=(),
1733 bookmarks=(),
1734 opargs=None,
1734 opargs=None,
1735 streamclonerequested=None,
1735 streamclonerequested=None,
1736 includepats=None,
1736 includepats=None,
1737 excludepats=None,
1737 excludepats=None,
1738 depth=None,
1738 depth=None,
1739 confirm=None,
1739 confirm=None,
1740 ):
1740 ):
1741 """Fetch repository data from a remote.
1741 """Fetch repository data from a remote.
1742
1742
1743 This is the main function used to retrieve data from a remote repository.
1743 This is the main function used to retrieve data from a remote repository.
1744
1744
1745 ``repo`` is the local repository to clone into.
1745 ``repo`` is the local repository to clone into.
1746 ``remote`` is a peer instance.
1746 ``remote`` is a peer instance.
1747 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1747 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1748 default) means to pull everything from the remote.
1748 default) means to pull everything from the remote.
1749 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1749 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1750 default, all remote bookmarks are pulled.
1750 default, all remote bookmarks are pulled.
1751 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1751 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1752 initialization.
1752 initialization.
1753 ``streamclonerequested`` is a boolean indicating whether a "streaming
1753 ``streamclonerequested`` is a boolean indicating whether a "streaming
1754 clone" is requested. A "streaming clone" is essentially a raw file copy
1754 clone" is requested. A "streaming clone" is essentially a raw file copy
1755 of revlogs from the server. This only works when the local repository is
1755 of revlogs from the server. This only works when the local repository is
1756 empty. The default value of ``None`` means to respect the server
1756 empty. The default value of ``None`` means to respect the server
1757 configuration for preferring stream clones.
1757 configuration for preferring stream clones.
1758 ``includepats`` and ``excludepats`` define explicit file patterns to
1758 ``includepats`` and ``excludepats`` define explicit file patterns to
1759 include and exclude in storage, respectively. If not defined, narrow
1759 include and exclude in storage, respectively. If not defined, narrow
1760 patterns from the repo instance are used, if available.
1760 patterns from the repo instance are used, if available.
1761 ``depth`` is an integer indicating the DAG depth of history we're
1761 ``depth`` is an integer indicating the DAG depth of history we're
1762 interested in. If defined, for each revision specified in ``heads``, we
1762 interested in. If defined, for each revision specified in ``heads``, we
1763 will fetch up to this many of its ancestors and data associated with them.
1763 will fetch up to this many of its ancestors and data associated with them.
1764 ``confirm`` is a boolean indicating whether the pull should be confirmed
1764 ``confirm`` is a boolean indicating whether the pull should be confirmed
1765 before committing the transaction. This overrides HGPLAIN.
1765 before committing the transaction. This overrides HGPLAIN.
1766
1766
1767 Returns the ``pulloperation`` created for this pull.
1767 Returns the ``pulloperation`` created for this pull.
1768 """
1768 """
1769 if opargs is None:
1769 if opargs is None:
1770 opargs = {}
1770 opargs = {}
1771
1771
1772 # We allow the narrow patterns to be passed in explicitly to provide more
1772 # We allow the narrow patterns to be passed in explicitly to provide more
1773 # flexibility for API consumers.
1773 # flexibility for API consumers.
1774 if includepats or excludepats:
1774 if includepats or excludepats:
1775 includepats = includepats or set()
1775 includepats = includepats or set()
1776 excludepats = excludepats or set()
1776 excludepats = excludepats or set()
1777 else:
1777 else:
1778 includepats, excludepats = repo.narrowpats
1778 includepats, excludepats = repo.narrowpats
1779
1779
1780 narrowspec.validatepatterns(includepats)
1780 narrowspec.validatepatterns(includepats)
1781 narrowspec.validatepatterns(excludepats)
1781 narrowspec.validatepatterns(excludepats)
1782
1782
1783 pullop = pulloperation(
1783 pullop = pulloperation(
1784 repo,
1784 repo,
1785 remote,
1785 remote,
1786 heads,
1786 heads,
1787 force,
1787 force,
1788 bookmarks=bookmarks,
1788 bookmarks=bookmarks,
1789 streamclonerequested=streamclonerequested,
1789 streamclonerequested=streamclonerequested,
1790 includepats=includepats,
1790 includepats=includepats,
1791 excludepats=excludepats,
1791 excludepats=excludepats,
1792 depth=depth,
1792 depth=depth,
1793 **pycompat.strkwargs(opargs)
1793 **pycompat.strkwargs(opargs)
1794 )
1794 )
1795
1795
1796 peerlocal = pullop.remote.local()
1796 peerlocal = pullop.remote.local()
1797 if peerlocal:
1797 if peerlocal:
1798 missing = set(peerlocal.requirements) - pullop.repo.supported
1798 missing = set(peerlocal.requirements) - pullop.repo.supported
1799 if missing:
1799 if missing:
1800 msg = _(
1800 msg = _(
1801 b"required features are not"
1801 b"required features are not"
1802 b" supported in the destination:"
1802 b" supported in the destination:"
1803 b" %s"
1803 b" %s"
1804 ) % (b', '.join(sorted(missing)))
1804 ) % (b', '.join(sorted(missing)))
1805 raise error.Abort(msg)
1805 raise error.Abort(msg)
1806
1806
1807 pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
1807 pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
1808 wlock = util.nullcontextmanager()
1808 wlock = util.nullcontextmanager()
1809 if not bookmod.bookmarksinstore(repo):
1809 if not bookmod.bookmarksinstore(repo):
1810 wlock = repo.wlock()
1810 wlock = repo.wlock()
1811 with wlock, repo.lock(), pullop.trmanager:
1811 with wlock, repo.lock(), pullop.trmanager:
1812 if confirm or (
1812 if confirm or (
1813 repo.ui.configbool(b"pull", b"confirm") and not repo.ui.plain()
1813 repo.ui.configbool(b"pull", b"confirm") and not repo.ui.plain()
1814 ):
1814 ):
1815 add_confirm_callback(repo, pullop)
1815 add_confirm_callback(repo, pullop)
1816
1816
1817 # Use the modern wire protocol, if available.
1817 # Use the modern wire protocol, if available.
1818 if remote.capable(b'command-changesetdata'):
1818 if remote.capable(b'command-changesetdata'):
1819 exchangev2.pull(pullop)
1819 exchangev2.pull(pullop)
1820 else:
1820 else:
1821 # This should ideally be in _pullbundle2(). However, it needs to run
1821 # This should ideally be in _pullbundle2(). However, it needs to run
1822 # before discovery to avoid extra work.
1822 # before discovery to avoid extra work.
1823 _maybeapplyclonebundle(pullop)
1823 _maybeapplyclonebundle(pullop)
1824 streamclone.maybeperformlegacystreamclone(pullop)
1824 streamclone.maybeperformlegacystreamclone(pullop)
1825 _pulldiscovery(pullop)
1825 _pulldiscovery(pullop)
1826 if pullop.canusebundle2:
1826 if pullop.canusebundle2:
1827 _fullpullbundle2(repo, pullop)
1827 _fullpullbundle2(repo, pullop)
1828 _pullchangeset(pullop)
1828 _pullchangeset(pullop)
1829 _pullphase(pullop)
1829 _pullphase(pullop)
1830 _pullbookmarks(pullop)
1830 _pullbookmarks(pullop)
1831 _pullobsolete(pullop)
1831 _pullobsolete(pullop)
1832
1832
1833 # storing remotenames
1833 # storing remotenames
1834 if repo.ui.configbool(b'experimental', b'remotenames'):
1834 if repo.ui.configbool(b'experimental', b'remotenames'):
1835 logexchange.pullremotenames(repo, remote)
1835 logexchange.pullremotenames(repo, remote)
1836
1836
1837 return pullop
1837 return pullop
1838
1838
1839
1839
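An illustrative sketch of calling the pull() API defined above from outside this module; the ui/repo/peer setup, the path and the URL are assumptions, not taken from this file.

from mercurial import exchange, hg, ui as uimod

ui = uimod.ui.load()
repo = hg.repository(ui, b'/path/to/local/repo')            # hypothetical path
remote = hg.peer(repo, {}, b'https://example.com/hg/repo')   # hypothetical URL
pullop = exchange.pull(
    repo, remote, heads=None, bookmarks=(b'stable',), confirm=False
)
# by the time pull() returns, changesets, phases and bookmarks have been
# synchronised; pullop.cgresult carries the changegroup application result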
1840 # list of steps to perform discovery before pull
1840 # list of steps to perform discovery before pull
1841 pulldiscoveryorder = []
1841 pulldiscoveryorder = []
1842
1842
1843 # Mapping between step name and function
1843 # Mapping between step name and function
1844 #
1844 #
1845 # This exists to help extensions wrap steps if necessary
1845 # This exists to help extensions wrap steps if necessary
1846 pulldiscoverymapping = {}
1846 pulldiscoverymapping = {}
1847
1847
1848
1848
1849 def pulldiscovery(stepname):
1849 def pulldiscovery(stepname):
1850 """decorator for function performing discovery before pull
1850 """decorator for function performing discovery before pull
1851
1851
1852 The function is added to the step -> function mapping and appended to the
1852 The function is added to the step -> function mapping and appended to the
1853 list of steps. Beware that decorated functions will be added in order (this
1853 list of steps. Beware that decorated functions will be added in order (this
1854 may matter).
1854 may matter).
1855
1855
1856 You can only use this decorator for a new step; if you want to wrap a step
1856 You can only use this decorator for a new step; if you want to wrap a step
1857 from an extension, change the pulldiscoverymapping dictionary directly."""
1857 from an extension, change the pulldiscoverymapping dictionary directly."""
1858
1858
1859 def dec(func):
1859 def dec(func):
1860 assert stepname not in pulldiscoverymapping
1860 assert stepname not in pulldiscoverymapping
1861 pulldiscoverymapping[stepname] = func
1861 pulldiscoverymapping[stepname] = func
1862 pulldiscoveryorder.append(stepname)
1862 pulldiscoveryorder.append(stepname)
1863 return func
1863 return func
1864
1864
1865 return dec
1865 return dec
1866
1866
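# Illustrative sketch (not part of this module): a new discovery step would be
# registered through the decorator above; the step name b'example:nothing' and
# the callback body below are hypothetical.
#
#     @pulldiscovery(b'example:nothing')
#     def _pulldiscoverynothing(pullop):
#         pullop.repo.ui.debug(b'running an extra discovery step\n')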
1867
1867
1868 def _pulldiscovery(pullop):
1868 def _pulldiscovery(pullop):
1869 """Run all discovery steps"""
1869 """Run all discovery steps"""
1870 for stepname in pulldiscoveryorder:
1870 for stepname in pulldiscoveryorder:
1871 step = pulldiscoverymapping[stepname]
1871 step = pulldiscoverymapping[stepname]
1872 step(pullop)
1872 step(pullop)
1873
1873
1874
1874
1875 @pulldiscovery(b'b1:bookmarks')
1875 @pulldiscovery(b'b1:bookmarks')
1876 def _pullbookmarkbundle1(pullop):
1876 def _pullbookmarkbundle1(pullop):
1877 """fetch bookmark data in bundle1 case
1877 """fetch bookmark data in bundle1 case
1878
1878
1879 If not using bundle2, we have to fetch bookmarks before changeset
1879 If not using bundle2, we have to fetch bookmarks before changeset
1880 discovery to reduce the chance and impact of race conditions."""
1880 discovery to reduce the chance and impact of race conditions."""
1881 if pullop.remotebookmarks is not None:
1881 if pullop.remotebookmarks is not None:
1882 return
1882 return
1883 if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
1883 if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
1884 # all known bundle2 servers now support listkeys, but let's be nice with
1884 # all known bundle2 servers now support listkeys, but let's be nice with
1885 # new implementations.
1885 # new implementations.
1886 return
1886 return
1887 books = listkeys(pullop.remote, b'bookmarks')
1887 books = listkeys(pullop.remote, b'bookmarks')
1888 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1888 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1889
1889
1890
1890
1891 @pulldiscovery(b'changegroup')
1891 @pulldiscovery(b'changegroup')
1892 def _pulldiscoverychangegroup(pullop):
1892 def _pulldiscoverychangegroup(pullop):
1893 """discovery phase for the pull
1893 """discovery phase for the pull
1894
1894
1895 Currently handles changeset discovery only; will change to handle all
1895 Currently handles changeset discovery only; will change to handle all
1896 discovery at some point."""
1896 discovery at some point."""
1897 tmp = discovery.findcommonincoming(
1897 tmp = discovery.findcommonincoming(
1898 pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
1898 pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
1899 )
1899 )
1900 common, fetch, rheads = tmp
1900 common, fetch, rheads = tmp
1901 has_node = pullop.repo.unfiltered().changelog.index.has_node
1901 has_node = pullop.repo.unfiltered().changelog.index.has_node
1902 if fetch and rheads:
1902 if fetch and rheads:
1903 # If a remote head is filtered locally, put it back in common.
1903 # If a remote head is filtered locally, put it back in common.
1904 #
1904 #
1905 # This is a hackish solution to catch most of the "common but locally
1905 # This is a hackish solution to catch most of the "common but locally
1906 # hidden" situations. We do not perform discovery on the unfiltered
1906 # hidden" situations. We do not perform discovery on the unfiltered
1907 # repository because it ends up doing a pathological number of round
1907 # repository because it ends up doing a pathological number of round
1908 # trips for a huge number of changesets we do not care about.
1908 # trips for a huge number of changesets we do not care about.
1909 #
1909 #
1910 # If a set of such "common but filtered" changesets exists on the server
1910 # If a set of such "common but filtered" changesets exists on the server
1911 # but does not include a remote head, we will not be able to detect it.
1911 # but does not include a remote head, we will not be able to detect it.
1912 scommon = set(common)
1912 scommon = set(common)
1913 for n in rheads:
1913 for n in rheads:
1914 if has_node(n):
1914 if has_node(n):
1915 if n not in scommon:
1915 if n not in scommon:
1916 common.append(n)
1916 common.append(n)
1917 if set(rheads).issubset(set(common)):
1917 if set(rheads).issubset(set(common)):
1918 fetch = []
1918 fetch = []
1919 pullop.common = common
1919 pullop.common = common
1920 pullop.fetch = fetch
1920 pullop.fetch = fetch
1921 pullop.rheads = rheads
1921 pullop.rheads = rheads
1922
1922
1923
1923
1924 def _pullbundle2(pullop):
1924 def _pullbundle2(pullop):
1925 """pull data using bundle2
1925 """pull data using bundle2
1926
1926
1927 For now, the only supported data is the changegroup."""
1927 For now, the only supported data is the changegroup."""
1928 kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
1928 kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
1929
1929
1930 # make ui easier to access
1930 # make ui easier to access
1931 ui = pullop.repo.ui
1931 ui = pullop.repo.ui
1932
1932
1933 # At the moment we don't do stream clones over bundle2. If that is
1933 # At the moment we don't do stream clones over bundle2. If that is
1934 # implemented then here's where the check for that will go.
1934 # implemented then here's where the check for that will go.
1935 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1935 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1936
1936
1937 # declare pull perimeters
1937 # declare pull perimeters
1938 kwargs[b'common'] = pullop.common
1938 kwargs[b'common'] = pullop.common
1939 kwargs[b'heads'] = pullop.heads or pullop.rheads
1939 kwargs[b'heads'] = pullop.heads or pullop.rheads
1940
1940
1941 # check whether the server supports narrow, and if so add includepats and excludepats
1941 # check whether the server supports narrow, and if so add includepats and excludepats
1942 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1942 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1943 if servernarrow and pullop.includepats:
1943 if servernarrow and pullop.includepats:
1944 kwargs[b'includepats'] = pullop.includepats
1944 kwargs[b'includepats'] = pullop.includepats
1945 if servernarrow and pullop.excludepats:
1945 if servernarrow and pullop.excludepats:
1946 kwargs[b'excludepats'] = pullop.excludepats
1946 kwargs[b'excludepats'] = pullop.excludepats
1947
1947
1948 if streaming:
1948 if streaming:
1949 kwargs[b'cg'] = False
1949 kwargs[b'cg'] = False
1950 kwargs[b'stream'] = True
1950 kwargs[b'stream'] = True
1951 pullop.stepsdone.add(b'changegroup')
1951 pullop.stepsdone.add(b'changegroup')
1952 pullop.stepsdone.add(b'phases')
1952 pullop.stepsdone.add(b'phases')
1953
1953
1954 else:
1954 else:
1955 # pulling changegroup
1955 # pulling changegroup
1956 pullop.stepsdone.add(b'changegroup')
1956 pullop.stepsdone.add(b'changegroup')
1957
1957
1958 kwargs[b'cg'] = pullop.fetch
1958 kwargs[b'cg'] = pullop.fetch
1959
1959
1960 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1960 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1961 hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
1961 hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
1962 if not legacyphase and hasbinaryphase:
1962 if not legacyphase and hasbinaryphase:
1963 kwargs[b'phases'] = True
1963 kwargs[b'phases'] = True
1964 pullop.stepsdone.add(b'phases')
1964 pullop.stepsdone.add(b'phases')
1965
1965
1966 if b'listkeys' in pullop.remotebundle2caps:
1966 if b'listkeys' in pullop.remotebundle2caps:
1967 if b'phases' not in pullop.stepsdone:
1967 if b'phases' not in pullop.stepsdone:
1968 kwargs[b'listkeys'] = [b'phases']
1968 kwargs[b'listkeys'] = [b'phases']
1969
1969
1970 bookmarksrequested = False
1970 bookmarksrequested = False
1971 legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
1971 legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
1972 hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
1972 hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
1973
1973
1974 if pullop.remotebookmarks is not None:
1974 if pullop.remotebookmarks is not None:
1975 pullop.stepsdone.add(b'request-bookmarks')
1975 pullop.stepsdone.add(b'request-bookmarks')
1976
1976
1977 if (
1977 if (
1978 b'request-bookmarks' not in pullop.stepsdone
1978 b'request-bookmarks' not in pullop.stepsdone
1979 and pullop.remotebookmarks is None
1979 and pullop.remotebookmarks is None
1980 and not legacybookmark
1980 and not legacybookmark
1981 and hasbinarybook
1981 and hasbinarybook
1982 ):
1982 ):
1983 kwargs[b'bookmarks'] = True
1983 kwargs[b'bookmarks'] = True
1984 bookmarksrequested = True
1984 bookmarksrequested = True
1985
1985
1986 if b'listkeys' in pullop.remotebundle2caps:
1986 if b'listkeys' in pullop.remotebundle2caps:
1987 if b'request-bookmarks' not in pullop.stepsdone:
1987 if b'request-bookmarks' not in pullop.stepsdone:
1988 # make sure to always include bookmark data when migrating
1988 # make sure to always include bookmark data when migrating
1989 # `hg incoming --bundle` to using this function.
1989 # `hg incoming --bundle` to using this function.
1990 pullop.stepsdone.add(b'request-bookmarks')
1990 pullop.stepsdone.add(b'request-bookmarks')
1991 kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
1991 kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
1992
1992
1993 # If this is a full pull / clone and the server supports the clone bundles
1993 # If this is a full pull / clone and the server supports the clone bundles
1994 # feature, tell the server whether we attempted a clone bundle. The
1994 # feature, tell the server whether we attempted a clone bundle. The
1995 # presence of this flag indicates the client supports clone bundles. This
1995 # presence of this flag indicates the client supports clone bundles. This
1996 # will enable the server to treat clients that support clone bundles
1996 # will enable the server to treat clients that support clone bundles
1997 # differently from those that don't.
1997 # differently from those that don't.
1998 if (
1998 if (
1999 pullop.remote.capable(b'clonebundles')
1999 pullop.remote.capable(b'clonebundles')
2000 and pullop.heads is None
2000 and pullop.heads is None
2001 and list(pullop.common) == [nullid]
2001 and list(pullop.common) == [nullid]
2002 ):
2002 ):
2003 kwargs[b'cbattempted'] = pullop.clonebundleattempted
2003 kwargs[b'cbattempted'] = pullop.clonebundleattempted
2004
2004
2005 if streaming:
2005 if streaming:
2006 pullop.repo.ui.status(_(b'streaming all changes\n'))
2006 pullop.repo.ui.status(_(b'streaming all changes\n'))
2007 elif not pullop.fetch:
2007 elif not pullop.fetch:
2008 pullop.repo.ui.status(_(b"no changes found\n"))
2008 pullop.repo.ui.status(_(b"no changes found\n"))
2009 pullop.cgresult = 0
2009 pullop.cgresult = 0
2010 else:
2010 else:
2011 if pullop.heads is None and list(pullop.common) == [nullid]:
2011 if pullop.heads is None and list(pullop.common) == [nullid]:
2012 pullop.repo.ui.status(_(b"requesting all changes\n"))
2012 pullop.repo.ui.status(_(b"requesting all changes\n"))
2013 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2013 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2014 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
2014 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
2015 if obsolete.commonversion(remoteversions) is not None:
2015 if obsolete.commonversion(remoteversions) is not None:
2016 kwargs[b'obsmarkers'] = True
2016 kwargs[b'obsmarkers'] = True
2017 pullop.stepsdone.add(b'obsmarkers')
2017 pullop.stepsdone.add(b'obsmarkers')
2018 _pullbundle2extraprepare(pullop, kwargs)
2018 _pullbundle2extraprepare(pullop, kwargs)
2019
2019
2020 with pullop.remote.commandexecutor() as e:
2020 with pullop.remote.commandexecutor() as e:
2021 args = dict(kwargs)
2021 args = dict(kwargs)
2022 args[b'source'] = b'pull'
2022 args[b'source'] = b'pull'
2023 bundle = e.callcommand(b'getbundle', args).result()
2023 bundle = e.callcommand(b'getbundle', args).result()
2024
2024
2025 try:
2025 try:
2026 op = bundle2.bundleoperation(
2026 op = bundle2.bundleoperation(
2027 pullop.repo, pullop.gettransaction, source=b'pull'
2027 pullop.repo, pullop.gettransaction, source=b'pull'
2028 )
2028 )
2029 op.modes[b'bookmarks'] = b'records'
2029 op.modes[b'bookmarks'] = b'records'
2030 bundle2.processbundle(pullop.repo, bundle, op=op)
2030 bundle2.processbundle(pullop.repo, bundle, op=op)
2031 except bundle2.AbortFromPart as exc:
2031 except bundle2.AbortFromPart as exc:
2032 pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
2032 pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
2033 raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
2033 raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
2034 except error.BundleValueError as exc:
2034 except error.BundleValueError as exc:
2035 raise error.Abort(_(b'missing support for %s') % exc)
2035 raise error.Abort(_(b'missing support for %s') % exc)
2036
2036
2037 if pullop.fetch:
2037 if pullop.fetch:
2038 pullop.cgresult = bundle2.combinechangegroupresults(op)
2038 pullop.cgresult = bundle2.combinechangegroupresults(op)
2039
2039
2040 # processing phases change
2040 # processing phases change
2041 for namespace, value in op.records[b'listkeys']:
2041 for namespace, value in op.records[b'listkeys']:
2042 if namespace == b'phases':
2042 if namespace == b'phases':
2043 _pullapplyphases(pullop, value)
2043 _pullapplyphases(pullop, value)
2044
2044
2045 # processing bookmark update
2045 # processing bookmark update
2046 if bookmarksrequested:
2046 if bookmarksrequested:
2047 books = {}
2047 books = {}
2048 for record in op.records[b'bookmarks']:
2048 for record in op.records[b'bookmarks']:
2049 books[record[b'bookmark']] = record[b"node"]
2049 books[record[b'bookmark']] = record[b"node"]
2050 pullop.remotebookmarks = books
2050 pullop.remotebookmarks = books
2051 else:
2051 else:
2052 for namespace, value in op.records[b'listkeys']:
2052 for namespace, value in op.records[b'listkeys']:
2053 if namespace == b'bookmarks':
2053 if namespace == b'bookmarks':
2054 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
2054 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
2055
2055
2056 # bookmark data were either already there or pulled in the bundle
2056 # bookmark data were either already there or pulled in the bundle
2057 if pullop.remotebookmarks is not None:
2057 if pullop.remotebookmarks is not None:
2058 _pullbookmarks(pullop)
2058 _pullbookmarks(pullop)
2059
2059
2060
2060
2061 def _pullbundle2extraprepare(pullop, kwargs):
2061 def _pullbundle2extraprepare(pullop, kwargs):
2062 """hook function so that extensions can extend the getbundle call"""
2062 """hook function so that extensions can extend the getbundle call"""
2063
2063
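# Illustrative sketch (hypothetical extension code, not part of this module):
# extensions usually extend this hook by wrapping it, e.g. with
# mercurial.extensions.wrapfunction; the argument key b'example:part' is made up.
#
#     from mercurial import exchange, extensions
#
#     def _extraprepare(orig, pullop, kwargs):
#         kwargs[b'example:part'] = True
#         return orig(pullop, kwargs)
#
#     extensions.wrapfunction(exchange, '_pullbundle2extraprepare', _extraprepare)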
2064
2064
2065 def _pullchangeset(pullop):
2065 def _pullchangeset(pullop):
2066 """pull changeset from unbundle into the local repo"""
2066 """pull changeset from unbundle into the local repo"""
2067 # We delay opening the transaction as late as possible so we
2067 # We delay opening the transaction as late as possible so we
2068 # don't open a transaction for nothing and don't break a future
2068 # don't open a transaction for nothing and don't break a future
2069 # useful rollback call
2069 # useful rollback call
2070 if b'changegroup' in pullop.stepsdone:
2070 if b'changegroup' in pullop.stepsdone:
2071 return
2071 return
2072 pullop.stepsdone.add(b'changegroup')
2072 pullop.stepsdone.add(b'changegroup')
2073 if not pullop.fetch:
2073 if not pullop.fetch:
2074 pullop.repo.ui.status(_(b"no changes found\n"))
2074 pullop.repo.ui.status(_(b"no changes found\n"))
2075 pullop.cgresult = 0
2075 pullop.cgresult = 0
2076 return
2076 return
2077 tr = pullop.gettransaction()
2077 tr = pullop.gettransaction()
2078 if pullop.heads is None and list(pullop.common) == [nullid]:
2078 if pullop.heads is None and list(pullop.common) == [nullid]:
2079 pullop.repo.ui.status(_(b"requesting all changes\n"))
2079 pullop.repo.ui.status(_(b"requesting all changes\n"))
2080 elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
2080 elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
2081 # issue1320, avoid a race if remote changed after discovery
2081 # issue1320, avoid a race if remote changed after discovery
2082 pullop.heads = pullop.rheads
2082 pullop.heads = pullop.rheads
2083
2083
2084 if pullop.remote.capable(b'getbundle'):
2084 if pullop.remote.capable(b'getbundle'):
2085 # TODO: get bundlecaps from remote
2085 # TODO: get bundlecaps from remote
2086 cg = pullop.remote.getbundle(
2086 cg = pullop.remote.getbundle(
2087 b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
2087 b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
2088 )
2088 )
2089 elif pullop.heads is None:
2089 elif pullop.heads is None:
2090 with pullop.remote.commandexecutor() as e:
2090 with pullop.remote.commandexecutor() as e:
2091 cg = e.callcommand(
2091 cg = e.callcommand(
2092 b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull',}
2092 b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull',}
2093 ).result()
2093 ).result()
2094
2094
2095 elif not pullop.remote.capable(b'changegroupsubset'):
2095 elif not pullop.remote.capable(b'changegroupsubset'):
2096 raise error.Abort(
2096 raise error.Abort(
2097 _(
2097 _(
2098 b"partial pull cannot be done because "
2098 b"partial pull cannot be done because "
2099 b"other repository doesn't support "
2099 b"other repository doesn't support "
2100 b"changegroupsubset."
2100 b"changegroupsubset."
2101 )
2101 )
2102 )
2102 )
2103 else:
2103 else:
2104 with pullop.remote.commandexecutor() as e:
2104 with pullop.remote.commandexecutor() as e:
2105 cg = e.callcommand(
2105 cg = e.callcommand(
2106 b'changegroupsubset',
2106 b'changegroupsubset',
2107 {
2107 {
2108 b'bases': pullop.fetch,
2108 b'bases': pullop.fetch,
2109 b'heads': pullop.heads,
2109 b'heads': pullop.heads,
2110 b'source': b'pull',
2110 b'source': b'pull',
2111 },
2111 },
2112 ).result()
2112 ).result()
2113
2113
2114 bundleop = bundle2.applybundle(
2114 bundleop = bundle2.applybundle(
2115 pullop.repo, cg, tr, b'pull', pullop.remote.url()
2115 pullop.repo, cg, tr, b'pull', pullop.remote.url()
2116 )
2116 )
2117 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2117 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2118
2118
2119
2119
2120 def _pullphase(pullop):
2120 def _pullphase(pullop):
2121 # Get remote phases data from remote
2121 # Get remote phases data from remote
2122 if b'phases' in pullop.stepsdone:
2122 if b'phases' in pullop.stepsdone:
2123 return
2123 return
2124 remotephases = listkeys(pullop.remote, b'phases')
2124 remotephases = listkeys(pullop.remote, b'phases')
2125 _pullapplyphases(pullop, remotephases)
2125 _pullapplyphases(pullop, remotephases)
2126
2126
2127
2127
2128 def _pullapplyphases(pullop, remotephases):
2128 def _pullapplyphases(pullop, remotephases):
2129 """apply phase movement from observed remote state"""
2129 """apply phase movement from observed remote state"""
2130 if b'phases' in pullop.stepsdone:
2130 if b'phases' in pullop.stepsdone:
2131 return
2131 return
2132 pullop.stepsdone.add(b'phases')
2132 pullop.stepsdone.add(b'phases')
2133 publishing = bool(remotephases.get(b'publishing', False))
2133 publishing = bool(remotephases.get(b'publishing', False))
2134 if remotephases and not publishing:
2134 if remotephases and not publishing:
2135 # remote is new and non-publishing
2135 # remote is new and non-publishing
2136 pheads, _dr = phases.analyzeremotephases(
2136 pheads, _dr = phases.analyzeremotephases(
2137 pullop.repo, pullop.pulledsubset, remotephases
2137 pullop.repo, pullop.pulledsubset, remotephases
2138 )
2138 )
2139 dheads = pullop.pulledsubset
2139 dheads = pullop.pulledsubset
2140 else:
2140 else:
2141 # Remote is old or publishing; all common changesets
2141 # Remote is old or publishing; all common changesets
2142 # should be seen as public
2142 # should be seen as public
2143 pheads = pullop.pulledsubset
2143 pheads = pullop.pulledsubset
2144 dheads = []
2144 dheads = []
2145 unfi = pullop.repo.unfiltered()
2145 unfi = pullop.repo.unfiltered()
2146 phase = unfi._phasecache.phase
2146 phase = unfi._phasecache.phase
2147 rev = unfi.changelog.index.get_rev
2147 rev = unfi.changelog.index.get_rev
2148 public = phases.public
2148 public = phases.public
2149 draft = phases.draft
2149 draft = phases.draft
2150
2150
2151 # exclude changesets already public locally and update the others
2151 # exclude changesets already public locally and update the others
2152 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
2152 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
2153 if pheads:
2153 if pheads:
2154 tr = pullop.gettransaction()
2154 tr = pullop.gettransaction()
2155 phases.advanceboundary(pullop.repo, tr, public, pheads)
2155 phases.advanceboundary(pullop.repo, tr, public, pheads)
2156
2156
2157 # exclude changesets already draft locally and update the others
2157 # exclude changesets already draft locally and update the others
2158 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
2158 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
2159 if dheads:
2159 if dheads:
2160 tr = pullop.gettransaction()
2160 tr = pullop.gettransaction()
2161 phases.advanceboundary(pullop.repo, tr, draft, dheads)
2161 phases.advanceboundary(pullop.repo, tr, draft, dheads)
2162
2162
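# Note: per the branches above, a publishing remote (or one exposing no phase
# data at all) causes every changeset in pullop.pulledsubset to be treated as
# a public head, so everything pulled is advanced to public and nothing is
# moved to draft.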
2163
2163
2164 def _pullbookmarks(pullop):
2164 def _pullbookmarks(pullop):
2165 """process the remote bookmark information to update the local one"""
2165 """process the remote bookmark information to update the local one"""
2166 if b'bookmarks' in pullop.stepsdone:
2166 if b'bookmarks' in pullop.stepsdone:
2167 return
2167 return
2168 pullop.stepsdone.add(b'bookmarks')
2168 pullop.stepsdone.add(b'bookmarks')
2169 repo = pullop.repo
2169 repo = pullop.repo
2170 remotebookmarks = pullop.remotebookmarks
2170 remotebookmarks = pullop.remotebookmarks
2171 bookmod.updatefromremote(
2171 bookmod.updatefromremote(
2172 repo.ui,
2172 repo.ui,
2173 repo,
2173 repo,
2174 remotebookmarks,
2174 remotebookmarks,
2175 pullop.remote.url(),
2175 pullop.remote.url(),
2176 pullop.gettransaction,
2176 pullop.gettransaction,
2177 explicit=pullop.explicitbookmarks,
2177 explicit=pullop.explicitbookmarks,
2178 )
2178 )
2179
2179
2180
2180
2181 def _pullobsolete(pullop):
2181 def _pullobsolete(pullop):
2182 """utility function to pull obsolete markers from a remote
2182 """utility function to pull obsolete markers from a remote
2183
2183
2184 `gettransaction` is a function that returns the pull transaction, creating
2184 `gettransaction` is a function that returns the pull transaction, creating
2185 one if necessary. We return the transaction to inform the calling code that
2185 one if necessary. We return the transaction to inform the calling code that
2186 a new transaction has been created (when applicable).
2186 a new transaction has been created (when applicable).
2187 
2187 
2188 Exists mostly to allow overriding for experimentation purposes."""
2188 Exists mostly to allow overriding for experimentation purposes."""
2189 if b'obsmarkers' in pullop.stepsdone:
2189 if b'obsmarkers' in pullop.stepsdone:
2190 return
2190 return
2191 pullop.stepsdone.add(b'obsmarkers')
2191 pullop.stepsdone.add(b'obsmarkers')
2192 tr = None
2192 tr = None
2193 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2193 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2194 pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
2194 pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
2195 remoteobs = listkeys(pullop.remote, b'obsolete')
2195 remoteobs = listkeys(pullop.remote, b'obsolete')
2196 if b'dump0' in remoteobs:
2196 if b'dump0' in remoteobs:
2197 tr = pullop.gettransaction()
2197 tr = pullop.gettransaction()
2198 markers = []
2198 markers = []
2199 for key in sorted(remoteobs, reverse=True):
2199 for key in sorted(remoteobs, reverse=True):
2200 if key.startswith(b'dump'):
2200 if key.startswith(b'dump'):
2201 data = util.b85decode(remoteobs[key])
2201 data = util.b85decode(remoteobs[key])
2202 version, newmarks = obsolete._readmarkers(data)
2202 version, newmarks = obsolete._readmarkers(data)
2203 markers += newmarks
2203 markers += newmarks
2204 if markers:
2204 if markers:
2205 pullop.repo.obsstore.add(tr, markers)
2205 pullop.repo.obsstore.add(tr, markers)
2206 pullop.repo.invalidatevolatilesets()
2206 pullop.repo.invalidatevolatilesets()
2207 return tr
2207 return tr
2208
2208
2209
2209
2210 def applynarrowacl(repo, kwargs):
2210 def applynarrowacl(repo, kwargs):
2211 """Apply narrow fetch access control.
2211 """Apply narrow fetch access control.
2212
2212
2213 This massages the named arguments for getbundle wire protocol commands
2213 This massages the named arguments for getbundle wire protocol commands
2214 so requested data is filtered through access control rules.
2214 so requested data is filtered through access control rules.
2215 """
2215 """
2216 ui = repo.ui
2216 ui = repo.ui
2217 # TODO this assumes existence of HTTP and is a layering violation.
2217 # TODO this assumes existence of HTTP and is a layering violation.
2218 username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
2218 username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
2219 user_includes = ui.configlist(
2219 user_includes = ui.configlist(
2220 _NARROWACL_SECTION,
2220 _NARROWACL_SECTION,
2221 username + b'.includes',
2221 username + b'.includes',
2222 ui.configlist(_NARROWACL_SECTION, b'default.includes'),
2222 ui.configlist(_NARROWACL_SECTION, b'default.includes'),
2223 )
2223 )
2224 user_excludes = ui.configlist(
2224 user_excludes = ui.configlist(
2225 _NARROWACL_SECTION,
2225 _NARROWACL_SECTION,
2226 username + b'.excludes',
2226 username + b'.excludes',
2227 ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
2227 ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
2228 )
2228 )
2229 if not user_includes:
2229 if not user_includes:
2230 raise error.Abort(
2230 raise error.Abort(
2231 _(b"%s configuration for user %s is empty")
2231 _(b"%s configuration for user %s is empty")
2232 % (_NARROWACL_SECTION, username)
2232 % (_NARROWACL_SECTION, username)
2233 )
2233 )
2234
2234
2235 user_includes = [
2235 user_includes = [
2236 b'path:.' if p == b'*' else b'path:' + p for p in user_includes
2236 b'path:.' if p == b'*' else b'path:' + p for p in user_includes
2237 ]
2237 ]
2238 user_excludes = [
2238 user_excludes = [
2239 b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
2239 b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
2240 ]
2240 ]
2241
2241
2242 req_includes = set(kwargs.get('includepats', []))
2242 req_includes = set(kwargs.get('includepats', []))
2243 req_excludes = set(kwargs.get('excludepats', []))
2243 req_excludes = set(kwargs.get('excludepats', []))
2244
2244
2245 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
2245 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
2246 req_includes, req_excludes, user_includes, user_excludes
2246 req_includes, req_excludes, user_includes, user_excludes
2247 )
2247 )
2248
2248
2249 if invalid_includes:
2249 if invalid_includes:
2250 raise error.Abort(
2250 raise error.Abort(
2251 _(b"The following includes are not accessible for %s: %s")
2251 _(b"The following includes are not accessible for %s: %s")
2252 % (username, stringutil.pprint(invalid_includes))
2252 % (username, stringutil.pprint(invalid_includes))
2253 )
2253 )
2254
2254
2255 new_args = {}
2255 new_args = {}
2256 new_args.update(kwargs)
2256 new_args.update(kwargs)
2257 new_args['narrow'] = True
2257 new_args['narrow'] = True
2258 new_args['narrow_acl'] = True
2258 new_args['narrow_acl'] = True
2259 new_args['includepats'] = req_includes
2259 new_args['includepats'] = req_includes
2260 if req_excludes:
2260 if req_excludes:
2261 new_args['excludepats'] = req_excludes
2261 new_args['excludepats'] = req_excludes
2262
2262
2263 return new_args
2263 return new_args
2264
2264
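# Illustrative configuration sketch (assumed layout; the actual section name is
# the value of _NARROWACL_SECTION, defined elsewhere in this module, and the
# user and path names below are made up):
#
#     [<narrowacl-section>]
#     default.includes = dir/common
#     alice.includes = *
#     alice.excludes = dir/secret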
2265
2265
2266 def _computeellipsis(repo, common, heads, known, match, depth=None):
2266 def _computeellipsis(repo, common, heads, known, match, depth=None):
2267 """Compute the shape of a narrowed DAG.
2267 """Compute the shape of a narrowed DAG.
2268
2268
2269 Args:
2269 Args:
2270 repo: The repository we're transferring.
2270 repo: The repository we're transferring.
2271 common: The roots of the DAG range we're transferring.
2271 common: The roots of the DAG range we're transferring.
2272 May be just [nullid], which means all ancestors of heads.
2272 May be just [nullid], which means all ancestors of heads.
2273 heads: The heads of the DAG range we're transferring.
2273 heads: The heads of the DAG range we're transferring.
2274 match: The narrowmatcher that allows us to identify relevant changes.
2274 match: The narrowmatcher that allows us to identify relevant changes.
2275 depth: If not None, only consider nodes to be full nodes if they are at
2275 depth: If not None, only consider nodes to be full nodes if they are at
2276 most depth changesets away from one of heads.
2276 most depth changesets away from one of heads.
2277
2277
2278 Returns:
2278 Returns:
2279 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
2279 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
2280
2280
2281 visitnodes: The list of nodes (either full or ellipsis) which
2281 visitnodes: The list of nodes (either full or ellipsis) which
2282 need to be sent to the client.
2282 need to be sent to the client.
2283 relevant_nodes: The set of changelog nodes which change a file inside
2283 relevant_nodes: The set of changelog nodes which change a file inside
2284 the narrowspec. The client needs these as non-ellipsis nodes.
2284 the narrowspec. The client needs these as non-ellipsis nodes.
2285 ellipsisroots: A dict of {rev: parents} that is used in
2285 ellipsisroots: A dict of {rev: parents} that is used in
2286 narrowchangegroup to produce ellipsis nodes with the
2286 narrowchangegroup to produce ellipsis nodes with the
2287 correct parents.
2287 correct parents.
2288 """
2288 """
2289 cl = repo.changelog
2289 cl = repo.changelog
2290 mfl = repo.manifestlog
2290 mfl = repo.manifestlog
2291
2291
2292 clrev = cl.rev
2292 clrev = cl.rev
2293
2293
2294 commonrevs = {clrev(n) for n in common} | {nullrev}
2294 commonrevs = {clrev(n) for n in common} | {nullrev}
2295 headsrevs = {clrev(n) for n in heads}
2295 headsrevs = {clrev(n) for n in heads}
2296
2296
2297 if depth:
2297 if depth:
2298 revdepth = {h: 0 for h in headsrevs}
2298 revdepth = {h: 0 for h in headsrevs}
2299
2299
2300 ellipsisheads = collections.defaultdict(set)
2300 ellipsisheads = collections.defaultdict(set)
2301 ellipsisroots = collections.defaultdict(set)
2301 ellipsisroots = collections.defaultdict(set)
2302
2302
2303 def addroot(head, curchange):
2303 def addroot(head, curchange):
2304 """Add a root to an ellipsis head, splitting heads with 3 roots."""
2304 """Add a root to an ellipsis head, splitting heads with 3 roots."""
2305 ellipsisroots[head].add(curchange)
2305 ellipsisroots[head].add(curchange)
2306 # Recursively split ellipsis heads with 3 roots by finding the
2306 # Recursively split ellipsis heads with 3 roots by finding the
2307 # roots' youngest common descendant which is an elided merge commit.
2307 # roots' youngest common descendant which is an elided merge commit.
2308 # That descendant takes 2 of the 3 roots as its own, and becomes a
2308 # That descendant takes 2 of the 3 roots as its own, and becomes a
2309 # root of the head.
2309 # root of the head.
2310 while len(ellipsisroots[head]) > 2:
2310 while len(ellipsisroots[head]) > 2:
2311 child, roots = splithead(head)
2311 child, roots = splithead(head)
2312 splitroots(head, child, roots)
2312 splitroots(head, child, roots)
2313 head = child # Recurse in case we just added a 3rd root
2313 head = child # Recurse in case we just added a 3rd root
2314
2314
2315 def splitroots(head, child, roots):
2315 def splitroots(head, child, roots):
2316 ellipsisroots[head].difference_update(roots)
2316 ellipsisroots[head].difference_update(roots)
2317 ellipsisroots[head].add(child)
2317 ellipsisroots[head].add(child)
2318 ellipsisroots[child].update(roots)
2318 ellipsisroots[child].update(roots)
2319 ellipsisroots[child].discard(child)
2319 ellipsisroots[child].discard(child)
2320
2320
2321 def splithead(head):
2321 def splithead(head):
2322 r1, r2, r3 = sorted(ellipsisroots[head])
2322 r1, r2, r3 = sorted(ellipsisroots[head])
2323 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
2323 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
2324 mid = repo.revs(
2324 mid = repo.revs(
2325 b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
2325 b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
2326 )
2326 )
2327 for j in mid:
2327 for j in mid:
2328 if j == nr2:
2328 if j == nr2:
2329 return nr2, (nr1, nr2)
2329 return nr2, (nr1, nr2)
2330 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
2330 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
2331 return j, (nr1, nr2)
2331 return j, (nr1, nr2)
2332 raise error.Abort(
2332 raise error.Abort(
2333 _(
2333 _(
2334 b'Failed to split up ellipsis node! head: %d, '
2334 b'Failed to split up ellipsis node! head: %d, '
2335 b'roots: %d %d %d'
2335 b'roots: %d %d %d'
2336 )
2336 )
2337 % (head, r1, r2, r3)
2337 % (head, r1, r2, r3)
2338 )
2338 )
2339
2339
2340 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
2340 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
2341 visit = reversed(missing)
2341 visit = reversed(missing)
2342 relevant_nodes = set()
2342 relevant_nodes = set()
2343 visitnodes = [cl.node(m) for m in missing]
2343 visitnodes = [cl.node(m) for m in missing]
2344 required = set(headsrevs) | known
2344 required = set(headsrevs) | known
2345 for rev in visit:
2345 for rev in visit:
2346 clrev = cl.changelogrevision(rev)
2346 clrev = cl.changelogrevision(rev)
2347 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
2347 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
2348 if depth is not None:
2348 if depth is not None:
2349 curdepth = revdepth[rev]
2349 curdepth = revdepth[rev]
2350 for p in ps:
2350 for p in ps:
2351 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
2351 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
2352 needed = False
2352 needed = False
2353 shallow_enough = depth is None or revdepth[rev] <= depth
2353 shallow_enough = depth is None or revdepth[rev] <= depth
2354 if shallow_enough:
2354 if shallow_enough:
2355 curmf = mfl[clrev.manifest].read()
2355 curmf = mfl[clrev.manifest].read()
2356 if ps:
2356 if ps:
2357 # We choose to not trust the changed files list in
2357 # We choose to not trust the changed files list in
2358 # changesets because it's not always correct. TODO: could
2358 # changesets because it's not always correct. TODO: could
2359 # we trust it for the non-merge case?
2359 # we trust it for the non-merge case?
2360 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
2360 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
2361 needed = bool(curmf.diff(p1mf, match))
2361 needed = bool(curmf.diff(p1mf, match))
2362 if not needed and len(ps) > 1:
2362 if not needed and len(ps) > 1:
2363 # For merge changes, the list of changed files is not
2363 # For merge changes, the list of changed files is not
2364 # helpful, since we need to emit the merge if a file
2364 # helpful, since we need to emit the merge if a file
2365 # in the narrow spec has changed on either side of the
2365 # in the narrow spec has changed on either side of the
2366 # merge. As a result, we do a manifest diff to check.
2366 # merge. As a result, we do a manifest diff to check.
2367 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2367 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2368 needed = bool(curmf.diff(p2mf, match))
2368 needed = bool(curmf.diff(p2mf, match))
2369 else:
2369 else:
2370 # For a root node, we need to include the node if any
2370 # For a root node, we need to include the node if any
2371 # files in the node match the narrowspec.
2371 # files in the node match the narrowspec.
2372 needed = any(curmf.walk(match))
2372 needed = any(curmf.walk(match))
2373
2373
2374 if needed:
2374 if needed:
2375 for head in ellipsisheads[rev]:
2375 for head in ellipsisheads[rev]:
2376 addroot(head, rev)
2376 addroot(head, rev)
2377 for p in ps:
2377 for p in ps:
2378 required.add(p)
2378 required.add(p)
2379 relevant_nodes.add(cl.node(rev))
2379 relevant_nodes.add(cl.node(rev))
2380 else:
2380 else:
2381 if not ps:
2381 if not ps:
2382 ps = [nullrev]
2382 ps = [nullrev]
2383 if rev in required:
2383 if rev in required:
2384 for head in ellipsisheads[rev]:
2384 for head in ellipsisheads[rev]:
2385 addroot(head, rev)
2385 addroot(head, rev)
2386 for p in ps:
2386 for p in ps:
2387 ellipsisheads[p].add(rev)
2387 ellipsisheads[p].add(rev)
2388 else:
2388 else:
2389 for p in ps:
2389 for p in ps:
2390 ellipsisheads[p] |= ellipsisheads[rev]
2390 ellipsisheads[p] |= ellipsisheads[rev]
2391
2391
2392 # add common changesets as roots of their reachable ellipsis heads
2392 # add common changesets as roots of their reachable ellipsis heads
2393 for c in commonrevs:
2393 for c in commonrevs:
2394 for head in ellipsisheads[c]:
2394 for head in ellipsisheads[c]:
2395 addroot(head, c)
2395 addroot(head, c)
2396 return visitnodes, relevant_nodes, ellipsisroots
2396 return visitnodes, relevant_nodes, ellipsisroots
2397
2397
2398
2398
2399 def caps20to10(repo, role):
2399 def caps20to10(repo, role):
2400 """return a set with appropriate options to use bundle20 during getbundle"""
2400 """return a set with appropriate options to use bundle20 during getbundle"""
2401 caps = {b'HG20'}
2401 caps = {b'HG20'}
2402 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2402 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2403 caps.add(b'bundle2=' + urlreq.quote(capsblob))
2403 caps.add(b'bundle2=' + urlreq.quote(capsblob))
2404 return caps
2404 return caps
2405
2405
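# For illustration, the set returned by caps20to10() has roughly this shape
# (the exact capability blob depends on the repository and role):
#
#     {b'HG20', b'bundle2=' + urlreq.quote(<encoded bundle2 capabilities>)}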
2406
2406
2407 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2407 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2408 getbundle2partsorder = []
2408 getbundle2partsorder = []
2409
2409
2410 # Mapping between step name and function
2410 # Mapping between step name and function
2411 #
2411 #
2412 # This exists to help extensions wrap steps if necessary
2412 # This exists to help extensions wrap steps if necessary
2413 getbundle2partsmapping = {}
2413 getbundle2partsmapping = {}
2414
2414
2415
2415
2416 def getbundle2partsgenerator(stepname, idx=None):
2416 def getbundle2partsgenerator(stepname, idx=None):
2417 """decorator for function generating bundle2 part for getbundle
2417 """decorator for function generating bundle2 part for getbundle
2418
2418
2419 The function is added to the step -> function mapping and appended to the
2419 The function is added to the step -> function mapping and appended to the
2420 list of steps. Beware that decorated functions will be added in order
2420 list of steps. Beware that decorated functions will be added in order
2421 (this may matter).
2421 (this may matter).
2422
2422
2423 You can only use this decorator for new steps; if you want to wrap a step
2423 You can only use this decorator for new steps; if you want to wrap a step
2424 from an extension, change the getbundle2partsmapping dictionary directly."""
2424 from an extension, change the getbundle2partsmapping dictionary directly."""
2425
2425
2426 def dec(func):
2426 def dec(func):
2427 assert stepname not in getbundle2partsmapping
2427 assert stepname not in getbundle2partsmapping
2428 getbundle2partsmapping[stepname] = func
2428 getbundle2partsmapping[stepname] = func
2429 if idx is None:
2429 if idx is None:
2430 getbundle2partsorder.append(stepname)
2430 getbundle2partsorder.append(stepname)
2431 else:
2431 else:
2432 getbundle2partsorder.insert(idx, stepname)
2432 getbundle2partsorder.insert(idx, stepname)
2433 return func
2433 return func
2434
2434
2435 return dec
2435 return dec
2436
2436
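# Illustrative sketch (hypothetical step, not part of this module): a new
# getbundle part generator would be registered through the decorator above.
#
#     @getbundle2partsgenerator(b'example:noop')
#     def _getbundleexamplepart(bundler, repo, source, **kwargs):
#         pass  # a real generator would call bundler.newpart(...)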
2437
2437
2438 def bundle2requested(bundlecaps):
2438 def bundle2requested(bundlecaps):
2439 if bundlecaps is not None:
2439 if bundlecaps is not None:
2440 return any(cap.startswith(b'HG2') for cap in bundlecaps)
2440 return any(cap.startswith(b'HG2') for cap in bundlecaps)
2441 return False
2441 return False
2442
2442
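# For illustration: bundle2requested([b'HG20', b'bundle2=...']) is True, while
# bundle2requested([b'HG10UN']) and bundle2requested(None) are both False.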
2443
2443
2444 def getbundlechunks(
2444 def getbundlechunks(
2445 repo, source, heads=None, common=None, bundlecaps=None, **kwargs
2445 repo, source, heads=None, common=None, bundlecaps=None, **kwargs
2446 ):
2446 ):
2447 """Return chunks constituting a bundle's raw data.
2447 """Return chunks constituting a bundle's raw data.
2448
2448
2449 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2449 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2450 passed.
2450 passed.
2451
2451
2452 Returns a 2-tuple of a dict with metadata about the generated bundle
2452 Returns a 2-tuple of a dict with metadata about the generated bundle
2453 and an iterator over raw chunks (of varying sizes).
2453 and an iterator over raw chunks (of varying sizes).
2454 """
2454 """
2455 kwargs = pycompat.byteskwargs(kwargs)
2455 kwargs = pycompat.byteskwargs(kwargs)
2456 info = {}
2456 info = {}
2457 usebundle2 = bundle2requested(bundlecaps)
2457 usebundle2 = bundle2requested(bundlecaps)
2458 # bundle10 case
2458 # bundle10 case
2459 if not usebundle2:
2459 if not usebundle2:
2460 if bundlecaps and not kwargs.get(b'cg', True):
2460 if bundlecaps and not kwargs.get(b'cg', True):
2461 raise ValueError(
2461 raise ValueError(
2462 _(b'request for bundle10 must include changegroup')
2462 _(b'request for bundle10 must include changegroup')
2463 )
2463 )
2464
2464
2465 if kwargs:
2465 if kwargs:
2466 raise ValueError(
2466 raise ValueError(
2467 _(b'unsupported getbundle arguments: %s')
2467 _(b'unsupported getbundle arguments: %s')
2468 % b', '.join(sorted(kwargs.keys()))
2468 % b', '.join(sorted(kwargs.keys()))
2469 )
2469 )
2470 outgoing = _computeoutgoing(repo, heads, common)
2470 outgoing = _computeoutgoing(repo, heads, common)
2471 info[b'bundleversion'] = 1
2471 info[b'bundleversion'] = 1
2472 return (
2472 return (
2473 info,
2473 info,
2474 changegroup.makestream(
2474 changegroup.makestream(
2475 repo, outgoing, b'01', source, bundlecaps=bundlecaps
2475 repo, outgoing, b'01', source, bundlecaps=bundlecaps
2476 ),
2476 ),
2477 )
2477 )
2478
2478
2479 # bundle20 case
2479 # bundle20 case
2480 info[b'bundleversion'] = 2
2480 info[b'bundleversion'] = 2
2481 b2caps = {}
2481 b2caps = {}
2482 for bcaps in bundlecaps:
2482 for bcaps in bundlecaps:
2483 if bcaps.startswith(b'bundle2='):
2483 if bcaps.startswith(b'bundle2='):
2484 blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
2484 blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
2485 b2caps.update(bundle2.decodecaps(blob))
2485 b2caps.update(bundle2.decodecaps(blob))
2486 bundler = bundle2.bundle20(repo.ui, b2caps)
2486 bundler = bundle2.bundle20(repo.ui, b2caps)
2487
2487
2488 kwargs[b'heads'] = heads
2488 kwargs[b'heads'] = heads
2489 kwargs[b'common'] = common
2489 kwargs[b'common'] = common
2490
2490
2491 for name in getbundle2partsorder:
2491 for name in getbundle2partsorder:
2492 func = getbundle2partsmapping[name]
2492 func = getbundle2partsmapping[name]
2493 func(
2493 func(
2494 bundler,
2494 bundler,
2495 repo,
2495 repo,
2496 source,
2496 source,
2497 bundlecaps=bundlecaps,
2497 bundlecaps=bundlecaps,
2498 b2caps=b2caps,
2498 b2caps=b2caps,
2499 **pycompat.strkwargs(kwargs)
2499 **pycompat.strkwargs(kwargs)
2500 )
2500 )
2501
2501
2502 info[b'prefercompressed'] = bundler.prefercompressed
2502 info[b'prefercompressed'] = bundler.prefercompressed
2503
2503
2504 return info, bundler.getchunks()
2504 return info, bundler.getchunks()
2505
2505
2506
2506
2507 @getbundle2partsgenerator(b'stream2')
2507 @getbundle2partsgenerator(b'stream2')
2508 def _getbundlestream2(bundler, repo, *args, **kwargs):
2508 def _getbundlestream2(bundler, repo, *args, **kwargs):
2509 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2509 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2510
2510
2511
2511
2512 @getbundle2partsgenerator(b'changegroup')
2512 @getbundle2partsgenerator(b'changegroup')
2513 def _getbundlechangegrouppart(
2513 def _getbundlechangegrouppart(
2514 bundler,
2514 bundler,
2515 repo,
2515 repo,
2516 source,
2516 source,
2517 bundlecaps=None,
2517 bundlecaps=None,
2518 b2caps=None,
2518 b2caps=None,
2519 heads=None,
2519 heads=None,
2520 common=None,
2520 common=None,
2521 **kwargs
2521 **kwargs
2522 ):
2522 ):
2523 """add a changegroup part to the requested bundle"""
2523 """add a changegroup part to the requested bundle"""
2524 if not kwargs.get('cg', True) or not b2caps:
2524 if not kwargs.get('cg', True) or not b2caps:
2525 return
2525 return
2526
2526
2527 version = b'01'
2527 version = b'01'
2528 cgversions = b2caps.get(b'changegroup')
2528 cgversions = b2caps.get(b'changegroup')
2529 if cgversions: # 3.1 and 3.2 ship with an empty value
2529 if cgversions: # 3.1 and 3.2 ship with an empty value
2530 cgversions = [
2530 cgversions = [
2531 v
2531 v
2532 for v in cgversions
2532 for v in cgversions
2533 if v in changegroup.supportedoutgoingversions(repo)
2533 if v in changegroup.supportedoutgoingversions(repo)
2534 ]
2534 ]
2535 if not cgversions:
2535 if not cgversions:
2536 raise error.Abort(_(b'no common changegroup version'))
2536 raise error.Abort(_(b'no common changegroup version'))
2537 version = max(cgversions)
2537 version = max(cgversions)
2538
2538
2539 outgoing = _computeoutgoing(repo, heads, common)
2539 outgoing = _computeoutgoing(repo, heads, common)
2540 if not outgoing.missing:
2540 if not outgoing.missing:
2541 return
2541 return
2542
2542
2543 if kwargs.get('narrow', False):
2543 if kwargs.get('narrow', False):
2544 include = sorted(filter(bool, kwargs.get('includepats', [])))
2544 include = sorted(filter(bool, kwargs.get('includepats', [])))
2545 exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
2545 exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
2546 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2546 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2547 else:
2547 else:
2548 matcher = None
2548 matcher = None
2549
2549
2550 cgstream = changegroup.makestream(
2550 cgstream = changegroup.makestream(
2551 repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
2551 repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
2552 )
2552 )
2553
2553
2554 part = bundler.newpart(b'changegroup', data=cgstream)
2554 part = bundler.newpart(b'changegroup', data=cgstream)
2555 if cgversions:
2555 if cgversions:
2556 part.addparam(b'version', version)
2556 part.addparam(b'version', version)
2557
2557
2558 part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
2558 part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
2559
2559
2560 if b'treemanifest' in repo.requirements:
2560 if b'treemanifest' in repo.requirements:
2561 part.addparam(b'treemanifest', b'1')
2561 part.addparam(b'treemanifest', b'1')
2562
2562
2563 if b'exp-sidedata-flag' in repo.requirements:
2563 if b'exp-sidedata-flag' in repo.requirements:
2564 part.addparam(b'exp-sidedata', b'1')
2564 part.addparam(b'exp-sidedata', b'1')
2565
2565
2566 if (
2566 if (
2567 kwargs.get('narrow', False)
2567 kwargs.get('narrow', False)
2568 and kwargs.get('narrow_acl', False)
2568 and kwargs.get('narrow_acl', False)
2569 and (include or exclude)
2569 and (include or exclude)
2570 ):
2570 ):
2571 # this is mandatory because otherwise ACL clients won't work
2571 # this is mandatory because otherwise ACL clients won't work
2572 narrowspecpart = bundler.newpart(b'Narrow:responsespec')
2572 narrowspecpart = bundler.newpart(b'Narrow:responsespec')
2573 narrowspecpart.data = b'%s\0%s' % (
2573 narrowspecpart.data = b'%s\0%s' % (
2574 b'\n'.join(include),
2574 b'\n'.join(include),
2575 b'\n'.join(exclude),
2575 b'\n'.join(exclude),
2576 )
2576 )
2577
2577
2578
2578
2579 @getbundle2partsgenerator(b'bookmarks')
2579 @getbundle2partsgenerator(b'bookmarks')
2580 def _getbundlebookmarkpart(
2580 def _getbundlebookmarkpart(
2581 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2581 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2582 ):
2582 ):
2583 """add a bookmark part to the requested bundle"""
2583 """add a bookmark part to the requested bundle"""
2584 if not kwargs.get('bookmarks', False):
2584 if not kwargs.get('bookmarks', False):
2585 return
2585 return
2586 if not b2caps or b'bookmarks' not in b2caps:
2586 if not b2caps or b'bookmarks' not in b2caps:
2587 raise error.Abort(_(b'no common bookmarks exchange method'))
2587 raise error.Abort(_(b'no common bookmarks exchange method'))
2588 books = bookmod.listbinbookmarks(repo)
2588 books = bookmod.listbinbookmarks(repo)
2589 data = bookmod.binaryencode(books)
2589 data = bookmod.binaryencode(books)
2590 if data:
2590 if data:
2591 bundler.newpart(b'bookmarks', data=data)
2591 bundler.newpart(b'bookmarks', data=data)
2592
2592
2593
2593
2594 @getbundle2partsgenerator(b'listkeys')
2594 @getbundle2partsgenerator(b'listkeys')
2595 def _getbundlelistkeysparts(
2595 def _getbundlelistkeysparts(
2596 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2596 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2597 ):
2597 ):
2598 """add parts containing listkeys namespaces to the requested bundle"""
2598 """add parts containing listkeys namespaces to the requested bundle"""
2599 listkeys = kwargs.get('listkeys', ())
2599 listkeys = kwargs.get('listkeys', ())
2600 for namespace in listkeys:
2600 for namespace in listkeys:
2601 part = bundler.newpart(b'listkeys')
2601 part = bundler.newpart(b'listkeys')
2602 part.addparam(b'namespace', namespace)
2602 part.addparam(b'namespace', namespace)
2603 keys = repo.listkeys(namespace).items()
2603 keys = repo.listkeys(namespace).items()
2604 part.data = pushkey.encodekeys(keys)
2604 part.data = pushkey.encodekeys(keys)
2605
2605
2606
2606
2607 @getbundle2partsgenerator(b'obsmarkers')
2607 @getbundle2partsgenerator(b'obsmarkers')
2608 def _getbundleobsmarkerpart(
2608 def _getbundleobsmarkerpart(
2609 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2609 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2610 ):
2610 ):
2611 """add an obsolescence markers part to the requested bundle"""
2611 """add an obsolescence markers part to the requested bundle"""
2612 if kwargs.get('obsmarkers', False):
2612 if kwargs.get('obsmarkers', False):
2613 if heads is None:
2613 if heads is None:
2614 heads = repo.heads()
2614 heads = repo.heads()
2615 subset = [c.node() for c in repo.set(b'::%ln', heads)]
2615 subset = [c.node() for c in repo.set(b'::%ln', heads)]
2616 markers = repo.obsstore.relevantmarkers(subset)
2616 markers = repo.obsstore.relevantmarkers(subset)
2617 markers = obsutil.sortedmarkers(markers)
2617 markers = obsutil.sortedmarkers(markers)
2618 bundle2.buildobsmarkerspart(bundler, markers)
2618 bundle2.buildobsmarkerspart(bundler, markers)
2619
2619
2620
2620
2621 @getbundle2partsgenerator(b'phases')
2621 @getbundle2partsgenerator(b'phases')
2622 def _getbundlephasespart(
2622 def _getbundlephasespart(
2623 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2623 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2624 ):
2624 ):
2625 """add phase heads part to the requested bundle"""
2625 """add phase heads part to the requested bundle"""
2626 if kwargs.get('phases', False):
2626 if kwargs.get('phases', False):
2627 if not b2caps or b'heads' not in b2caps.get(b'phases'):
2627 if not b2caps or b'heads' not in b2caps.get(b'phases'):
2628 raise error.Abort(_(b'no common phases exchange method'))
2628 raise error.Abort(_(b'no common phases exchange method'))
2629 if heads is None:
2629 if heads is None:
2630 heads = repo.heads()
2630 heads = repo.heads()
2631
2631
2632 headsbyphase = collections.defaultdict(set)
2632 headsbyphase = collections.defaultdict(set)
2633 if repo.publishing():
2633 if repo.publishing():
2634 headsbyphase[phases.public] = heads
2634 headsbyphase[phases.public] = heads
2635 else:
2635 else:
2636 # find the appropriate heads to move
2636 # find the appropriate heads to move
2637
2637
2638 phase = repo._phasecache.phase
2638 phase = repo._phasecache.phase
2639 node = repo.changelog.node
2639 node = repo.changelog.node
2640 rev = repo.changelog.rev
2640 rev = repo.changelog.rev
2641 for h in heads:
2641 for h in heads:
2642 headsbyphase[phase(repo, rev(h))].add(h)
2642 headsbyphase[phase(repo, rev(h))].add(h)
2643 seenphases = list(headsbyphase.keys())
2643 seenphases = list(headsbyphase.keys())
2644
2644
2645 # We do not handle anything but public and draft phases for now
2645 # We do not handle anything but public and draft phases for now
2646 if seenphases:
2646 if seenphases:
2647 assert max(seenphases) <= phases.draft
2647 assert max(seenphases) <= phases.draft
2648
2648
2649 # if client is pulling non-public changesets, we need to find
2649 # if client is pulling non-public changesets, we need to find
2650 # intermediate public heads.
2650 # intermediate public heads.
2651 draftheads = headsbyphase.get(phases.draft, set())
2651 draftheads = headsbyphase.get(phases.draft, set())
2652 if draftheads:
2652 if draftheads:
2653 publicheads = headsbyphase.get(phases.public, set())
2653 publicheads = headsbyphase.get(phases.public, set())
2654
2654
2655 revset = b'heads(only(%ln, %ln) and public())'
2655 revset = b'heads(only(%ln, %ln) and public())'
2656 extraheads = repo.revs(revset, draftheads, publicheads)
2656 extraheads = repo.revs(revset, draftheads, publicheads)
2657 for r in extraheads:
2657 for r in extraheads:
2658 headsbyphase[phases.public].add(node(r))
2658 headsbyphase[phases.public].add(node(r))
2659
2659
2660 # transform data in a format used by the encoding function
2660 # transform data in a format used by the encoding function
2661 phasemapping = {
2661 phasemapping = {
2662 phase: sorted(headsbyphase[phase]) for phase in phases.allphases
2662 phase: sorted(headsbyphase[phase]) for phase in phases.allphases
2663 }
2663 }
2664
2664
2665 # generate the actual part
2665 # generate the actual part
2666 phasedata = phases.binaryencode(phasemapping)
2666 phasedata = phases.binaryencode(phasemapping)
2667 bundler.newpart(b'phase-heads', data=phasedata)
2667 bundler.newpart(b'phase-heads', data=phasedata)
2668
2668
2669
2669
2670 @getbundle2partsgenerator(b'hgtagsfnodes')
2670 @getbundle2partsgenerator(b'hgtagsfnodes')
2671 def _getbundletagsfnodes(
2671 def _getbundletagsfnodes(
2672 bundler,
2672 bundler,
2673 repo,
2673 repo,
2674 source,
2674 source,
2675 bundlecaps=None,
2675 bundlecaps=None,
2676 b2caps=None,
2676 b2caps=None,
2677 heads=None,
2677 heads=None,
2678 common=None,
2678 common=None,
2679 **kwargs
2679 **kwargs
2680 ):
2680 ):
2681 """Transfer the .hgtags filenodes mapping.
2681 """Transfer the .hgtags filenodes mapping.
2682
2682
2683 Only values for heads in this bundle will be transferred.
2683 Only values for heads in this bundle will be transferred.
2684
2684
2685 The part data consists of pairs of 20 byte changeset node and .hgtags
2685 The part data consists of pairs of 20 byte changeset node and .hgtags
2686 filenodes raw values.
2686 filenodes raw values.
2687 """
2687 """
2688 # Don't send unless:
2688 # Don't send unless:
2689 # - changesets are being exchanged,
2689 # - changesets are being exchanged,
2690 # - the client supports it.
2690 # - the client supports it.
2691 if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
2691 if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
2692 return
2692 return
2693
2693
2694 outgoing = _computeoutgoing(repo, heads, common)
2694 outgoing = _computeoutgoing(repo, heads, common)
2695 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2695 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
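As a reading aid for the part format described in the docstring, a hedged
sketch of walking the payload, assuming each .hgtags filenode is also 20 bytes
(the helper name is made up for illustration):

    def iter_hgtagsfnodes(data):  # hypothetical helper, not part of the diff
        # payload assumed to be concatenated 40-byte records:
        # 20-byte changeset node followed by 20-byte .hgtags filenode
        for offset in range(0, len(data), 40):
            record = data[offset:offset + 40]
            yield record[:20], record[20:40]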
2696
2696
2697
2697
2698 @getbundle2partsgenerator(b'cache:rev-branch-cache')
2698 @getbundle2partsgenerator(b'cache:rev-branch-cache')
2699 def _getbundlerevbranchcache(
2699 def _getbundlerevbranchcache(
2700 bundler,
2700 bundler,
2701 repo,
2701 repo,
2702 source,
2702 source,
2703 bundlecaps=None,
2703 bundlecaps=None,
2704 b2caps=None,
2704 b2caps=None,
2705 heads=None,
2705 heads=None,
2706 common=None,
2706 common=None,
2707 **kwargs
2707 **kwargs
2708 ):
2708 ):
2709 """Transfer the rev-branch-cache mapping
2709 """Transfer the rev-branch-cache mapping
2710
2710
2711 The payload is a series of data related to each branch
2711 The payload is a series of data related to each branch
2712
2712
2713 1) branch name length
2713 1) branch name length
2714 2) number of open heads
2714 2) number of open heads
2715 3) number of closed heads
2715 3) number of closed heads
2716 4) open heads nodes
2716 4) open heads nodes
2717 5) closed heads nodes
2717 5) closed heads nodes
2718 """
2718 """
2719 # Don't send unless:
2719 # Don't send unless:
2720 # - changesets are being exchanged,
2720 # - changesets are being exchanged,
2721 # - the client supports it.
2721 # - the client supports it.
2722 # - narrow bundle isn't in play (not currently compatible).
2722 # - narrow bundle isn't in play (not currently compatible).
2723 if (
2723 if (
2724 not kwargs.get('cg', True)
2724 not kwargs.get('cg', True)
2725 or not b2caps
2725 or not b2caps
2726 or b'rev-branch-cache' not in b2caps
2726 or b'rev-branch-cache' not in b2caps
2727 or kwargs.get('narrow', False)
2727 or kwargs.get('narrow', False)
2728 or repo.ui.has_section(_NARROWACL_SECTION)
2728 or repo.ui.has_section(_NARROWACL_SECTION)
2729 ):
2729 ):
2730 return
2730 return
2731
2731
2732 outgoing = _computeoutgoing(repo, heads, common)
2732 outgoing = _computeoutgoing(repo, heads, common)
2733 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2733 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
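A schematic view of one rev-branch-cache record, strictly following the
numbered list in the docstring above (field widths are not spelled out in
this hunk):

    # <branch name length> <open head count> <closed head count>
    # <open head nodes ...> <closed head nodes ...>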
2734
2734
2735
2735
2736 def check_heads(repo, their_heads, context):
2736 def check_heads(repo, their_heads, context):
2737 """check if the heads of a repo have been modified
2737 """check if the heads of a repo have been modified
2738
2738
2739 Used by peer for unbundling.
2739 Used by peer for unbundling.
2740 """
2740 """
2741 heads = repo.heads()
2741 heads = repo.heads()
2742 heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()
2742 heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()
2743 if not (
2743 if not (
2744 their_heads == [b'force']
2744 their_heads == [b'force']
2745 or their_heads == heads
2745 or their_heads == heads
2746 or their_heads == [b'hashed', heads_hash]
2746 or their_heads == [b'hashed', heads_hash]
2747 ):
2747 ):
2748 # someone else committed/pushed/unbundled while we
2748 # someone else committed/pushed/unbundled while we
2749 # were transferring data
2749 # were transferring data
2750 raise error.PushRaced(
2750 raise error.PushRaced(
2751 b'repository changed while %s - please try again' % context
2751 b'repository changed while %s - please try again' % context
2752 )
2752 )
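A minimal client-side sketch of producing the [b'hashed', digest] form that
this check accepts; remote_heads is a hypothetical list of head nodes fetched
from the server before the upload started:

    from mercurial.utils import hashutil
    their_heads = [
        b'hashed',
        hashutil.sha1(b''.join(sorted(remote_heads))).digest(),
    ]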
2753
2753
2754
2754
2755 def unbundle(repo, cg, heads, source, url):
2755 def unbundle(repo, cg, heads, source, url):
2756 """Apply a bundle to a repo.
2756 """Apply a bundle to a repo.
2757
2757
2758 this function makes sure the repo is locked during the application and has a
2758 this function makes sure the repo is locked during the application and has a
2759 mechanism to check that no push race occurred between the creation of the
2759 mechanism to check that no push race occurred between the creation of the
2760 bundle and its application.
2760 bundle and its application.
2761
2761
2762 If the push was raced, a PushRaced exception is raised."""
2762 If the push was raced, a PushRaced exception is raised."""
2763 r = 0
2763 r = 0
2764 # need a transaction when processing a bundle2 stream
2764 # need a transaction when processing a bundle2 stream
2765 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2765 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2766 lockandtr = [None, None, None]
2766 lockandtr = [None, None, None]
2767 recordout = None
2767 recordout = None
2768 # quick fix for output mismatch with bundle2 in 3.4
2768 # quick fix for output mismatch with bundle2 in 3.4
2769 captureoutput = repo.ui.configbool(
2769 captureoutput = repo.ui.configbool(
2770 b'experimental', b'bundle2-output-capture'
2770 b'experimental', b'bundle2-output-capture'
2771 )
2771 )
2772 if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
2772 if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
2773 captureoutput = True
2773 captureoutput = True
2774 try:
2774 try:
2775 # note: outside bundle1, 'heads' is expected to be empty and this
2775 # note: outside bundle1, 'heads' is expected to be empty and this
2776 # 'check_heads' call will be a no-op
2776 # 'check_heads' call will be a no-op
2777 check_heads(repo, heads, b'uploading changes')
2777 check_heads(repo, heads, b'uploading changes')
2778 # push can proceed
2778 # push can proceed
2779 if not isinstance(cg, bundle2.unbundle20):
2779 if not isinstance(cg, bundle2.unbundle20):
2780 # legacy case: bundle1 (changegroup 01)
2780 # legacy case: bundle1 (changegroup 01)
2781 txnname = b"\n".join([source, util.hidepassword(url)])
2781 txnname = b"\n".join([source, util.hidepassword(url)])
2782 with repo.lock(), repo.transaction(txnname) as tr:
2782 with repo.lock(), repo.transaction(txnname) as tr:
2783 op = bundle2.applybundle(repo, cg, tr, source, url)
2783 op = bundle2.applybundle(repo, cg, tr, source, url)
2784 r = bundle2.combinechangegroupresults(op)
2784 r = bundle2.combinechangegroupresults(op)
2785 else:
2785 else:
2786 r = None
2786 r = None
2787 try:
2787 try:
2788
2788
2789 def gettransaction():
2789 def gettransaction():
2790 if not lockandtr[2]:
2790 if not lockandtr[2]:
2791 if not bookmod.bookmarksinstore(repo):
2791 if not bookmod.bookmarksinstore(repo):
2792 lockandtr[0] = repo.wlock()
2792 lockandtr[0] = repo.wlock()
2793 lockandtr[1] = repo.lock()
2793 lockandtr[1] = repo.lock()
2794 lockandtr[2] = repo.transaction(source)
2794 lockandtr[2] = repo.transaction(source)
2795 lockandtr[2].hookargs[b'source'] = source
2795 lockandtr[2].hookargs[b'source'] = source
2796 lockandtr[2].hookargs[b'url'] = url
2796 lockandtr[2].hookargs[b'url'] = url
2797 lockandtr[2].hookargs[b'bundle2'] = b'1'
2797 lockandtr[2].hookargs[b'bundle2'] = b'1'
2798 return lockandtr[2]
2798 return lockandtr[2]
2799
2799
2800 # Do greedy locking by default until we're satisfied with lazy
2800 # Do greedy locking by default until we're satisfied with lazy
2801 # locking.
2801 # locking.
2802 if not repo.ui.configbool(
2802 if not repo.ui.configbool(
2803 b'experimental', b'bundle2lazylocking'
2803 b'experimental', b'bundle2lazylocking'
2804 ):
2804 ):
2805 gettransaction()
2805 gettransaction()
2806
2806
2807 op = bundle2.bundleoperation(
2807 op = bundle2.bundleoperation(
2808 repo,
2808 repo,
2809 gettransaction,
2809 gettransaction,
2810 captureoutput=captureoutput,
2810 captureoutput=captureoutput,
2811 source=b'push',
2811 source=b'push',
2812 )
2812 )
2813 try:
2813 try:
2814 op = bundle2.processbundle(repo, cg, op=op)
2814 op = bundle2.processbundle(repo, cg, op=op)
2815 finally:
2815 finally:
2816 r = op.reply
2816 r = op.reply
2817 if captureoutput and r is not None:
2817 if captureoutput and r is not None:
2818 repo.ui.pushbuffer(error=True, subproc=True)
2818 repo.ui.pushbuffer(error=True, subproc=True)
2819
2819
2820 def recordout(output):
2820 def recordout(output):
2821 r.newpart(b'output', data=output, mandatory=False)
2821 r.newpart(b'output', data=output, mandatory=False)
2822
2822
2823 if lockandtr[2] is not None:
2823 if lockandtr[2] is not None:
2824 lockandtr[2].close()
2824 lockandtr[2].close()
2825 except BaseException as exc:
2825 except BaseException as exc:
2826 exc.duringunbundle2 = True
2826 exc.duringunbundle2 = True
2827 if captureoutput and r is not None:
2827 if captureoutput and r is not None:
2828 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2828 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2829
2829
2830 def recordout(output):
2830 def recordout(output):
2831 part = bundle2.bundlepart(
2831 part = bundle2.bundlepart(
2832 b'output', data=output, mandatory=False
2832 b'output', data=output, mandatory=False
2833 )
2833 )
2834 parts.append(part)
2834 parts.append(part)
2835
2835
2836 raise
2836 raise
2837 finally:
2837 finally:
2838 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2838 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2839 if recordout is not None:
2839 if recordout is not None:
2840 recordout(repo.ui.popbuffer())
2840 recordout(repo.ui.popbuffer())
2841 return r
2841 return r
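Greedy locking stays the default above; the lazy path is opt-in through an
experimental knob, e.g. (illustrative hgrc snippet):

    [experimental]
    bundle2lazylocking = True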
2842
2842
2843
2843
2844 def _maybeapplyclonebundle(pullop):
2844 def _maybeapplyclonebundle(pullop):
2845 """Apply a clone bundle from a remote, if possible."""
2845 """Apply a clone bundle from a remote, if possible."""
2846
2846
2847 repo = pullop.repo
2847 repo = pullop.repo
2848 remote = pullop.remote
2848 remote = pullop.remote
2849
2849
2850 if not repo.ui.configbool(b'ui', b'clonebundles'):
2850 if not repo.ui.configbool(b'ui', b'clonebundles'):
2851 return
2851 return
2852
2852
2853 # Only run if local repo is empty.
2853 # Only run if local repo is empty.
2854 if len(repo):
2854 if len(repo):
2855 return
2855 return
2856
2856
2857 if pullop.heads:
2857 if pullop.heads:
2858 return
2858 return
2859
2859
2860 if not remote.capable(b'clonebundles'):
2860 if not remote.capable(b'clonebundles'):
2861 return
2861 return
2862
2862
2863 with remote.commandexecutor() as e:
2863 with remote.commandexecutor() as e:
2864 res = e.callcommand(b'clonebundles', {}).result()
2864 res = e.callcommand(b'clonebundles', {}).result()
2865
2865
2866 # If we call the wire protocol command, that's good enough to record the
2866 # If we call the wire protocol command, that's good enough to record the
2867 # attempt.
2867 # attempt.
2868 pullop.clonebundleattempted = True
2868 pullop.clonebundleattempted = True
2869
2869
2870 entries = parseclonebundlesmanifest(repo, res)
2870 entries = parseclonebundlesmanifest(repo, res)
2871 if not entries:
2871 if not entries:
2872 repo.ui.note(
2872 repo.ui.note(
2873 _(
2873 _(
2874 b'no clone bundles available on remote; '
2874 b'no clone bundles available on remote; '
2875 b'falling back to regular clone\n'
2875 b'falling back to regular clone\n'
2876 )
2876 )
2877 )
2877 )
2878 return
2878 return
2879
2879
2880 entries = filterclonebundleentries(
2880 entries = filterclonebundleentries(
2881 repo, entries, streamclonerequested=pullop.streamclonerequested
2881 repo, entries, streamclonerequested=pullop.streamclonerequested
2882 )
2882 )
2883
2883
2884 if not entries:
2884 if not entries:
2885 # There is a thundering herd concern here. However, if a server
2885 # There is a thundering herd concern here. However, if a server
2886 # operator doesn't advertise bundles appropriate for its clients,
2886 # operator doesn't advertise bundles appropriate for its clients,
2887 # they deserve what's coming. Furthermore, from a client's
2887 # they deserve what's coming. Furthermore, from a client's
2888 # perspective, no automatic fallback would mean not being able to
2888 # perspective, no automatic fallback would mean not being able to
2889 # clone!
2889 # clone!
2890 repo.ui.warn(
2890 repo.ui.warn(
2891 _(
2891 _(
2892 b'no compatible clone bundles available on server; '
2892 b'no compatible clone bundles available on server; '
2893 b'falling back to regular clone\n'
2893 b'falling back to regular clone\n'
2894 )
2894 )
2895 )
2895 )
2896 repo.ui.warn(
2896 repo.ui.warn(
2897 _(b'(you may want to report this to the server operator)\n')
2897 _(b'(you may want to report this to the server operator)\n')
2898 )
2898 )
2899 return
2899 return
2900
2900
2901 entries = sortclonebundleentries(repo.ui, entries)
2901 entries = sortclonebundleentries(repo.ui, entries)
2902
2902
2903 url = entries[0][b'URL']
2903 url = entries[0][b'URL']
2904 repo.ui.status(_(b'applying clone bundle from %s\n') % url)
2904 repo.ui.status(_(b'applying clone bundle from %s\n') % url)
2905 if trypullbundlefromurl(repo.ui, repo, url):
2905 if trypullbundlefromurl(repo.ui, repo, url):
2906 repo.ui.status(_(b'finished applying clone bundle\n'))
2906 repo.ui.status(_(b'finished applying clone bundle\n'))
2907 # Bundle failed.
2907 # Bundle failed.
2908 #
2908 #
2909 # We abort by default to avoid the thundering herd of
2909 # We abort by default to avoid the thundering herd of
2910 # clients flooding a server that was expecting expensive
2910 # clients flooding a server that was expecting expensive
2911 # clone load to be offloaded.
2911 # clone load to be offloaded.
2912 elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
2912 elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
2913 repo.ui.warn(_(b'falling back to normal clone\n'))
2913 repo.ui.warn(_(b'falling back to normal clone\n'))
2914 else:
2914 else:
2915 raise error.Abort(
2915 raise error.Abort(
2916 _(b'error applying bundle'),
2916 _(b'error applying bundle'),
2917 hint=_(
2917 hint=_(
2918 b'if this error persists, consider contacting '
2918 b'if this error persists, consider contacting '
2919 b'the server operator or disable clone '
2919 b'the server operator or disable clone '
2920 b'bundles via '
2920 b'bundles via '
2921 b'"--config ui.clonebundles=false"'
2921 b'"--config ui.clonebundles=false"'
2922 ),
2922 ),
2923 )
2923 )
2924
2924
2925
2925
2926 def parseclonebundlesmanifest(repo, s):
2926 def parseclonebundlesmanifest(repo, s):
2927 """Parses the raw text of a clone bundles manifest.
2927 """Parses the raw text of a clone bundles manifest.
2928
2928
2929 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2929 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2930 to the URL and other keys are the attributes for the entry.
2930 to the URL and other keys are the attributes for the entry.
2931 """
2931 """
2932 m = []
2932 m = []
2933 for line in s.splitlines():
2933 for line in s.splitlines():
2934 fields = line.split()
2934 fields = line.split()
2935 if not fields:
2935 if not fields:
2936 continue
2936 continue
2937 attrs = {b'URL': fields[0]}
2937 attrs = {b'URL': fields[0]}
2938 for rawattr in fields[1:]:
2938 for rawattr in fields[1:]:
2939 key, value = rawattr.split(b'=', 1)
2939 key, value = rawattr.split(b'=', 1)
2940 key = urlreq.unquote(key)
2940 key = urlreq.unquote(key)
2941 value = urlreq.unquote(value)
2941 value = urlreq.unquote(value)
2942 attrs[key] = value
2942 attrs[key] = value
2943
2943
2944 # Parse BUNDLESPEC into components. This makes client-side
2944 # Parse BUNDLESPEC into components. This makes client-side
2945 # preferences easier to specify since you can prefer a single
2945 # preferences easier to specify since you can prefer a single
2946 # component of the BUNDLESPEC.
2946 # component of the BUNDLESPEC.
2947 if key == b'BUNDLESPEC':
2947 if key == b'BUNDLESPEC':
2948 try:
2948 try:
2949 bundlespec = parsebundlespec(repo, value)
2949 bundlespec = parsebundlespec(repo, value)
2950 attrs[b'COMPRESSION'] = bundlespec.compression
2950 attrs[b'COMPRESSION'] = bundlespec.compression
2951 attrs[b'VERSION'] = bundlespec.version
2951 attrs[b'VERSION'] = bundlespec.version
2952 except error.InvalidBundleSpecification:
2952 except error.InvalidBundleSpecification:
2953 pass
2953 pass
2954 except error.UnsupportedBundleSpecification:
2954 except error.UnsupportedBundleSpecification:
2955 pass
2955 pass
2956
2956
2957 m.append(attrs)
2957 m.append(attrs)
2958
2958
2959 return m
2959 return m
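For example, a single (hypothetical) manifest line and the entry this parser
would produce from it, with BUNDLESPEC expanded into COMPRESSION/VERSION:

    # input line:
    #   https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
    # resulting entry:
    #   {b'URL': b'https://example.com/full.hg', b'BUNDLESPEC': b'gzip-v2',
    #    b'REQUIRESNI': b'true', b'COMPRESSION': b'gzip', b'VERSION': b'v2'}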
2960
2960
2961
2961
2962 def isstreamclonespec(bundlespec):
2962 def isstreamclonespec(bundlespec):
2963 # Stream clone v1
2963 # Stream clone v1
2964 if bundlespec.wirecompression == b'UN' and bundlespec.wireversion == b's1':
2964 if bundlespec.wirecompression == b'UN' and bundlespec.wireversion == b's1':
2965 return True
2965 return True
2966
2966
2967 # Stream clone v2
2967 # Stream clone v2
2968 if (
2968 if (
2969 bundlespec.wirecompression == b'UN'
2969 bundlespec.wirecompression == b'UN'
2970 and bundlespec.wireversion == b'02'
2970 and bundlespec.wireversion == b'02'
2971 and bundlespec.contentopts.get(b'streamv2')
2971 and bundlespec.contentopts.get(b'streamv2')
2972 ):
2972 ):
2973 return True
2973 return True
2974
2974
2975 return False
2975 return False
2976
2976
2977
2977
2978 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2978 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2979 """Remove incompatible clone bundle manifest entries.
2979 """Remove incompatible clone bundle manifest entries.
2980
2980
2981 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2981 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2982 and returns a new list consisting of only the entries that this client
2982 and returns a new list consisting of only the entries that this client
2983 should be able to apply.
2983 should be able to apply.
2984
2984
2985 There is no guarantee we'll be able to apply all returned entries because
2985 There is no guarantee we'll be able to apply all returned entries because
2986 the metadata we use to filter on may be missing or wrong.
2986 the metadata we use to filter on may be missing or wrong.
2987 """
2987 """
2988 newentries = []
2988 newentries = []
2989 for entry in entries:
2989 for entry in entries:
2990 spec = entry.get(b'BUNDLESPEC')
2990 spec = entry.get(b'BUNDLESPEC')
2991 if spec:
2991 if spec:
2992 try:
2992 try:
2993 bundlespec = parsebundlespec(repo, spec, strict=True)
2993 bundlespec = parsebundlespec(repo, spec, strict=True)
2994
2994
2995 # If a stream clone was requested, filter out non-streamclone
2995 # If a stream clone was requested, filter out non-streamclone
2996 # entries.
2996 # entries.
2997 if streamclonerequested and not isstreamclonespec(bundlespec):
2997 if streamclonerequested and not isstreamclonespec(bundlespec):
2998 repo.ui.debug(
2998 repo.ui.debug(
2999 b'filtering %s because not a stream clone\n'
2999 b'filtering %s because not a stream clone\n'
3000 % entry[b'URL']
3000 % entry[b'URL']
3001 )
3001 )
3002 continue
3002 continue
3003
3003
3004 except error.InvalidBundleSpecification as e:
3004 except error.InvalidBundleSpecification as e:
3005 repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
3005 repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
3006 continue
3006 continue
3007 except error.UnsupportedBundleSpecification as e:
3007 except error.UnsupportedBundleSpecification as e:
3008 repo.ui.debug(
3008 repo.ui.debug(
3009 b'filtering %s because unsupported bundle '
3009 b'filtering %s because unsupported bundle '
3010 b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
3010 b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
3011 )
3011 )
3012 continue
3012 continue
3013 # If we don't have a spec and requested a stream clone, we don't know
3013 # If we don't have a spec and requested a stream clone, we don't know
3014 # what the entry is so don't attempt to apply it.
3014 # what the entry is so don't attempt to apply it.
3015 elif streamclonerequested:
3015 elif streamclonerequested:
3016 repo.ui.debug(
3016 repo.ui.debug(
3017 b'filtering %s because cannot determine if a stream '
3017 b'filtering %s because cannot determine if a stream '
3018 b'clone bundle\n' % entry[b'URL']
3018 b'clone bundle\n' % entry[b'URL']
3019 )
3019 )
3020 continue
3020 continue
3021
3021
3022 if b'REQUIRESNI' in entry and not sslutil.hassni:
3022 if b'REQUIRESNI' in entry and not sslutil.hassni:
3023 repo.ui.debug(
3023 repo.ui.debug(
3024 b'filtering %s because SNI not supported\n' % entry[b'URL']
3024 b'filtering %s because SNI not supported\n' % entry[b'URL']
3025 )
3025 )
3026 continue
3026 continue
3027
3027
3028 if b'REQUIREDRAM' in entry:
3028 if b'REQUIREDRAM' in entry:
3029 try:
3029 try:
3030 requiredram = util.sizetoint(entry[b'REQUIREDRAM'])
3030 requiredram = util.sizetoint(entry[b'REQUIREDRAM'])
3031 except error.ParseError:
3031 except error.ParseError:
3032 repo.ui.debug(
3032 repo.ui.debug(
3033 b'filtering %s due to a bad REQUIREDRAM attribute\n'
3033 b'filtering %s due to a bad REQUIREDRAM attribute\n'
3034 % entry[b'URL']
3034 % entry[b'URL']
3035 )
3035 )
3036 continue
3036 continue
3037 actualram = repo.ui.estimatememory()
3037 actualram = repo.ui.estimatememory()
3038 if actualram is not None and actualram * 0.66 < requiredram:
3038 if actualram is not None and actualram * 0.66 < requiredram:
3039 repo.ui.debug(
3039 repo.ui.debug(
3040 b'filtering %s as it needs more than 2/3 of system memory\n'
3040 b'filtering %s as it needs more than 2/3 of system memory\n'
3041 % entry[b'URL']
3041 % entry[b'URL']
3042 )
3042 )
3043 continue
3043 continue
3044
3044
3045 newentries.append(entry)
3045 newentries.append(entry)
3046
3046
3047 return newentries
3047 return newentries
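A worked instance of the REQUIREDRAM rule applied above, with illustrative
numbers:

    # REQUIREDRAM=12GB against roughly 16GB of estimated memory:
    #   16GB * 0.66 ~= 10.6GB, which is below 12GB -> entry is filtered out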
3048
3048
3049
3049
3050 class clonebundleentry(object):
3050 class clonebundleentry(object):
3051 """Represents an item in a clone bundles manifest.
3051 """Represents an item in a clone bundles manifest.
3052
3052
3053 This rich class is needed to support sorting since sorted() in Python 3
3053 This rich class is needed to support sorting since sorted() in Python 3
3054 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
3054 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
3055 won't work.
3055 won't work.
3056 """
3056 """
3057
3057
3058 def __init__(self, value, prefers):
3058 def __init__(self, value, prefers):
3059 self.value = value
3059 self.value = value
3060 self.prefers = prefers
3060 self.prefers = prefers
3061
3061
3062 def _cmp(self, other):
3062 def _cmp(self, other):
3063 for prefkey, prefvalue in self.prefers:
3063 for prefkey, prefvalue in self.prefers:
3064 avalue = self.value.get(prefkey)
3064 avalue = self.value.get(prefkey)
3065 bvalue = other.value.get(prefkey)
3065 bvalue = other.value.get(prefkey)
3066
3066
3067 # Special case: b is missing the attribute and a matches exactly.
3067 # Special case: b is missing the attribute and a matches exactly.
3068 if avalue is not None and bvalue is None and avalue == prefvalue:
3068 if avalue is not None and bvalue is None and avalue == prefvalue:
3069 return -1
3069 return -1
3070
3070
3071 # Special case: a is missing the attribute and b matches exactly.
3071 # Special case: a is missing the attribute and b matches exactly.
3072 if bvalue is not None and avalue is None and bvalue == prefvalue:
3072 if bvalue is not None and avalue is None and bvalue == prefvalue:
3073 return 1
3073 return 1
3074
3074
3075 # We can't compare unless attribute present on both.
3075 # We can't compare unless attribute present on both.
3076 if avalue is None or bvalue is None:
3076 if avalue is None or bvalue is None:
3077 continue
3077 continue
3078
3078
3079 # Same values should fall back to next attribute.
3079 # Same values should fall back to next attribute.
3080 if avalue == bvalue:
3080 if avalue == bvalue:
3081 continue
3081 continue
3082
3082
3083 # Exact matches come first.
3083 # Exact matches come first.
3084 if avalue == prefvalue:
3084 if avalue == prefvalue:
3085 return -1
3085 return -1
3086 if bvalue == prefvalue:
3086 if bvalue == prefvalue:
3087 return 1
3087 return 1
3088
3088
3089 # Fall back to next attribute.
3089 # Fall back to next attribute.
3090 continue
3090 continue
3091
3091
3092 # If we got here we couldn't sort by attributes and prefers. Fall
3092 # If we got here we couldn't sort by attributes and prefers. Fall
3093 # back to index order.
3093 # back to index order.
3094 return 0
3094 return 0
3095
3095
3096 def __lt__(self, other):
3096 def __lt__(self, other):
3097 return self._cmp(other) < 0
3097 return self._cmp(other) < 0
3098
3098
3099 def __gt__(self, other):
3099 def __gt__(self, other):
3100 return self._cmp(other) > 0
3100 return self._cmp(other) > 0
3101
3101
3102 def __eq__(self, other):
3102 def __eq__(self, other):
3103 return self._cmp(other) == 0
3103 return self._cmp(other) == 0
3104
3104
3105 def __le__(self, other):
3105 def __le__(self, other):
3106 return self._cmp(other) <= 0
3106 return self._cmp(other) <= 0
3107
3107
3108 def __ge__(self, other):
3108 def __ge__(self, other):
3109 return self._cmp(other) >= 0
3109 return self._cmp(other) >= 0
3110
3110
3111 def __ne__(self, other):
3111 def __ne__(self, other):
3112 return self._cmp(other) != 0
3112 return self._cmp(other) != 0
3113
3113
3114
3114
3115 def sortclonebundleentries(ui, entries):
3115 def sortclonebundleentries(ui, entries):
3116 prefers = ui.configlist(b'ui', b'clonebundleprefers')
3116 prefers = ui.configlist(b'ui', b'clonebundleprefers')
3117 if not prefers:
3117 if not prefers:
3118 return list(entries)
3118 return list(entries)
3119
3119
3120 def _split(p):
3120 def _split(p):
3121 if b'=' not in p:
3121 if b'=' not in p:
3122 hint = _(b"each comma separated item should be key=value pairs")
3122 hint = _(b"each comma separated item should be key=value pairs")
3123 raise error.Abort(
3123 raise error.Abort(
3124 _(b"invalid ui.clonebundleprefers item: %s") % p, hint=hint
3124 _(b"invalid ui.clonebundleprefers item: %s") % p, hint=hint
3125 )
3125 )
3126 return p.split(b'=', 1)
3126 return p.split(b'=', 1)
3127
3127
3128 prefers = [_split(p) for p in prefers]
3128 prefers = [_split(p) for p in prefers]
3129
3129
3130 items = sorted(clonebundleentry(v, prefers) for v in entries)
3130 items = sorted(clonebundleentry(v, prefers) for v in entries)
3131 return [i.value for i in items]
3131 return [i.value for i in items]
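The preference list consumed here comes from ui.clonebundleprefers as
comma-separated key=value items; an illustrative hgrc snippet (values are
examples only):

    [ui]
    clonebundleprefers = COMPRESSION=zstd, VERSION=v2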
3132
3132
3133
3133
3134 def trypullbundlefromurl(ui, repo, url):
3134 def trypullbundlefromurl(ui, repo, url):
3135 """Attempt to apply a bundle from a URL."""
3135 """Attempt to apply a bundle from a URL."""
3136 with repo.lock(), repo.transaction(b'bundleurl') as tr:
3136 with repo.lock(), repo.transaction(b'bundleurl') as tr:
3137 try:
3137 try:
3138 fh = urlmod.open(ui, url)
3138 fh = urlmod.open(ui, url)
3139 cg = readbundle(ui, fh, b'stream')
3139 cg = readbundle(ui, fh, b'stream')
3140
3140
3141 if isinstance(cg, streamclone.streamcloneapplier):
3141 if isinstance(cg, streamclone.streamcloneapplier):
3142 cg.apply(repo)
3142 cg.apply(repo)
3143 else:
3143 else:
3144 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
3144 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
3145 return True
3145 return True
3146 except urlerr.httperror as e:
3146 except urlerr.httperror as e:
3147 ui.warn(
3147 ui.warn(
3148 _(b'HTTP error fetching bundle: %s\n')
3148 _(b'HTTP error fetching bundle: %s\n')
3149 % stringutil.forcebytestr(e)
3149 % stringutil.forcebytestr(e)
3150 )
3150 )
3151 except urlerr.urlerror as e:
3151 except urlerr.urlerror as e:
3152 ui.warn(
3152 ui.warn(
3153 _(b'error fetching bundle: %s\n')
3153 _(b'error fetching bundle: %s\n')
3154 % stringutil.forcebytestr(e.reason)
3154 % stringutil.forcebytestr(e.reason)
3155 )
3155 )
3156
3156
3157 return False
3157 return False
@@ -1,3803 +1,3803 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import random
12 import random
13 import sys
13 import sys
14 import time
14 import time
15 import weakref
15 import weakref
16
16
17 from .i18n import _
17 from .i18n import _
18 from .node import (
18 from .node import (
19 bin,
19 bin,
20 hex,
20 hex,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 )
24 )
25 from .pycompat import (
25 from .pycompat import (
26 delattr,
26 delattr,
27 getattr,
27 getattr,
28 )
28 )
29 from . import (
29 from . import (
30 bookmarks,
30 bookmarks,
31 branchmap,
31 branchmap,
32 bundle2,
32 bundle2,
33 changegroup,
33 changegroup,
34 color,
34 color,
35 context,
35 context,
36 dirstate,
36 dirstate,
37 dirstateguard,
37 dirstateguard,
38 discovery,
38 discovery,
39 encoding,
39 encoding,
40 error,
40 error,
41 exchange,
41 exchange,
42 extensions,
42 extensions,
43 filelog,
43 filelog,
44 hook,
44 hook,
45 lock as lockmod,
45 lock as lockmod,
46 match as matchmod,
46 match as matchmod,
47 mergestate as mergestatemod,
47 mergestate as mergestatemod,
48 mergeutil,
48 mergeutil,
49 metadata,
49 metadata,
50 namespaces,
50 namespaces,
51 narrowspec,
51 narrowspec,
52 obsolete,
52 obsolete,
53 pathutil,
53 pathutil,
54 phases,
54 phases,
55 pushkey,
55 pushkey,
56 pycompat,
56 pycompat,
57 rcutil,
57 rcutil,
58 repoview,
58 repoview,
59 revset,
59 revset,
60 revsetlang,
60 revsetlang,
61 scmutil,
61 scmutil,
62 sparse,
62 sparse,
63 store as storemod,
63 store as storemod,
64 subrepoutil,
64 subrepoutil,
65 tags as tagsmod,
65 tags as tagsmod,
66 transaction,
66 transaction,
67 txnutil,
67 txnutil,
68 util,
68 util,
69 vfs as vfsmod,
69 vfs as vfsmod,
70 )
70 )
71
71
72 from .interfaces import (
72 from .interfaces import (
73 repository,
73 repository,
74 util as interfaceutil,
74 util as interfaceutil,
75 )
75 )
76
76
77 from .utils import (
77 from .utils import (
78 hashutil,
78 hashutil,
79 procutil,
79 procutil,
80 stringutil,
80 stringutil,
81 )
81 )
82
82
83 from .revlogutils import constants as revlogconst
83 from .revlogutils import constants as revlogconst
84
84
85 release = lockmod.release
85 release = lockmod.release
86 urlerr = util.urlerr
86 urlerr = util.urlerr
87 urlreq = util.urlreq
87 urlreq = util.urlreq
88
88
89 # set of (path, vfs-location) tuples. vfs-location is:
89 # set of (path, vfs-location) tuples. vfs-location is:
90 # - 'plain' for vfs relative paths
90 # - 'plain' for vfs relative paths
91 # - '' for svfs relative paths
91 # - '' for svfs relative paths
92 _cachedfiles = set()
92 _cachedfiles = set()
93
93
94
94
95 class _basefilecache(scmutil.filecache):
95 class _basefilecache(scmutil.filecache):
96 """All filecache usage on repo are done for logic that should be unfiltered
96 """All filecache usage on repo are done for logic that should be unfiltered
97 """
97 """
98
98
99 def __get__(self, repo, type=None):
99 def __get__(self, repo, type=None):
100 if repo is None:
100 if repo is None:
101 return self
101 return self
102 # proxy to unfiltered __dict__ since filtered repo has no entry
102 # proxy to unfiltered __dict__ since filtered repo has no entry
103 unfi = repo.unfiltered()
103 unfi = repo.unfiltered()
104 try:
104 try:
105 return unfi.__dict__[self.sname]
105 return unfi.__dict__[self.sname]
106 except KeyError:
106 except KeyError:
107 pass
107 pass
108 return super(_basefilecache, self).__get__(unfi, type)
108 return super(_basefilecache, self).__get__(unfi, type)
109
109
110 def set(self, repo, value):
110 def set(self, repo, value):
111 return super(_basefilecache, self).set(repo.unfiltered(), value)
111 return super(_basefilecache, self).set(repo.unfiltered(), value)
112
112
113
113
114 class repofilecache(_basefilecache):
114 class repofilecache(_basefilecache):
115 """filecache for files in .hg but outside of .hg/store"""
115 """filecache for files in .hg but outside of .hg/store"""
116
116
117 def __init__(self, *paths):
117 def __init__(self, *paths):
118 super(repofilecache, self).__init__(*paths)
118 super(repofilecache, self).__init__(*paths)
119 for path in paths:
119 for path in paths:
120 _cachedfiles.add((path, b'plain'))
120 _cachedfiles.add((path, b'plain'))
121
121
122 def join(self, obj, fname):
122 def join(self, obj, fname):
123 return obj.vfs.join(fname)
123 return obj.vfs.join(fname)
124
124
125
125
126 class storecache(_basefilecache):
126 class storecache(_basefilecache):
127 """filecache for files in the store"""
127 """filecache for files in the store"""
128
128
129 def __init__(self, *paths):
129 def __init__(self, *paths):
130 super(storecache, self).__init__(*paths)
130 super(storecache, self).__init__(*paths)
131 for path in paths:
131 for path in paths:
132 _cachedfiles.add((path, b''))
132 _cachedfiles.add((path, b''))
133
133
134 def join(self, obj, fname):
134 def join(self, obj, fname):
135 return obj.sjoin(fname)
135 return obj.sjoin(fname)
136
136
137
137
138 class mixedrepostorecache(_basefilecache):
138 class mixedrepostorecache(_basefilecache):
139 """filecache for a mix files in .hg/store and outside"""
139 """filecache for a mix files in .hg/store and outside"""
140
140
141 def __init__(self, *pathsandlocations):
141 def __init__(self, *pathsandlocations):
142 # scmutil.filecache only uses the path for passing back into our
142 # scmutil.filecache only uses the path for passing back into our
143 # join(), so we can safely pass a list of paths and locations
143 # join(), so we can safely pass a list of paths and locations
144 super(mixedrepostorecache, self).__init__(*pathsandlocations)
144 super(mixedrepostorecache, self).__init__(*pathsandlocations)
145 _cachedfiles.update(pathsandlocations)
145 _cachedfiles.update(pathsandlocations)
146
146
147 def join(self, obj, fnameandlocation):
147 def join(self, obj, fnameandlocation):
148 fname, location = fnameandlocation
148 fname, location = fnameandlocation
149 if location == b'plain':
149 if location == b'plain':
150 return obj.vfs.join(fname)
150 return obj.vfs.join(fname)
151 else:
151 else:
152 if location != b'':
152 if location != b'':
153 raise error.ProgrammingError(
153 raise error.ProgrammingError(
154 b'unexpected location: %s' % location
154 b'unexpected location: %s' % location
155 )
155 )
156 return obj.sjoin(fname)
156 return obj.sjoin(fname)
157
157
158
158
159 def isfilecached(repo, name):
159 def isfilecached(repo, name):
160 """check if a repo has already cached "name" filecache-ed property
160 """check if a repo has already cached "name" filecache-ed property
161
161
162 This returns (cachedobj-or-None, iscached) tuple.
162 This returns (cachedobj-or-None, iscached) tuple.
163 """
163 """
164 cacheentry = repo.unfiltered()._filecache.get(name, None)
164 cacheentry = repo.unfiltered()._filecache.get(name, None)
165 if not cacheentry:
165 if not cacheentry:
166 return None, False
166 return None, False
167 return cacheentry.obj, True
167 return cacheentry.obj, True
168
168
169
169
170 class unfilteredpropertycache(util.propertycache):
170 class unfilteredpropertycache(util.propertycache):
171 """propertycache that apply to unfiltered repo only"""
171 """propertycache that apply to unfiltered repo only"""
172
172
173 def __get__(self, repo, type=None):
173 def __get__(self, repo, type=None):
174 unfi = repo.unfiltered()
174 unfi = repo.unfiltered()
175 if unfi is repo:
175 if unfi is repo:
176 return super(unfilteredpropertycache, self).__get__(unfi)
176 return super(unfilteredpropertycache, self).__get__(unfi)
177 return getattr(unfi, self.name)
177 return getattr(unfi, self.name)
178
178
179
179
180 class filteredpropertycache(util.propertycache):
180 class filteredpropertycache(util.propertycache):
181 """propertycache that must take filtering in account"""
181 """propertycache that must take filtering in account"""
182
182
183 def cachevalue(self, obj, value):
183 def cachevalue(self, obj, value):
184 object.__setattr__(obj, self.name, value)
184 object.__setattr__(obj, self.name, value)
185
185
186
186
187 def hasunfilteredcache(repo, name):
187 def hasunfilteredcache(repo, name):
188 """check if a repo has an unfilteredpropertycache value for <name>"""
188 """check if a repo has an unfilteredpropertycache value for <name>"""
189 return name in vars(repo.unfiltered())
189 return name in vars(repo.unfiltered())
190
190
191
191
192 def unfilteredmethod(orig):
192 def unfilteredmethod(orig):
193 """decorate method that always need to be run on unfiltered version"""
193 """decorate method that always need to be run on unfiltered version"""
194
194
195 def wrapper(repo, *args, **kwargs):
195 def wrapper(repo, *args, **kwargs):
196 return orig(repo.unfiltered(), *args, **kwargs)
196 return orig(repo.unfiltered(), *args, **kwargs)
197
197
198 return wrapper
198 return wrapper
199
199
200
200
201 moderncaps = {
201 moderncaps = {
202 b'lookup',
202 b'lookup',
203 b'branchmap',
203 b'branchmap',
204 b'pushkey',
204 b'pushkey',
205 b'known',
205 b'known',
206 b'getbundle',
206 b'getbundle',
207 b'unbundle',
207 b'unbundle',
208 }
208 }
209 legacycaps = moderncaps.union({b'changegroupsubset'})
209 legacycaps = moderncaps.union({b'changegroupsubset'})
210
210
211
211
212 @interfaceutil.implementer(repository.ipeercommandexecutor)
212 @interfaceutil.implementer(repository.ipeercommandexecutor)
213 class localcommandexecutor(object):
213 class localcommandexecutor(object):
214 def __init__(self, peer):
214 def __init__(self, peer):
215 self._peer = peer
215 self._peer = peer
216 self._sent = False
216 self._sent = False
217 self._closed = False
217 self._closed = False
218
218
219 def __enter__(self):
219 def __enter__(self):
220 return self
220 return self
221
221
222 def __exit__(self, exctype, excvalue, exctb):
222 def __exit__(self, exctype, excvalue, exctb):
223 self.close()
223 self.close()
224
224
225 def callcommand(self, command, args):
225 def callcommand(self, command, args):
226 if self._sent:
226 if self._sent:
227 raise error.ProgrammingError(
227 raise error.ProgrammingError(
228 b'callcommand() cannot be used after sendcommands()'
228 b'callcommand() cannot be used after sendcommands()'
229 )
229 )
230
230
231 if self._closed:
231 if self._closed:
232 raise error.ProgrammingError(
232 raise error.ProgrammingError(
233 b'callcommand() cannot be used after close()'
233 b'callcommand() cannot be used after close()'
234 )
234 )
235
235
236 # We don't need to support anything fancy. Just call the named
236 # We don't need to support anything fancy. Just call the named
237 # method on the peer and return a resolved future.
237 # method on the peer and return a resolved future.
238 fn = getattr(self._peer, pycompat.sysstr(command))
238 fn = getattr(self._peer, pycompat.sysstr(command))
239
239
240 f = pycompat.futures.Future()
240 f = pycompat.futures.Future()
241
241
242 try:
242 try:
243 result = fn(**pycompat.strkwargs(args))
243 result = fn(**pycompat.strkwargs(args))
244 except Exception:
244 except Exception:
245 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
245 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
246 else:
246 else:
247 f.set_result(result)
247 f.set_result(result)
248
248
249 return f
249 return f
250
250
251 def sendcommands(self):
251 def sendcommands(self):
252 self._sent = True
252 self._sent = True
253
253
254 def close(self):
254 def close(self):
255 self._closed = True
255 self._closed = True
256
256
257
257
258 @interfaceutil.implementer(repository.ipeercommands)
258 @interfaceutil.implementer(repository.ipeercommands)
259 class localpeer(repository.peer):
259 class localpeer(repository.peer):
260 '''peer for a local repo; reflects only the most recent API'''
260 '''peer for a local repo; reflects only the most recent API'''
261
261
262 def __init__(self, repo, caps=None):
262 def __init__(self, repo, caps=None):
263 super(localpeer, self).__init__()
263 super(localpeer, self).__init__()
264
264
265 if caps is None:
265 if caps is None:
266 caps = moderncaps.copy()
266 caps = moderncaps.copy()
267 self._repo = repo.filtered(b'served')
267 self._repo = repo.filtered(b'served')
268 self.ui = repo.ui
268 self.ui = repo.ui
269 self._caps = repo._restrictcapabilities(caps)
269 self._caps = repo._restrictcapabilities(caps)
270
270
271 # Begin of _basepeer interface.
271 # Begin of _basepeer interface.
272
272
273 def url(self):
273 def url(self):
274 return self._repo.url()
274 return self._repo.url()
275
275
276 def local(self):
276 def local(self):
277 return self._repo
277 return self._repo
278
278
279 def peer(self):
279 def peer(self):
280 return self
280 return self
281
281
282 def canpush(self):
282 def canpush(self):
283 return True
283 return True
284
284
285 def close(self):
285 def close(self):
286 self._repo.close()
286 self._repo.close()
287
287
288 # End of _basepeer interface.
288 # End of _basepeer interface.
289
289
290 # Begin of _basewirecommands interface.
290 # Begin of _basewirecommands interface.
291
291
292 def branchmap(self):
292 def branchmap(self):
293 return self._repo.branchmap()
293 return self._repo.branchmap()
294
294
295 def capabilities(self):
295 def capabilities(self):
296 return self._caps
296 return self._caps
297
297
298 def clonebundles(self):
298 def clonebundles(self):
299 return self._repo.tryread(b'clonebundles.manifest')
299 return self._repo.tryread(b'clonebundles.manifest')
300
300
301 def debugwireargs(self, one, two, three=None, four=None, five=None):
301 def debugwireargs(self, one, two, three=None, four=None, five=None):
302 """Used to test argument passing over the wire"""
302 """Used to test argument passing over the wire"""
303 return b"%s %s %s %s %s" % (
303 return b"%s %s %s %s %s" % (
304 one,
304 one,
305 two,
305 two,
306 pycompat.bytestr(three),
306 pycompat.bytestr(three),
307 pycompat.bytestr(four),
307 pycompat.bytestr(four),
308 pycompat.bytestr(five),
308 pycompat.bytestr(five),
309 )
309 )
310
310
311 def getbundle(
311 def getbundle(
312 self, source, heads=None, common=None, bundlecaps=None, **kwargs
312 self, source, heads=None, common=None, bundlecaps=None, **kwargs
313 ):
313 ):
314 chunks = exchange.getbundlechunks(
314 chunks = exchange.getbundlechunks(
315 self._repo,
315 self._repo,
316 source,
316 source,
317 heads=heads,
317 heads=heads,
318 common=common,
318 common=common,
319 bundlecaps=bundlecaps,
319 bundlecaps=bundlecaps,
320 **kwargs
320 **kwargs
321 )[1]
321 )[1]
322 cb = util.chunkbuffer(chunks)
322 cb = util.chunkbuffer(chunks)
323
323
324 if exchange.bundle2requested(bundlecaps):
324 if exchange.bundle2requested(bundlecaps):
325 # When requesting a bundle2, getbundle returns a stream to make the
325 # When requesting a bundle2, getbundle returns a stream to make the
326 # wire level function happier. We need to build a proper object
326 # wire level function happier. We need to build a proper object
327 # from it in local peer.
327 # from it in local peer.
328 return bundle2.getunbundler(self.ui, cb)
328 return bundle2.getunbundler(self.ui, cb)
329 else:
329 else:
330 return changegroup.getunbundler(b'01', cb, None)
330 return changegroup.getunbundler(b'01', cb, None)
331
331
332 def heads(self):
332 def heads(self):
333 return self._repo.heads()
333 return self._repo.heads()
334
334
335 def known(self, nodes):
335 def known(self, nodes):
336 return self._repo.known(nodes)
336 return self._repo.known(nodes)
337
337
338 def listkeys(self, namespace):
338 def listkeys(self, namespace):
339 return self._repo.listkeys(namespace)
339 return self._repo.listkeys(namespace)
340
340
341 def lookup(self, key):
341 def lookup(self, key):
342 return self._repo.lookup(key)
342 return self._repo.lookup(key)
343
343
344 def pushkey(self, namespace, key, old, new):
344 def pushkey(self, namespace, key, old, new):
345 return self._repo.pushkey(namespace, key, old, new)
345 return self._repo.pushkey(namespace, key, old, new)
346
346
347 def stream_out(self):
347 def stream_out(self):
348 raise error.Abort(_(b'cannot perform stream clone against local peer'))
348 raise error.Abort(_(b'cannot perform stream clone against local peer'))
349
349
350 def unbundle(self, bundle, heads, url):
350 def unbundle(self, bundle, heads, url):
351 """apply a bundle on a repo
351 """apply a bundle on a repo
352
352
353 This function handles the repo locking itself."""
353 This function handles the repo locking itself."""
354 try:
354 try:
355 try:
355 try:
356 bundle = exchange.readbundle(self.ui, bundle, None)
356 bundle = exchange.readbundle(self.ui, bundle, None)
357 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
357 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
358 if util.safehasattr(ret, b'getchunks'):
358 if util.safehasattr(ret, b'getchunks'):
359 # This is a bundle20 object, turn it into an unbundler.
359 # This is a bundle20 object, turn it into an unbundler.
360 # This little dance should be dropped eventually when the
360 # This little dance should be dropped eventually when the
361 # API is finally improved.
361 # API is finally improved.
362 stream = util.chunkbuffer(ret.getchunks())
362 stream = util.chunkbuffer(ret.getchunks())
363 ret = bundle2.getunbundler(self.ui, stream)
363 ret = bundle2.getunbundler(self.ui, stream)
364 return ret
364 return ret
365 except Exception as exc:
365 except Exception as exc:
366 # If the exception contains output salvaged from a bundle2
366 # If the exception contains output salvaged from a bundle2
367 # reply, we need to make sure it is printed before continuing
367 # reply, we need to make sure it is printed before continuing
368 # to fail. So we build a bundle2 with such output and consume
368 # to fail. So we build a bundle2 with such output and consume
369 # it directly.
369 # it directly.
370 #
370 #
371 # This is not very elegant but allows a "simple" solution for
371 # This is not very elegant but allows a "simple" solution for
372 # issue4594
372 # issue4594
373 output = getattr(exc, '_bundle2salvagedoutput', ())
373 output = getattr(exc, '_bundle2salvagedoutput', ())
374 if output:
374 if output:
375 bundler = bundle2.bundle20(self._repo.ui)
375 bundler = bundle2.bundle20(self._repo.ui)
376 for out in output:
376 for out in output:
377 bundler.addpart(out)
377 bundler.addpart(out)
378 stream = util.chunkbuffer(bundler.getchunks())
378 stream = util.chunkbuffer(bundler.getchunks())
379 b = bundle2.getunbundler(self.ui, stream)
379 b = bundle2.getunbundler(self.ui, stream)
380 bundle2.processbundle(self._repo, b)
380 bundle2.processbundle(self._repo, b)
381 raise
381 raise
382 except error.PushRaced as exc:
382 except error.PushRaced as exc:
383 raise error.ResponseError(
383 raise error.ResponseError(
384 _(b'push failed:'), stringutil.forcebytestr(exc)
384 _(b'push failed:'), stringutil.forcebytestr(exc)
385 )
385 )
386
386
387 # End of _basewirecommands interface.
387 # End of _basewirecommands interface.
388
388
389 # Begin of peer interface.
389 # Begin of peer interface.
390
390
391 def commandexecutor(self):
391 def commandexecutor(self):
392 return localcommandexecutor(self)
392 return localcommandexecutor(self)
393
393
394 # End of peer interface.
394 # End of peer interface.
395
395
396
396
397 @interfaceutil.implementer(repository.ipeerlegacycommands)
397 @interfaceutil.implementer(repository.ipeerlegacycommands)
398 class locallegacypeer(localpeer):
398 class locallegacypeer(localpeer):
399 '''peer extension which implements legacy methods too; used for tests with
399 '''peer extension which implements legacy methods too; used for tests with
400 restricted capabilities'''
400 restricted capabilities'''
401
401
402 def __init__(self, repo):
402 def __init__(self, repo):
403 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
403 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
404
404
405 # Begin of baselegacywirecommands interface.
405 # Begin of baselegacywirecommands interface.
406
406
407 def between(self, pairs):
407 def between(self, pairs):
408 return self._repo.between(pairs)
408 return self._repo.between(pairs)
409
409
410 def branches(self, nodes):
410 def branches(self, nodes):
411 return self._repo.branches(nodes)
411 return self._repo.branches(nodes)
412
412
413 def changegroup(self, nodes, source):
413 def changegroup(self, nodes, source):
414 outgoing = discovery.outgoing(
414 outgoing = discovery.outgoing(
415 self._repo, missingroots=nodes, missingheads=self._repo.heads()
415 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
416 )
416 )
417 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
417 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
418
418
419 def changegroupsubset(self, bases, heads, source):
419 def changegroupsubset(self, bases, heads, source):
420 outgoing = discovery.outgoing(
420 outgoing = discovery.outgoing(
421 self._repo, missingroots=bases, missingheads=heads
421 self._repo, missingroots=bases, ancestorsof=heads
422 )
422 )
423 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
423 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
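Both hunks above only rename the keyword passed to discovery.outgoing from
missingheads to ancestorsof; a minimal standalone sketch of the new spelling
(repo, bases and heads are hypothetical placeholders):

    outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads)
    cg = changegroup.makechangegroup(repo, outgoing, b'01', b'serve')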
424
424
425 # End of baselegacywirecommands interface.
425 # End of baselegacywirecommands interface.
426
426
427
427
428 # Increment the sub-version when the revlog v2 format changes to lock out old
428 # Increment the sub-version when the revlog v2 format changes to lock out old
429 # clients.
429 # clients.
430 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
430 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
431
431
432 # A repository with the sparserevlog feature will have delta chains that
432 # A repository with the sparserevlog feature will have delta chains that
433 # can spread over a larger span. Sparse reading cuts these large spans into
433 # can spread over a larger span. Sparse reading cuts these large spans into
434 # pieces, so that each piece isn't too big.
434 # pieces, so that each piece isn't too big.
435 # Without the sparserevlog capability, reading from the repository could use
435 # Without the sparserevlog capability, reading from the repository could use
436 # huge amounts of memory, because the whole span would be read at once,
436 # huge amounts of memory, because the whole span would be read at once,
437 # including all the intermediate revisions that aren't pertinent for the chain.
437 # including all the intermediate revisions that aren't pertinent for the chain.
438 # This is why once a repository has enabled sparse-read, it becomes required.
438 # This is why once a repository has enabled sparse-read, it becomes required.
439 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
439 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
440
440
441 # A repository with the sidedataflag requirement will allow to store extra
441 # A repository with the sidedataflag requirement will allow storing extra
441 # A repository with the sidedataflag requirement will allow storing extra
442 # information for revisions without altering their original hashes.
442 # information for revisions without altering their original hashes.
443 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
444
444
445 # A repository with the copies-sidedata-changeset requirement will store
445 # A repository with the copies-sidedata-changeset requirement will store
446 # copies related information in changeset's sidedata.
446 # copies related information in changeset's sidedata.
447 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
447 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
448
448
449 # The repository use persistent nodemap for the changelog and the manifest.
449 # The repository uses a persistent nodemap for the changelog and the manifest.
449 # The repository uses a persistent nodemap for the changelog and the manifest.
450 NODEMAP_REQUIREMENT = b'persistent-nodemap'
451
451
452 # Functions receiving (ui, features) that extensions can register to impact
452 # Functions receiving (ui, features) that extensions can register to impact
453 # the ability to load repositories with custom requirements. Only
453 # the ability to load repositories with custom requirements. Only
454 # functions defined in loaded extensions are called.
454 # functions defined in loaded extensions are called.
455 #
455 #
456 # The function receives a set of requirement strings that the repository
456 # The function receives a set of requirement strings that the repository
457 # is capable of opening. Functions will typically add elements to the
457 # is capable of opening. Functions will typically add elements to the
458 # set to reflect that the extension knows how to handle those requirements.
458 # set to reflect that the extension knows how to handle those requirements.
459 featuresetupfuncs = set()
459 featuresetupfuncs = set()
460
460
461
461
462 def makelocalrepository(baseui, path, intents=None):
462 def makelocalrepository(baseui, path, intents=None):
463 """Create a local repository object.
463 """Create a local repository object.
464
464
465 Given arguments needed to construct a local repository, this function
465 Given arguments needed to construct a local repository, this function
466 performs various early repository loading functionality (such as
466 performs various early repository loading functionality (such as
467 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
467 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
468 the repository can be opened, derives a type suitable for representing
468 the repository can be opened, derives a type suitable for representing
469 that repository, and returns an instance of it.
469 that repository, and returns an instance of it.
470
470
471 The returned object conforms to the ``repository.completelocalrepository``
471 The returned object conforms to the ``repository.completelocalrepository``
472 interface.
472 interface.
473
473
474 The repository type is derived by calling a series of factory functions
474 The repository type is derived by calling a series of factory functions
475 for each aspect/interface of the final repository. These are defined by
475 for each aspect/interface of the final repository. These are defined by
476 ``REPO_INTERFACES``.
476 ``REPO_INTERFACES``.
477
477
478 Each factory function is called to produce a type implementing a specific
478 Each factory function is called to produce a type implementing a specific
479 interface. The cumulative list of returned types will be combined into a
479 interface. The cumulative list of returned types will be combined into a
480 new type and that type will be instantiated to represent the local
480 new type and that type will be instantiated to represent the local
481 repository.
481 repository.
482
482
483 The factory functions each receive various state that may be consulted
483 The factory functions each receive various state that may be consulted
484 as part of deriving a type.
484 as part of deriving a type.
485
485
486 Extensions should wrap these factory functions to customize repository type
486 Extensions should wrap these factory functions to customize repository type
487 creation. Note that an extension's wrapped function may be called even if
487 creation. Note that an extension's wrapped function may be called even if
488 that extension is not loaded for the repo being constructed. Extensions
488 that extension is not loaded for the repo being constructed. Extensions
489 should check if their ``__name__`` appears in the
489 should check if their ``__name__`` appears in the
490 ``extensionmodulenames`` set passed to the factory function and no-op if
490 ``extensionmodulenames`` set passed to the factory function and no-op if
491 not.
491 not.
492 """
492 """
493 ui = baseui.copy()
493 ui = baseui.copy()
494 # Prevent copying repo configuration.
494 # Prevent copying repo configuration.
495 ui.copy = baseui.copy
495 ui.copy = baseui.copy
496
496
497 # Working directory VFS rooted at repository root.
497 # Working directory VFS rooted at repository root.
498 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
498 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
499
499
500 # Main VFS for .hg/ directory.
500 # Main VFS for .hg/ directory.
501 hgpath = wdirvfs.join(b'.hg')
501 hgpath = wdirvfs.join(b'.hg')
502 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
502 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
503
503
504 # The .hg/ path should exist and should be a directory. All other
504 # The .hg/ path should exist and should be a directory. All other
505 # cases are errors.
505 # cases are errors.
506 if not hgvfs.isdir():
506 if not hgvfs.isdir():
507 try:
507 try:
508 hgvfs.stat()
508 hgvfs.stat()
509 except OSError as e:
509 except OSError as e:
510 if e.errno != errno.ENOENT:
510 if e.errno != errno.ENOENT:
511 raise
511 raise
512 except ValueError as e:
512 except ValueError as e:
513 # Can be raised on Python 3.8 when path is invalid.
513 # Can be raised on Python 3.8 when path is invalid.
514 raise error.Abort(
514 raise error.Abort(
515 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
515 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
516 )
516 )
517
517
518 raise error.RepoError(_(b'repository %s not found') % path)
518 raise error.RepoError(_(b'repository %s not found') % path)
519
519
520 # .hg/requires file contains a newline-delimited list of
520 # .hg/requires file contains a newline-delimited list of
521 # features/capabilities the opener (us) must have in order to use
521 # features/capabilities the opener (us) must have in order to use
522 # the repository. This file was introduced in Mercurial 0.9.2,
522 # the repository. This file was introduced in Mercurial 0.9.2,
523 # which means very old repositories may not have one. We assume
523 # which means very old repositories may not have one. We assume
524 # a missing file translates to no requirements.
524 # a missing file translates to no requirements.
525 try:
525 try:
526 requirements = set(hgvfs.read(b'requires').splitlines())
526 requirements = set(hgvfs.read(b'requires').splitlines())
527 except IOError as e:
527 except IOError as e:
528 if e.errno != errno.ENOENT:
528 if e.errno != errno.ENOENT:
529 raise
529 raise
530 requirements = set()
530 requirements = set()
531
531
532 # The .hg/hgrc file may load extensions or contain config options
532 # The .hg/hgrc file may load extensions or contain config options
533 # that influence repository construction. Attempt to load it and
533 # that influence repository construction. Attempt to load it and
534 # process any new extensions that it may have pulled in.
534 # process any new extensions that it may have pulled in.
535 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
535 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
536 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
536 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
537 extensions.loadall(ui)
537 extensions.loadall(ui)
538 extensions.populateui(ui)
538 extensions.populateui(ui)
539
539
540 # Set of module names of extensions loaded for this repository.
540 # Set of module names of extensions loaded for this repository.
541 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
541 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
542
542
543 supportedrequirements = gathersupportedrequirements(ui)
543 supportedrequirements = gathersupportedrequirements(ui)
544
544
545 # We first validate the requirements are known.
545 # We first validate the requirements are known.
546 ensurerequirementsrecognized(requirements, supportedrequirements)
546 ensurerequirementsrecognized(requirements, supportedrequirements)
547
547
548 # Then we validate that the known set is reasonable to use together.
548 # Then we validate that the known set is reasonable to use together.
549 ensurerequirementscompatible(ui, requirements)
549 ensurerequirementscompatible(ui, requirements)
550
550
551 # TODO there are unhandled edge cases related to opening repositories with
551 # TODO there are unhandled edge cases related to opening repositories with
552 # shared storage. If storage is shared, we should also test for requirements
552 # shared storage. If storage is shared, we should also test for requirements
553 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
553 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
554 # that repo, as that repo may load extensions needed to open it. This is a
554 # that repo, as that repo may load extensions needed to open it. This is a
555 # bit complicated because we don't want the other hgrc to overwrite settings
555 # bit complicated because we don't want the other hgrc to overwrite settings
556 # in this hgrc.
556 # in this hgrc.
557 #
557 #
558 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
558 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
559 # file when sharing repos. But if a requirement is added after the share is
559 # file when sharing repos. But if a requirement is added after the share is
560 # performed, thereby introducing a new requirement for the opener, we may
560 # performed, thereby introducing a new requirement for the opener, we may
561 # will not see that and could encounter a run-time error interacting with
561 # will not see that and could encounter a run-time error interacting with
562 # that shared store since it has an unknown-to-us requirement.
562 # that shared store since it has an unknown-to-us requirement.
563
563
564 # At this point, we know we should be capable of opening the repository.
564 # At this point, we know we should be capable of opening the repository.
565 # Now get on with doing that.
565 # Now get on with doing that.
566
566
567 features = set()
567 features = set()
568
568
569 # The "store" part of the repository holds versioned data. How it is
569 # The "store" part of the repository holds versioned data. How it is
570 # accessed is determined by various requirements. The ``shared`` or
570 # accessed is determined by various requirements. The ``shared`` or
571 # ``relshared`` requirements indicate the store lives in the path contained
571 # ``relshared`` requirements indicate the store lives in the path contained
572 # in the ``.hg/sharedpath`` file. This is an absolute path for
572 # in the ``.hg/sharedpath`` file. This is an absolute path for
573 # ``shared`` and relative to ``.hg/`` for ``relshared``.
573 # ``shared`` and relative to ``.hg/`` for ``relshared``.
574 if b'shared' in requirements or b'relshared' in requirements:
574 if b'shared' in requirements or b'relshared' in requirements:
575 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
575 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
576 if b'relshared' in requirements:
576 if b'relshared' in requirements:
577 sharedpath = hgvfs.join(sharedpath)
577 sharedpath = hgvfs.join(sharedpath)
578
578
579 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
579 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
580
580
581 if not sharedvfs.exists():
581 if not sharedvfs.exists():
582 raise error.RepoError(
582 raise error.RepoError(
583 _(b'.hg/sharedpath points to nonexistent directory %s')
583 _(b'.hg/sharedpath points to nonexistent directory %s')
584 % sharedvfs.base
584 % sharedvfs.base
585 )
585 )
586
586
587 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
587 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
588
588
589 storebasepath = sharedvfs.base
589 storebasepath = sharedvfs.base
590 cachepath = sharedvfs.join(b'cache')
590 cachepath = sharedvfs.join(b'cache')
591 else:
591 else:
592 storebasepath = hgvfs.base
592 storebasepath = hgvfs.base
593 cachepath = hgvfs.join(b'cache')
593 cachepath = hgvfs.join(b'cache')
594 wcachepath = hgvfs.join(b'wcache')
594 wcachepath = hgvfs.join(b'wcache')
595
595
596 # The store has changed over time and the exact layout is dictated by
596 # The store has changed over time and the exact layout is dictated by
597 # requirements. The store interface abstracts differences across all
597 # requirements. The store interface abstracts differences across all
598 # of them.
598 # of them.
599 store = makestore(
599 store = makestore(
600 requirements,
600 requirements,
601 storebasepath,
601 storebasepath,
602 lambda base: vfsmod.vfs(base, cacheaudited=True),
602 lambda base: vfsmod.vfs(base, cacheaudited=True),
603 )
603 )
604 hgvfs.createmode = store.createmode
604 hgvfs.createmode = store.createmode
605
605
606 storevfs = store.vfs
606 storevfs = store.vfs
607 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
607 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
608
608
609 # The cache vfs is used to manage cache files.
609 # The cache vfs is used to manage cache files.
610 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
610 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
611 cachevfs.createmode = store.createmode
611 cachevfs.createmode = store.createmode
612 # The cache vfs is used to manage cache files related to the working copy
612 # The cache vfs is used to manage cache files related to the working copy
613 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
613 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
614 wcachevfs.createmode = store.createmode
614 wcachevfs.createmode = store.createmode
615
615
616 # Now resolve the type for the repository object. We do this by repeatedly
616 # Now resolve the type for the repository object. We do this by repeatedly
617 # calling a factory function to produce types for specific aspects of the
617 # calling a factory function to produce types for specific aspects of the
618 # repo's operation. The aggregate returned types are used as base classes
618 # repo's operation. The aggregate returned types are used as base classes
619 # for a dynamically-derived type, which will represent our new repository.
619 # for a dynamically-derived type, which will represent our new repository.
620
620
621 bases = []
621 bases = []
622 extrastate = {}
622 extrastate = {}
623
623
624 for iface, fn in REPO_INTERFACES:
624 for iface, fn in REPO_INTERFACES:
625 # We pass all potentially useful state to give extensions tons of
625 # We pass all potentially useful state to give extensions tons of
626 # flexibility.
626 # flexibility.
627 typ = fn()(
627 typ = fn()(
628 ui=ui,
628 ui=ui,
629 intents=intents,
629 intents=intents,
630 requirements=requirements,
630 requirements=requirements,
631 features=features,
631 features=features,
632 wdirvfs=wdirvfs,
632 wdirvfs=wdirvfs,
633 hgvfs=hgvfs,
633 hgvfs=hgvfs,
634 store=store,
634 store=store,
635 storevfs=storevfs,
635 storevfs=storevfs,
636 storeoptions=storevfs.options,
636 storeoptions=storevfs.options,
637 cachevfs=cachevfs,
637 cachevfs=cachevfs,
638 wcachevfs=wcachevfs,
638 wcachevfs=wcachevfs,
639 extensionmodulenames=extensionmodulenames,
639 extensionmodulenames=extensionmodulenames,
640 extrastate=extrastate,
640 extrastate=extrastate,
641 baseclasses=bases,
641 baseclasses=bases,
642 )
642 )
643
643
644 if not isinstance(typ, type):
644 if not isinstance(typ, type):
645 raise error.ProgrammingError(
645 raise error.ProgrammingError(
646 b'unable to construct type for %s' % iface
646 b'unable to construct type for %s' % iface
647 )
647 )
648
648
649 bases.append(typ)
649 bases.append(typ)
650
650
651 # type() allows you to use characters in type names that wouldn't be
651 # type() allows you to use characters in type names that wouldn't be
652 # recognized as Python symbols in source code. We abuse that to add
652 # recognized as Python symbols in source code. We abuse that to add
653 # rich information about our constructed repo.
653 # rich information about our constructed repo.
654 name = pycompat.sysstr(
654 name = pycompat.sysstr(
655 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
655 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
656 )
656 )
657
657
658 cls = type(name, tuple(bases), {})
658 cls = type(name, tuple(bases), {})
659
659
660 return cls(
660 return cls(
661 baseui=baseui,
661 baseui=baseui,
662 ui=ui,
662 ui=ui,
663 origroot=path,
663 origroot=path,
664 wdirvfs=wdirvfs,
664 wdirvfs=wdirvfs,
665 hgvfs=hgvfs,
665 hgvfs=hgvfs,
666 requirements=requirements,
666 requirements=requirements,
667 supportedrequirements=supportedrequirements,
667 supportedrequirements=supportedrequirements,
668 sharedpath=storebasepath,
668 sharedpath=storebasepath,
669 store=store,
669 store=store,
670 cachevfs=cachevfs,
670 cachevfs=cachevfs,
671 wcachevfs=wcachevfs,
671 wcachevfs=wcachevfs,
672 features=features,
672 features=features,
673 intents=intents,
673 intents=intents,
674 )
674 )
675
675
676
676
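# A hedged sketch of the wrapping pattern described in the docstring above:
# a hypothetical extension wraps one of the REPO_INTERFACES factory
# functions, no-ops when it is not loaded for the repository being
# constructed, and otherwise mixes an extra base class into the returned
# type. The mixin and its debug message are illustrative only.
#
#     from mercurial import extensions, localrepo
#
#     class filelogdebugmixin(object):
#         def file(self, path):
#             self.ui.debug(b'opening filelog %s\n' % path)
#             return super(filelogdebugmixin, self).file(path)
#
#     def wrapmakefilestorage(orig, requirements, features, **kwargs):
#         typ = orig(requirements=requirements, features=features, **kwargs)
#         if __name__ not in kwargs['extensionmodulenames']:
#             return typ
#         return type('debugfilestorage', (filelogdebugmixin, typ), {})
#
#     def uisetup(ui):
#         extensions.wrapfunction(
#             localrepo, 'makefilestorage', wrapmakefilestorage
#         )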
677 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
677 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
678 """Load hgrc files/content into a ui instance.
678 """Load hgrc files/content into a ui instance.
679
679
680 This is called during repository opening to load any additional
680 This is called during repository opening to load any additional
681 config files or settings relevant to the current repository.
681 config files or settings relevant to the current repository.
682
682
683 Returns a bool indicating whether any additional configs were loaded.
683 Returns a bool indicating whether any additional configs were loaded.
684
684
685 Extensions should monkeypatch this function to modify how per-repo
685 Extensions should monkeypatch this function to modify how per-repo
686 configs are loaded. For example, an extension may wish to pull in
686 configs are loaded. For example, an extension may wish to pull in
687 configs from alternate files or sources.
687 configs from alternate files or sources.
688 """
688 """
689 if not rcutil.use_repo_hgrc():
689 if not rcutil.use_repo_hgrc():
690 return False
690 return False
691 try:
691 try:
692 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
692 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
693 return True
693 return True
694 except IOError:
694 except IOError:
695 return False
695 return False
696
696
697
697
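# The monkeypatching the docstring above invites might look like this for a
# hypothetical extension that also reads an extra, per-repo config file (the
# file name "hgrc-local" is purely illustrative):
#
#     from mercurial import extensions, localrepo
#
#     def wraploadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#         loaded = orig(ui, wdirvfs, hgvfs, requirements)
#         try:
#             ui.readconfig(hgvfs.join(b'hgrc-local'), root=wdirvfs.base)
#             return True
#         except IOError:
#             return loaded
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'loadhgrc', wraploadhgrc)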
698 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
698 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
699 """Perform additional actions after .hg/hgrc is loaded.
699 """Perform additional actions after .hg/hgrc is loaded.
700
700
701 This function is called during repository loading immediately after
701 This function is called during repository loading immediately after
702 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
702 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
703
703
704 The function can be used to validate configs, automatically add
704 The function can be used to validate configs, automatically add
705 options (including extensions) based on requirements, etc.
705 options (including extensions) based on requirements, etc.
706 """
706 """
707
707
708 # Map of requirements to list of extensions to load automatically when
708 # Map of requirements to list of extensions to load automatically when
709 # requirement is present.
709 # requirement is present.
710 autoextensions = {
710 autoextensions = {
711 b'git': [b'git'],
711 b'git': [b'git'],
712 b'largefiles': [b'largefiles'],
712 b'largefiles': [b'largefiles'],
713 b'lfs': [b'lfs'],
713 b'lfs': [b'lfs'],
714 }
714 }
715
715
716 for requirement, names in sorted(autoextensions.items()):
716 for requirement, names in sorted(autoextensions.items()):
717 if requirement not in requirements:
717 if requirement not in requirements:
718 continue
718 continue
719
719
720 for name in names:
720 for name in names:
721 if not ui.hasconfig(b'extensions', name):
721 if not ui.hasconfig(b'extensions', name):
722 ui.setconfig(b'extensions', name, b'', source=b'autoload')
722 ui.setconfig(b'extensions', name, b'', source=b'autoload')
723
723
724
724
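# Concretely, given the mapping above: opening a repository whose
# .hg/requires lists "lfs" behaves as if the user had configured, for this
# repository only,
#
#     [extensions]
#     lfs =
#
# unless some "extensions.lfs" value is already set, in which case the
# existing value (including an explicit disabling) wins.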
725 def gathersupportedrequirements(ui):
725 def gathersupportedrequirements(ui):
726 """Determine the complete set of recognized requirements."""
726 """Determine the complete set of recognized requirements."""
727 # Start with all requirements supported by this file.
727 # Start with all requirements supported by this file.
728 supported = set(localrepository._basesupported)
728 supported = set(localrepository._basesupported)
729
729
730 # Execute ``featuresetupfuncs`` entries if they belong to an extension
730 # Execute ``featuresetupfuncs`` entries if they belong to an extension
731 # relevant to this ui instance.
731 # relevant to this ui instance.
732 modules = {m.__name__ for n, m in extensions.extensions(ui)}
732 modules = {m.__name__ for n, m in extensions.extensions(ui)}
733
733
734 for fn in featuresetupfuncs:
734 for fn in featuresetupfuncs:
735 if fn.__module__ in modules:
735 if fn.__module__ in modules:
736 fn(ui, supported)
736 fn(ui, supported)
737
737
738 # Add derived requirements from registered compression engines.
738 # Add derived requirements from registered compression engines.
739 for name in util.compengines:
739 for name in util.compengines:
740 engine = util.compengines[name]
740 engine = util.compengines[name]
741 if engine.available() and engine.revlogheader():
741 if engine.available() and engine.revlogheader():
742 supported.add(b'exp-compression-%s' % name)
742 supported.add(b'exp-compression-%s' % name)
743 if engine.name() == b'zstd':
743 if engine.name() == b'zstd':
744 supported.add(b'revlog-compression-zstd')
744 supported.add(b'revlog-compression-zstd')
745
745
746 return supported
746 return supported
747
747
748
748
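# How an extension typically feeds this function: it registers a callback in
# ``featuresetupfuncs`` so its own requirement is recognized when, and only
# when, the extension is enabled. A sketch; the requirement name
# "exp-myfeature" is illustrative:
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, supported):
#         supported.add(b'exp-myfeature')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)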
749 def ensurerequirementsrecognized(requirements, supported):
749 def ensurerequirementsrecognized(requirements, supported):
750 """Validate that a set of local requirements is recognized.
750 """Validate that a set of local requirements is recognized.
751
751
752 Receives a set of requirements. Raises an ``error.RepoError`` if there
752 Receives a set of requirements. Raises an ``error.RepoError`` if there
753 exists any requirement in that set that currently loaded code doesn't
753 exists any requirement in that set that currently loaded code doesn't
754 recognize.
754 recognize.
755
755
756 Returns nothing if every requirement is recognized.
756 Returns nothing if every requirement is recognized.
757 """
757 """
758 missing = set()
758 missing = set()
759
759
760 for requirement in requirements:
760 for requirement in requirements:
761 if requirement in supported:
761 if requirement in supported:
762 continue
762 continue
763
763
764 if not requirement or not requirement[0:1].isalnum():
764 if not requirement or not requirement[0:1].isalnum():
765 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
765 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
766
766
767 missing.add(requirement)
767 missing.add(requirement)
768
768
769 if missing:
769 if missing:
770 raise error.RequirementError(
770 raise error.RequirementError(
771 _(b'repository requires features unknown to this Mercurial: %s')
771 _(b'repository requires features unknown to this Mercurial: %s')
772 % b' '.join(sorted(missing)),
772 % b' '.join(sorted(missing)),
773 hint=_(
773 hint=_(
774 b'see https://mercurial-scm.org/wiki/MissingRequirement '
774 b'see https://mercurial-scm.org/wiki/MissingRequirement '
775 b'for more information'
775 b'for more information'
776 ),
776 ),
777 )
777 )
778
778
779
779
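# For example, a caller holding requirements freshly read from .hg/requires
# does:
#
#     ensurerequirementsrecognized({b'revlogv1', b'exp-foo'}, supported)
#
# which raises error.RequirementError, with a hint pointing at the
# MissingRequirement wiki page, whenever b'exp-foo' is not in ``supported``.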
780 def ensurerequirementscompatible(ui, requirements):
780 def ensurerequirementscompatible(ui, requirements):
781 """Validates that a set of recognized requirements is mutually compatible.
781 """Validates that a set of recognized requirements is mutually compatible.
782
782
783 Some requirements may not be compatible with others or require
783 Some requirements may not be compatible with others or require
784 config options that aren't enabled. This function is called during
784 config options that aren't enabled. This function is called during
785 repository opening to ensure that the set of requirements needed
785 repository opening to ensure that the set of requirements needed
786 to open a repository is sane and compatible with config options.
786 to open a repository is sane and compatible with config options.
787
787
788 Extensions can monkeypatch this function to perform additional
788 Extensions can monkeypatch this function to perform additional
789 checking.
789 checking.
790
790
791 ``error.RepoError`` should be raised on failure.
791 ``error.RepoError`` should be raised on failure.
792 """
792 """
793 if b'exp-sparse' in requirements and not sparse.enabled:
793 if b'exp-sparse' in requirements and not sparse.enabled:
794 raise error.RepoError(
794 raise error.RepoError(
795 _(
795 _(
796 b'repository is using sparse feature but '
796 b'repository is using sparse feature but '
797 b'sparse is not enabled; enable the '
797 b'sparse is not enabled; enable the '
798 b'"sparse" extensions to access'
798 b'"sparse" extensions to access'
799 )
799 )
800 )
800 )
801
801
802
802
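# The monkeypatching mentioned in the docstring above might look like this
# for a hypothetical extension whose requirement additionally needs a config
# knob to be set (all names are illustrative):
#
#     from mercurial import error, extensions, localrepo
#     from mercurial.i18n import _
#
#     def wrapcompatible(orig, ui, requirements):
#         orig(ui, requirements)
#         if b'exp-myfeature' in requirements and not ui.configbool(
#             b'myext', b'enabled'
#         ):
#             raise error.RepoError(_(b'enable myext.enabled to use this repo'))
#
#     def extsetup(ui):
#         extensions.wrapfunction(
#             localrepo, 'ensurerequirementscompatible', wrapcompatible
#         )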
803 def makestore(requirements, path, vfstype):
803 def makestore(requirements, path, vfstype):
804 """Construct a storage object for a repository."""
804 """Construct a storage object for a repository."""
805 if b'store' in requirements:
805 if b'store' in requirements:
806 if b'fncache' in requirements:
806 if b'fncache' in requirements:
807 return storemod.fncachestore(
807 return storemod.fncachestore(
808 path, vfstype, b'dotencode' in requirements
808 path, vfstype, b'dotencode' in requirements
809 )
809 )
810
810
811 return storemod.encodedstore(path, vfstype)
811 return storemod.encodedstore(path, vfstype)
812
812
813 return storemod.basicstore(path, vfstype)
813 return storemod.basicstore(path, vfstype)
814
814
815
815
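# For reference, the requirements-to-store mapping implemented above:
#
#     'store' and 'fncache'   -> storemod.fncachestore ('dotencode' honoured)
#     'store' only            -> storemod.encodedstore
#     neither                 -> storemod.basicstore (very old repositories)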
816 def resolvestorevfsoptions(ui, requirements, features):
816 def resolvestorevfsoptions(ui, requirements, features):
817 """Resolve the options to pass to the store vfs opener.
817 """Resolve the options to pass to the store vfs opener.
818
818
819 The returned dict is used to influence behavior of the storage layer.
819 The returned dict is used to influence behavior of the storage layer.
820 """
820 """
821 options = {}
821 options = {}
822
822
823 if b'treemanifest' in requirements:
823 if b'treemanifest' in requirements:
824 options[b'treemanifest'] = True
824 options[b'treemanifest'] = True
825
825
826 # experimental config: format.manifestcachesize
826 # experimental config: format.manifestcachesize
827 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
827 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
828 if manifestcachesize is not None:
828 if manifestcachesize is not None:
829 options[b'manifestcachesize'] = manifestcachesize
829 options[b'manifestcachesize'] = manifestcachesize
830
830
831 # In the absence of another requirement superseding a revlog-related
831 # In the absence of another requirement superseding a revlog-related
832 # requirement, we have to assume the repo is using revlog version 0.
832 # requirement, we have to assume the repo is using revlog version 0.
833 # This revlog format is super old and we don't bother trying to parse
833 # This revlog format is super old and we don't bother trying to parse
834 # opener options for it because those options wouldn't do anything
834 # opener options for it because those options wouldn't do anything
835 # meaningful on such old repos.
835 # meaningful on such old repos.
836 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
836 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
837 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
837 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
838 else: # explicitly mark repo as using revlogv0
838 else: # explicitly mark repo as using revlogv0
839 options[b'revlogv0'] = True
839 options[b'revlogv0'] = True
840
840
841 if COPIESSDC_REQUIREMENT in requirements:
841 if COPIESSDC_REQUIREMENT in requirements:
842 options[b'copies-storage'] = b'changeset-sidedata'
842 options[b'copies-storage'] = b'changeset-sidedata'
843 else:
843 else:
844 writecopiesto = ui.config(b'experimental', b'copies.write-to')
844 writecopiesto = ui.config(b'experimental', b'copies.write-to')
845 copiesextramode = (b'changeset-only', b'compatibility')
845 copiesextramode = (b'changeset-only', b'compatibility')
846 if writecopiesto in copiesextramode:
846 if writecopiesto in copiesextramode:
847 options[b'copies-storage'] = b'extra'
847 options[b'copies-storage'] = b'extra'
848
848
849 return options
849 return options
850
850
851
851
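# Summary of the copies-storage resolution above:
#
#     COPIESSDC requirement present          -> b'changeset-sidedata'
#     experimental.copies.write-to set to
#       'changeset-only' or 'compatibility'  -> b'extra'
#     otherwise                              -> option left unset
#                                               (copies tracked in filelogs only)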
852 def resolverevlogstorevfsoptions(ui, requirements, features):
852 def resolverevlogstorevfsoptions(ui, requirements, features):
853 """Resolve opener options specific to revlogs."""
853 """Resolve opener options specific to revlogs."""
854
854
855 options = {}
855 options = {}
856 options[b'flagprocessors'] = {}
856 options[b'flagprocessors'] = {}
857
857
858 if b'revlogv1' in requirements:
858 if b'revlogv1' in requirements:
859 options[b'revlogv1'] = True
859 options[b'revlogv1'] = True
860 if REVLOGV2_REQUIREMENT in requirements:
860 if REVLOGV2_REQUIREMENT in requirements:
861 options[b'revlogv2'] = True
861 options[b'revlogv2'] = True
862
862
863 if b'generaldelta' in requirements:
863 if b'generaldelta' in requirements:
864 options[b'generaldelta'] = True
864 options[b'generaldelta'] = True
865
865
866 # experimental config: format.chunkcachesize
866 # experimental config: format.chunkcachesize
867 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
867 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
868 if chunkcachesize is not None:
868 if chunkcachesize is not None:
869 options[b'chunkcachesize'] = chunkcachesize
869 options[b'chunkcachesize'] = chunkcachesize
870
870
871 deltabothparents = ui.configbool(
871 deltabothparents = ui.configbool(
872 b'storage', b'revlog.optimize-delta-parent-choice'
872 b'storage', b'revlog.optimize-delta-parent-choice'
873 )
873 )
874 options[b'deltabothparents'] = deltabothparents
874 options[b'deltabothparents'] = deltabothparents
875
875
876 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
876 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
877 lazydeltabase = False
877 lazydeltabase = False
878 if lazydelta:
878 if lazydelta:
879 lazydeltabase = ui.configbool(
879 lazydeltabase = ui.configbool(
880 b'storage', b'revlog.reuse-external-delta-parent'
880 b'storage', b'revlog.reuse-external-delta-parent'
881 )
881 )
882 if lazydeltabase is None:
882 if lazydeltabase is None:
883 lazydeltabase = not scmutil.gddeltaconfig(ui)
883 lazydeltabase = not scmutil.gddeltaconfig(ui)
884 options[b'lazydelta'] = lazydelta
884 options[b'lazydelta'] = lazydelta
885 options[b'lazydeltabase'] = lazydeltabase
885 options[b'lazydeltabase'] = lazydeltabase
886
886
887 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
887 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
888 if 0 <= chainspan:
888 if 0 <= chainspan:
889 options[b'maxdeltachainspan'] = chainspan
889 options[b'maxdeltachainspan'] = chainspan
890
890
891 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
891 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
892 if mmapindexthreshold is not None:
892 if mmapindexthreshold is not None:
893 options[b'mmapindexthreshold'] = mmapindexthreshold
893 options[b'mmapindexthreshold'] = mmapindexthreshold
894
894
895 withsparseread = ui.configbool(b'experimental', b'sparse-read')
895 withsparseread = ui.configbool(b'experimental', b'sparse-read')
896 srdensitythres = float(
896 srdensitythres = float(
897 ui.config(b'experimental', b'sparse-read.density-threshold')
897 ui.config(b'experimental', b'sparse-read.density-threshold')
898 )
898 )
899 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
899 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
900 options[b'with-sparse-read'] = withsparseread
900 options[b'with-sparse-read'] = withsparseread
901 options[b'sparse-read-density-threshold'] = srdensitythres
901 options[b'sparse-read-density-threshold'] = srdensitythres
902 options[b'sparse-read-min-gap-size'] = srmingapsize
902 options[b'sparse-read-min-gap-size'] = srmingapsize
903
903
904 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
904 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
905 options[b'sparse-revlog'] = sparserevlog
905 options[b'sparse-revlog'] = sparserevlog
906 if sparserevlog:
906 if sparserevlog:
907 options[b'generaldelta'] = True
907 options[b'generaldelta'] = True
908
908
909 sidedata = SIDEDATA_REQUIREMENT in requirements
909 sidedata = SIDEDATA_REQUIREMENT in requirements
910 options[b'side-data'] = sidedata
910 options[b'side-data'] = sidedata
911
911
912 maxchainlen = None
912 maxchainlen = None
913 if sparserevlog:
913 if sparserevlog:
914 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
914 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
915 # experimental config: format.maxchainlen
915 # experimental config: format.maxchainlen
916 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
916 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
917 if maxchainlen is not None:
917 if maxchainlen is not None:
918 options[b'maxchainlen'] = maxchainlen
918 options[b'maxchainlen'] = maxchainlen
919
919
920 for r in requirements:
920 for r in requirements:
921 # we allow multiple compression engine requirements to co-exist because,
921 # we allow multiple compression engine requirements to co-exist because,
922 # strictly speaking, revlog seems to support mixed compression styles.
922 # strictly speaking, revlog seems to support mixed compression styles.
923 #
923 #
924 # The compression used for new entries will be "the last one"
924 # The compression used for new entries will be "the last one"
925 prefix = r.startswith
925 prefix = r.startswith
926 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
926 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
927 options[b'compengine'] = r.split(b'-', 2)[2]
927 options[b'compengine'] = r.split(b'-', 2)[2]
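# e.g. b'revlog-compression-zstd'.split(b'-', 2)[2] == b'zstd', so the
# requirement's trailing token selects the engine used for new revisions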
928
928
929 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
929 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
930 if options[b'zlib.level'] is not None:
930 if options[b'zlib.level'] is not None:
931 if not (0 <= options[b'zlib.level'] <= 9):
931 if not (0 <= options[b'zlib.level'] <= 9):
932 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
932 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
933 raise error.Abort(msg % options[b'zlib.level'])
933 raise error.Abort(msg % options[b'zlib.level'])
934 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
934 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
935 if options[b'zstd.level'] is not None:
935 if options[b'zstd.level'] is not None:
936 if not (0 <= options[b'zstd.level'] <= 22):
936 if not (0 <= options[b'zstd.level'] <= 22):
937 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
937 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
938 raise error.Abort(msg % options[b'zstd.level'])
938 raise error.Abort(msg % options[b'zstd.level'])
939
939
940 if repository.NARROW_REQUIREMENT in requirements:
940 if repository.NARROW_REQUIREMENT in requirements:
941 options[b'enableellipsis'] = True
941 options[b'enableellipsis'] = True
942
942
943 if ui.configbool(b'experimental', b'rust.index'):
943 if ui.configbool(b'experimental', b'rust.index'):
944 options[b'rust.index'] = True
944 options[b'rust.index'] = True
945 if NODEMAP_REQUIREMENT in requirements:
945 if NODEMAP_REQUIREMENT in requirements:
946 options[b'persistent-nodemap'] = True
946 options[b'persistent-nodemap'] = True
947 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
947 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
948 options[b'persistent-nodemap.mmap'] = True
948 options[b'persistent-nodemap.mmap'] = True
949 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
949 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
950 options[b'persistent-nodemap.mode'] = epnm
950 options[b'persistent-nodemap.mode'] = epnm
951 if ui.configbool(b'devel', b'persistent-nodemap'):
951 if ui.configbool(b'devel', b'persistent-nodemap'):
952 options[b'devel-force-nodemap'] = True
952 options[b'devel-force-nodemap'] = True
953
953
954 return options
954 return options
955
955
956
956
957 def makemain(**kwargs):
957 def makemain(**kwargs):
958 """Produce a type conforming to ``ilocalrepositorymain``."""
958 """Produce a type conforming to ``ilocalrepositorymain``."""
959 return localrepository
959 return localrepository
960
960
961
961
962 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
962 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
963 class revlogfilestorage(object):
963 class revlogfilestorage(object):
964 """File storage when using revlogs."""
964 """File storage when using revlogs."""
965
965
966 def file(self, path):
966 def file(self, path):
967 if path[0] == b'/':
967 if path[0] == b'/':
968 path = path[1:]
968 path = path[1:]
969
969
970 return filelog.filelog(self.svfs, path)
970 return filelog.filelog(self.svfs, path)
971
971
972
972
973 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
973 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
974 class revlognarrowfilestorage(object):
974 class revlognarrowfilestorage(object):
975 """File storage when using revlogs and narrow files."""
975 """File storage when using revlogs and narrow files."""
976
976
977 def file(self, path):
977 def file(self, path):
978 if path[0] == b'/':
978 if path[0] == b'/':
979 path = path[1:]
979 path = path[1:]
980
980
981 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
981 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
982
982
983
983
984 def makefilestorage(requirements, features, **kwargs):
984 def makefilestorage(requirements, features, **kwargs):
985 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
985 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
986 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
986 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
987 features.add(repository.REPO_FEATURE_STREAM_CLONE)
987 features.add(repository.REPO_FEATURE_STREAM_CLONE)
988
988
989 if repository.NARROW_REQUIREMENT in requirements:
989 if repository.NARROW_REQUIREMENT in requirements:
990 return revlognarrowfilestorage
990 return revlognarrowfilestorage
991 else:
991 else:
992 return revlogfilestorage
992 return revlogfilestorage
993
993
994
994
995 # List of repository interfaces and factory functions for them. Each
995 # List of repository interfaces and factory functions for them. Each
996 # will be called in order during ``makelocalrepository()`` to iteratively
996 # will be called in order during ``makelocalrepository()`` to iteratively
997 # derive the final type for a local repository instance. We capture the
997 # derive the final type for a local repository instance. We capture the
998 # function as a lambda so we don't hold a reference and the module-level
998 # function as a lambda so we don't hold a reference and the module-level
999 # functions can be wrapped.
999 # functions can be wrapped.
1000 REPO_INTERFACES = [
1000 REPO_INTERFACES = [
1001 (repository.ilocalrepositorymain, lambda: makemain),
1001 (repository.ilocalrepositorymain, lambda: makemain),
1002 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1002 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1003 ]
1003 ]
1004
1004
1005
1005
1006 @interfaceutil.implementer(repository.ilocalrepositorymain)
1006 @interfaceutil.implementer(repository.ilocalrepositorymain)
1007 class localrepository(object):
1007 class localrepository(object):
1008 """Main class for representing local repositories.
1008 """Main class for representing local repositories.
1009
1009
1010 All local repositories are instances of this class.
1010 All local repositories are instances of this class.
1011
1011
1012 Constructed on its own, instances of this class are not usable as
1012 Constructed on its own, instances of this class are not usable as
1013 repository objects. To obtain a usable repository object, call
1013 repository objects. To obtain a usable repository object, call
1014 ``hg.repository()``, ``localrepo.instance()``, or
1014 ``hg.repository()``, ``localrepo.instance()``, or
1015 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1015 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1016 ``instance()`` adds support for creating new repositories.
1016 ``instance()`` adds support for creating new repositories.
1017 ``hg.repository()`` adds more extension integration, including calling
1017 ``hg.repository()`` adds more extension integration, including calling
1018 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1018 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1019 used.
1019 used.
1020 """
1020 """
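# A usage sketch of the recommended entry point described above (the
# repository path is illustrative):
#
#     from mercurial import hg, ui as uimod
#
#     repo = hg.repository(uimod.ui.load(), path=b'/path/to/repo')
#     print(len(repo))   # number of revisions in the changelog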
1021
1021
1022 # obsolete experimental requirements:
1022 # obsolete experimental requirements:
1023 # - manifestv2: An experimental new manifest format that allowed
1023 # - manifestv2: An experimental new manifest format that allowed
1024 # for stem compression of long paths. Experiment ended up not
1024 # for stem compression of long paths. Experiment ended up not
1025 # being successful (repository sizes went up due to worse delta
1025 # being successful (repository sizes went up due to worse delta
1026 # chains), and the code was deleted in 4.6.
1026 # chains), and the code was deleted in 4.6.
1027 supportedformats = {
1027 supportedformats = {
1028 b'revlogv1',
1028 b'revlogv1',
1029 b'generaldelta',
1029 b'generaldelta',
1030 b'treemanifest',
1030 b'treemanifest',
1031 COPIESSDC_REQUIREMENT,
1031 COPIESSDC_REQUIREMENT,
1032 REVLOGV2_REQUIREMENT,
1032 REVLOGV2_REQUIREMENT,
1033 SIDEDATA_REQUIREMENT,
1033 SIDEDATA_REQUIREMENT,
1034 SPARSEREVLOG_REQUIREMENT,
1034 SPARSEREVLOG_REQUIREMENT,
1035 NODEMAP_REQUIREMENT,
1035 NODEMAP_REQUIREMENT,
1036 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1036 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1037 }
1037 }
1038 _basesupported = supportedformats | {
1038 _basesupported = supportedformats | {
1039 b'store',
1039 b'store',
1040 b'fncache',
1040 b'fncache',
1041 b'shared',
1041 b'shared',
1042 b'relshared',
1042 b'relshared',
1043 b'dotencode',
1043 b'dotencode',
1044 b'exp-sparse',
1044 b'exp-sparse',
1045 b'internal-phase',
1045 b'internal-phase',
1046 }
1046 }
1047
1047
1048 # list of prefix for file which can be written without 'wlock'
1048 # list of prefix for file which can be written without 'wlock'
1049 # Extensions should extend this list when needed
1049 # Extensions should extend this list when needed
1050 _wlockfreeprefix = {
1050 _wlockfreeprefix = {
1051 # We might consider requiring 'wlock' for the next
1051 # We might consider requiring 'wlock' for the next
1052 # two, but pretty much all the existing code assumes
1052 # two, but pretty much all the existing code assumes
1053 # wlock is not needed so we keep them excluded for
1053 # wlock is not needed so we keep them excluded for
1054 # now.
1054 # now.
1055 b'hgrc',
1055 b'hgrc',
1056 b'requires',
1056 b'requires',
1057 # XXX cache is a complicated business; someone
1057 # XXX cache is a complicated business; someone
1058 # should investigate this in depth at some point
1058 # should investigate this in depth at some point
1059 b'cache/',
1059 b'cache/',
1060 # XXX shouldn't dirstate be covered by the wlock?
1060 # XXX shouldn't dirstate be covered by the wlock?
1061 b'dirstate',
1061 b'dirstate',
1062 # XXX bisect was still a bit too messy at the time
1062 # XXX bisect was still a bit too messy at the time
1063 # this changeset was introduced. Someone should fix
1063 # this changeset was introduced. Someone should fix
1064 # the remaining bit and drop this line
1064 # the remaining bit and drop this line
1065 b'bisect.state',
1065 b'bisect.state',
1066 }
1066 }
1067
1067
1068 def __init__(
1068 def __init__(
1069 self,
1069 self,
1070 baseui,
1070 baseui,
1071 ui,
1071 ui,
1072 origroot,
1072 origroot,
1073 wdirvfs,
1073 wdirvfs,
1074 hgvfs,
1074 hgvfs,
1075 requirements,
1075 requirements,
1076 supportedrequirements,
1076 supportedrequirements,
1077 sharedpath,
1077 sharedpath,
1078 store,
1078 store,
1079 cachevfs,
1079 cachevfs,
1080 wcachevfs,
1080 wcachevfs,
1081 features,
1081 features,
1082 intents=None,
1082 intents=None,
1083 ):
1083 ):
1084 """Create a new local repository instance.
1084 """Create a new local repository instance.
1085
1085
1086 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1086 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1087 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1087 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1088 object.
1088 object.
1089
1089
1090 Arguments:
1090 Arguments:
1091
1091
1092 baseui
1092 baseui
1093 ``ui.ui`` instance that ``ui`` argument was based off of.
1093 ``ui.ui`` instance that ``ui`` argument was based off of.
1094
1094
1095 ui
1095 ui
1096 ``ui.ui`` instance for use by the repository.
1096 ``ui.ui`` instance for use by the repository.
1097
1097
1098 origroot
1098 origroot
1099 ``bytes`` path to working directory root of this repository.
1099 ``bytes`` path to working directory root of this repository.
1100
1100
1101 wdirvfs
1101 wdirvfs
1102 ``vfs.vfs`` rooted at the working directory.
1102 ``vfs.vfs`` rooted at the working directory.
1103
1103
1104 hgvfs
1104 hgvfs
1105 ``vfs.vfs`` rooted at .hg/
1105 ``vfs.vfs`` rooted at .hg/
1106
1106
1107 requirements
1107 requirements
1108 ``set`` of bytestrings representing repository opening requirements.
1108 ``set`` of bytestrings representing repository opening requirements.
1109
1109
1110 supportedrequirements
1110 supportedrequirements
1111 ``set`` of bytestrings representing repository requirements that we
1111 ``set`` of bytestrings representing repository requirements that we
1112 know how to open. May be a superset of ``requirements``.
1112 know how to open. May be a superset of ``requirements``.
1113
1113
1114 sharedpath
1114 sharedpath
1115 ``bytes`` defining the path to the storage base directory. Points to a
1115 ``bytes`` defining the path to the storage base directory. Points to a
1116 ``.hg/`` directory somewhere.
1116 ``.hg/`` directory somewhere.
1117
1117
1118 store
1118 store
1119 ``store.basicstore`` (or derived) instance providing access to
1119 ``store.basicstore`` (or derived) instance providing access to
1120 versioned storage.
1120 versioned storage.
1121
1121
1122 cachevfs
1122 cachevfs
1123 ``vfs.vfs`` used for cache files.
1123 ``vfs.vfs`` used for cache files.
1124
1124
1125 wcachevfs
1125 wcachevfs
1126 ``vfs.vfs`` used for cache files related to the working copy.
1126 ``vfs.vfs`` used for cache files related to the working copy.
1127
1127
1128 features
1128 features
1129 ``set`` of bytestrings defining features/capabilities of this
1129 ``set`` of bytestrings defining features/capabilities of this
1130 instance.
1130 instance.
1131
1131
1132 intents
1132 intents
1133 ``set`` of system strings indicating what this repo will be used
1133 ``set`` of system strings indicating what this repo will be used
1134 for.
1134 for.
1135 """
1135 """
1136 self.baseui = baseui
1136 self.baseui = baseui
1137 self.ui = ui
1137 self.ui = ui
1138 self.origroot = origroot
1138 self.origroot = origroot
1139 # vfs rooted at working directory.
1139 # vfs rooted at working directory.
1140 self.wvfs = wdirvfs
1140 self.wvfs = wdirvfs
1141 self.root = wdirvfs.base
1141 self.root = wdirvfs.base
1142 # vfs rooted at .hg/. Used to access most non-store paths.
1142 # vfs rooted at .hg/. Used to access most non-store paths.
1143 self.vfs = hgvfs
1143 self.vfs = hgvfs
1144 self.path = hgvfs.base
1144 self.path = hgvfs.base
1145 self.requirements = requirements
1145 self.requirements = requirements
1146 self.supported = supportedrequirements
1146 self.supported = supportedrequirements
1147 self.sharedpath = sharedpath
1147 self.sharedpath = sharedpath
1148 self.store = store
1148 self.store = store
1149 self.cachevfs = cachevfs
1149 self.cachevfs = cachevfs
1150 self.wcachevfs = wcachevfs
1150 self.wcachevfs = wcachevfs
1151 self.features = features
1151 self.features = features
1152
1152
1153 self.filtername = None
1153 self.filtername = None
1154
1154
1155 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1155 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1156 b'devel', b'check-locks'
1156 b'devel', b'check-locks'
1157 ):
1157 ):
1158 self.vfs.audit = self._getvfsward(self.vfs.audit)
1158 self.vfs.audit = self._getvfsward(self.vfs.audit)
1159 # A list of callbacks to shape the phase if no data were found.
1159 # A list of callbacks to shape the phase if no data were found.
1160 # Callbacks are in the form: func(repo, roots) --> processed root.
1160 # Callbacks are in the form: func(repo, roots) --> processed root.
1161 # This list is to be filled by extensions during repo setup
1161 # This list is to be filled by extensions during repo setup
1162 self._phasedefaults = []
1162 self._phasedefaults = []
1163
1163
1164 color.setup(self.ui)
1164 color.setup(self.ui)
1165
1165
1166 self.spath = self.store.path
1166 self.spath = self.store.path
1167 self.svfs = self.store.vfs
1167 self.svfs = self.store.vfs
1168 self.sjoin = self.store.join
1168 self.sjoin = self.store.join
1169 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1169 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1170 b'devel', b'check-locks'
1170 b'devel', b'check-locks'
1171 ):
1171 ):
1172 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1172 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1173 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1173 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1174 else: # standard vfs
1174 else: # standard vfs
1175 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1175 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1176
1176
1177 self._dirstatevalidatewarned = False
1177 self._dirstatevalidatewarned = False
1178
1178
1179 self._branchcaches = branchmap.BranchMapCache()
1179 self._branchcaches = branchmap.BranchMapCache()
1180 self._revbranchcache = None
1180 self._revbranchcache = None
1181 self._filterpats = {}
1181 self._filterpats = {}
1182 self._datafilters = {}
1182 self._datafilters = {}
1183 self._transref = self._lockref = self._wlockref = None
1183 self._transref = self._lockref = self._wlockref = None
1184
1184
1185 # A cache for various files under .hg/ that tracks file changes,
1185 # A cache for various files under .hg/ that tracks file changes,
1186 # (used by the filecache decorator)
1186 # (used by the filecache decorator)
1187 #
1187 #
1188 # Maps a property name to its util.filecacheentry
1188 # Maps a property name to its util.filecacheentry
1189 self._filecache = {}
1189 self._filecache = {}
1190
1190
1191 # hold sets of revisions to be filtered
1191 # hold sets of revisions to be filtered
1192 # should be cleared when something might have changed the filter value:
1192 # should be cleared when something might have changed the filter value:
1193 # - new changesets,
1193 # - new changesets,
1194 # - phase change,
1194 # - phase change,
1195 # - new obsolescence marker,
1195 # - new obsolescence marker,
1196 # - working directory parent change,
1196 # - working directory parent change,
1197 # - bookmark changes
1197 # - bookmark changes
1198 self.filteredrevcache = {}
1198 self.filteredrevcache = {}
1199
1199
1200 # post-dirstate-status hooks
1200 # post-dirstate-status hooks
1201 self._postdsstatus = []
1201 self._postdsstatus = []
1202
1202
1203 # generic mapping between names and nodes
1203 # generic mapping between names and nodes
1204 self.names = namespaces.namespaces()
1204 self.names = namespaces.namespaces()
1205
1205
1206 # Key to signature value.
1206 # Key to signature value.
1207 self._sparsesignaturecache = {}
1207 self._sparsesignaturecache = {}
1208 # Signature to cached matcher instance.
1208 # Signature to cached matcher instance.
1209 self._sparsematchercache = {}
1209 self._sparsematchercache = {}
1210
1210
1211 self._extrafilterid = repoview.extrafilter(ui)
1211 self._extrafilterid = repoview.extrafilter(ui)
1212
1212
1213 self.filecopiesmode = None
1213 self.filecopiesmode = None
1214 if COPIESSDC_REQUIREMENT in self.requirements:
1214 if COPIESSDC_REQUIREMENT in self.requirements:
1215 self.filecopiesmode = b'changeset-sidedata'
1215 self.filecopiesmode = b'changeset-sidedata'
1216
1216
1217 def _getvfsward(self, origfunc):
1217 def _getvfsward(self, origfunc):
1218 """build a ward for self.vfs"""
1218 """build a ward for self.vfs"""
1219 rref = weakref.ref(self)
1219 rref = weakref.ref(self)
1220
1220
1221 def checkvfs(path, mode=None):
1221 def checkvfs(path, mode=None):
1222 ret = origfunc(path, mode=mode)
1222 ret = origfunc(path, mode=mode)
1223 repo = rref()
1223 repo = rref()
1224 if (
1224 if (
1225 repo is None
1225 repo is None
1226 or not util.safehasattr(repo, b'_wlockref')
1226 or not util.safehasattr(repo, b'_wlockref')
1227 or not util.safehasattr(repo, b'_lockref')
1227 or not util.safehasattr(repo, b'_lockref')
1228 ):
1228 ):
1229 return
1229 return
1230 if mode in (None, b'r', b'rb'):
1230 if mode in (None, b'r', b'rb'):
1231 return
1231 return
1232 if path.startswith(repo.path):
1232 if path.startswith(repo.path):
1233 # truncate name relative to the repository (.hg)
1233 # truncate name relative to the repository (.hg)
1234 path = path[len(repo.path) + 1 :]
1234 path = path[len(repo.path) + 1 :]
1235 if path.startswith(b'cache/'):
1235 if path.startswith(b'cache/'):
1236 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1236 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1237 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1237 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1238 # path prefixes covered by 'lock'
1238 # path prefixes covered by 'lock'
1239 vfs_path_prefixes = (b'journal.', b'undo.', b'strip-backup/')
1239 vfs_path_prefixes = (b'journal.', b'undo.', b'strip-backup/')
1240 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1240 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1241 if repo._currentlock(repo._lockref) is None:
1241 if repo._currentlock(repo._lockref) is None:
1242 repo.ui.develwarn(
1242 repo.ui.develwarn(
1243 b'write with no lock: "%s"' % path,
1243 b'write with no lock: "%s"' % path,
1244 stacklevel=3,
1244 stacklevel=3,
1245 config=b'check-locks',
1245 config=b'check-locks',
1246 )
1246 )
1247 elif repo._currentlock(repo._wlockref) is None:
1247 elif repo._currentlock(repo._wlockref) is None:
1248 # rest of vfs files are covered by 'wlock'
1248 # rest of vfs files are covered by 'wlock'
1249 #
1249 #
1250 # exclude special files
1250 # exclude special files
1251 for prefix in self._wlockfreeprefix:
1251 for prefix in self._wlockfreeprefix:
1252 if path.startswith(prefix):
1252 if path.startswith(prefix):
1253 return
1253 return
1254 repo.ui.develwarn(
1254 repo.ui.develwarn(
1255 b'write with no wlock: "%s"' % path,
1255 b'write with no wlock: "%s"' % path,
1256 stacklevel=3,
1256 stacklevel=3,
1257 config=b'check-locks',
1257 config=b'check-locks',
1258 )
1258 )
1259 return ret
1259 return ret
1260
1260
1261 return checkvfs
1261 return checkvfs
1262
1262
1263 def _getsvfsward(self, origfunc):
1263 def _getsvfsward(self, origfunc):
1264 """build a ward for self.svfs"""
1264 """build a ward for self.svfs"""
1265 rref = weakref.ref(self)
1265 rref = weakref.ref(self)
1266
1266
1267 def checksvfs(path, mode=None):
1267 def checksvfs(path, mode=None):
1268 ret = origfunc(path, mode=mode)
1268 ret = origfunc(path, mode=mode)
1269 repo = rref()
1269 repo = rref()
1270 if repo is None or not util.safehasattr(repo, b'_lockref'):
1270 if repo is None or not util.safehasattr(repo, b'_lockref'):
1271 return
1271 return
1272 if mode in (None, b'r', b'rb'):
1272 if mode in (None, b'r', b'rb'):
1273 return
1273 return
1274 if path.startswith(repo.sharedpath):
1274 if path.startswith(repo.sharedpath):
1275 # truncate name relative to the repository (.hg)
1275 # truncate name relative to the repository (.hg)
1276 path = path[len(repo.sharedpath) + 1 :]
1276 path = path[len(repo.sharedpath) + 1 :]
1277 if repo._currentlock(repo._lockref) is None:
1277 if repo._currentlock(repo._lockref) is None:
1278 repo.ui.develwarn(
1278 repo.ui.develwarn(
1279 b'write with no lock: "%s"' % path, stacklevel=4
1279 b'write with no lock: "%s"' % path, stacklevel=4
1280 )
1280 )
1281 return ret
1281 return ret
1282
1282
1283 return checksvfs
1283 return checksvfs
1284
1284
1285 def close(self):
1285 def close(self):
1286 self._writecaches()
1286 self._writecaches()
1287
1287
1288 def _writecaches(self):
1288 def _writecaches(self):
1289 if self._revbranchcache:
1289 if self._revbranchcache:
1290 self._revbranchcache.write()
1290 self._revbranchcache.write()
1291
1291
1292 def _restrictcapabilities(self, caps):
1292 def _restrictcapabilities(self, caps):
1293 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1293 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1294 caps = set(caps)
1294 caps = set(caps)
1295 capsblob = bundle2.encodecaps(
1295 capsblob = bundle2.encodecaps(
1296 bundle2.getrepocaps(self, role=b'client')
1297 )
1298 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1299 return caps
1300
1301 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1302 # self -> auditor -> self._checknested -> self
1303
1304 @property
1305 def auditor(self):
1306 # This is only used by context.workingctx.match in order to
1307 # detect files in subrepos.
1308 return pathutil.pathauditor(self.root, callback=self._checknested)
1309
1310 @property
1311 def nofsauditor(self):
1312 # This is only used by context.basectx.match in order to detect
1313 # files in subrepos.
1314 return pathutil.pathauditor(
1315 self.root, callback=self._checknested, realfs=False, cached=True
1316 )
1317
1318 def _checknested(self, path):
1319 """Determine if path is a legal nested repository."""
1320 if not path.startswith(self.root):
1321 return False
1322 subpath = path[len(self.root) + 1 :]
1323 normsubpath = util.pconvert(subpath)
1324
1325 # XXX: Checking against the current working copy is wrong in
1326 # the sense that it can reject things like
1327 #
1328 # $ hg cat -r 10 sub/x.txt
1329 #
1330 # if sub/ is no longer a subrepository in the working copy
1331 # parent revision.
1332 #
1333 # However, it can of course also allow things that would have
1334 # been rejected before, such as the above cat command if sub/
1335 # is a subrepository now, but was a normal directory before.
1336 # The old path auditor would have rejected by mistake since it
1337 # panics when it sees sub/.hg/.
1338 #
1339 # All in all, checking against the working copy seems sensible
1340 # since we want to prevent access to nested repositories on
1341 # the filesystem *now*.
1342 ctx = self[None]
1343 parts = util.splitpath(subpath)
1344 while parts:
1345 prefix = b'/'.join(parts)
1346 if prefix in ctx.substate:
1347 if prefix == normsubpath:
1348 return True
1349 else:
1350 sub = ctx.sub(prefix)
1351 return sub.checknested(subpath[len(prefix) + 1 :])
1352 else:
1353 parts.pop()
1354 return False
1355
1356 def peer(self):
1357 return localpeer(self) # not cached to avoid reference cycle
1358
1359 def unfiltered(self):
1360 """Return unfiltered version of the repository
1361
1362 Intended to be overwritten by filtered repo."""
1363 return self
1364
1365 def filtered(self, name, visibilityexceptions=None):
1366 """Return a filtered version of a repository
1367
1368 The `name` parameter is the identifier of the requested view. This
1369 will return a repoview object set "exactly" to the specified view.
1370
1371 This function does not apply recursive filtering to a repository. For
1372 example calling `repo.filtered("served")` will return a repoview using
1373 the "served" view, regardless of the initial view used by `repo`.
1374
1375 In other words, there is always only one level of `repoview` "filtering".
1376 """
1377 if self._extrafilterid is not None and b'%' not in name:
1378 name = name + b'%' + self._extrafilterid
1379
1380 cls = repoview.newtype(self.unfiltered().__class__)
1381 return cls(self, name, visibilityexceptions)
1382
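# [Editor's illustrative note, not part of localrepo.py] Assuming ``repo`` is
# a localrepository instance, ``repo.filtered(b'visible')`` hides hidden
# (e.g. obsolete) changesets while ``repo.filtered(b'served')`` also hides
# secret ones; calling ``filtered()`` on an already-filtered repo simply
# switches to the requested view rather than stacking filters, as the
# docstring above states.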
1383 @mixedrepostorecache(
1384 (b'bookmarks', b'plain'),
1385 (b'bookmarks.current', b'plain'),
1386 (b'bookmarks', b''),
1387 (b'00changelog.i', b''),
1388 )
1389 def _bookmarks(self):
1390 # Since the multiple files involved in the transaction cannot be
1391 # written atomically (with current repository format), there is a race
1392 # condition here.
1393 #
1394 # 1) changelog content A is read
1395 # 2) outside transaction update changelog to content B
1396 # 3) outside transaction update bookmark file referring to content B
1397 # 4) bookmarks file content is read and filtered against changelog-A
1398 #
1399 # When this happens, bookmarks against nodes missing from A are dropped.
1400 #
1401 # Having this happening during read is not great, but it becomes worse
1402 # when it happens during write because the bookmarks to the "unknown"
1403 # nodes will be dropped for good. However, writes happen within locks.
1404 # This locking makes it possible to have a race free consistent read.
1405 # For this purpose, data read from disk before locking are
1406 # "invalidated" right after the locks are taken. These invalidations are
1407 # "light", the `filecache` mechanism keeps the data in memory and will
1408 # reuse them if the underlying files did not change. Not parsing the
1409 # same data multiple times helps performance.
1410 #
1411 # Unfortunately, in the case described above, the files tracked by the
1412 # bookmarks file cache might not have changed, but the in-memory
1413 # content is still "wrong" because we used an older changelog content
1414 # to process the on-disk data. So after locking, the changelog would be
1415 # refreshed but `_bookmarks` would be preserved.
1416 # Adding `00changelog.i` to the list of tracked files is not
1417 # enough, because at the time we build the content for `_bookmarks` in
1418 # (4), the changelog file has already diverged from the content used
1419 # for loading `changelog` in (1)
1420 #
1421 # To prevent the issue, we force the changelog to be explicitly
1422 # reloaded while computing `_bookmarks`. The data race can still happen
1423 # without the lock (with a narrower window), but it would no longer go
1424 # undetected during the lock time refresh.
1425 #
1426 # The new schedule is as follows:
1427 #
1428 # 1) filecache logic detects that `_bookmarks` needs to be computed
1429 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1430 # 3) We force `changelog` filecache to be tested
1431 # 4) cachestat for `changelog` are captured (for changelog)
1432 # 5) `_bookmarks` is computed and cached
1433 #
1434 # The step in (3) ensures we have a changelog at least as recent as the
1435 # cache stat computed in (1). As a result at locking time:
1436 # * if the changelog did not change since (1) -> we can reuse the data
1437 # * otherwise -> the bookmarks get refreshed.
1438 self._refreshchangelog()
1439 return bookmarks.bmstore(self)
1440
1441 def _refreshchangelog(self):
1442 """make sure the in-memory changelog matches the on-disk one"""
1443 if 'changelog' in vars(self) and self.currenttransaction() is None:
1444 del self.changelog
1445
1446 @property
1447 def _activebookmark(self):
1448 return self._bookmarks.active
1449
1450 # _phasesets depend on changelog. what we need is to call
1451 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1452 # can't be easily expressed in filecache mechanism.
1453 @storecache(b'phaseroots', b'00changelog.i')
1454 def _phasecache(self):
1455 return phases.phasecache(self, self._phasedefaults)
1456
1457 @storecache(b'obsstore')
1458 def obsstore(self):
1459 return obsolete.makestore(self.ui, self)
1460
1461 @storecache(b'00changelog.i')
1462 def changelog(self):
1463 # load dirstate before changelog to avoid race see issue6303
1464 self.dirstate.prefetch_parents()
1465 return self.store.changelog(txnutil.mayhavepending(self.root))
1466
1467 @storecache(b'00manifest.i')
1468 def manifestlog(self):
1469 return self.store.manifestlog(self, self._storenarrowmatch)
1470
1471 @repofilecache(b'dirstate')
1472 def dirstate(self):
1473 return self._makedirstate()
1474
1475 def _makedirstate(self):
1476 """Extension point for wrapping the dirstate per-repo."""
1477 sparsematchfn = lambda: sparse.matcher(self)
1478
1479 return dirstate.dirstate(
1480 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1481 )
1482
1483 def _dirstatevalidate(self, node):
1484 try:
1485 self.changelog.rev(node)
1486 return node
1487 except error.LookupError:
1488 if not self._dirstatevalidatewarned:
1489 self._dirstatevalidatewarned = True
1490 self.ui.warn(
1491 _(b"warning: ignoring unknown working parent %s!\n")
1492 % short(node)
1493 )
1494 return nullid
1495
1496 @storecache(narrowspec.FILENAME)
1497 def narrowpats(self):
1498 """matcher patterns for this repository's narrowspec
1499
1500 A tuple of (includes, excludes).
1501 """
1502 return narrowspec.load(self)
1503
1504 @storecache(narrowspec.FILENAME)
1505 def _storenarrowmatch(self):
1506 if repository.NARROW_REQUIREMENT not in self.requirements:
1507 return matchmod.always()
1508 include, exclude = self.narrowpats
1509 return narrowspec.match(self.root, include=include, exclude=exclude)
1510
1511 @storecache(narrowspec.FILENAME)
1512 def _narrowmatch(self):
1513 if repository.NARROW_REQUIREMENT not in self.requirements:
1514 return matchmod.always()
1515 narrowspec.checkworkingcopynarrowspec(self)
1516 include, exclude = self.narrowpats
1517 return narrowspec.match(self.root, include=include, exclude=exclude)
1518
1519 def narrowmatch(self, match=None, includeexact=False):
1520 """matcher corresponding to the repo's narrowspec
1521
1522 If `match` is given, then that will be intersected with the narrow
1523 matcher.
1524
1525 If `includeexact` is True, then any exact matches from `match` will
1526 be included even if they're outside the narrowspec.
1527 """
1528 if match:
1529 if includeexact and not self._narrowmatch.always():
1530 # do not exclude explicitly-specified paths so that they can
1531 # be warned later on
1532 em = matchmod.exact(match.files())
1533 nm = matchmod.unionmatcher([self._narrowmatch, em])
1534 return matchmod.intersectmatchers(match, nm)
1535 return matchmod.intersectmatchers(match, self._narrowmatch)
1536 return self._narrowmatch
1537
1538 def setnarrowpats(self, newincludes, newexcludes):
1539 narrowspec.save(self, newincludes, newexcludes)
1540 self.invalidate(clearfilecache=True)
1541
1542 @unfilteredpropertycache
1543 def _quick_access_changeid_null(self):
1544 return {
1545 b'null': (nullrev, nullid),
1546 nullrev: (nullrev, nullid),
1547 nullid: (nullrev, nullid),
1548 }
1549
1550 @unfilteredpropertycache
1551 def _quick_access_changeid_wc(self):
1552 # also fast path access to the working copy parents
1553 # however, only do it for filters that ensure wc is visible.
1554 quick = {}
1555 cl = self.unfiltered().changelog
1556 for node in self.dirstate.parents():
1557 if node == nullid:
1558 continue
1559 rev = cl.index.get_rev(node)
1560 if rev is None:
1561 # unknown working copy parent case:
1562 #
1563 # skip the fast path and let higher code deal with it
1564 continue
1565 pair = (rev, node)
1566 quick[rev] = pair
1567 quick[node] = pair
1568 # also add the parents of the parents
1569 for r in cl.parentrevs(rev):
1570 if r == nullrev:
1571 continue
1572 n = cl.node(r)
1573 pair = (r, n)
1574 quick[r] = pair
1575 quick[n] = pair
1576 p1node = self.dirstate.p1()
1577 if p1node != nullid:
1578 quick[b'.'] = quick[p1node]
1579 return quick
1580
1581 @unfilteredmethod
1582 def _quick_access_changeid_invalidate(self):
1583 if '_quick_access_changeid_wc' in vars(self):
1584 del self.__dict__['_quick_access_changeid_wc']
1585
1586 @property
1587 def _quick_access_changeid(self):
1588 """a helper dictionary for __getitem__ calls
1589
1590 This contains a list of symbols we can recognise right away without
1591 further processing.
1592 """
1593 mapping = self._quick_access_changeid_null
1594 if self.filtername in repoview.filter_has_wc:
1595 mapping = mapping.copy()
1596 mapping.update(self._quick_access_changeid_wc)
1597 return mapping
1598
1599 def __getitem__(self, changeid):
1600 # dealing with special cases
1601 if changeid is None:
1602 return context.workingctx(self)
1603 if isinstance(changeid, context.basectx):
1604 return changeid
1605
1606 # dealing with multiple revisions
1607 if isinstance(changeid, slice):
1608 # wdirrev isn't contiguous so the slice shouldn't include it
1609 return [
1610 self[i]
1611 for i in pycompat.xrange(*changeid.indices(len(self)))
1612 if i not in self.changelog.filteredrevs
1613 ]
1614
1615 # dealing with some special values
1616 quick_access = self._quick_access_changeid.get(changeid)
1617 if quick_access is not None:
1618 rev, node = quick_access
1619 return context.changectx(self, rev, node, maybe_filtered=False)
1620 if changeid == b'tip':
1621 node = self.changelog.tip()
1622 rev = self.changelog.rev(node)
1623 return context.changectx(self, rev, node)
1624
1625 # dealing with arbitrary values
1626 try:
1627 if isinstance(changeid, int):
1628 node = self.changelog.node(changeid)
1629 rev = changeid
1630 elif changeid == b'.':
1631 # this is a hack to delay/avoid loading obsmarkers
1632 # when we know that '.' won't be hidden
1633 node = self.dirstate.p1()
1634 rev = self.unfiltered().changelog.rev(node)
1635 elif len(changeid) == 20:
1636 try:
1637 node = changeid
1638 rev = self.changelog.rev(changeid)
1639 except error.FilteredLookupError:
1640 changeid = hex(changeid) # for the error message
1641 raise
1642 except LookupError:
1643 # check if it might have come from damaged dirstate
1644 #
1645 # XXX we could avoid the unfiltered if we had a recognizable
1646 # exception for filtered changeset access
1647 if (
1648 self.local()
1649 and changeid in self.unfiltered().dirstate.parents()
1650 ):
1651 msg = _(b"working directory has unknown parent '%s'!")
1652 raise error.Abort(msg % short(changeid))
1653 changeid = hex(changeid) # for the error message
1654 raise
1655
1656 elif len(changeid) == 40:
1657 node = bin(changeid)
1658 rev = self.changelog.rev(node)
1659 else:
1660 raise error.ProgrammingError(
1661 b"unsupported changeid '%s' of type %s"
1662 % (changeid, pycompat.bytestr(type(changeid)))
1663 )
1664
1665 return context.changectx(self, rev, node)
1666
1667 except (error.FilteredIndexError, error.FilteredLookupError):
1668 raise error.FilteredRepoLookupError(
1669 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1670 )
1671 except (IndexError, LookupError):
1672 raise error.RepoLookupError(
1673 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1674 )
1675 except error.WdirUnsupported:
1676 return context.workingctx(self)
1677
1678 def __contains__(self, changeid):
1679 """True if the given changeid exists
1680
1681 error.AmbiguousPrefixLookupError is raised if an ambiguous node is
1682 specified.
1683 """
1684 try:
1685 self[changeid]
1686 return True
1687 except error.RepoLookupError:
1688 return False
1689
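# [Editor's illustrative note, not part of localrepo.py] A sketch of how
# __getitem__ above is typically used, assuming ``repo`` is a repository
# object and ``node`` is a 20-byte binary node id:
#
#     repo[None]      # working directory context
#     repo[b'tip']    # tip changeset
#     repo[b'.']      # first parent of the working directory
#     repo[0]         # lookup by revision number
#     repo[node]      # lookup by binary node; 40-char hex strings also work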
1690 def __nonzero__(self):
1691 return True
1692
1693 __bool__ = __nonzero__
1694
1695 def __len__(self):
1696 # no need to pay the cost of repoview.changelog
1697 unfi = self.unfiltered()
1698 return len(unfi.changelog)
1699
1700 def __iter__(self):
1701 return iter(self.changelog)
1702
1703 def revs(self, expr, *args):
1704 '''Find revisions matching a revset.
1705
1706 The revset is specified as a string ``expr`` that may contain
1707 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1708
1709 Revset aliases from the configuration are not expanded. To expand
1710 user aliases, consider calling ``scmutil.revrange()`` or
1711 ``repo.anyrevs([expr], user=True)``.
1712
1713 Returns a smartset.abstractsmartset, which is a list-like interface
1714 that contains integer revisions.
1715 '''
1716 tree = revsetlang.spectree(expr, *args)
1717 return revset.makematcher(tree)(self)
1718
1719 def set(self, expr, *args):
1720 '''Find revisions matching a revset and emit changectx instances.
1721
1722 This is a convenience wrapper around ``revs()`` that iterates the
1723 result and is a generator of changectx instances.
1724
1725 Revset aliases from the configuration are not expanded. To expand
1726 user aliases, consider calling ``scmutil.revrange()``.
1727 '''
1728 for r in self.revs(expr, *args):
1729 yield self[r]
1730
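# [Editor's illustrative note, not part of localrepo.py] A sketch of revset
# usage with the %-formatting handled by ``revsetlang.formatspec`` (the names
# ``somerevs`` and ``somehex`` below are hypothetical):
#
#     revs = repo.revs(b'heads(%ld)', somerevs)        # %ld: list of revisions
#     for ctx in repo.set(b'ancestors(%s)', somehex):  # %s: string/symbol
#         ...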
1731 def anyrevs(self, specs, user=False, localalias=None):
1732 '''Find revisions matching one of the given revsets.
1733
1734 Revset aliases from the configuration are not expanded by default. To
1735 expand user aliases, specify ``user=True``. To provide some local
1736 definitions overriding user aliases, set ``localalias`` to
1737 ``{name: definitionstring}``.
1738 '''
1739 if specs == [b'null']:
1740 return revset.baseset([nullrev])
1741 if specs == [b'.']:
1742 quick_data = self._quick_access_changeid.get(b'.')
1743 if quick_data is not None:
1744 return revset.baseset([quick_data[0]])
1745 if user:
1746 m = revset.matchany(
1747 self.ui,
1748 specs,
1749 lookup=revset.lookupfn(self),
1750 localalias=localalias,
1751 )
1752 else:
1753 m = revset.matchany(None, specs, localalias=localalias)
1754 return m(self)
1755
1756 def url(self):
1757 return b'file:' + self.root
1758
1759 def hook(self, name, throw=False, **args):
1760 """Call a hook, passing this repo instance.
1761
1762 This is a convenience method to aid invoking hooks. Extensions likely
1763 won't call this unless they have registered a custom hook or are
1764 replacing code that is expected to call a hook.
1765 """
1766 return hook.hook(self.ui, self, name, throw, **args)
1767
1768 @filteredpropertycache
1769 def _tagscache(self):
1770 '''Returns a tagscache object that contains various tags related
1771 caches.'''
1772
1773 # This simplifies its cache management by having one decorated
1774 # function (this one) and the rest simply fetch things from it.
1775 class tagscache(object):
1776 def __init__(self):
1777 # These two define the set of tags for this repository. tags
1778 # maps tag name to node; tagtypes maps tag name to 'global' or
1779 # 'local'. (Global tags are defined by .hgtags across all
1780 # heads, and local tags are defined in .hg/localtags.)
1781 # They constitute the in-memory cache of tags.
1782 self.tags = self.tagtypes = None
1783
1784 self.nodetagscache = self.tagslist = None
1785
1786 cache = tagscache()
1787 cache.tags, cache.tagtypes = self._findtags()
1788
1789 return cache
1790
1791 def tags(self):
1792 '''return a mapping of tag to node'''
1793 t = {}
1794 if self.changelog.filteredrevs:
1795 tags, tt = self._findtags()
1796 else:
1797 tags = self._tagscache.tags
1798 rev = self.changelog.rev
1799 for k, v in pycompat.iteritems(tags):
1800 try:
1801 # ignore tags to unknown nodes
1802 rev(v)
1803 t[k] = v
1804 except (error.LookupError, ValueError):
1805 pass
1806 return t
1807
1808 def _findtags(self):
1809 '''Do the hard work of finding tags. Return a pair of dicts
1810 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1811 maps tag name to a string like \'global\' or \'local\'.
1812 Subclasses or extensions are free to add their own tags, but
1813 should be aware that the returned dicts will be retained for the
1814 duration of the localrepo object.'''
1815
1816 # XXX what tagtype should subclasses/extensions use? Currently
1817 # mq and bookmarks add tags, but do not set the tagtype at all.
1818 # Should each extension invent its own tag type? Should there
1819 # be one tagtype for all such "virtual" tags? Or is the status
1820 # quo fine?
1821
1822 # map tag name to (node, hist)
1823 alltags = tagsmod.findglobaltags(self.ui, self)
1824 # map tag name to tag type
1825 tagtypes = {tag: b'global' for tag in alltags}
1826
1827 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1828
1829 # Build the return dicts. Have to re-encode tag names because
1830 # the tags module always uses UTF-8 (in order not to lose info
1831 # writing to the cache), but the rest of Mercurial wants them in
1832 # local encoding.
1833 tags = {}
1834 for (name, (node, hist)) in pycompat.iteritems(alltags):
1835 if node != nullid:
1836 tags[encoding.tolocal(name)] = node
1837 tags[b'tip'] = self.changelog.tip()
1838 tagtypes = {
1839 encoding.tolocal(name): value
1840 for (name, value) in pycompat.iteritems(tagtypes)
1841 }
1842 return (tags, tagtypes)
1843
1844 def tagtype(self, tagname):
1845 '''
1846 return the type of the given tag. result can be:
1847
1848 'local' : a local tag
1849 'global' : a global tag
1850 None : tag does not exist
1851 '''
1852
1853 return self._tagscache.tagtypes.get(tagname)
1854
1855 def tagslist(self):
1856 '''return a list of tags ordered by revision'''
1857 if not self._tagscache.tagslist:
1858 l = []
1859 for t, n in pycompat.iteritems(self.tags()):
1860 l.append((self.changelog.rev(n), t, n))
1861 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1862
1863 return self._tagscache.tagslist
1864
1865 def nodetags(self, node):
1866 '''return the tags associated with a node'''
1867 if not self._tagscache.nodetagscache:
1868 nodetagscache = {}
1869 for t, n in pycompat.iteritems(self._tagscache.tags):
1870 nodetagscache.setdefault(n, []).append(t)
1871 for tags in pycompat.itervalues(nodetagscache):
1872 tags.sort()
1873 self._tagscache.nodetagscache = nodetagscache
1874 return self._tagscache.nodetagscache.get(node, [])
1875
1876 def nodebookmarks(self, node):
1877 """return the list of bookmarks pointing to the specified node"""
1878 return self._bookmarks.names(node)
1879
1880 def branchmap(self):
1881 '''returns a dictionary {branch: [branchheads]} with branchheads
1882 ordered by increasing revision number'''
1883 return self._branchcaches[self]
1884
1885 @unfilteredmethod
1886 def revbranchcache(self):
1887 if not self._revbranchcache:
1888 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1889 return self._revbranchcache
1890
1891 def branchtip(self, branch, ignoremissing=False):
1892 '''return the tip node for a given branch
1893
1894 If ignoremissing is True, then this method will not raise an error.
1895 This is helpful for callers that only expect None for a missing branch
1896 (e.g. namespace).
1897
1898 '''
1899 try:
1900 return self.branchmap().branchtip(branch)
1901 except KeyError:
1902 if not ignoremissing:
1903 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1904 else:
1905 pass
1906
1907 def lookup(self, key):
1908 node = scmutil.revsymbol(self, key).node()
1909 if node is None:
1910 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1911 return node
1912
1913 def lookupbranch(self, key):
1914 if self.branchmap().hasbranch(key):
1915 return key
1916
1917 return scmutil.revsymbol(self, key).branch()
1918
1919 def known(self, nodes):
1920 cl = self.changelog
1921 get_rev = cl.index.get_rev
1922 filtered = cl.filteredrevs
1923 result = []
1924 for n in nodes:
1925 r = get_rev(n)
1926 resp = not (r is None or r in filtered)
1927 result.append(resp)
1928 return result
1929
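# [Editor's illustrative note, not part of localrepo.py] ``known()`` is a
# membership test over binary node ids: given an iterable of nodes, it
# returns a list of booleans in the same order, e.g. (hypothetical nodes)
#
#     repo.known([node_a, node_b])  # -> [True, False] if only node_a
#                                   #    is present and not filtered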
1930 def local(self):
1931 return self
1932
1933 def publishing(self):
1934 # it's safe (and desirable) to trust the publish flag unconditionally
1935 # so that we don't finalize changes shared between users via ssh or nfs
1936 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1937
1938 def cancopy(self):
1939 # so statichttprepo's override of local() works
1940 if not self.local():
1941 return False
1942 if not self.publishing():
1943 return True
1944 # if publishing we can't copy if there is filtered content
1945 return not self.filtered(b'visible').changelog.filteredrevs
1946
1947 def shared(self):
1948 '''the type of shared repository (None if not shared)'''
1949 if self.sharedpath != self.path:
1950 return b'store'
1951 return None
1952
1953 def wjoin(self, f, *insidef):
1954 return self.vfs.reljoin(self.root, f, *insidef)
1955
1956 def setparents(self, p1, p2=nullid):
1957 self[None].setparents(p1, p2)
1958 self._quick_access_changeid_invalidate()
1959
1960 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1961 """changeid must be a changeset revision, if specified.
1962 fileid can be a file revision or node."""
1963 return context.filectx(
1964 self, path, changeid, fileid, changectx=changectx
1965 )
1966
1967 def getcwd(self):
1968 return self.dirstate.getcwd()
1969
1970 def pathto(self, f, cwd=None):
1971 return self.dirstate.pathto(f, cwd)
1972
1973 def _loadfilter(self, filter):
1974 if filter not in self._filterpats:
1975 l = []
1976 for pat, cmd in self.ui.configitems(filter):
1977 if cmd == b'!':
1978 continue
1979 mf = matchmod.match(self.root, b'', [pat])
1980 fn = None
1981 params = cmd
1982 for name, filterfn in pycompat.iteritems(self._datafilters):
1983 if cmd.startswith(name):
1984 fn = filterfn
1985 params = cmd[len(name) :].lstrip()
1986 break
1987 if not fn:
1988 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1989 fn.__name__ = 'commandfilter'
1990 # Wrap old filters not supporting keyword arguments
1991 if not pycompat.getargspec(fn)[2]:
1992 oldfn = fn
1993 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
1994 fn.__name__ = 'compat-' + oldfn.__name__
1995 l.append((mf, fn, params))
1996 self._filterpats[filter] = l
1997 return self._filterpats[filter]
1998
1999 def _filter(self, filterpats, filename, data):
2000 for mf, fn, cmd in filterpats:
2001 if mf(filename):
2002 self.ui.debug(
2003 b"filtering %s through %s\n"
2004 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2005 )
2006 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2007 break
2008
2009 return data
2010
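# [Editor's illustrative note, not part of localrepo.py] The filter patterns
# loaded above come from hgrc sections of the same name. For example, a
# configuration such as (illustrative only):
#
#     [encode]
#     **.txt = dos2unix
#
# would pipe every ``*.txt`` file through the external ``dos2unix`` command
# when it is read from the working directory (``wread`` ->
# ``_encodefilterpats``); ``[decode]`` filters apply on the way back out
# (``wwrite`` -> ``_decodefilterpats``).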
2011 @unfilteredpropertycache
2012 def _encodefilterpats(self):
2013 return self._loadfilter(b'encode')
2014
2015 @unfilteredpropertycache
2016 def _decodefilterpats(self):
2017 return self._loadfilter(b'decode')
2018
2019 def adddatafilter(self, name, filter):
2020 self._datafilters[name] = filter
2021
2022 def wread(self, filename):
2023 if self.wvfs.islink(filename):
2024 data = self.wvfs.readlink(filename)
2025 else:
2026 data = self.wvfs.read(filename)
2027 return self._filter(self._encodefilterpats, filename, data)
2028
2029 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2030 """write ``data`` into ``filename`` in the working directory
2031
2032 This returns length of written (maybe decoded) data.
2033 """
2034 data = self._filter(self._decodefilterpats, filename, data)
2035 if b'l' in flags:
2036 self.wvfs.symlink(data, filename)
2037 else:
2038 self.wvfs.write(
2039 filename, data, backgroundclose=backgroundclose, **kwargs
2040 )
2041 if b'x' in flags:
2042 self.wvfs.setflags(filename, False, True)
2043 else:
2044 self.wvfs.setflags(filename, False, False)
2045 return len(data)
2046
2047 def wwritedata(self, filename, data):
2048 return self._filter(self._decodefilterpats, filename, data)
2049
2050 def currenttransaction(self):
2051 """return the current transaction or None if none exists"""
2052 if self._transref:
2053 tr = self._transref()
2054 else:
2055 tr = None
2056
2057 if tr and tr.running():
2058 return tr
2059 return None
2060
2061 def transaction(self, desc, report=None):
2062 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2063 b'devel', b'check-locks'
2064 ):
2065 if self._currentlock(self._lockref) is None:
2066 raise error.ProgrammingError(b'transaction requires locking')
2067 tr = self.currenttransaction()
2068 if tr is not None:
2069 return tr.nest(name=desc)
2070
2071 # abort here if the journal already exists
2072 if self.svfs.exists(b"journal"):
2073 raise error.RepoError(
2074 _(b"abandoned transaction found"),
2075 hint=_(b"run 'hg recover' to clean up transaction"),
2076 )
2077
2078 idbase = b"%.40f#%f" % (random.random(), time.time())
2079 ha = hex(hashutil.sha1(idbase).digest())
2080 txnid = b'TXN:' + ha
2081 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2082
2083 self._writejournal(desc)
2084 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2085 if report:
2086 rp = report
2087 else:
2088 rp = self.ui.warn
2089 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2090 # we must avoid cyclic reference between repo and transaction.
2091 reporef = weakref.ref(self)
2092 # Code to track tag movement
2093 #
2094 # Since tags are all handled as file content, it is actually quite hard
2095 # to track these movements from a code perspective. So we fall back to
2096 # tracking at the repository level. One could envision tracking changes
2097 # to the '.hgtags' file through changegroup apply but that fails to
2098 # cope with cases where a transaction exposes new heads without a
2099 # changegroup being involved (eg: phase movement).
2100 #
2101 # For now, we gate the feature behind a flag since this likely comes
2102 # with performance impacts. The current code runs more often than needed
2103 # and does not use caches as much as it could. The current focus is on
2104 # the behavior of the feature so we disable it by default. The flag
2105 # will be removed when we are happy with the performance impact.
2106 #
2107 # Once this feature is no longer experimental move the following
2108 # documentation to the appropriate help section:
2109 #
2110 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2111 # tags (new or changed or deleted tags). In addition the details of
2112 # these changes are made available in a file at:
2113 # ``REPOROOT/.hg/changes/tags.changes``.
2114 # Make sure you check for HG_TAG_MOVED before reading that file as it
2115 # might exist from a previous transaction even if no tags were touched
2116 # in this one. Changes are recorded in a line-based format::
2117 #
2118 # <action> <hex-node> <tag-name>\n
2119 #
2120 # Actions are defined as follows:
2121 # "-R": tag is removed,
2122 # "+A": tag is added,
2123 # "-M": tag is moved (old value),
2124 # "+M": tag is moved (new value),
2125 tracktags = lambda x: None
2125 tracktags = lambda x: None
2126 # experimental config: experimental.hook-track-tags
2126 # experimental config: experimental.hook-track-tags
2127 shouldtracktags = self.ui.configbool(
2127 shouldtracktags = self.ui.configbool(
2128 b'experimental', b'hook-track-tags'
2128 b'experimental', b'hook-track-tags'
2129 )
2129 )
2130 if desc != b'strip' and shouldtracktags:
2130 if desc != b'strip' and shouldtracktags:
2131 oldheads = self.changelog.headrevs()
2131 oldheads = self.changelog.headrevs()
2132
2132
2133 def tracktags(tr2):
2133 def tracktags(tr2):
2134 repo = reporef()
2134 repo = reporef()
2135 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2135 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2136 newheads = repo.changelog.headrevs()
2136 newheads = repo.changelog.headrevs()
2137 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2137 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2138 # notes: we compare lists here.
2138 # notes: we compare lists here.
2139 # As we do it only once buiding set would not be cheaper
2139 # As we do it only once buiding set would not be cheaper
2140 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2140 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2141 if changes:
2141 if changes:
2142 tr2.hookargs[b'tag_moved'] = b'1'
2142 tr2.hookargs[b'tag_moved'] = b'1'
2143 with repo.vfs(
2143 with repo.vfs(
2144 b'changes/tags.changes', b'w', atomictemp=True
2144 b'changes/tags.changes', b'w', atomictemp=True
2145 ) as changesfile:
2145 ) as changesfile:
2146 # note: we do not register the file to the transaction
2146 # note: we do not register the file to the transaction
2147 # because we needs it to still exist on the transaction
2147 # because we needs it to still exist on the transaction
2148 # is close (for txnclose hooks)
2148 # is close (for txnclose hooks)
2149 tagsmod.writediff(changesfile, changes)
2149 tagsmod.writediff(changesfile, changes)
2150
2150
2151 def validate(tr2):
2151 def validate(tr2):
2152 """will run pre-closing hooks"""
2152 """will run pre-closing hooks"""
2153 # XXX the transaction API is a bit lacking here so we take a hacky
2153 # XXX the transaction API is a bit lacking here so we take a hacky
2154 # path for now
2154 # path for now
2155 #
2155 #
2156 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2157 # dict is copied before these run. In addition we need the data
2158 # to be available to in-memory hooks too.
2159 #
2159 #
2160 # Moreover, we also need to make sure this runs before txnclose
2160 # Moreover, we also need to make sure this runs before txnclose
2161 # hooks and there is no "pending" mechanism that would execute
2161 # hooks and there is no "pending" mechanism that would execute
2162 # logic only if hooks are about to run.
2162 # logic only if hooks are about to run.
2163 #
2163 #
2164 # Fixing this limitation of the transaction is also needed to track
2164 # Fixing this limitation of the transaction is also needed to track
2165 # other families of changes (bookmarks, phases, obsolescence).
2165 # other families of changes (bookmarks, phases, obsolescence).
2166 #
2166 #
2167 # This will have to be fixed before we remove the experimental
2167 # This will have to be fixed before we remove the experimental
2168 # gating.
2168 # gating.
2169 tracktags(tr2)
2169 tracktags(tr2)
2170 repo = reporef()
2170 repo = reporef()
2171
2171
2172 singleheadopt = (b'experimental', b'single-head-per-branch')
2172 singleheadopt = (b'experimental', b'single-head-per-branch')
2173 singlehead = repo.ui.configbool(*singleheadopt)
2173 singlehead = repo.ui.configbool(*singleheadopt)
2174 if singlehead:
2174 if singlehead:
2175 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2175 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2176 accountclosed = singleheadsub.get(
2176 accountclosed = singleheadsub.get(
2177 b"account-closed-heads", False
2177 b"account-closed-heads", False
2178 )
2178 )
2179 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2179 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2180 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2180 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2181 for name, (old, new) in sorted(
2181 for name, (old, new) in sorted(
2182 tr.changes[b'bookmarks'].items()
2182 tr.changes[b'bookmarks'].items()
2183 ):
2183 ):
2184 args = tr.hookargs.copy()
2184 args = tr.hookargs.copy()
2185 args.update(bookmarks.preparehookargs(name, old, new))
2185 args.update(bookmarks.preparehookargs(name, old, new))
2186 repo.hook(
2186 repo.hook(
2187 b'pretxnclose-bookmark',
2187 b'pretxnclose-bookmark',
2188 throw=True,
2188 throw=True,
2189 **pycompat.strkwargs(args)
2189 **pycompat.strkwargs(args)
2190 )
2190 )
2191 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2191 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2192 cl = repo.unfiltered().changelog
2192 cl = repo.unfiltered().changelog
2193 for revs, (old, new) in tr.changes[b'phases']:
2193 for revs, (old, new) in tr.changes[b'phases']:
2194 for rev in revs:
2194 for rev in revs:
2195 args = tr.hookargs.copy()
2195 args = tr.hookargs.copy()
2196 node = hex(cl.node(rev))
2196 node = hex(cl.node(rev))
2197 args.update(phases.preparehookargs(node, old, new))
2197 args.update(phases.preparehookargs(node, old, new))
2198 repo.hook(
2198 repo.hook(
2199 b'pretxnclose-phase',
2199 b'pretxnclose-phase',
2200 throw=True,
2200 throw=True,
2201 **pycompat.strkwargs(args)
2201 **pycompat.strkwargs(args)
2202 )
2202 )
2203
2203
2204 repo.hook(
2204 repo.hook(
2205 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2205 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2206 )
2206 )
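# For reference, a hedged sketch of wiring an external pretxnclose hook in
# hgrc (the script path is a placeholder). External hooks receive the
# transaction's hookargs (e.g. txnid, txnname) as HG_-prefixed environment
# variables such as HG_TXNID and HG_TXNNAME, and a non-zero exit status
# aborts the transaction:
#
#   [hooks]
#   pretxnclose.audit = /path/to/audit-txn.sh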
2207
2207
2208 def releasefn(tr, success):
2208 def releasefn(tr, success):
2209 repo = reporef()
2209 repo = reporef()
2210 if repo is None:
2210 if repo is None:
2211 # If the repo has been GC'd (and this release function is being
2211 # If the repo has been GC'd (and this release function is being
2212 # called from transaction.__del__), there's not much we can do,
2212 # called from transaction.__del__), there's not much we can do,
2213 # so just leave the unfinished transaction there and let the
2213 # so just leave the unfinished transaction there and let the
2214 # user run `hg recover`.
2214 # user run `hg recover`.
2215 return
2215 return
2216 if success:
2216 if success:
2217 # this should be explicitly invoked here, because
2218 # in-memory changes aren't written out when closing the
2219 # transaction if tr.addfilegenerator (via
2220 # dirstate.write or so) wasn't invoked while the
2221 # transaction was running
2222 repo.dirstate.write(None)
2222 repo.dirstate.write(None)
2223 else:
2223 else:
2224 # discard all changes (including ones already written
2224 # discard all changes (including ones already written
2225 # out) in this transaction
2225 # out) in this transaction
2226 narrowspec.restorebackup(self, b'journal.narrowspec')
2226 narrowspec.restorebackup(self, b'journal.narrowspec')
2227 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2227 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2228 repo.dirstate.restorebackup(None, b'journal.dirstate')
2228 repo.dirstate.restorebackup(None, b'journal.dirstate')
2229
2229
2230 repo.invalidate(clearfilecache=True)
2230 repo.invalidate(clearfilecache=True)
2231
2231
2232 tr = transaction.transaction(
2232 tr = transaction.transaction(
2233 rp,
2233 rp,
2234 self.svfs,
2234 self.svfs,
2235 vfsmap,
2235 vfsmap,
2236 b"journal",
2236 b"journal",
2237 b"undo",
2237 b"undo",
2238 aftertrans(renames),
2238 aftertrans(renames),
2239 self.store.createmode,
2239 self.store.createmode,
2240 validator=validate,
2240 validator=validate,
2241 releasefn=releasefn,
2241 releasefn=releasefn,
2242 checkambigfiles=_cachedfiles,
2242 checkambigfiles=_cachedfiles,
2243 name=desc,
2243 name=desc,
2244 )
2244 )
2245 tr.changes[b'origrepolen'] = len(self)
2245 tr.changes[b'origrepolen'] = len(self)
2246 tr.changes[b'obsmarkers'] = set()
2246 tr.changes[b'obsmarkers'] = set()
2247 tr.changes[b'phases'] = []
2247 tr.changes[b'phases'] = []
2248 tr.changes[b'bookmarks'] = {}
2248 tr.changes[b'bookmarks'] = {}
2249
2249
2250 tr.hookargs[b'txnid'] = txnid
2250 tr.hookargs[b'txnid'] = txnid
2251 tr.hookargs[b'txnname'] = desc
2251 tr.hookargs[b'txnname'] = desc
2252 tr.hookargs[b'changes'] = tr.changes
2252 tr.hookargs[b'changes'] = tr.changes
2253 # note: writing the fncache only during finalize means that the file is
2254 # outdated when running hooks. As fncache is used for streaming clones,
2255 # this is not expected to break anything that happens during the hooks.
2256 tr.addfinalize(b'flush-fncache', self.store.write)
2256 tr.addfinalize(b'flush-fncache', self.store.write)
2257
2257
2258 def txnclosehook(tr2):
2258 def txnclosehook(tr2):
2259 """To be run if transaction is successful, will schedule a hook run
2259 """To be run if transaction is successful, will schedule a hook run
2260 """
2260 """
2261 # Don't reference tr2 in hook() so we don't hold a reference.
2261 # Don't reference tr2 in hook() so we don't hold a reference.
2262 # This reduces memory consumption when there are multiple
2262 # This reduces memory consumption when there are multiple
2263 # transactions per lock. This can likely go away if issue5045
2263 # transactions per lock. This can likely go away if issue5045
2264 # fixes the function accumulation.
2264 # fixes the function accumulation.
2265 hookargs = tr2.hookargs
2265 hookargs = tr2.hookargs
2266
2266
2267 def hookfunc(unused_success):
2267 def hookfunc(unused_success):
2268 repo = reporef()
2268 repo = reporef()
2269 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2269 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2270 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2270 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2271 for name, (old, new) in bmchanges:
2271 for name, (old, new) in bmchanges:
2272 args = tr.hookargs.copy()
2272 args = tr.hookargs.copy()
2273 args.update(bookmarks.preparehookargs(name, old, new))
2273 args.update(bookmarks.preparehookargs(name, old, new))
2274 repo.hook(
2274 repo.hook(
2275 b'txnclose-bookmark',
2275 b'txnclose-bookmark',
2276 throw=False,
2276 throw=False,
2277 **pycompat.strkwargs(args)
2277 **pycompat.strkwargs(args)
2278 )
2278 )
2279
2279
2280 if hook.hashook(repo.ui, b'txnclose-phase'):
2280 if hook.hashook(repo.ui, b'txnclose-phase'):
2281 cl = repo.unfiltered().changelog
2281 cl = repo.unfiltered().changelog
2282 phasemv = sorted(
2282 phasemv = sorted(
2283 tr.changes[b'phases'], key=lambda r: r[0][0]
2283 tr.changes[b'phases'], key=lambda r: r[0][0]
2284 )
2284 )
2285 for revs, (old, new) in phasemv:
2285 for revs, (old, new) in phasemv:
2286 for rev in revs:
2286 for rev in revs:
2287 args = tr.hookargs.copy()
2287 args = tr.hookargs.copy()
2288 node = hex(cl.node(rev))
2288 node = hex(cl.node(rev))
2289 args.update(phases.preparehookargs(node, old, new))
2289 args.update(phases.preparehookargs(node, old, new))
2290 repo.hook(
2290 repo.hook(
2291 b'txnclose-phase',
2291 b'txnclose-phase',
2292 throw=False,
2292 throw=False,
2293 **pycompat.strkwargs(args)
2293 **pycompat.strkwargs(args)
2294 )
2294 )
2295
2295
2296 repo.hook(
2296 repo.hook(
2297 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2297 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2298 )
2298 )
2299
2299
2300 reporef()._afterlock(hookfunc)
2300 reporef()._afterlock(hookfunc)
2301
2301
2302 tr.addfinalize(b'txnclose-hook', txnclosehook)
2302 tr.addfinalize(b'txnclose-hook', txnclosehook)
2303 # Include a leading "-" to make it happen before the transaction summary
2303 # Include a leading "-" to make it happen before the transaction summary
2304 # reports registered via scmutil.registersummarycallback() whose names
2304 # reports registered via scmutil.registersummarycallback() whose names
2305 # are 00-txnreport etc. That way, the caches will be warm when the
2305 # are 00-txnreport etc. That way, the caches will be warm when the
2306 # callbacks run.
2306 # callbacks run.
2307 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2307 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2308
2308
2309 def txnaborthook(tr2):
2309 def txnaborthook(tr2):
2310 """To be run if transaction is aborted
2310 """To be run if transaction is aborted
2311 """
2311 """
2312 reporef().hook(
2312 reporef().hook(
2313 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2313 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2314 )
2314 )
2315
2315
2316 tr.addabort(b'txnabort-hook', txnaborthook)
2316 tr.addabort(b'txnabort-hook', txnaborthook)
2317 # avoid eager cache invalidation. in-memory data should be identical
2317 # avoid eager cache invalidation. in-memory data should be identical
2318 # to stored data if transaction has no error.
2318 # to stored data if transaction has no error.
2319 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2319 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2320 self._transref = weakref.ref(tr)
2320 self._transref = weakref.ref(tr)
2321 scmutil.registersummarycallback(self, tr, desc)
2321 scmutil.registersummarycallback(self, tr, desc)
2322 return tr
2322 return tr
2323
2323
2324 def _journalfiles(self):
2324 def _journalfiles(self):
2325 return (
2325 return (
2326 (self.svfs, b'journal'),
2326 (self.svfs, b'journal'),
2327 (self.svfs, b'journal.narrowspec'),
2327 (self.svfs, b'journal.narrowspec'),
2328 (self.vfs, b'journal.narrowspec.dirstate'),
2328 (self.vfs, b'journal.narrowspec.dirstate'),
2329 (self.vfs, b'journal.dirstate'),
2329 (self.vfs, b'journal.dirstate'),
2330 (self.vfs, b'journal.branch'),
2330 (self.vfs, b'journal.branch'),
2331 (self.vfs, b'journal.desc'),
2331 (self.vfs, b'journal.desc'),
2332 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2332 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2333 (self.svfs, b'journal.phaseroots'),
2333 (self.svfs, b'journal.phaseroots'),
2334 )
2334 )
2335
2335
2336 def undofiles(self):
2336 def undofiles(self):
2337 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2337 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
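# Hedged note: assuming the module-level undoname() helper simply replaces
# the 'journal' prefix with 'undo', the mapping consulted by rollback is
# e.g. 'journal.dirstate' -> 'undo.dirstate' and
# 'journal.phaseroots' -> 'undo.phaseroots'.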
2338
2338
2339 @unfilteredmethod
2339 @unfilteredmethod
2340 def _writejournal(self, desc):
2340 def _writejournal(self, desc):
2341 self.dirstate.savebackup(None, b'journal.dirstate')
2341 self.dirstate.savebackup(None, b'journal.dirstate')
2342 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2342 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2343 narrowspec.savebackup(self, b'journal.narrowspec')
2343 narrowspec.savebackup(self, b'journal.narrowspec')
2344 self.vfs.write(
2344 self.vfs.write(
2345 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2345 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2346 )
2346 )
2347 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2347 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2348 bookmarksvfs = bookmarks.bookmarksvfs(self)
2348 bookmarksvfs = bookmarks.bookmarksvfs(self)
2349 bookmarksvfs.write(
2349 bookmarksvfs.write(
2350 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2350 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2351 )
2351 )
2352 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2352 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2353
2353
2354 def recover(self):
2354 def recover(self):
2355 with self.lock():
2355 with self.lock():
2356 if self.svfs.exists(b"journal"):
2356 if self.svfs.exists(b"journal"):
2357 self.ui.status(_(b"rolling back interrupted transaction\n"))
2357 self.ui.status(_(b"rolling back interrupted transaction\n"))
2358 vfsmap = {
2358 vfsmap = {
2359 b'': self.svfs,
2359 b'': self.svfs,
2360 b'plain': self.vfs,
2360 b'plain': self.vfs,
2361 }
2361 }
2362 transaction.rollback(
2362 transaction.rollback(
2363 self.svfs,
2363 self.svfs,
2364 vfsmap,
2364 vfsmap,
2365 b"journal",
2365 b"journal",
2366 self.ui.warn,
2366 self.ui.warn,
2367 checkambigfiles=_cachedfiles,
2367 checkambigfiles=_cachedfiles,
2368 )
2368 )
2369 self.invalidate()
2369 self.invalidate()
2370 return True
2370 return True
2371 else:
2371 else:
2372 self.ui.warn(_(b"no interrupted transaction available\n"))
2372 self.ui.warn(_(b"no interrupted transaction available\n"))
2373 return False
2373 return False
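# The usual entry point for this code path is the CLI command, e.g.:
#
#   $ hg recover
#
# which rolls back the interrupted transaction reported above (and is
# typically followed by a repository verification pass).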
2374
2374
2375 def rollback(self, dryrun=False, force=False):
2375 def rollback(self, dryrun=False, force=False):
2376 wlock = lock = dsguard = None
2376 wlock = lock = dsguard = None
2377 try:
2377 try:
2378 wlock = self.wlock()
2378 wlock = self.wlock()
2379 lock = self.lock()
2379 lock = self.lock()
2380 if self.svfs.exists(b"undo"):
2380 if self.svfs.exists(b"undo"):
2381 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2381 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2382
2382
2383 return self._rollback(dryrun, force, dsguard)
2383 return self._rollback(dryrun, force, dsguard)
2384 else:
2384 else:
2385 self.ui.warn(_(b"no rollback information available\n"))
2385 self.ui.warn(_(b"no rollback information available\n"))
2386 return 1
2386 return 1
2387 finally:
2387 finally:
2388 release(dsguard, lock, wlock)
2388 release(dsguard, lock, wlock)
2389
2389
2390 @unfilteredmethod # Until we get smarter cache management
2390 @unfilteredmethod # Until we get smarter cache management
2391 def _rollback(self, dryrun, force, dsguard):
2391 def _rollback(self, dryrun, force, dsguard):
2392 ui = self.ui
2392 ui = self.ui
2393 try:
2393 try:
2394 args = self.vfs.read(b'undo.desc').splitlines()
2394 args = self.vfs.read(b'undo.desc').splitlines()
2395 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2395 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2396 if len(args) >= 3:
2396 if len(args) >= 3:
2397 detail = args[2]
2397 detail = args[2]
2398 oldtip = oldlen - 1
2398 oldtip = oldlen - 1
2399
2399
2400 if detail and ui.verbose:
2400 if detail and ui.verbose:
2401 msg = _(
2401 msg = _(
2402 b'repository tip rolled back to revision %d'
2402 b'repository tip rolled back to revision %d'
2403 b' (undo %s: %s)\n'
2403 b' (undo %s: %s)\n'
2404 ) % (oldtip, desc, detail)
2404 ) % (oldtip, desc, detail)
2405 else:
2405 else:
2406 msg = _(
2406 msg = _(
2407 b'repository tip rolled back to revision %d (undo %s)\n'
2407 b'repository tip rolled back to revision %d (undo %s)\n'
2408 ) % (oldtip, desc)
2408 ) % (oldtip, desc)
2409 except IOError:
2409 except IOError:
2410 msg = _(b'rolling back unknown transaction\n')
2410 msg = _(b'rolling back unknown transaction\n')
2411 desc = None
2411 desc = None
2412
2412
2413 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2413 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2414 raise error.Abort(
2414 raise error.Abort(
2415 _(
2415 _(
2416 b'rollback of last commit while not checked out '
2416 b'rollback of last commit while not checked out '
2417 b'may lose data'
2417 b'may lose data'
2418 ),
2418 ),
2419 hint=_(b'use -f to force'),
2419 hint=_(b'use -f to force'),
2420 )
2420 )
2421
2421
2422 ui.status(msg)
2422 ui.status(msg)
2423 if dryrun:
2423 if dryrun:
2424 return 0
2424 return 0
2425
2425
2426 parents = self.dirstate.parents()
2426 parents = self.dirstate.parents()
2427 self.destroying()
2427 self.destroying()
2428 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2428 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2429 transaction.rollback(
2429 transaction.rollback(
2430 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2430 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2431 )
2431 )
2432 bookmarksvfs = bookmarks.bookmarksvfs(self)
2432 bookmarksvfs = bookmarks.bookmarksvfs(self)
2433 if bookmarksvfs.exists(b'undo.bookmarks'):
2433 if bookmarksvfs.exists(b'undo.bookmarks'):
2434 bookmarksvfs.rename(
2434 bookmarksvfs.rename(
2435 b'undo.bookmarks', b'bookmarks', checkambig=True
2435 b'undo.bookmarks', b'bookmarks', checkambig=True
2436 )
2436 )
2437 if self.svfs.exists(b'undo.phaseroots'):
2437 if self.svfs.exists(b'undo.phaseroots'):
2438 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2438 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2439 self.invalidate()
2439 self.invalidate()
2440
2440
2441 has_node = self.changelog.index.has_node
2441 has_node = self.changelog.index.has_node
2442 parentgone = any(not has_node(p) for p in parents)
2442 parentgone = any(not has_node(p) for p in parents)
2443 if parentgone:
2443 if parentgone:
2444 # prevent dirstateguard from overwriting already restored one
2444 # prevent dirstateguard from overwriting already restored one
2445 dsguard.close()
2445 dsguard.close()
2446
2446
2447 narrowspec.restorebackup(self, b'undo.narrowspec')
2447 narrowspec.restorebackup(self, b'undo.narrowspec')
2448 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2448 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2449 self.dirstate.restorebackup(None, b'undo.dirstate')
2449 self.dirstate.restorebackup(None, b'undo.dirstate')
2450 try:
2450 try:
2451 branch = self.vfs.read(b'undo.branch')
2451 branch = self.vfs.read(b'undo.branch')
2452 self.dirstate.setbranch(encoding.tolocal(branch))
2452 self.dirstate.setbranch(encoding.tolocal(branch))
2453 except IOError:
2453 except IOError:
2454 ui.warn(
2454 ui.warn(
2455 _(
2455 _(
2456 b'named branch could not be reset: '
2456 b'named branch could not be reset: '
2457 b'current branch is still \'%s\'\n'
2457 b'current branch is still \'%s\'\n'
2458 )
2458 )
2459 % self.dirstate.branch()
2459 % self.dirstate.branch()
2460 )
2460 )
2461
2461
2462 parents = tuple([p.rev() for p in self[None].parents()])
2462 parents = tuple([p.rev() for p in self[None].parents()])
2463 if len(parents) > 1:
2463 if len(parents) > 1:
2464 ui.status(
2464 ui.status(
2465 _(
2465 _(
2466 b'working directory now based on '
2466 b'working directory now based on '
2467 b'revisions %d and %d\n'
2467 b'revisions %d and %d\n'
2468 )
2468 )
2469 % parents
2469 % parents
2470 )
2470 )
2471 else:
2471 else:
2472 ui.status(
2472 ui.status(
2473 _(b'working directory now based on revision %d\n') % parents
2473 _(b'working directory now based on revision %d\n') % parents
2474 )
2474 )
2475 mergestatemod.mergestate.clean(self, self[b'.'].node())
2475 mergestatemod.mergestate.clean(self, self[b'.'].node())
2476
2476
2477 # TODO: if we know which new heads may result from this rollback, pass
2477 # TODO: if we know which new heads may result from this rollback, pass
2478 # them to destroy(), which will prevent the branchhead cache from being
2478 # them to destroy(), which will prevent the branchhead cache from being
2479 # invalidated.
2479 # invalidated.
2480 self.destroyed()
2480 self.destroyed()
2481 return 0
2481 return 0
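# Hedged usage note: this backs the legacy `hg rollback` command, e.g.:
#
#   $ hg rollback --dry-run   # report what would be undone, change nothing
#   $ hg rollback --force     # skip the "may lose data" check above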
2482
2482
2483 def _buildcacheupdater(self, newtransaction):
2483 def _buildcacheupdater(self, newtransaction):
2484 """called during transaction to build the callback updating cache
2484 """called during transaction to build the callback updating cache
2485
2485
2486 Lives on the repository to help extension who might want to augment
2486 Lives on the repository to help extension who might want to augment
2487 this logic. For this purpose, the created transaction is passed to the
2487 this logic. For this purpose, the created transaction is passed to the
2488 method.
2488 method.
2489 """
2489 """
2490 # we must avoid cyclic reference between repo and transaction.
2490 # we must avoid cyclic reference between repo and transaction.
2491 reporef = weakref.ref(self)
2491 reporef = weakref.ref(self)
2492
2492
2493 def updater(tr):
2493 def updater(tr):
2494 repo = reporef()
2494 repo = reporef()
2495 repo.updatecaches(tr)
2495 repo.updatecaches(tr)
2496
2496
2497 return updater
2497 return updater
2498
2498
2499 @unfilteredmethod
2499 @unfilteredmethod
2500 def updatecaches(self, tr=None, full=False):
2500 def updatecaches(self, tr=None, full=False):
2501 """warm appropriate caches
2501 """warm appropriate caches
2502
2502
2503 If this function is called after a transaction closed, the transaction
2504 will be available in the 'tr' argument. This can be used to selectively
2505 update caches relevant to the changes in that transaction.
2505 update caches relevant to the changes in that transaction.
2506
2506
2507 If 'full' is set, make sure all caches the function knows about have
2507 If 'full' is set, make sure all caches the function knows about have
2508 up-to-date data. Even the ones usually loaded more lazily.
2508 up-to-date data. Even the ones usually loaded more lazily.
2509 """
2509 """
2510 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2510 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2511 # During strip, many caches are invalid but
2511 # During strip, many caches are invalid but
2512 # later call to `destroyed` will refresh them.
2512 # later call to `destroyed` will refresh them.
2513 return
2513 return
2514
2514
2515 if tr is None or tr.changes[b'origrepolen'] < len(self):
2515 if tr is None or tr.changes[b'origrepolen'] < len(self):
2516 # accessing the 'served' branchmap should refresh all the others,
2517 self.ui.debug(b'updating the branch cache\n')
2517 self.ui.debug(b'updating the branch cache\n')
2518 self.filtered(b'served').branchmap()
2518 self.filtered(b'served').branchmap()
2519 self.filtered(b'served.hidden').branchmap()
2519 self.filtered(b'served.hidden').branchmap()
2520
2520
2521 if full:
2521 if full:
2522 unfi = self.unfiltered()
2522 unfi = self.unfiltered()
2523
2523
2524 self.changelog.update_caches(transaction=tr)
2524 self.changelog.update_caches(transaction=tr)
2525 self.manifestlog.update_caches(transaction=tr)
2525 self.manifestlog.update_caches(transaction=tr)
2526
2526
2527 rbc = unfi.revbranchcache()
2527 rbc = unfi.revbranchcache()
2528 for r in unfi.changelog:
2528 for r in unfi.changelog:
2529 rbc.branchinfo(r)
2529 rbc.branchinfo(r)
2530 rbc.write()
2530 rbc.write()
2531
2531
2532 # ensure the working copy parents are in the manifestfulltextcache
2532 # ensure the working copy parents are in the manifestfulltextcache
2533 for ctx in self[b'.'].parents():
2533 for ctx in self[b'.'].parents():
2534 ctx.manifest() # accessing the manifest is enough
2534 ctx.manifest() # accessing the manifest is enough
2535
2535
2536 # accessing fnode cache warms the cache
2536 # accessing fnode cache warms the cache
2537 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2537 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2538 # accessing tags warms the cache
2539 self.tags()
2539 self.tags()
2540 self.filtered(b'served').tags()
2540 self.filtered(b'served').tags()
2541
2541
2542 # The `full` arg is documented as updating even the lazily-loaded
2542 # The `full` arg is documented as updating even the lazily-loaded
2543 # caches immediately, so we're forcing a write to cause these caches
2543 # caches immediately, so we're forcing a write to cause these caches
2544 # to be warmed up even if they haven't explicitly been requested
2544 # to be warmed up even if they haven't explicitly been requested
2545 # yet (if they've never been used by hg, they won't ever have been
2545 # yet (if they've never been used by hg, they won't ever have been
2546 # written, even if they're a subset of another kind of cache that
2546 # written, even if they're a subset of another kind of cache that
2547 # *has* been used).
2547 # *has* been used).
2548 for filt in repoview.filtertable.keys():
2548 for filt in repoview.filtertable.keys():
2549 filtered = self.filtered(filt)
2549 filtered = self.filtered(filt)
2550 filtered.branchmap().write(filtered)
2550 filtered.branchmap().write(filtered)
2551
2551
2552 def invalidatecaches(self):
2552 def invalidatecaches(self):
2553
2553
2554 if '_tagscache' in vars(self):
2554 if '_tagscache' in vars(self):
2555 # can't use delattr on proxy
2555 # can't use delattr on proxy
2556 del self.__dict__['_tagscache']
2556 del self.__dict__['_tagscache']
2557
2557
2558 self._branchcaches.clear()
2558 self._branchcaches.clear()
2559 self.invalidatevolatilesets()
2559 self.invalidatevolatilesets()
2560 self._sparsesignaturecache.clear()
2560 self._sparsesignaturecache.clear()
2561
2561
2562 def invalidatevolatilesets(self):
2562 def invalidatevolatilesets(self):
2563 self.filteredrevcache.clear()
2563 self.filteredrevcache.clear()
2564 obsolete.clearobscaches(self)
2564 obsolete.clearobscaches(self)
2565 self._quick_access_changeid_invalidate()
2565 self._quick_access_changeid_invalidate()
2566
2566
2567 def invalidatedirstate(self):
2567 def invalidatedirstate(self):
2568 '''Invalidates the dirstate, causing the next call to dirstate
2568 '''Invalidates the dirstate, causing the next call to dirstate
2569 to check if it was modified since the last time it was read,
2569 to check if it was modified since the last time it was read,
2570 rereading it if it has.
2570 rereading it if it has.
2571
2571
2572 This is different from dirstate.invalidate() in that it doesn't always
2573 reread the dirstate. Use dirstate.invalidate() if you want to
2574 explicitly read the dirstate again (i.e. restoring it to a previous
2574 explicitly read the dirstate again (i.e. restoring it to a previous
2575 known good state).'''
2575 known good state).'''
2576 if hasunfilteredcache(self, 'dirstate'):
2576 if hasunfilteredcache(self, 'dirstate'):
2577 for k in self.dirstate._filecache:
2577 for k in self.dirstate._filecache:
2578 try:
2578 try:
2579 delattr(self.dirstate, k)
2579 delattr(self.dirstate, k)
2580 except AttributeError:
2580 except AttributeError:
2581 pass
2581 pass
2582 delattr(self.unfiltered(), 'dirstate')
2582 delattr(self.unfiltered(), 'dirstate')
2583
2583
2584 def invalidate(self, clearfilecache=False):
2584 def invalidate(self, clearfilecache=False):
2585 '''Invalidates both store and non-store parts other than dirstate
2585 '''Invalidates both store and non-store parts other than dirstate
2586
2586
2587 If a transaction is running, invalidation of store is omitted,
2587 If a transaction is running, invalidation of store is omitted,
2588 because discarding in-memory changes might cause inconsistency
2588 because discarding in-memory changes might cause inconsistency
2589 (e.g. incomplete fncache causes unintentional failure, but
2589 (e.g. incomplete fncache causes unintentional failure, but
2590 redundant one doesn't).
2590 redundant one doesn't).
2591 '''
2591 '''
2592 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2592 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2593 for k in list(self._filecache.keys()):
2593 for k in list(self._filecache.keys()):
2594 # dirstate is invalidated separately in invalidatedirstate()
2594 # dirstate is invalidated separately in invalidatedirstate()
2595 if k == b'dirstate':
2595 if k == b'dirstate':
2596 continue
2596 continue
2597 if (
2597 if (
2598 k == b'changelog'
2598 k == b'changelog'
2599 and self.currenttransaction()
2599 and self.currenttransaction()
2600 and self.changelog._delayed
2600 and self.changelog._delayed
2601 ):
2601 ):
2602 # The changelog object may store unwritten revisions. We don't
2602 # The changelog object may store unwritten revisions. We don't
2603 # want to lose them.
2603 # want to lose them.
2604 # TODO: Solve the problem instead of working around it.
2604 # TODO: Solve the problem instead of working around it.
2605 continue
2605 continue
2606
2606
2607 if clearfilecache:
2607 if clearfilecache:
2608 del self._filecache[k]
2608 del self._filecache[k]
2609 try:
2609 try:
2610 delattr(unfiltered, k)
2610 delattr(unfiltered, k)
2611 except AttributeError:
2611 except AttributeError:
2612 pass
2612 pass
2613 self.invalidatecaches()
2613 self.invalidatecaches()
2614 if not self.currenttransaction():
2614 if not self.currenttransaction():
2615 # TODO: Changing contents of store outside transaction
2615 # TODO: Changing contents of store outside transaction
2616 # causes inconsistency. We should make in-memory store
2616 # causes inconsistency. We should make in-memory store
2617 # changes detectable, and abort if changed.
2617 # changes detectable, and abort if changed.
2618 self.store.invalidatecaches()
2618 self.store.invalidatecaches()
2619
2619
2620 def invalidateall(self):
2620 def invalidateall(self):
2621 '''Fully invalidates both store and non-store parts, causing the
2621 '''Fully invalidates both store and non-store parts, causing the
2622 subsequent operation to reread any outside changes.'''
2622 subsequent operation to reread any outside changes.'''
2623 # extension should hook this to invalidate its caches
2623 # extension should hook this to invalidate its caches
2624 self.invalidate()
2624 self.invalidate()
2625 self.invalidatedirstate()
2625 self.invalidatedirstate()
2626
2626
2627 @unfilteredmethod
2627 @unfilteredmethod
2628 def _refreshfilecachestats(self, tr):
2628 def _refreshfilecachestats(self, tr):
2629 """Reload stats of cached files so that they are flagged as valid"""
2629 """Reload stats of cached files so that they are flagged as valid"""
2630 for k, ce in self._filecache.items():
2630 for k, ce in self._filecache.items():
2631 k = pycompat.sysstr(k)
2631 k = pycompat.sysstr(k)
2632 if k == 'dirstate' or k not in self.__dict__:
2632 if k == 'dirstate' or k not in self.__dict__:
2633 continue
2633 continue
2634 ce.refresh()
2634 ce.refresh()
2635
2635
2636 def _lock(
2636 def _lock(
2637 self,
2637 self,
2638 vfs,
2638 vfs,
2639 lockname,
2639 lockname,
2640 wait,
2640 wait,
2641 releasefn,
2641 releasefn,
2642 acquirefn,
2642 acquirefn,
2643 desc,
2643 desc,
2644 inheritchecker=None,
2644 inheritchecker=None,
2645 parentenvvar=None,
2645 parentenvvar=None,
2646 ):
2646 ):
2647 parentlock = None
2647 parentlock = None
2648 # the contents of parentenvvar are used by the underlying lock to
2648 # the contents of parentenvvar are used by the underlying lock to
2649 # determine whether it can be inherited
2649 # determine whether it can be inherited
2650 if parentenvvar is not None:
2650 if parentenvvar is not None:
2651 parentlock = encoding.environ.get(parentenvvar)
2651 parentlock = encoding.environ.get(parentenvvar)
2652
2652
2653 timeout = 0
2653 timeout = 0
2654 warntimeout = 0
2654 warntimeout = 0
2655 if wait:
2655 if wait:
2656 timeout = self.ui.configint(b"ui", b"timeout")
2656 timeout = self.ui.configint(b"ui", b"timeout")
2657 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2657 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2658 # internal config: ui.signal-safe-lock
2658 # internal config: ui.signal-safe-lock
2659 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2659 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2660
2660
2661 l = lockmod.trylock(
2661 l = lockmod.trylock(
2662 self.ui,
2662 self.ui,
2663 vfs,
2663 vfs,
2664 lockname,
2664 lockname,
2665 timeout,
2665 timeout,
2666 warntimeout,
2666 warntimeout,
2667 releasefn=releasefn,
2667 releasefn=releasefn,
2668 acquirefn=acquirefn,
2668 acquirefn=acquirefn,
2669 desc=desc,
2669 desc=desc,
2670 inheritchecker=inheritchecker,
2670 inheritchecker=inheritchecker,
2671 parentlock=parentlock,
2671 parentlock=parentlock,
2672 signalsafe=signalsafe,
2672 signalsafe=signalsafe,
2673 )
2673 )
2674 return l
2674 return l
2675
2675
2676 def _afterlock(self, callback):
2676 def _afterlock(self, callback):
2677 """add a callback to be run when the repository is fully unlocked
2677 """add a callback to be run when the repository is fully unlocked
2678
2678
2679 The callback will be executed when the outermost lock is released
2679 The callback will be executed when the outermost lock is released
2680 (with wlock being higher level than 'lock')."""
2680 (with wlock being higher level than 'lock')."""
2681 for ref in (self._wlockref, self._lockref):
2681 for ref in (self._wlockref, self._lockref):
2682 l = ref and ref()
2682 l = ref and ref()
2683 if l and l.held:
2683 if l and l.held:
2684 l.postrelease.append(callback)
2684 l.postrelease.append(callback)
2685 break
2685 break
2686 else: # no lock have been found.
2686 else: # no lock have been found.
2687 callback(True)
2687 callback(True)
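# Sketch of the contract (the callback name is hypothetical): the callback
# receives a single boolean, and is invoked immediately with True when no
# lock is currently held (see the `unused_success` parameters elsewhere in
# this file):
#
#   def notify(success):
#       ...
#   repo._afterlock(notify)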
2688
2688
2689 def lock(self, wait=True):
2689 def lock(self, wait=True):
2690 '''Lock the repository store (.hg/store) and return a weak reference
2690 '''Lock the repository store (.hg/store) and return a weak reference
2691 to the lock. Use this before modifying the store (e.g. committing or
2691 to the lock. Use this before modifying the store (e.g. committing or
2692 stripping). If you are opening a transaction, get a lock as well.
2693
2694 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2695 'wlock' first to avoid a dead-lock hazard.'''
2696 l = self._currentlock(self._lockref)
2696 l = self._currentlock(self._lockref)
2697 if l is not None:
2697 if l is not None:
2698 l.lock()
2698 l.lock()
2699 return l
2699 return l
2700
2700
2701 l = self._lock(
2701 l = self._lock(
2702 vfs=self.svfs,
2702 vfs=self.svfs,
2703 lockname=b"lock",
2703 lockname=b"lock",
2704 wait=wait,
2704 wait=wait,
2705 releasefn=None,
2705 releasefn=None,
2706 acquirefn=self.invalidate,
2706 acquirefn=self.invalidate,
2707 desc=_(b'repository %s') % self.origroot,
2707 desc=_(b'repository %s') % self.origroot,
2708 )
2708 )
2709 self._lockref = weakref.ref(l)
2709 self._lockref = weakref.ref(l)
2710 return l
2710 return l
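# Illustrative only: the ordering contract from the docstring in practice.
# Acquiring in the opposite order triggers the devel warning in wlock():
#
#   with repo.wlock(), repo.lock():   # wlock first, then the store lock
#       ...                           # modify store and working copy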
2711
2711
2712 def _wlockchecktransaction(self):
2712 def _wlockchecktransaction(self):
2713 if self.currenttransaction() is not None:
2713 if self.currenttransaction() is not None:
2714 raise error.LockInheritanceContractViolation(
2714 raise error.LockInheritanceContractViolation(
2715 b'wlock cannot be inherited in the middle of a transaction'
2715 b'wlock cannot be inherited in the middle of a transaction'
2716 )
2716 )
2717
2717
2718 def wlock(self, wait=True):
2718 def wlock(self, wait=True):
2719 '''Lock the non-store parts of the repository (everything under
2719 '''Lock the non-store parts of the repository (everything under
2720 .hg except .hg/store) and return a weak reference to the lock.
2720 .hg except .hg/store) and return a weak reference to the lock.
2721
2721
2722 Use this before modifying files in .hg.
2722 Use this before modifying files in .hg.
2723
2723
2724 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2725 'wlock' first to avoid a dead-lock hazard.'''
2726 l = self._wlockref and self._wlockref()
2726 l = self._wlockref and self._wlockref()
2727 if l is not None and l.held:
2727 if l is not None and l.held:
2728 l.lock()
2728 l.lock()
2729 return l
2729 return l
2730
2730
2731 # We do not need to check for non-waiting lock acquisition. Such
2731 # We do not need to check for non-waiting lock acquisition. Such
2732 # acquisition would not cause dead-lock as they would just fail.
2732 # acquisition would not cause dead-lock as they would just fail.
2733 if wait and (
2733 if wait and (
2734 self.ui.configbool(b'devel', b'all-warnings')
2734 self.ui.configbool(b'devel', b'all-warnings')
2735 or self.ui.configbool(b'devel', b'check-locks')
2735 or self.ui.configbool(b'devel', b'check-locks')
2736 ):
2736 ):
2737 if self._currentlock(self._lockref) is not None:
2737 if self._currentlock(self._lockref) is not None:
2738 self.ui.develwarn(b'"wlock" acquired after "lock"')
2738 self.ui.develwarn(b'"wlock" acquired after "lock"')
2739
2739
2740 def unlock():
2740 def unlock():
2741 if self.dirstate.pendingparentchange():
2741 if self.dirstate.pendingparentchange():
2742 self.dirstate.invalidate()
2742 self.dirstate.invalidate()
2743 else:
2743 else:
2744 self.dirstate.write(None)
2744 self.dirstate.write(None)
2745
2745
2746 self._filecache[b'dirstate'].refresh()
2746 self._filecache[b'dirstate'].refresh()
2747
2747
2748 l = self._lock(
2748 l = self._lock(
2749 self.vfs,
2749 self.vfs,
2750 b"wlock",
2750 b"wlock",
2751 wait,
2751 wait,
2752 unlock,
2752 unlock,
2753 self.invalidatedirstate,
2753 self.invalidatedirstate,
2754 _(b'working directory of %s') % self.origroot,
2754 _(b'working directory of %s') % self.origroot,
2755 inheritchecker=self._wlockchecktransaction,
2755 inheritchecker=self._wlockchecktransaction,
2756 parentenvvar=b'HG_WLOCK_LOCKER',
2756 parentenvvar=b'HG_WLOCK_LOCKER',
2757 )
2757 )
2758 self._wlockref = weakref.ref(l)
2758 self._wlockref = weakref.ref(l)
2759 return l
2759 return l
2760
2760
2761 def _currentlock(self, lockref):
2761 def _currentlock(self, lockref):
2762 """Returns the lock if it's held, or None if it's not."""
2762 """Returns the lock if it's held, or None if it's not."""
2763 if lockref is None:
2763 if lockref is None:
2764 return None
2764 return None
2765 l = lockref()
2765 l = lockref()
2766 if l is None or not l.held:
2766 if l is None or not l.held:
2767 return None
2767 return None
2768 return l
2768 return l
2769
2769
2770 def currentwlock(self):
2770 def currentwlock(self):
2771 """Returns the wlock if it's held, or None if it's not."""
2771 """Returns the wlock if it's held, or None if it's not."""
2772 return self._currentlock(self._wlockref)
2772 return self._currentlock(self._wlockref)
2773
2773
2774 def _filecommit(
2774 def _filecommit(
2775 self,
2775 self,
2776 fctx,
2776 fctx,
2777 manifest1,
2777 manifest1,
2778 manifest2,
2778 manifest2,
2779 linkrev,
2779 linkrev,
2780 tr,
2780 tr,
2781 changelist,
2781 changelist,
2782 includecopymeta,
2782 includecopymeta,
2783 ):
2783 ):
2784 """
2784 """
2785 commit an individual file as part of a larger transaction
2785 commit an individual file as part of a larger transaction
2786
2786
2787 input:
2787 input:
2788
2788
2789 fctx: a file context with the content we are trying to commit
2789 fctx: a file context with the content we are trying to commit
2790 manifest1: manifest of changeset first parent
2790 manifest1: manifest of changeset first parent
2791 manifest2: manifest of changeset second parent
2791 manifest2: manifest of changeset second parent
2792 linkrev: revision number of the changeset being created
2792 linkrev: revision number of the changeset being created
2793 tr: current transaction
2794 changelist: list of files being changed (modified in place)
2795 includecopymeta: boolean, set to False to skip storing the copy data
2796 (only used by the Google-specific feature of using
2797 changeset extra as the copy source of truth).
2798
2798
2799 output:
2799 output:
2800
2800
2801 The resulting filenode
2801 The resulting filenode
2802 """
2802 """
2803
2803
2804 fname = fctx.path()
2804 fname = fctx.path()
2805 fparent1 = manifest1.get(fname, nullid)
2805 fparent1 = manifest1.get(fname, nullid)
2806 fparent2 = manifest2.get(fname, nullid)
2806 fparent2 = manifest2.get(fname, nullid)
2807 if isinstance(fctx, context.filectx):
2807 if isinstance(fctx, context.filectx):
2808 node = fctx.filenode()
2808 node = fctx.filenode()
2809 if node in [fparent1, fparent2]:
2809 if node in [fparent1, fparent2]:
2810 self.ui.debug(b'reusing %s filelog entry\n' % fname)
2810 self.ui.debug(b'reusing %s filelog entry\n' % fname)
2811 if (
2811 if (
2812 fparent1 != nullid
2812 fparent1 != nullid
2813 and manifest1.flags(fname) != fctx.flags()
2813 and manifest1.flags(fname) != fctx.flags()
2814 ) or (
2814 ) or (
2815 fparent2 != nullid
2815 fparent2 != nullid
2816 and manifest2.flags(fname) != fctx.flags()
2816 and manifest2.flags(fname) != fctx.flags()
2817 ):
2817 ):
2818 changelist.append(fname)
2818 changelist.append(fname)
2819 return node
2819 return node
2820
2820
2821 flog = self.file(fname)
2821 flog = self.file(fname)
2822 meta = {}
2822 meta = {}
2823 cfname = fctx.copysource()
2823 cfname = fctx.copysource()
2824 if cfname and cfname != fname:
2824 if cfname and cfname != fname:
2825 # Mark the new revision of this file as a copy of another
2825 # Mark the new revision of this file as a copy of another
2826 # file. This copy data will effectively act as a parent
2826 # file. This copy data will effectively act as a parent
2827 # of this new revision. If this is a merge, the first
2827 # of this new revision. If this is a merge, the first
2828 # parent will be the nullid (meaning "look up the copy data")
2828 # parent will be the nullid (meaning "look up the copy data")
2829 # and the second one will be the other parent. For example:
2829 # and the second one will be the other parent. For example:
2830 #
2830 #
2831 # 0 --- 1 --- 3 rev1 changes file foo
2831 # 0 --- 1 --- 3 rev1 changes file foo
2832 # \ / rev2 renames foo to bar and changes it
2832 # \ / rev2 renames foo to bar and changes it
2833 # \- 2 -/ rev3 should have bar with all changes and
2833 # \- 2 -/ rev3 should have bar with all changes and
2834 # should record that bar descends from
2834 # should record that bar descends from
2835 # bar in rev2 and foo in rev1
2835 # bar in rev2 and foo in rev1
2836 #
2836 #
2837 # this allows this merge to succeed:
2837 # this allows this merge to succeed:
2838 #
2838 #
2839 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2839 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2840 # \ / merging rev3 and rev4 should use bar@rev2
2840 # \ / merging rev3 and rev4 should use bar@rev2
2841 # \- 2 --- 4 as the merge base
2841 # \- 2 --- 4 as the merge base
2842 #
2842 #
2843
2843
2844 cnode = manifest1.get(cfname)
2844 cnode = manifest1.get(cfname)
2845 newfparent = fparent2
2845 newfparent = fparent2
2846
2846
2847 if manifest2: # branch merge
2847 if manifest2: # branch merge
2848 if fparent2 == nullid or cnode is None: # copied on remote side
2848 if fparent2 == nullid or cnode is None: # copied on remote side
2849 if cfname in manifest2:
2849 if cfname in manifest2:
2850 cnode = manifest2[cfname]
2850 cnode = manifest2[cfname]
2851 newfparent = fparent1
2851 newfparent = fparent1
2852
2852
2853 # Here, we used to search backwards through history to try to find
2853 # Here, we used to search backwards through history to try to find
2854 # where the file copy came from if the source of a copy was not in
2854 # where the file copy came from if the source of a copy was not in
2855 # the parent directory. However, this doesn't actually make sense to
2855 # the parent directory. However, this doesn't actually make sense to
2856 # do (what does a copy from something not in your working copy even
2856 # do (what does a copy from something not in your working copy even
2857 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2857 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2858 # the user that copy information was dropped, so if they didn't
2858 # the user that copy information was dropped, so if they didn't
2859 # expect this outcome it can be fixed, but this is the correct
2859 # expect this outcome it can be fixed, but this is the correct
2860 # behavior in this circumstance.
2860 # behavior in this circumstance.
2861
2861
2862 if cnode:
2862 if cnode:
2863 self.ui.debug(
2863 self.ui.debug(
2864 b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
2864 b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
2865 )
2865 )
2866 if includecopymeta:
2866 if includecopymeta:
2867 meta[b"copy"] = cfname
2867 meta[b"copy"] = cfname
2868 meta[b"copyrev"] = hex(cnode)
2868 meta[b"copyrev"] = hex(cnode)
2869 fparent1, fparent2 = nullid, newfparent
2869 fparent1, fparent2 = nullid, newfparent
2870 else:
2870 else:
2871 self.ui.warn(
2871 self.ui.warn(
2872 _(
2872 _(
2873 b"warning: can't find ancestor for '%s' "
2873 b"warning: can't find ancestor for '%s' "
2874 b"copied from '%s'!\n"
2874 b"copied from '%s'!\n"
2875 )
2875 )
2876 % (fname, cfname)
2876 % (fname, cfname)
2877 )
2877 )
2878
2878
2879 elif fparent1 == nullid:
2879 elif fparent1 == nullid:
2880 fparent1, fparent2 = fparent2, nullid
2880 fparent1, fparent2 = fparent2, nullid
2881 elif fparent2 != nullid:
2881 elif fparent2 != nullid:
2882 # is one parent an ancestor of the other?
2882 # is one parent an ancestor of the other?
2883 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2883 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2884 if fparent1 in fparentancestors:
2884 if fparent1 in fparentancestors:
2885 fparent1, fparent2 = fparent2, nullid
2885 fparent1, fparent2 = fparent2, nullid
2886 elif fparent2 in fparentancestors:
2886 elif fparent2 in fparentancestors:
2887 fparent2 = nullid
2887 fparent2 = nullid
2888 elif not fparentancestors:
2888 elif not fparentancestors:
2889 # TODO: this whole if-else might be simplified much more
2889 # TODO: this whole if-else might be simplified much more
2890 ms = mergestatemod.mergestate.read(self)
2890 ms = mergestatemod.mergestate.read(self)
2891 if (
2891 if (
2892 fname in ms
2892 fname in ms
2893 and ms[fname] == mergestatemod.MERGE_RECORD_MERGED_OTHER
2893 and ms[fname] == mergestatemod.MERGE_RECORD_MERGED_OTHER
2894 ):
2894 ):
2895 fparent1, fparent2 = fparent2, nullid
2895 fparent1, fparent2 = fparent2, nullid
2896
2896
2897 # is the file changed?
2897 # is the file changed?
2898 text = fctx.data()
2898 text = fctx.data()
2899 if fparent2 != nullid or meta or flog.cmp(fparent1, text):
2899 if fparent2 != nullid or meta or flog.cmp(fparent1, text):
2900 changelist.append(fname)
2900 changelist.append(fname)
2901 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2901 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2902 # are just the flags changed during merge?
2902 # are just the flags changed during merge?
2903 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2903 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2904 changelist.append(fname)
2904 changelist.append(fname)
2905
2905
2906 return fparent1
2906 return fparent1
2907
2907
2908 def checkcommitpatterns(self, wctx, match, status, fail):
2908 def checkcommitpatterns(self, wctx, match, status, fail):
2909 """check for commit arguments that aren't committable"""
2909 """check for commit arguments that aren't committable"""
2910 if match.isexact() or match.prefix():
2910 if match.isexact() or match.prefix():
2911 matched = set(status.modified + status.added + status.removed)
2911 matched = set(status.modified + status.added + status.removed)
2912
2912
2913 for f in match.files():
2913 for f in match.files():
2914 f = self.dirstate.normalize(f)
2914 f = self.dirstate.normalize(f)
2915 if f == b'.' or f in matched or f in wctx.substate:
2915 if f == b'.' or f in matched or f in wctx.substate:
2916 continue
2916 continue
2917 if f in status.deleted:
2917 if f in status.deleted:
2918 fail(f, _(b'file not found!'))
2918 fail(f, _(b'file not found!'))
2919 # Is it a directory that exists or used to exist?
2919 # Is it a directory that exists or used to exist?
2920 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2920 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2921 d = f + b'/'
2921 d = f + b'/'
2922 for mf in matched:
2922 for mf in matched:
2923 if mf.startswith(d):
2923 if mf.startswith(d):
2924 break
2924 break
2925 else:
2925 else:
2926 fail(f, _(b"no match under directory!"))
2926 fail(f, _(b"no match under directory!"))
2927 elif f not in self.dirstate:
2927 elif f not in self.dirstate:
2928 fail(f, _(b"file not tracked!"))
2928 fail(f, _(b"file not tracked!"))
2929
2929
2930 @unfilteredmethod
2930 @unfilteredmethod
2931 def commit(
2931 def commit(
2932 self,
2932 self,
2933 text=b"",
2933 text=b"",
2934 user=None,
2934 user=None,
2935 date=None,
2935 date=None,
2936 match=None,
2936 match=None,
2937 force=False,
2937 force=False,
2938 editor=None,
2938 editor=None,
2939 extra=None,
2939 extra=None,
2940 ):
2940 ):
2941 """Add a new revision to current repository.
2941 """Add a new revision to current repository.
2942
2942
2943 Revision information is gathered from the working directory,
2943 Revision information is gathered from the working directory,
2944 match can be used to filter the committed files. If editor is
2944 match can be used to filter the committed files. If editor is
2945 supplied, it is called to get a commit message.
2945 supplied, it is called to get a commit message.
2946 """
2946 """
2947 if extra is None:
2947 if extra is None:
2948 extra = {}
2948 extra = {}
2949
2949
2950 def fail(f, msg):
2950 def fail(f, msg):
2951 raise error.Abort(b'%s: %s' % (f, msg))
2951 raise error.Abort(b'%s: %s' % (f, msg))
2952
2952
2953 if not match:
2953 if not match:
2954 match = matchmod.always()
2954 match = matchmod.always()
2955
2955
2956 if not force:
2956 if not force:
2957 match.bad = fail
2957 match.bad = fail
2958
2958
2959 # lock() for recent changelog (see issue4368)
2959 # lock() for recent changelog (see issue4368)
2960 with self.wlock(), self.lock():
2960 with self.wlock(), self.lock():
2961 wctx = self[None]
2961 wctx = self[None]
2962 merge = len(wctx.parents()) > 1
2962 merge = len(wctx.parents()) > 1
2963
2963
2964 if not force and merge and not match.always():
2964 if not force and merge and not match.always():
2965 raise error.Abort(
2965 raise error.Abort(
2966 _(
2966 _(
2967 b'cannot partially commit a merge '
2967 b'cannot partially commit a merge '
2968 b'(do not specify files or patterns)'
2968 b'(do not specify files or patterns)'
2969 )
2969 )
2970 )
2970 )
2971
2971
2972 status = self.status(match=match, clean=force)
2972 status = self.status(match=match, clean=force)
2973 if force:
2973 if force:
2974 status.modified.extend(
2974 status.modified.extend(
2975 status.clean
2975 status.clean
2976 ) # mq may commit clean files
2976 ) # mq may commit clean files
2977
2977
2978 # check subrepos
2978 # check subrepos
2979 subs, commitsubs, newstate = subrepoutil.precommit(
2979 subs, commitsubs, newstate = subrepoutil.precommit(
2980 self.ui, wctx, status, match, force=force
2980 self.ui, wctx, status, match, force=force
2981 )
2981 )
2982
2982
2983 # make sure all explicit patterns are matched
2983 # make sure all explicit patterns are matched
2984 if not force:
2984 if not force:
2985 self.checkcommitpatterns(wctx, match, status, fail)
2985 self.checkcommitpatterns(wctx, match, status, fail)
2986
2986
2987 cctx = context.workingcommitctx(
2987 cctx = context.workingcommitctx(
2988 self, status, text, user, date, extra
2988 self, status, text, user, date, extra
2989 )
2989 )
2990
2990
2991 ms = mergestatemod.mergestate.read(self)
2991 ms = mergestatemod.mergestate.read(self)
2992 mergeutil.checkunresolved(ms)
2992 mergeutil.checkunresolved(ms)
2993
2993
2994 # internal config: ui.allowemptycommit
2994 # internal config: ui.allowemptycommit
2995 if cctx.isempty() and not self.ui.configbool(
2995 if cctx.isempty() and not self.ui.configbool(
2996 b'ui', b'allowemptycommit'
2996 b'ui', b'allowemptycommit'
2997 ):
2997 ):
2998 self.ui.debug(b'nothing to commit, clearing merge state\n')
2998 self.ui.debug(b'nothing to commit, clearing merge state\n')
2999 ms.reset()
2999 ms.reset()
3000 return None
3000 return None
3001
3001
3002 if merge and cctx.deleted():
3002 if merge and cctx.deleted():
3003 raise error.Abort(_(b"cannot commit merge with missing files"))
3003 raise error.Abort(_(b"cannot commit merge with missing files"))
3004
3004
3005 if editor:
3005 if editor:
3006 cctx._text = editor(self, cctx, subs)
3006 cctx._text = editor(self, cctx, subs)
3007 edited = text != cctx._text
3007 edited = text != cctx._text
3008
3008
3009 # Save commit message in case this transaction gets rolled back
3009 # Save commit message in case this transaction gets rolled back
3010 # (e.g. by a pretxncommit hook). Leave the content alone on
3010 # (e.g. by a pretxncommit hook). Leave the content alone on
3011 # the assumption that the user will use the same editor again.
3011 # the assumption that the user will use the same editor again.
3012 msgfn = self.savecommitmessage(cctx._text)
3012 msgfn = self.savecommitmessage(cctx._text)
3013
3013
3014 # commit subs and write new state
3014 # commit subs and write new state
3015 if subs:
3015 if subs:
3016 uipathfn = scmutil.getuipathfn(self)
3016 uipathfn = scmutil.getuipathfn(self)
3017 for s in sorted(commitsubs):
3017 for s in sorted(commitsubs):
3018 sub = wctx.sub(s)
3018 sub = wctx.sub(s)
3019 self.ui.status(
3019 self.ui.status(
3020 _(b'committing subrepository %s\n')
3020 _(b'committing subrepository %s\n')
3021 % uipathfn(subrepoutil.subrelpath(sub))
3021 % uipathfn(subrepoutil.subrelpath(sub))
3022 )
3022 )
3023 sr = sub.commit(cctx._text, user, date)
3023 sr = sub.commit(cctx._text, user, date)
3024 newstate[s] = (newstate[s][0], sr)
3024 newstate[s] = (newstate[s][0], sr)
3025 subrepoutil.writestate(self, newstate)
3025 subrepoutil.writestate(self, newstate)
3026
3026
3027 p1, p2 = self.dirstate.parents()
3027 p1, p2 = self.dirstate.parents()
3028 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
3028 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
3029 try:
3029 try:
3030 self.hook(
3030 self.hook(
3031 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3031 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3032 )
3032 )
3033 with self.transaction(b'commit'):
3033 with self.transaction(b'commit'):
3034 ret = self.commitctx(cctx, True)
3034 ret = self.commitctx(cctx, True)
3035 # update bookmarks, dirstate and mergestate
3035 # update bookmarks, dirstate and mergestate
3036 bookmarks.update(self, [p1, p2], ret)
3036 bookmarks.update(self, [p1, p2], ret)
3037 cctx.markcommitted(ret)
3037 cctx.markcommitted(ret)
3038 ms.reset()
3038 ms.reset()
3039 except: # re-raises
3039 except: # re-raises
3040 if edited:
3040 if edited:
3041 self.ui.write(
3041 self.ui.write(
3042 _(b'note: commit message saved in %s\n') % msgfn
3042 _(b'note: commit message saved in %s\n') % msgfn
3043 )
3043 )
3044 self.ui.write(
3044 self.ui.write(
3045 _(
3045 _(
3046 b"note: use 'hg commit --logfile "
3046 b"note: use 'hg commit --logfile "
3047 b".hg/last-message.txt --edit' to reuse it\n"
3047 b".hg/last-message.txt --edit' to reuse it\n"
3048 )
3048 )
3049 )
3049 )
3050 raise
3050 raise
3051
3051
3052 def commithook(unused_success):
3052 def commithook(unused_success):
3053 # hack for commands that use a temporary commit (e.g. histedit):
3053 # hack for commands that use a temporary commit (e.g. histedit):
3054 # the temporary commit may have been stripped before the hook runs
3054 # the temporary commit may have been stripped before the hook runs
3055 if self.changelog.hasnode(ret):
3055 if self.changelog.hasnode(ret):
3056 self.hook(
3056 self.hook(
3057 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3057 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3058 )
3058 )
3059
3059
3060 self._afterlock(commithook)
3060 self._afterlock(commithook)
3061 return ret
3061 return ret
3062
3062
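A brief, hedged sketch of how the commit() method above is typically driven from a script or extension; the repository path, user and message are placeholders, not part of this change.

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'/tmp/demo')           # placeholder path
    node = repo.commit(text=b'example commit',
                       user=b'demo <demo@example.org>')
    if node is None:
        # mirrors the "nothing to commit" branch above
        ui.status(b'nothing to commit\n')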
3063 @unfilteredmethod
3063 @unfilteredmethod
3064 def commitctx(self, ctx, error=False, origctx=None):
3064 def commitctx(self, ctx, error=False, origctx=None):
3065 """Add a new revision to current repository.
3065 """Add a new revision to current repository.
3066 Revision information is passed via the context argument.
3066 Revision information is passed via the context argument.
3067
3067
3068 ctx.files() should list all files involved in this commit, i.e.
3068 ctx.files() should list all files involved in this commit, i.e.
3069 modified/added/removed files. On merge, it may be wider than the
3069 modified/added/removed files. On merge, it may be wider than the
3070 ctx.files() to be committed, since any file nodes derived directly
3070 ctx.files() to be committed, since any file nodes derived directly
3071 from p1 or p2 are excluded from the committed ctx.files().
3071 from p1 or p2 are excluded from the committed ctx.files().
3072
3072
3073 origctx is for convert to work around the problem that bug
3073 origctx is for convert to work around the problem that bug
3074 fixes to the files list in changesets change hashes. For
3074 fixes to the files list in changesets change hashes. For
3075 convert to be the identity, it can pass an origctx and this
3075 convert to be the identity, it can pass an origctx and this
3076 function will use the same files list when it makes sense to
3076 function will use the same files list when it makes sense to
3077 do so.
3077 do so.
3078 """
3078 """
3079
3079
3080 p1, p2 = ctx.p1(), ctx.p2()
3080 p1, p2 = ctx.p1(), ctx.p2()
3081 user = ctx.user()
3081 user = ctx.user()
3082
3082
3083 if self.filecopiesmode == b'changeset-sidedata':
3083 if self.filecopiesmode == b'changeset-sidedata':
3084 writechangesetcopy = True
3084 writechangesetcopy = True
3085 writefilecopymeta = True
3085 writefilecopymeta = True
3086 writecopiesto = None
3086 writecopiesto = None
3087 else:
3087 else:
3088 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
3088 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
3089 writefilecopymeta = writecopiesto != b'changeset-only'
3089 writefilecopymeta = writecopiesto != b'changeset-only'
3090 writechangesetcopy = writecopiesto in (
3090 writechangesetcopy = writecopiesto in (
3091 b'changeset-only',
3091 b'changeset-only',
3092 b'compatibility',
3092 b'compatibility',
3093 )
3093 )
3094 p1copies, p2copies = None, None
3094 p1copies, p2copies = None, None
3095 if writechangesetcopy:
3095 if writechangesetcopy:
3096 p1copies = ctx.p1copies()
3096 p1copies = ctx.p1copies()
3097 p2copies = ctx.p2copies()
3097 p2copies = ctx.p2copies()
3098 filesadded, filesremoved = None, None
3098 filesadded, filesremoved = None, None
3099 with self.lock(), self.transaction(b"commit") as tr:
3099 with self.lock(), self.transaction(b"commit") as tr:
3100 trp = weakref.proxy(tr)
3100 trp = weakref.proxy(tr)
3101
3101
3102 if ctx.manifestnode():
3102 if ctx.manifestnode():
3103 # reuse an existing manifest revision
3103 # reuse an existing manifest revision
3104 self.ui.debug(b'reusing known manifest\n')
3104 self.ui.debug(b'reusing known manifest\n')
3105 mn = ctx.manifestnode()
3105 mn = ctx.manifestnode()
3106 files = ctx.files()
3106 files = ctx.files()
3107 if writechangesetcopy:
3107 if writechangesetcopy:
3108 filesadded = ctx.filesadded()
3108 filesadded = ctx.filesadded()
3109 filesremoved = ctx.filesremoved()
3109 filesremoved = ctx.filesremoved()
3110 elif ctx.files():
3110 elif ctx.files():
3111 m1ctx = p1.manifestctx()
3111 m1ctx = p1.manifestctx()
3112 m2ctx = p2.manifestctx()
3112 m2ctx = p2.manifestctx()
3113 mctx = m1ctx.copy()
3113 mctx = m1ctx.copy()
3114
3114
3115 m = mctx.read()
3115 m = mctx.read()
3116 m1 = m1ctx.read()
3116 m1 = m1ctx.read()
3117 m2 = m2ctx.read()
3117 m2 = m2ctx.read()
3118
3118
3119 # check in files
3119 # check in files
3120 added = []
3120 added = []
3121 changed = []
3121 changed = []
3122 removed = list(ctx.removed())
3122 removed = list(ctx.removed())
3123 linkrev = len(self)
3123 linkrev = len(self)
3124 self.ui.note(_(b"committing files:\n"))
3124 self.ui.note(_(b"committing files:\n"))
3125 uipathfn = scmutil.getuipathfn(self)
3125 uipathfn = scmutil.getuipathfn(self)
3126 for f in sorted(ctx.modified() + ctx.added()):
3126 for f in sorted(ctx.modified() + ctx.added()):
3127 self.ui.note(uipathfn(f) + b"\n")
3127 self.ui.note(uipathfn(f) + b"\n")
3128 try:
3128 try:
3129 fctx = ctx[f]
3129 fctx = ctx[f]
3130 if fctx is None:
3130 if fctx is None:
3131 removed.append(f)
3131 removed.append(f)
3132 else:
3132 else:
3133 added.append(f)
3133 added.append(f)
3134 m[f] = self._filecommit(
3134 m[f] = self._filecommit(
3135 fctx,
3135 fctx,
3136 m1,
3136 m1,
3137 m2,
3137 m2,
3138 linkrev,
3138 linkrev,
3139 trp,
3139 trp,
3140 changed,
3140 changed,
3141 writefilecopymeta,
3141 writefilecopymeta,
3142 )
3142 )
3143 m.setflag(f, fctx.flags())
3143 m.setflag(f, fctx.flags())
3144 except OSError:
3144 except OSError:
3145 self.ui.warn(
3145 self.ui.warn(
3146 _(b"trouble committing %s!\n") % uipathfn(f)
3146 _(b"trouble committing %s!\n") % uipathfn(f)
3147 )
3147 )
3148 raise
3148 raise
3149 except IOError as inst:
3149 except IOError as inst:
3150 errcode = getattr(inst, 'errno', errno.ENOENT)
3150 errcode = getattr(inst, 'errno', errno.ENOENT)
3151 if error or errcode and errcode != errno.ENOENT:
3151 if error or errcode and errcode != errno.ENOENT:
3152 self.ui.warn(
3152 self.ui.warn(
3153 _(b"trouble committing %s!\n") % uipathfn(f)
3153 _(b"trouble committing %s!\n") % uipathfn(f)
3154 )
3154 )
3155 raise
3155 raise
3156
3156
3157 # update manifest
3157 # update manifest
3158 removed = [f for f in removed if f in m1 or f in m2]
3158 removed = [f for f in removed if f in m1 or f in m2]
3159 drop = sorted([f for f in removed if f in m])
3159 drop = sorted([f for f in removed if f in m])
3160 for f in drop:
3160 for f in drop:
3161 del m[f]
3161 del m[f]
3162 if p2.rev() != nullrev:
3162 if p2.rev() != nullrev:
3163 rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
3163 rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
3164 removed = [f for f in removed if not rf(f)]
3164 removed = [f for f in removed if not rf(f)]
3165
3165
3166 files = changed + removed
3166 files = changed + removed
3167 md = None
3167 md = None
3168 if not files:
3168 if not files:
3169 # if no "files" actually changed in terms of the changelog,
3169 # if no "files" actually changed in terms of the changelog,
3170 # try hard to detect unmodified manifest entry so that the
3170 # try hard to detect unmodified manifest entry so that the
3171 # exact same commit can be reproduced later on convert.
3171 # exact same commit can be reproduced later on convert.
3172 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
3172 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
3173 if not files and md:
3173 if not files and md:
3174 self.ui.debug(
3174 self.ui.debug(
3175 b'not reusing manifest (no file change in '
3175 b'not reusing manifest (no file change in '
3176 b'changelog, but manifest differs)\n'
3176 b'changelog, but manifest differs)\n'
3177 )
3177 )
3178 if files or md:
3178 if files or md:
3179 self.ui.note(_(b"committing manifest\n"))
3179 self.ui.note(_(b"committing manifest\n"))
3180 # we're using narrowmatch here since it's already applied at
3180 # we're using narrowmatch here since it's already applied at
3181 # other stages (such as dirstate.walk), so we're already
3181 # other stages (such as dirstate.walk), so we're already
3182 # ignoring things outside of narrowspec in most cases. The
3182 # ignoring things outside of narrowspec in most cases. The
3183 # one case where we might have files outside the narrowspec
3183 # one case where we might have files outside the narrowspec
3184 # at this point is merges, and we already error out in the
3184 # at this point is merges, and we already error out in the
3185 # case where the merge has files outside of the narrowspec,
3185 # case where the merge has files outside of the narrowspec,
3186 # so this is safe.
3186 # so this is safe.
3187 mn = mctx.write(
3187 mn = mctx.write(
3188 trp,
3188 trp,
3189 linkrev,
3189 linkrev,
3190 p1.manifestnode(),
3190 p1.manifestnode(),
3191 p2.manifestnode(),
3191 p2.manifestnode(),
3192 added,
3192 added,
3193 drop,
3193 drop,
3194 match=self.narrowmatch(),
3194 match=self.narrowmatch(),
3195 )
3195 )
3196
3196
3197 if writechangesetcopy:
3197 if writechangesetcopy:
3198 filesadded = [
3198 filesadded = [
3199 f for f in changed if not (f in m1 or f in m2)
3199 f for f in changed if not (f in m1 or f in m2)
3200 ]
3200 ]
3201 filesremoved = removed
3201 filesremoved = removed
3202 else:
3202 else:
3203 self.ui.debug(
3203 self.ui.debug(
3204 b'reusing manifest from p1 (listed files '
3204 b'reusing manifest from p1 (listed files '
3205 b'actually unchanged)\n'
3205 b'actually unchanged)\n'
3206 )
3206 )
3207 mn = p1.manifestnode()
3207 mn = p1.manifestnode()
3208 else:
3208 else:
3209 self.ui.debug(b'reusing manifest from p1 (no file change)\n')
3209 self.ui.debug(b'reusing manifest from p1 (no file change)\n')
3210 mn = p1.manifestnode()
3210 mn = p1.manifestnode()
3211 files = []
3211 files = []
3212
3212
3213 if writecopiesto == b'changeset-only':
3213 if writecopiesto == b'changeset-only':
3214 # If writing only to changeset extras, use None to indicate that
3214 # If writing only to changeset extras, use None to indicate that
3215 # no entry should be written. If writing to both, write an empty
3215 # no entry should be written. If writing to both, write an empty
3216 # entry to prevent the reader from falling back to reading
3216 # entry to prevent the reader from falling back to reading
3217 # filelogs.
3217 # filelogs.
3218 p1copies = p1copies or None
3218 p1copies = p1copies or None
3219 p2copies = p2copies or None
3219 p2copies = p2copies or None
3220 filesadded = filesadded or None
3220 filesadded = filesadded or None
3221 filesremoved = filesremoved or None
3221 filesremoved = filesremoved or None
3222
3222
3223 if origctx and origctx.manifestnode() == mn:
3223 if origctx and origctx.manifestnode() == mn:
3224 files = origctx.files()
3224 files = origctx.files()
3225
3225
3226 # update changelog
3226 # update changelog
3227 self.ui.note(_(b"committing changelog\n"))
3227 self.ui.note(_(b"committing changelog\n"))
3228 self.changelog.delayupdate(tr)
3228 self.changelog.delayupdate(tr)
3229 n = self.changelog.add(
3229 n = self.changelog.add(
3230 mn,
3230 mn,
3231 files,
3231 files,
3232 ctx.description(),
3232 ctx.description(),
3233 trp,
3233 trp,
3234 p1.node(),
3234 p1.node(),
3235 p2.node(),
3235 p2.node(),
3236 user,
3236 user,
3237 ctx.date(),
3237 ctx.date(),
3238 ctx.extra().copy(),
3238 ctx.extra().copy(),
3239 p1copies,
3239 p1copies,
3240 p2copies,
3240 p2copies,
3241 filesadded,
3241 filesadded,
3242 filesremoved,
3242 filesremoved,
3243 )
3243 )
3244 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
3244 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
3245 self.hook(
3245 self.hook(
3246 b'pretxncommit',
3246 b'pretxncommit',
3247 throw=True,
3247 throw=True,
3248 node=hex(n),
3248 node=hex(n),
3249 parent1=xp1,
3249 parent1=xp1,
3250 parent2=xp2,
3250 parent2=xp2,
3251 )
3251 )
3252 # set the new commit in its proper phase
3252 # set the new commit in its proper phase
3253 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
3253 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
3254 if targetphase:
3254 if targetphase:
3255 # retracting the boundary does not alter parent changesets.
3255 # retracting the boundary does not alter parent changesets.
3256 # if a parent has a higher phase, the resulting phase will
3256 # if a parent has a higher phase, the resulting phase will
3257 # be compliant anyway
3257 # be compliant anyway
3258 #
3258 #
3259 # if minimal phase was 0 we don't need to retract anything
3259 # if minimal phase was 0 we don't need to retract anything
3260 phases.registernew(self, tr, targetphase, [n])
3260 phases.registernew(self, tr, targetphase, [n])
3261 return n
3261 return n
3262
3262
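A hedged sketch of feeding commitctx() above with an in-memory context (context.memctx); the repository path, file name and contents are illustrative placeholders.

    from mercurial import context, hg, ui as uimod
    from mercurial.node import nullid

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'/tmp/demo')           # placeholder path

    def filectxfn(repo, memctx, path):
        return context.memfilectx(repo, memctx, path, b'hello\n')

    mctx = context.memctx(
        repo,
        parents=(repo[b'.'].node(), nullid),
        text=b'synthetic commit',
        files=[b'greeting.txt'],
        filectxfn=filectxfn,
        user=b'demo <demo@example.org>',
    )
    node = repo.commitctx(mctx)                      # opens its own lock and transaction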
3263 @unfilteredmethod
3263 @unfilteredmethod
3264 def destroying(self):
3264 def destroying(self):
3265 '''Inform the repository that nodes are about to be destroyed.
3265 '''Inform the repository that nodes are about to be destroyed.
3266 Intended for use by strip and rollback, so there's a common
3266 Intended for use by strip and rollback, so there's a common
3267 place for anything that has to be done before destroying history.
3267 place for anything that has to be done before destroying history.
3268
3268
3269 This is mostly useful for saving state that is in memory and waiting
3269 This is mostly useful for saving state that is in memory and waiting
3270 to be flushed when the current lock is released. Because a call to
3270 to be flushed when the current lock is released. Because a call to
3271 destroyed is imminent, the repo will be invalidated causing those
3271 destroyed is imminent, the repo will be invalidated causing those
3272 changes to stay in memory (waiting for the next unlock), or vanish
3272 changes to stay in memory (waiting for the next unlock), or vanish
3273 completely.
3273 completely.
3274 '''
3274 '''
3275 # When using the same lock to commit and strip, the phasecache is left
3275 # When using the same lock to commit and strip, the phasecache is left
3276 # dirty after committing. Then when we strip, the repo is invalidated,
3276 # dirty after committing. Then when we strip, the repo is invalidated,
3277 # causing those changes to disappear.
3277 # causing those changes to disappear.
3278 if '_phasecache' in vars(self):
3278 if '_phasecache' in vars(self):
3279 self._phasecache.write()
3279 self._phasecache.write()
3280
3280
3281 @unfilteredmethod
3281 @unfilteredmethod
3282 def destroyed(self):
3282 def destroyed(self):
3283 '''Inform the repository that nodes have been destroyed.
3283 '''Inform the repository that nodes have been destroyed.
3284 Intended for use by strip and rollback, so there's a common
3284 Intended for use by strip and rollback, so there's a common
3285 place for anything that has to be done after destroying history.
3285 place for anything that has to be done after destroying history.
3286 '''
3286 '''
3287 # When one tries to:
3287 # When one tries to:
3288 # 1) destroy nodes thus calling this method (e.g. strip)
3288 # 1) destroy nodes thus calling this method (e.g. strip)
3289 # 2) use phasecache somewhere (e.g. commit)
3289 # 2) use phasecache somewhere (e.g. commit)
3290 #
3290 #
3291 # then 2) will fail because the phasecache contains nodes that were
3291 # then 2) will fail because the phasecache contains nodes that were
3292 # removed. We can either remove phasecache from the filecache,
3292 # removed. We can either remove phasecache from the filecache,
3293 # causing it to reload next time it is accessed, or simply filter
3293 # causing it to reload next time it is accessed, or simply filter
3294 # the removed nodes now and write the updated cache.
3294 # the removed nodes now and write the updated cache.
3295 self._phasecache.filterunknown(self)
3295 self._phasecache.filterunknown(self)
3296 self._phasecache.write()
3296 self._phasecache.write()
3297
3297
3298 # refresh all repository caches
3298 # refresh all repository caches
3299 self.updatecaches()
3299 self.updatecaches()
3300
3300
3301 # Ensure the persistent tag cache is updated. Doing it now
3301 # Ensure the persistent tag cache is updated. Doing it now
3302 # means that the tag cache only has to worry about destroyed
3302 # means that the tag cache only has to worry about destroyed
3303 # heads immediately after a strip/rollback. That in turn
3303 # heads immediately after a strip/rollback. That in turn
3304 # guarantees that "cachetip == currenttip" (comparing both rev
3304 # guarantees that "cachetip == currenttip" (comparing both rev
3305 # and node) always means no nodes have been added or destroyed.
3305 # and node) always means no nodes have been added or destroyed.
3306
3306
3307 # XXX this is suboptimal when qrefresh'ing: we strip the current
3307 # XXX this is suboptimal when qrefresh'ing: we strip the current
3308 # head, refresh the tag cache, then immediately add a new head.
3308 # head, refresh the tag cache, then immediately add a new head.
3309 # But I think doing it this way is necessary for the "instant
3309 # But I think doing it this way is necessary for the "instant
3310 # tag cache retrieval" case to work.
3310 # tag cache retrieval" case to work.
3311 self.invalidate()
3311 self.invalidate()
3312
3312
3313 def status(
3313 def status(
3314 self,
3314 self,
3315 node1=b'.',
3315 node1=b'.',
3316 node2=None,
3316 node2=None,
3317 match=None,
3317 match=None,
3318 ignored=False,
3318 ignored=False,
3319 clean=False,
3319 clean=False,
3320 unknown=False,
3320 unknown=False,
3321 listsubrepos=False,
3321 listsubrepos=False,
3322 ):
3322 ):
3323 '''a convenience method that calls node1.status(node2)'''
3323 '''a convenience method that calls node1.status(node2)'''
3324 return self[node1].status(
3324 return self[node1].status(
3325 node2, match, ignored, clean, unknown, listsubrepos
3325 node2, match, ignored, clean, unknown, listsubrepos
3326 )
3326 )
3327
3327
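A hedged usage sketch for the status() convenience wrapper above; it assumes `repo` is a localrepository obtained as in the earlier sketches.

    st = repo.status(node1=b'.', unknown=True)       # status of the working copy vs '.'
    for f in st.modified:
        repo.ui.status(b'M %s\n' % f)
    for f in st.unknown:
        repo.ui.status(b'? %s\n' % f)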
3328 def addpostdsstatus(self, ps):
3328 def addpostdsstatus(self, ps):
3329 """Add a callback to run within the wlock, at the point at which status
3329 """Add a callback to run within the wlock, at the point at which status
3330 fixups happen.
3330 fixups happen.
3331
3331
3332 On status completion, callback(wctx, status) will be called with the
3332 On status completion, callback(wctx, status) will be called with the
3333 wlock held, unless the dirstate has changed from underneath or the wlock
3333 wlock held, unless the dirstate has changed from underneath or the wlock
3334 couldn't be grabbed.
3334 couldn't be grabbed.
3335
3335
3336 Callbacks should not capture and use a cached copy of the dirstate --
3336 Callbacks should not capture and use a cached copy of the dirstate --
3337 it might change in the meanwhile. Instead, they should access the
3337 it might change in the meanwhile. Instead, they should access the
3338 dirstate via wctx.repo().dirstate.
3338 dirstate via wctx.repo().dirstate.
3339
3339
3340 This list is emptied out after each status run -- extensions should
3340 This list is emptied out after each status run -- extensions should
3341 make sure it adds to this list each time dirstate.status is called.
3341 make sure it adds to this list each time dirstate.status is called.
3342 Extensions should also make sure they don't call this for statuses
3342 Extensions should also make sure they don't call this for statuses
3343 that don't involve the dirstate.
3343 that don't involve the dirstate.
3344 """
3344 """
3345
3345
3346 # The list is located here for uniqueness reasons -- it is actually
3346 # The list is located here for uniqueness reasons -- it is actually
3347 # managed by the workingctx, but that isn't unique per-repo.
3347 # managed by the workingctx, but that isn't unique per-repo.
3348 self._postdsstatus.append(ps)
3348 self._postdsstatus.append(ps)
3349
3349
3350 def postdsstatus(self):
3350 def postdsstatus(self):
3351 """Used by workingctx to get the list of post-dirstate-status hooks."""
3351 """Used by workingctx to get the list of post-dirstate-status hooks."""
3352 return self._postdsstatus
3352 return self._postdsstatus
3353
3353
3354 def clearpostdsstatus(self):
3354 def clearpostdsstatus(self):
3355 """Used by workingctx to clear post-dirstate-status hooks."""
3355 """Used by workingctx to clear post-dirstate-status hooks."""
3356 del self._postdsstatus[:]
3356 del self._postdsstatus[:]
3357
3357
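A hedged sketch of the post-dirstate-status callback contract described above; the callback body is illustrative and `repo` is assumed as before.

    def fixup(wctx, status):
        # runs under wlock, right after status fixups; use wctx.repo()
        # rather than a captured dirstate, as the docstring above warns
        if status.deleted:
            wctx.repo().ui.warn(b'%d tracked files are missing\n'
                                % len(status.deleted))

    repo.addpostdsstatus(fixup)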
3358 def heads(self, start=None):
3358 def heads(self, start=None):
3359 if start is None:
3359 if start is None:
3360 cl = self.changelog
3360 cl = self.changelog
3361 headrevs = reversed(cl.headrevs())
3361 headrevs = reversed(cl.headrevs())
3362 return [cl.node(rev) for rev in headrevs]
3362 return [cl.node(rev) for rev in headrevs]
3363
3363
3364 heads = self.changelog.heads(start)
3364 heads = self.changelog.heads(start)
3365 # sort the output in rev descending order
3365 # sort the output in rev descending order
3366 return sorted(heads, key=self.changelog.rev, reverse=True)
3366 return sorted(heads, key=self.changelog.rev, reverse=True)
3367
3367
3368 def branchheads(self, branch=None, start=None, closed=False):
3368 def branchheads(self, branch=None, start=None, closed=False):
3369 '''return a (possibly filtered) list of heads for the given branch
3369 '''return a (possibly filtered) list of heads for the given branch
3370
3370
3371 Heads are returned in topological order, from newest to oldest.
3371 Heads are returned in topological order, from newest to oldest.
3372 If branch is None, use the dirstate branch.
3372 If branch is None, use the dirstate branch.
3373 If start is not None, return only heads reachable from start.
3373 If start is not None, return only heads reachable from start.
3374 If closed is True, return heads that are marked as closed as well.
3374 If closed is True, return heads that are marked as closed as well.
3375 '''
3375 '''
3376 if branch is None:
3376 if branch is None:
3377 branch = self[None].branch()
3377 branch = self[None].branch()
3378 branches = self.branchmap()
3378 branches = self.branchmap()
3379 if not branches.hasbranch(branch):
3379 if not branches.hasbranch(branch):
3380 return []
3380 return []
3381 # the cache returns heads ordered lowest to highest
3381 # the cache returns heads ordered lowest to highest
3382 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3382 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3383 if start is not None:
3383 if start is not None:
3384 # filter out the heads that cannot be reached from startrev
3384 # filter out the heads that cannot be reached from startrev
3385 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3385 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3386 bheads = [h for h in bheads if h in fbheads]
3386 bheads = [h for h in bheads if h in fbheads]
3387 return bheads
3387 return bheads
3388
3388
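A hedged sketch listing the heads of a named branch with the method above; `repo` is assumed as before and the branch name is a placeholder.

    from mercurial.node import short

    for node in repo.branchheads(b'default', closed=False):
        repo.ui.status(b'head of default: %s\n' % short(node))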
3389 def branches(self, nodes):
3389 def branches(self, nodes):
3390 if not nodes:
3390 if not nodes:
3391 nodes = [self.changelog.tip()]
3391 nodes = [self.changelog.tip()]
3392 b = []
3392 b = []
3393 for n in nodes:
3393 for n in nodes:
3394 t = n
3394 t = n
3395 while True:
3395 while True:
3396 p = self.changelog.parents(n)
3396 p = self.changelog.parents(n)
3397 if p[1] != nullid or p[0] == nullid:
3397 if p[1] != nullid or p[0] == nullid:
3398 b.append((t, n, p[0], p[1]))
3398 b.append((t, n, p[0], p[1]))
3399 break
3399 break
3400 n = p[0]
3400 n = p[0]
3401 return b
3401 return b
3402
3402
3403 def between(self, pairs):
3403 def between(self, pairs):
3404 r = []
3404 r = []
3405
3405
3406 for top, bottom in pairs:
3406 for top, bottom in pairs:
3407 n, l, i = top, [], 0
3407 n, l, i = top, [], 0
3408 f = 1
3408 f = 1
3409
3409
3410 while n != bottom and n != nullid:
3410 while n != bottom and n != nullid:
3411 p = self.changelog.parents(n)[0]
3411 p = self.changelog.parents(n)[0]
3412 if i == f:
3412 if i == f:
3413 l.append(n)
3413 l.append(n)
3414 f = f * 2
3414 f = f * 2
3415 n = p
3415 n = p
3416 i += 1
3416 i += 1
3417
3417
3418 r.append(l)
3418 r.append(l)
3419
3419
3420 return r
3420 return r
3421
3421
3422 def checkpush(self, pushop):
3422 def checkpush(self, pushop):
3423 """Extensions can override this function if additional checks have
3423 """Extensions can override this function if additional checks have
3424 to be performed before pushing, or call it if they override push
3424 to be performed before pushing, or call it if they override push
3425 command.
3425 command.
3426 """
3426 """
3427
3427
3428 @unfilteredpropertycache
3428 @unfilteredpropertycache
3429 def prepushoutgoinghooks(self):
3429 def prepushoutgoinghooks(self):
3430 """Return util.hooks consists of a pushop with repo, remote, outgoing
3430 """Return util.hooks consists of a pushop with repo, remote, outgoing
3431 methods, which are called before pushing changesets.
3431 methods, which are called before pushing changesets.
3432 """
3432 """
3433 return util.hooks()
3433 return util.hooks()
3434
3434
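A hedged sketch of how an extension might register on prepushoutgoinghooks from its reposetup(); the extension name and the draft-message check are illustrative only.

    from mercurial import error

    def checkoutgoing(pushop):
        for node in pushop.outgoing.missing:
            if pushop.repo[node].description().startswith(b'WIP'):
                raise error.Abort(b'refusing to push a WIP changeset')

    def reposetup(ui, repo):
        repo.prepushoutgoinghooks.add(b'myext', checkoutgoing)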
3435 def pushkey(self, namespace, key, old, new):
3435 def pushkey(self, namespace, key, old, new):
3436 try:
3436 try:
3437 tr = self.currenttransaction()
3437 tr = self.currenttransaction()
3438 hookargs = {}
3438 hookargs = {}
3439 if tr is not None:
3439 if tr is not None:
3440 hookargs.update(tr.hookargs)
3440 hookargs.update(tr.hookargs)
3441 hookargs = pycompat.strkwargs(hookargs)
3441 hookargs = pycompat.strkwargs(hookargs)
3442 hookargs['namespace'] = namespace
3442 hookargs['namespace'] = namespace
3443 hookargs['key'] = key
3443 hookargs['key'] = key
3444 hookargs['old'] = old
3444 hookargs['old'] = old
3445 hookargs['new'] = new
3445 hookargs['new'] = new
3446 self.hook(b'prepushkey', throw=True, **hookargs)
3446 self.hook(b'prepushkey', throw=True, **hookargs)
3447 except error.HookAbort as exc:
3447 except error.HookAbort as exc:
3448 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3448 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3449 if exc.hint:
3449 if exc.hint:
3450 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3450 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3451 return False
3451 return False
3452 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3452 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3453 ret = pushkey.push(self, namespace, key, old, new)
3453 ret = pushkey.push(self, namespace, key, old, new)
3454
3454
3455 def runhook(unused_success):
3455 def runhook(unused_success):
3456 self.hook(
3456 self.hook(
3457 b'pushkey',
3457 b'pushkey',
3458 namespace=namespace,
3458 namespace=namespace,
3459 key=key,
3459 key=key,
3460 old=old,
3460 old=old,
3461 new=new,
3461 new=new,
3462 ret=ret,
3462 ret=ret,
3463 )
3463 )
3464
3464
3465 self._afterlock(runhook)
3465 self._afterlock(runhook)
3466 return ret
3466 return ret
3467
3467
3468 def listkeys(self, namespace):
3468 def listkeys(self, namespace):
3469 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3469 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3470 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3470 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3471 values = pushkey.list(self, namespace)
3471 values = pushkey.list(self, namespace)
3472 self.hook(b'listkeys', namespace=namespace, values=values)
3472 self.hook(b'listkeys', namespace=namespace, values=values)
3473 return values
3473 return values
3474
3474
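A hedged sketch of the pushkey()/listkeys() pair above using the standard 'bookmarks' namespace; `repo` is assumed as before and the bookmark name is a placeholder.

    from mercurial.node import hex

    marks = repo.listkeys(b'bookmarks')              # {name: hex node}
    ok = repo.pushkey(b'bookmarks', b'demo-mark',
                      marks.get(b'demo-mark', b''),  # b'' creates the mark
                      hex(repo[b'tip'].node()))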
3475 def debugwireargs(self, one, two, three=None, four=None, five=None):
3475 def debugwireargs(self, one, two, three=None, four=None, five=None):
3476 '''used to test argument passing over the wire'''
3476 '''used to test argument passing over the wire'''
3477 return b"%s %s %s %s %s" % (
3477 return b"%s %s %s %s %s" % (
3478 one,
3478 one,
3479 two,
3479 two,
3480 pycompat.bytestr(three),
3480 pycompat.bytestr(three),
3481 pycompat.bytestr(four),
3481 pycompat.bytestr(four),
3482 pycompat.bytestr(five),
3482 pycompat.bytestr(five),
3483 )
3483 )
3484
3484
3485 def savecommitmessage(self, text):
3485 def savecommitmessage(self, text):
3486 fp = self.vfs(b'last-message.txt', b'wb')
3486 fp = self.vfs(b'last-message.txt', b'wb')
3487 try:
3487 try:
3488 fp.write(text)
3488 fp.write(text)
3489 finally:
3489 finally:
3490 fp.close()
3490 fp.close()
3491 return self.pathto(fp.name[len(self.root) + 1 :])
3491 return self.pathto(fp.name[len(self.root) + 1 :])
3492
3492
3493
3493
3494 # used to avoid circular references so destructors work
3494 # used to avoid circular references so destructors work
3495 def aftertrans(files):
3495 def aftertrans(files):
3496 renamefiles = [tuple(t) for t in files]
3496 renamefiles = [tuple(t) for t in files]
3497
3497
3498 def a():
3498 def a():
3499 for vfs, src, dest in renamefiles:
3499 for vfs, src, dest in renamefiles:
3500 # if src and dest refer to a same file, vfs.rename is a no-op,
3500 # if src and dest refer to a same file, vfs.rename is a no-op,
3501 # leaving both src and dest on disk. delete dest to make sure
3501 # leaving both src and dest on disk. delete dest to make sure
3502 # the rename couldn't be such a no-op.
3502 # the rename couldn't be such a no-op.
3503 vfs.tryunlink(dest)
3503 vfs.tryunlink(dest)
3504 try:
3504 try:
3505 vfs.rename(src, dest)
3505 vfs.rename(src, dest)
3506 except OSError: # journal file does not yet exist
3506 except OSError: # journal file does not yet exist
3507 pass
3507 pass
3508
3508
3509 return a
3509 return a
3510
3510
3511
3511
3512 def undoname(fn):
3512 def undoname(fn):
3513 base, name = os.path.split(fn)
3513 base, name = os.path.split(fn)
3514 assert name.startswith(b'journal')
3514 assert name.startswith(b'journal')
3515 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3515 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3516
3516
3517
3517
3518 def instance(ui, path, create, intents=None, createopts=None):
3518 def instance(ui, path, create, intents=None, createopts=None):
3519 localpath = util.urllocalpath(path)
3519 localpath = util.urllocalpath(path)
3520 if create:
3520 if create:
3521 createrepository(ui, localpath, createopts=createopts)
3521 createrepository(ui, localpath, createopts=createopts)
3522
3522
3523 return makelocalrepository(ui, localpath, intents=intents)
3523 return makelocalrepository(ui, localpath, intents=intents)
3524
3524
3525
3525
3526 def islocal(path):
3526 def islocal(path):
3527 return True
3527 return True
3528
3528
3529
3529
3530 def defaultcreateopts(ui, createopts=None):
3530 def defaultcreateopts(ui, createopts=None):
3531 """Populate the default creation options for a repository.
3531 """Populate the default creation options for a repository.
3532
3532
3533 A dictionary of explicitly requested creation options can be passed
3533 A dictionary of explicitly requested creation options can be passed
3534 in. Missing keys will be populated.
3534 in. Missing keys will be populated.
3535 """
3535 """
3536 createopts = dict(createopts or {})
3536 createopts = dict(createopts or {})
3537
3537
3538 if b'backend' not in createopts:
3538 if b'backend' not in createopts:
3539 # experimental config: storage.new-repo-backend
3539 # experimental config: storage.new-repo-backend
3540 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3540 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3541
3541
3542 return createopts
3542 return createopts
3543
3543
3544
3544
3545 def newreporequirements(ui, createopts):
3545 def newreporequirements(ui, createopts):
3546 """Determine the set of requirements for a new local repository.
3546 """Determine the set of requirements for a new local repository.
3547
3547
3548 Extensions can wrap this function to specify custom requirements for
3548 Extensions can wrap this function to specify custom requirements for
3549 new repositories.
3549 new repositories.
3550 """
3550 """
3551 # If the repo is being created from a shared repository, we copy
3551 # If the repo is being created from a shared repository, we copy
3552 # its requirements.
3552 # its requirements.
3553 if b'sharedrepo' in createopts:
3553 if b'sharedrepo' in createopts:
3554 requirements = set(createopts[b'sharedrepo'].requirements)
3554 requirements = set(createopts[b'sharedrepo'].requirements)
3555 if createopts.get(b'sharedrelative'):
3555 if createopts.get(b'sharedrelative'):
3556 requirements.add(b'relshared')
3556 requirements.add(b'relshared')
3557 else:
3557 else:
3558 requirements.add(b'shared')
3558 requirements.add(b'shared')
3559
3559
3560 return requirements
3560 return requirements
3561
3561
3562 if b'backend' not in createopts:
3562 if b'backend' not in createopts:
3563 raise error.ProgrammingError(
3563 raise error.ProgrammingError(
3564 b'backend key not present in createopts; '
3564 b'backend key not present in createopts; '
3565 b'was defaultcreateopts() called?'
3565 b'was defaultcreateopts() called?'
3566 )
3566 )
3567
3567
3568 if createopts[b'backend'] != b'revlogv1':
3568 if createopts[b'backend'] != b'revlogv1':
3569 raise error.Abort(
3569 raise error.Abort(
3570 _(
3570 _(
3571 b'unable to determine repository requirements for '
3571 b'unable to determine repository requirements for '
3572 b'storage backend: %s'
3572 b'storage backend: %s'
3573 )
3573 )
3574 % createopts[b'backend']
3574 % createopts[b'backend']
3575 )
3575 )
3576
3576
3577 requirements = {b'revlogv1'}
3577 requirements = {b'revlogv1'}
3578 if ui.configbool(b'format', b'usestore'):
3578 if ui.configbool(b'format', b'usestore'):
3579 requirements.add(b'store')
3579 requirements.add(b'store')
3580 if ui.configbool(b'format', b'usefncache'):
3580 if ui.configbool(b'format', b'usefncache'):
3581 requirements.add(b'fncache')
3581 requirements.add(b'fncache')
3582 if ui.configbool(b'format', b'dotencode'):
3582 if ui.configbool(b'format', b'dotencode'):
3583 requirements.add(b'dotencode')
3583 requirements.add(b'dotencode')
3584
3584
3585 compengines = ui.configlist(b'format', b'revlog-compression')
3585 compengines = ui.configlist(b'format', b'revlog-compression')
3586 for compengine in compengines:
3586 for compengine in compengines:
3587 if compengine in util.compengines:
3587 if compengine in util.compengines:
3588 break
3588 break
3589 else:
3589 else:
3590 raise error.Abort(
3590 raise error.Abort(
3591 _(
3591 _(
3592 b'compression engines %s defined by '
3592 b'compression engines %s defined by '
3593 b'format.revlog-compression not available'
3593 b'format.revlog-compression not available'
3594 )
3594 )
3595 % b', '.join(b'"%s"' % e for e in compengines),
3595 % b', '.join(b'"%s"' % e for e in compengines),
3596 hint=_(
3596 hint=_(
3597 b'run "hg debuginstall" to list available '
3597 b'run "hg debuginstall" to list available '
3598 b'compression engines'
3598 b'compression engines'
3599 ),
3599 ),
3600 )
3600 )
3601
3601
3602 # zlib is the historical default and doesn't need an explicit requirement.
3602 # zlib is the historical default and doesn't need an explicit requirement.
3603 if compengine == b'zstd':
3603 if compengine == b'zstd':
3604 requirements.add(b'revlog-compression-zstd')
3604 requirements.add(b'revlog-compression-zstd')
3605 elif compengine != b'zlib':
3605 elif compengine != b'zlib':
3606 requirements.add(b'exp-compression-%s' % compengine)
3606 requirements.add(b'exp-compression-%s' % compengine)
3607
3607
3608 if scmutil.gdinitconfig(ui):
3608 if scmutil.gdinitconfig(ui):
3609 requirements.add(b'generaldelta')
3609 requirements.add(b'generaldelta')
3610 if ui.configbool(b'format', b'sparse-revlog'):
3610 if ui.configbool(b'format', b'sparse-revlog'):
3611 requirements.add(SPARSEREVLOG_REQUIREMENT)
3611 requirements.add(SPARSEREVLOG_REQUIREMENT)
3612
3612
3613 # experimental config: format.exp-use-side-data
3613 # experimental config: format.exp-use-side-data
3614 if ui.configbool(b'format', b'exp-use-side-data'):
3614 if ui.configbool(b'format', b'exp-use-side-data'):
3615 requirements.add(SIDEDATA_REQUIREMENT)
3615 requirements.add(SIDEDATA_REQUIREMENT)
3616 # experimental config: format.exp-use-copies-side-data-changeset
3616 # experimental config: format.exp-use-copies-side-data-changeset
3617 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3617 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3618 requirements.add(SIDEDATA_REQUIREMENT)
3618 requirements.add(SIDEDATA_REQUIREMENT)
3619 requirements.add(COPIESSDC_REQUIREMENT)
3619 requirements.add(COPIESSDC_REQUIREMENT)
3620 if ui.configbool(b'experimental', b'treemanifest'):
3620 if ui.configbool(b'experimental', b'treemanifest'):
3621 requirements.add(b'treemanifest')
3621 requirements.add(b'treemanifest')
3622
3622
3623 revlogv2 = ui.config(b'experimental', b'revlogv2')
3623 revlogv2 = ui.config(b'experimental', b'revlogv2')
3624 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3624 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3625 requirements.remove(b'revlogv1')
3625 requirements.remove(b'revlogv1')
3626 # generaldelta is implied by revlogv2.
3626 # generaldelta is implied by revlogv2.
3627 requirements.discard(b'generaldelta')
3627 requirements.discard(b'generaldelta')
3628 requirements.add(REVLOGV2_REQUIREMENT)
3628 requirements.add(REVLOGV2_REQUIREMENT)
3629 # experimental config: format.internal-phase
3629 # experimental config: format.internal-phase
3630 if ui.configbool(b'format', b'internal-phase'):
3630 if ui.configbool(b'format', b'internal-phase'):
3631 requirements.add(b'internal-phase')
3631 requirements.add(b'internal-phase')
3632
3632
3633 if createopts.get(b'narrowfiles'):
3633 if createopts.get(b'narrowfiles'):
3634 requirements.add(repository.NARROW_REQUIREMENT)
3634 requirements.add(repository.NARROW_REQUIREMENT)
3635
3635
3636 if createopts.get(b'lfs'):
3636 if createopts.get(b'lfs'):
3637 requirements.add(b'lfs')
3637 requirements.add(b'lfs')
3638
3638
3639 if ui.configbool(b'format', b'bookmarks-in-store'):
3639 if ui.configbool(b'format', b'bookmarks-in-store'):
3640 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3640 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3641
3641
3642 if ui.configbool(b'format', b'use-persistent-nodemap'):
3642 if ui.configbool(b'format', b'use-persistent-nodemap'):
3643 requirements.add(NODEMAP_REQUIREMENT)
3643 requirements.add(NODEMAP_REQUIREMENT)
3644
3644
3645 return requirements
3645 return requirements
3646
3646
3647
3647
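A hedged sketch combining defaultcreateopts() and newreporequirements() above; the exact set returned depends on the ui's format.* configuration.

    from mercurial import localrepo, ui as uimod

    ui = uimod.ui.load()
    opts = localrepo.defaultcreateopts(ui)
    reqs = localrepo.newreporequirements(ui, createopts=opts)
    # typically includes b'revlogv1', b'store', b'fncache', b'dotencode', ...
    print(sorted(reqs))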
3648 def filterknowncreateopts(ui, createopts):
3648 def filterknowncreateopts(ui, createopts):
3649 """Filters a dict of repo creation options against options that are known.
3649 """Filters a dict of repo creation options against options that are known.
3650
3650
3651 Receives a dict of repo creation options and returns a dict of those
3651 Receives a dict of repo creation options and returns a dict of those
3652 options that we don't know how to handle.
3652 options that we don't know how to handle.
3653
3653
3654 This function is called as part of repository creation. If the
3654 This function is called as part of repository creation. If the
3655 returned dict contains any items, repository creation will not
3655 returned dict contains any items, repository creation will not
3656 be allowed, as it means there was a request to create a repository
3656 be allowed, as it means there was a request to create a repository
3657 with options not recognized by loaded code.
3657 with options not recognized by loaded code.
3658
3658
3659 Extensions can wrap this function to filter out creation options
3659 Extensions can wrap this function to filter out creation options
3660 they know how to handle.
3660 they know how to handle.
3661 """
3661 """
3662 known = {
3662 known = {
3663 b'backend',
3663 b'backend',
3664 b'lfs',
3664 b'lfs',
3665 b'narrowfiles',
3665 b'narrowfiles',
3666 b'sharedrepo',
3666 b'sharedrepo',
3667 b'sharedrelative',
3667 b'sharedrelative',
3668 b'shareditems',
3668 b'shareditems',
3669 b'shallowfilestore',
3669 b'shallowfilestore',
3670 }
3670 }
3671
3671
3672 return {k: v for k, v in createopts.items() if k not in known}
3672 return {k: v for k, v in createopts.items() if k not in known}
3673
3673
3674
3674
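A hedged sketch of an extension wrapping filterknowncreateopts() so that its own creation option (the name 'myext-feature' is hypothetical) is not rejected.

    from mercurial import extensions, localrepo

    def filtercreateopts(orig, ui, createopts):
        unknown = orig(ui, createopts)
        unknown.pop(b'myext-feature', None)          # hypothetical option
        return unknown

    def extsetup(ui):
        extensions.wrapfunction(localrepo, 'filterknowncreateopts',
                                filtercreateopts)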
3675 def createrepository(ui, path, createopts=None):
3675 def createrepository(ui, path, createopts=None):
3676 """Create a new repository in a vfs.
3676 """Create a new repository in a vfs.
3677
3677
3678 ``path`` path to the new repo's working directory.
3678 ``path`` path to the new repo's working directory.
3679 ``createopts`` options for the new repository.
3679 ``createopts`` options for the new repository.
3680
3680
3681 The following keys for ``createopts`` are recognized:
3681 The following keys for ``createopts`` are recognized:
3682
3682
3683 backend
3683 backend
3684 The storage backend to use.
3684 The storage backend to use.
3685 lfs
3685 lfs
3686 Repository will be created with ``lfs`` requirement. The lfs extension
3686 Repository will be created with ``lfs`` requirement. The lfs extension
3687 will automatically be loaded when the repository is accessed.
3687 will automatically be loaded when the repository is accessed.
3688 narrowfiles
3688 narrowfiles
3689 Set up repository to support narrow file storage.
3689 Set up repository to support narrow file storage.
3690 sharedrepo
3690 sharedrepo
3691 Repository object from which storage should be shared.
3691 Repository object from which storage should be shared.
3692 sharedrelative
3692 sharedrelative
3693 Boolean indicating if the path to the shared repo should be
3693 Boolean indicating if the path to the shared repo should be
3694 stored as relative. By default, the pointer to the "parent" repo
3694 stored as relative. By default, the pointer to the "parent" repo
3695 is stored as an absolute path.
3695 is stored as an absolute path.
3696 shareditems
3696 shareditems
3697 Set of items to share to the new repository (in addition to storage).
3697 Set of items to share to the new repository (in addition to storage).
3698 shallowfilestore
3698 shallowfilestore
3699 Indicates that storage for files should be shallow (not all ancestor
3699 Indicates that storage for files should be shallow (not all ancestor
3700 revisions are known).
3700 revisions are known).
3701 """
3701 """
3702 createopts = defaultcreateopts(ui, createopts=createopts)
3702 createopts = defaultcreateopts(ui, createopts=createopts)
3703
3703
3704 unknownopts = filterknowncreateopts(ui, createopts)
3704 unknownopts = filterknowncreateopts(ui, createopts)
3705
3705
3706 if not isinstance(unknownopts, dict):
3706 if not isinstance(unknownopts, dict):
3707 raise error.ProgrammingError(
3707 raise error.ProgrammingError(
3708 b'filterknowncreateopts() did not return a dict'
3708 b'filterknowncreateopts() did not return a dict'
3709 )
3709 )
3710
3710
3711 if unknownopts:
3711 if unknownopts:
3712 raise error.Abort(
3712 raise error.Abort(
3713 _(
3713 _(
3714 b'unable to create repository because of unknown '
3714 b'unable to create repository because of unknown '
3715 b'creation option: %s'
3715 b'creation option: %s'
3716 )
3716 )
3717 % b', '.join(sorted(unknownopts)),
3717 % b', '.join(sorted(unknownopts)),
3718 hint=_(b'is a required extension not loaded?'),
3718 hint=_(b'is a required extension not loaded?'),
3719 )
3719 )
3720
3720
3721 requirements = newreporequirements(ui, createopts=createopts)
3721 requirements = newreporequirements(ui, createopts=createopts)
3722
3722
3723 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3723 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3724
3724
3725 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3725 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3726 if hgvfs.exists():
3726 if hgvfs.exists():
3727 raise error.RepoError(_(b'repository %s already exists') % path)
3727 raise error.RepoError(_(b'repository %s already exists') % path)
3728
3728
3729 if b'sharedrepo' in createopts:
3729 if b'sharedrepo' in createopts:
3730 sharedpath = createopts[b'sharedrepo'].sharedpath
3730 sharedpath = createopts[b'sharedrepo'].sharedpath
3731
3731
3732 if createopts.get(b'sharedrelative'):
3732 if createopts.get(b'sharedrelative'):
3733 try:
3733 try:
3734 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3734 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3735 except (IOError, ValueError) as e:
3735 except (IOError, ValueError) as e:
3736 # ValueError is raised on Windows if the drive letters differ
3736 # ValueError is raised on Windows if the drive letters differ
3737 # on each path.
3737 # on each path.
3738 raise error.Abort(
3738 raise error.Abort(
3739 _(b'cannot calculate relative path'),
3739 _(b'cannot calculate relative path'),
3740 hint=stringutil.forcebytestr(e),
3740 hint=stringutil.forcebytestr(e),
3741 )
3741 )
3742
3742
3743 if not wdirvfs.exists():
3743 if not wdirvfs.exists():
3744 wdirvfs.makedirs()
3744 wdirvfs.makedirs()
3745
3745
3746 hgvfs.makedir(notindexed=True)
3746 hgvfs.makedir(notindexed=True)
3747 if b'sharedrepo' not in createopts:
3747 if b'sharedrepo' not in createopts:
3748 hgvfs.mkdir(b'cache')
3748 hgvfs.mkdir(b'cache')
3749 hgvfs.mkdir(b'wcache')
3749 hgvfs.mkdir(b'wcache')
3750
3750
3751 if b'store' in requirements and b'sharedrepo' not in createopts:
3751 if b'store' in requirements and b'sharedrepo' not in createopts:
3752 hgvfs.mkdir(b'store')
3752 hgvfs.mkdir(b'store')
3753
3753
3754 # We create an invalid changelog outside the store so very old
3754 # We create an invalid changelog outside the store so very old
3755 # Mercurial versions (which didn't know about the requirements
3755 # Mercurial versions (which didn't know about the requirements
3756 # file) encounter an error on reading the changelog. This
3756 # file) encounter an error on reading the changelog. This
3757 # effectively locks out old clients and prevents them from
3757 # effectively locks out old clients and prevents them from
3758 # mucking with a repo in an unknown format.
3758 # mucking with a repo in an unknown format.
3759 #
3759 #
3760 # The revlog header has version 2, which won't be recognized by
3760 # The revlog header has version 2, which won't be recognized by
3761 # such old clients.
3761 # such old clients.
3762 hgvfs.append(
3762 hgvfs.append(
3763 b'00changelog.i',
3763 b'00changelog.i',
3764 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3764 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3765 b'layout',
3765 b'layout',
3766 )
3766 )
3767
3767
3768 scmutil.writerequires(hgvfs, requirements)
3768 scmutil.writerequires(hgvfs, requirements)
3769
3769
3770 # Write out file telling readers where to find the shared store.
3770 # Write out file telling readers where to find the shared store.
3771 if b'sharedrepo' in createopts:
3771 if b'sharedrepo' in createopts:
3772 hgvfs.write(b'sharedpath', sharedpath)
3772 hgvfs.write(b'sharedpath', sharedpath)
3773
3773
3774 if createopts.get(b'shareditems'):
3774 if createopts.get(b'shareditems'):
3775 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3775 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3776 hgvfs.write(b'shared', shared)
3776 hgvfs.write(b'shared', shared)
3777
3777
3778
3778
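A hedged sketch of calling createrepository() directly; the path is a placeholder, and the 'lfs' option simply adds the requirement documented in the docstring above.

    from mercurial import hg, localrepo, ui as uimod

    ui = uimod.ui.load()
    localrepo.createrepository(ui, b'/tmp/newrepo',
                               createopts={b'lfs': True})
    repo = hg.repository(ui, b'/tmp/newrepo')
    assert b'lfs' in repo.requirements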
3779 def poisonrepository(repo):
3779 def poisonrepository(repo):
3780 """Poison a repository instance so it can no longer be used."""
3780 """Poison a repository instance so it can no longer be used."""
3781 # Perform any cleanup on the instance.
3781 # Perform any cleanup on the instance.
3782 repo.close()
3782 repo.close()
3783
3783
3784 # Our strategy is to replace the type of the object with one that
3784 # Our strategy is to replace the type of the object with one that
3785 # has all attribute lookups result in error.
3785 # has all attribute lookups result in error.
3786 #
3786 #
3787 # But we have to allow the close() method because some constructors
3787 # But we have to allow the close() method because some constructors
3788 # of repos call close() on repo references.
3788 # of repos call close() on repo references.
3789 class poisonedrepository(object):
3789 class poisonedrepository(object):
3790 def __getattribute__(self, item):
3790 def __getattribute__(self, item):
3791 if item == 'close':
3791 if item == 'close':
3792 return object.__getattribute__(self, item)
3792 return object.__getattribute__(self, item)
3793
3793
3794 raise error.ProgrammingError(
3794 raise error.ProgrammingError(
3795 b'repo instances should not be used after unshare'
3795 b'repo instances should not be used after unshare'
3796 )
3796 )
3797
3797
3798 def close(self):
3798 def close(self):
3799 pass
3799 pass
3800
3800
3801 # We may have a repoview, which intercepts __setattr__. So be sure
3801 # We may have a repoview, which intercepts __setattr__. So be sure
3802 # we operate at the lowest level possible.
3802 # we operate at the lowest level possible.
3803 object.__setattr__(repo, '__class__', poisonedrepository)
3803 object.__setattr__(repo, '__class__', poisonedrepository)
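A hedged, standalone illustration of the __class__-swapping pattern used by poisonrepository() above, outside of any Mercurial types.

    class _Poisoned(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)
            raise RuntimeError('object used after teardown')

        def close(self):
            pass

    class Victim(object):
        def close(self):
            pass

    v = Victim()
    object.__setattr__(v, '__class__', _Poisoned)
    v.close()          # still allowed
    # any other attribute access now raises RuntimeError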
@@ -1,539 +1,539 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 hex,
15 hex,
16 short,
16 short,
17 )
17 )
18 from . import (
18 from . import (
19 bundle2,
19 bundle2,
20 changegroup,
20 changegroup,
21 discovery,
21 discovery,
22 error,
22 error,
23 exchange,
23 exchange,
24 obsolete,
24 obsolete,
25 obsutil,
25 obsutil,
26 pathutil,
26 pathutil,
27 phases,
27 phases,
28 pycompat,
28 pycompat,
29 util,
29 util,
30 )
30 )
31 from .utils import (
31 from .utils import (
32 hashutil,
32 hashutil,
33 stringutil,
33 stringutil,
34 )
34 )
35
35
36
36
37 def backupbundle(
37 def backupbundle(
38 repo, bases, heads, node, suffix, compress=True, obsolescence=True
38 repo, bases, heads, node, suffix, compress=True, obsolescence=True
39 ):
39 ):
40 """create a bundle with the specified revisions as a backup"""
40 """create a bundle with the specified revisions as a backup"""
41
41
42 backupdir = b"strip-backup"
42 backupdir = b"strip-backup"
43 vfs = repo.vfs
43 vfs = repo.vfs
44 if not vfs.isdir(backupdir):
44 if not vfs.isdir(backupdir):
45 vfs.mkdir(backupdir)
45 vfs.mkdir(backupdir)
46
46
47 # Include a hash of all the nodes in the filename for uniqueness
47 # Include a hash of all the nodes in the filename for uniqueness
48 allcommits = repo.set(b'%ln::%ln', bases, heads)
48 allcommits = repo.set(b'%ln::%ln', bases, heads)
49 allhashes = sorted(c.hex() for c in allcommits)
49 allhashes = sorted(c.hex() for c in allcommits)
50 totalhash = hashutil.sha1(b''.join(allhashes)).digest()
50 totalhash = hashutil.sha1(b''.join(allhashes)).digest()
51 name = b"%s/%s-%s-%s.hg" % (
51 name = b"%s/%s-%s-%s.hg" % (
52 backupdir,
52 backupdir,
53 short(node),
53 short(node),
54 hex(totalhash[:4]),
54 hex(totalhash[:4]),
55 suffix,
55 suffix,
56 )
56 )
57
57
58 cgversion = changegroup.localversion(repo)
58 cgversion = changegroup.localversion(repo)
59 comp = None
59 comp = None
60 if cgversion != b'01':
60 if cgversion != b'01':
61 bundletype = b"HG20"
61 bundletype = b"HG20"
62 if compress:
62 if compress:
63 comp = b'BZ'
63 comp = b'BZ'
64 elif compress:
64 elif compress:
65 bundletype = b"HG10BZ"
65 bundletype = b"HG10BZ"
66 else:
66 else:
67 bundletype = b"HG10UN"
67 bundletype = b"HG10UN"
68
68
69 outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
69 outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads)
70 contentopts = {
70 contentopts = {
71 b'cg.version': cgversion,
71 b'cg.version': cgversion,
72 b'obsolescence': obsolescence,
72 b'obsolescence': obsolescence,
73 b'phases': True,
73 b'phases': True,
74 }
74 }
75 return bundle2.writenewbundle(
75 return bundle2.writenewbundle(
76 repo.ui,
76 repo.ui,
77 repo,
77 repo,
78 b'strip',
78 b'strip',
79 name,
79 name,
80 bundletype,
80 bundletype,
81 outgoing,
81 outgoing,
82 contentopts,
82 contentopts,
83 vfs,
83 vfs,
84 compression=comp,
84 compression=comp,
85 )
85 )
86
86
87
87
88 def _collectfiles(repo, striprev):
88 def _collectfiles(repo, striprev):
89 """find out the filelogs affected by the strip"""
89 """find out the filelogs affected by the strip"""
90 files = set()
90 files = set()
91
91
92 for x in pycompat.xrange(striprev, len(repo)):
92 for x in pycompat.xrange(striprev, len(repo)):
93 files.update(repo[x].files())
93 files.update(repo[x].files())
94
94
95 return sorted(files)
95 return sorted(files)
96
96
97
97
98 def _collectrevlog(revlog, striprev):
98 def _collectrevlog(revlog, striprev):
99 _, brokenset = revlog.getstrippoint(striprev)
99 _, brokenset = revlog.getstrippoint(striprev)
100 return [revlog.linkrev(r) for r in brokenset]
100 return [revlog.linkrev(r) for r in brokenset]
101
101
102
102
103 def _collectbrokencsets(repo, files, striprev):
103 def _collectbrokencsets(repo, files, striprev):
104 """return the changesets which will be broken by the truncation"""
104 """return the changesets which will be broken by the truncation"""
105 s = set()
105 s = set()
106
106
107 for revlog in manifestrevlogs(repo):
107 for revlog in manifestrevlogs(repo):
108 s.update(_collectrevlog(revlog, striprev))
108 s.update(_collectrevlog(revlog, striprev))
109 for fname in files:
109 for fname in files:
110 s.update(_collectrevlog(repo.file(fname), striprev))
110 s.update(_collectrevlog(repo.file(fname), striprev))
111
111
112 return s
112 return s
113
113
114
114
115 def strip(ui, repo, nodelist, backup=True, topic=b'backup'):
115 def strip(ui, repo, nodelist, backup=True, topic=b'backup'):
116 # This function requires the caller to lock the repo, but it operates
116 # This function requires the caller to lock the repo, but it operates
117 # within a transaction of its own, and thus requires there to be no current
117 # within a transaction of its own, and thus requires there to be no current
118 # transaction when it is called.
118 # transaction when it is called.
119 if repo.currenttransaction() is not None:
119 if repo.currenttransaction() is not None:
120 raise error.ProgrammingError(b'cannot strip from inside a transaction')
120 raise error.ProgrammingError(b'cannot strip from inside a transaction')
121
121
122 # Simple way to maintain backwards compatibility for this
122 # Simple way to maintain backwards compatibility for this
123 # argument.
123 # argument.
124 if backup in [b'none', b'strip']:
124 if backup in [b'none', b'strip']:
125 backup = False
125 backup = False
126
126
127 repo = repo.unfiltered()
127 repo = repo.unfiltered()
128 repo.destroying()
128 repo.destroying()
129 vfs = repo.vfs
129 vfs = repo.vfs
130 # load bookmark before changelog to avoid side effect from outdated
130 # load bookmark before changelog to avoid side effect from outdated
131 # changelog (see repo._refreshchangelog)
131 # changelog (see repo._refreshchangelog)
132 repo._bookmarks
132 repo._bookmarks
133 cl = repo.changelog
133 cl = repo.changelog
134
134
135 # TODO handle undo of merge sets
135 # TODO handle undo of merge sets
136 if isinstance(nodelist, bytes):
136 if isinstance(nodelist, bytes):
137 nodelist = [nodelist]
137 nodelist = [nodelist]
138 striplist = [cl.rev(node) for node in nodelist]
138 striplist = [cl.rev(node) for node in nodelist]
139 striprev = min(striplist)
139 striprev = min(striplist)
140
140
141 files = _collectfiles(repo, striprev)
141 files = _collectfiles(repo, striprev)
142 saverevs = _collectbrokencsets(repo, files, striprev)
142 saverevs = _collectbrokencsets(repo, files, striprev)
143
143
144 # Some revisions with rev > striprev may not be descendants of striprev.
144 # Some revisions with rev > striprev may not be descendants of striprev.
145 # We have to find these revisions and put them in a bundle, so that
145 # We have to find these revisions and put them in a bundle, so that
146 # we can restore them after the truncations.
146 # we can restore them after the truncations.
147 # To create the bundle we use repo.changegroupsubset which requires
147 # To create the bundle we use repo.changegroupsubset which requires
148 # the list of heads and bases of the set of interesting revisions.
148 # the list of heads and bases of the set of interesting revisions.
149 # (head = revision in the set that has no descendant in the set;
149 # (head = revision in the set that has no descendant in the set;
150 # base = revision in the set that has no ancestor in the set)
150 # base = revision in the set that has no ancestor in the set)
151 tostrip = set(striplist)
151 tostrip = set(striplist)
152 saveheads = set(saverevs)
152 saveheads = set(saverevs)
153 for r in cl.revs(start=striprev + 1):
153 for r in cl.revs(start=striprev + 1):
154 if any(p in tostrip for p in cl.parentrevs(r)):
154 if any(p in tostrip for p in cl.parentrevs(r)):
155 tostrip.add(r)
155 tostrip.add(r)
156
156
157 if r not in tostrip:
157 if r not in tostrip:
158 saverevs.add(r)
158 saverevs.add(r)
159 saveheads.difference_update(cl.parentrevs(r))
159 saveheads.difference_update(cl.parentrevs(r))
160 saveheads.add(r)
160 saveheads.add(r)
161 saveheads = [cl.node(r) for r in saveheads]
161 saveheads = [cl.node(r) for r in saveheads]
162
162
163 # compute base nodes
163 # compute base nodes
164 if saverevs:
164 if saverevs:
165 descendants = set(cl.descendants(saverevs))
165 descendants = set(cl.descendants(saverevs))
166 saverevs.difference_update(descendants)
166 saverevs.difference_update(descendants)
167 savebases = [cl.node(r) for r in saverevs]
167 savebases = [cl.node(r) for r in saverevs]
168 stripbases = [cl.node(r) for r in tostrip]
168 stripbases = [cl.node(r) for r in tostrip]
169
169
170 stripobsidx = obsmarkers = ()
170 stripobsidx = obsmarkers = ()
171 if repo.ui.configbool(b'devel', b'strip-obsmarkers'):
171 if repo.ui.configbool(b'devel', b'strip-obsmarkers'):
172 obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
172 obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
173 if obsmarkers:
173 if obsmarkers:
174 stripobsidx = [
174 stripobsidx = [
175 i for i, m in enumerate(repo.obsstore) if m in obsmarkers
175 i for i, m in enumerate(repo.obsstore) if m in obsmarkers
176 ]
176 ]
177
177
178 newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
178 newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
179
179
180 backupfile = None
180 backupfile = None
181 node = nodelist[-1]
181 node = nodelist[-1]
182 if backup:
182 if backup:
183 backupfile = _createstripbackup(repo, stripbases, node, topic)
183 backupfile = _createstripbackup(repo, stripbases, node, topic)
184 # create a changegroup for all the branches we need to keep
184 # create a changegroup for all the branches we need to keep
185 tmpbundlefile = None
185 tmpbundlefile = None
186 if saveheads:
186 if saveheads:
187 # do not compress temporary bundle if we remove it from disk later
187 # do not compress temporary bundle if we remove it from disk later
188 #
188 #
189 # We do not include obsolescence, as it might re-introduce prune markers
189 # We do not include obsolescence, as it might re-introduce prune markers
190 # we are trying to strip. This is harmless since the stripped markers
190 # we are trying to strip. This is harmless since the stripped markers
191 # are already backed up and we did not touch the markers for the
191 # are already backed up and we did not touch the markers for the
192 # saved changesets.
192 # saved changesets.
193 tmpbundlefile = backupbundle(
193 tmpbundlefile = backupbundle(
194 repo,
194 repo,
195 savebases,
195 savebases,
196 saveheads,
196 saveheads,
197 node,
197 node,
198 b'temp',
198 b'temp',
199 compress=False,
199 compress=False,
200 obsolescence=False,
200 obsolescence=False,
201 )
201 )
202
202
203 with ui.uninterruptible():
203 with ui.uninterruptible():
204 try:
204 try:
205 with repo.transaction(b"strip") as tr:
205 with repo.transaction(b"strip") as tr:
206 # TODO this code violates the interface abstraction of the
206 # TODO this code violates the interface abstraction of the
207 # transaction and makes assumptions that file storage is
207 # transaction and makes assumptions that file storage is
208 # using append-only files. We'll need some kind of storage
208 # using append-only files. We'll need some kind of storage
209 # API to handle stripping for us.
209 # API to handle stripping for us.
210 offset = len(tr._entries)
210 offset = len(tr._entries)
211
211
212 tr.startgroup()
212 tr.startgroup()
213 cl.strip(striprev, tr)
213 cl.strip(striprev, tr)
214 stripmanifest(repo, striprev, tr, files)
214 stripmanifest(repo, striprev, tr, files)
215
215
216 for fn in files:
216 for fn in files:
217 repo.file(fn).strip(striprev, tr)
217 repo.file(fn).strip(striprev, tr)
218 tr.endgroup()
218 tr.endgroup()
219
219
220 for i in pycompat.xrange(offset, len(tr._entries)):
220 for i in pycompat.xrange(offset, len(tr._entries)):
221 file, troffset, ignore = tr._entries[i]
221 file, troffset, ignore = tr._entries[i]
222 with repo.svfs(file, b'a', checkambig=True) as fp:
222 with repo.svfs(file, b'a', checkambig=True) as fp:
223 fp.truncate(troffset)
223 fp.truncate(troffset)
224 if troffset == 0:
224 if troffset == 0:
225 repo.store.markremoved(file)
225 repo.store.markremoved(file)
226
226
227 deleteobsmarkers(repo.obsstore, stripobsidx)
227 deleteobsmarkers(repo.obsstore, stripobsidx)
228 del repo.obsstore
228 del repo.obsstore
229 repo.invalidatevolatilesets()
229 repo.invalidatevolatilesets()
230 repo._phasecache.filterunknown(repo)
230 repo._phasecache.filterunknown(repo)
231
231
232 if tmpbundlefile:
232 if tmpbundlefile:
233 ui.note(_(b"adding branch\n"))
233 ui.note(_(b"adding branch\n"))
234 f = vfs.open(tmpbundlefile, b"rb")
234 f = vfs.open(tmpbundlefile, b"rb")
235 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
235 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
236 if not repo.ui.verbose:
236 if not repo.ui.verbose:
237 # silence internal shuffling chatter
237 # silence internal shuffling chatter
238 repo.ui.pushbuffer()
238 repo.ui.pushbuffer()
239 tmpbundleurl = b'bundle:' + vfs.join(tmpbundlefile)
239 tmpbundleurl = b'bundle:' + vfs.join(tmpbundlefile)
240 txnname = b'strip'
240 txnname = b'strip'
241 if not isinstance(gen, bundle2.unbundle20):
241 if not isinstance(gen, bundle2.unbundle20):
242 txnname = b"strip\n%s" % util.hidepassword(tmpbundleurl)
242 txnname = b"strip\n%s" % util.hidepassword(tmpbundleurl)
243 with repo.transaction(txnname) as tr:
243 with repo.transaction(txnname) as tr:
244 bundle2.applybundle(
244 bundle2.applybundle(
245 repo, gen, tr, source=b'strip', url=tmpbundleurl
245 repo, gen, tr, source=b'strip', url=tmpbundleurl
246 )
246 )
247 if not repo.ui.verbose:
247 if not repo.ui.verbose:
248 repo.ui.popbuffer()
248 repo.ui.popbuffer()
249 f.close()
249 f.close()
250
250
251 with repo.transaction(b'repair') as tr:
251 with repo.transaction(b'repair') as tr:
252 bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
252 bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
253 repo._bookmarks.applychanges(repo, tr, bmchanges)
253 repo._bookmarks.applychanges(repo, tr, bmchanges)
254
254
255 # remove undo files
255 # remove undo files
256 for undovfs, undofile in repo.undofiles():
256 for undovfs, undofile in repo.undofiles():
257 try:
257 try:
258 undovfs.unlink(undofile)
258 undovfs.unlink(undofile)
259 except OSError as e:
259 except OSError as e:
260 if e.errno != errno.ENOENT:
260 if e.errno != errno.ENOENT:
261 ui.warn(
261 ui.warn(
262 _(b'error removing %s: %s\n')
262 _(b'error removing %s: %s\n')
263 % (
263 % (
264 undovfs.join(undofile),
264 undovfs.join(undofile),
265 stringutil.forcebytestr(e),
265 stringutil.forcebytestr(e),
266 )
266 )
267 )
267 )
268
268
269 except: # re-raises
269 except: # re-raises
270 if backupfile:
270 if backupfile:
271 ui.warn(
271 ui.warn(
272 _(b"strip failed, backup bundle stored in '%s'\n")
272 _(b"strip failed, backup bundle stored in '%s'\n")
273 % vfs.join(backupfile)
273 % vfs.join(backupfile)
274 )
274 )
275 if tmpbundlefile:
275 if tmpbundlefile:
276 ui.warn(
276 ui.warn(
277 _(b"strip failed, unrecovered changes stored in '%s'\n")
277 _(b"strip failed, unrecovered changes stored in '%s'\n")
278 % vfs.join(tmpbundlefile)
278 % vfs.join(tmpbundlefile)
279 )
279 )
280 ui.warn(
280 ui.warn(
281 _(
281 _(
282 b"(fix the problem, then recover the changesets with "
282 b"(fix the problem, then recover the changesets with "
283 b"\"hg unbundle '%s'\")\n"
283 b"\"hg unbundle '%s'\")\n"
284 )
284 )
285 % vfs.join(tmpbundlefile)
285 % vfs.join(tmpbundlefile)
286 )
286 )
287 raise
287 raise
288 else:
288 else:
289 if tmpbundlefile:
289 if tmpbundlefile:
290 # Remove temporary bundle only if there were no exceptions
290 # Remove temporary bundle only if there were no exceptions
291 vfs.unlink(tmpbundlefile)
291 vfs.unlink(tmpbundlefile)
292
292
293 repo.destroyed()
293 repo.destroyed()
294 # return the backup file path (or None if 'backup' was False) so
294 # return the backup file path (or None if 'backup' was False) so
295 # extensions can use it
295 # extensions can use it
296 return backupfile
296 return backupfile
297
297
298
298
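A minimal usage sketch of how an extension might call strip(); the repository path, revision and topic below are hypothetical, and the locking and no-open-transaction requirements mirror the checks at the top of the function.

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'/path/to/repo')    # hypothetical path
    node = repo[b'feature-head'].node()           # hypothetical revision

    with repo.wlock(), repo.lock():               # caller must hold the repo lock
        assert repo.currenttransaction() is None  # strip() refuses to run otherwise
        backupfile = strip(ui, repo, [node], backup=True, topic=b'example')
        if backupfile:
            ui.status(b'backup bundle: %s\n' % backupfile)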
299 def softstrip(ui, repo, nodelist, backup=True, topic=b'backup'):
299 def softstrip(ui, repo, nodelist, backup=True, topic=b'backup'):
300 """perform a "soft" strip using the archived phase"""
300 """perform a "soft" strip using the archived phase"""
301 tostrip = [c.node() for c in repo.set(b'sort(%ln::)', nodelist)]
301 tostrip = [c.node() for c in repo.set(b'sort(%ln::)', nodelist)]
302 if not tostrip:
302 if not tostrip:
303 return None
303 return None
304
304
305 newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
305 newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
306 backupfile = None
306 backupfile = None
306 if backup:
306 if backup:
307 node = tostrip[0]
307 node = tostrip[0]
308 backupfile = _createstripbackup(repo, tostrip, node, topic)
308 backupfile = _createstripbackup(repo, tostrip, node, topic)
309
309
310 with repo.transaction(b'strip') as tr:
310 with repo.transaction(b'strip') as tr:
311 phases.retractboundary(repo, tr, phases.archived, tostrip)
311 phases.retractboundary(repo, tr, phases.archived, tostrip)
312 bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
312 bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
313 repo._bookmarks.applychanges(repo, tr, bmchanges)
313 repo._bookmarks.applychanges(repo, tr, bmchanges)
314 return backupfile
314 return backupfile
315
315
316
316
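By contrast, a hedged sketch of calling softstrip(): the changesets are not removed from disk but retracted to the archived phase by the phases.retractboundary() call above, so they only disappear from normal repository views. The revision name is hypothetical.

    with repo.wlock(), repo.lock():
        node = repo[b'abandoned-head'].node()     # hypothetical revision
        backupfile = softstrip(ui, repo, [node], backup=True)
    # the changesets remain in the changelog but are filtered out of normal
    # views; an unfiltered view (e.g. `hg log --hidden`) still shows them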
317 def _bookmarkmovements(repo, tostrip):
317 def _bookmarkmovements(repo, tostrip):
318 # compute necessary bookmark movement
318 # compute necessary bookmark movement
319 bm = repo._bookmarks
319 bm = repo._bookmarks
320 updatebm = []
320 updatebm = []
321 for m in bm:
321 for m in bm:
322 rev = repo[bm[m]].rev()
322 rev = repo[bm[m]].rev()
323 if rev in tostrip:
323 if rev in tostrip:
324 updatebm.append(m)
324 updatebm.append(m)
325 newbmtarget = None
325 newbmtarget = None
326 # If we need to move bookmarks, compute bookmark
326 # If we need to move bookmarks, compute bookmark
327 # targets. Otherwise we can skip doing this logic.
327 # targets. Otherwise we can skip doing this logic.
328 if updatebm:
328 if updatebm:
329 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
329 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
330 # but is much faster
330 # but is much faster
331 newbmtarget = repo.revs(b'max(parents(%ld) - (%ld))', tostrip, tostrip)
331 newbmtarget = repo.revs(b'max(parents(%ld) - (%ld))', tostrip, tostrip)
332 if newbmtarget:
332 if newbmtarget:
333 newbmtarget = repo[newbmtarget.first()].node()
333 newbmtarget = repo[newbmtarget.first()].node()
334 else:
334 else:
335 newbmtarget = b'.'
335 newbmtarget = b'.'
336 return newbmtarget, updatebm
336 return newbmtarget, updatebm
337
337
338
338
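A small worked example of the revset identity used in _bookmarkmovements() above, assuming a purely linear history with illustrative revision numbers:

    # history: 0 - 1 - 2 - 3 - 4, bookmarks on 3 and 4, tostrip = {3, 4}
    #
    #   parents({3, 4}) - {3, 4}   = {2}
    #   heads(::{3, 4} - {3, 4})   = heads({0, 1, 2}) = {2}
    #
    # max() of either set is revision 2, so both bookmarks move to revision 2.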
339 def _createstripbackup(repo, stripbases, node, topic):
339 def _createstripbackup(repo, stripbases, node, topic):
340 # backup the changeset we are about to strip
340 # backup the changeset we are about to strip
341 vfs = repo.vfs
341 vfs = repo.vfs
342 cl = repo.changelog
342 cl = repo.changelog
343 backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic)
343 backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic)
344 repo.ui.status(_(b"saved backup bundle to %s\n") % vfs.join(backupfile))
344 repo.ui.status(_(b"saved backup bundle to %s\n") % vfs.join(backupfile))
345 repo.ui.log(
345 repo.ui.log(
346 b"backupbundle", b"saved backup bundle to %s\n", vfs.join(backupfile)
346 b"backupbundle", b"saved backup bundle to %s\n", vfs.join(backupfile)
347 )
347 )
348 return backupfile
348 return backupfile
349
349
350
350
351 def safestriproots(ui, repo, nodes):
351 def safestriproots(ui, repo, nodes):
352 """return list of roots of nodes where descendants are covered by nodes"""
352 """return list of roots of nodes where descendants are covered by nodes"""
353 torev = repo.unfiltered().changelog.rev
353 torev = repo.unfiltered().changelog.rev
354 revs = {torev(n) for n in nodes}
354 revs = {torev(n) for n in nodes}
355 # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
355 # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
356 # orphaned = affected - wanted
356 # orphaned = affected - wanted
357 # affected = descendants(roots(wanted))
357 # affected = descendants(roots(wanted))
358 # wanted = revs
358 # wanted = revs
359 revset = b'%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
359 revset = b'%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
360 tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs))
360 tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs))
361 notstrip = revs - tostrip
361 notstrip = revs - tostrip
362 if notstrip:
362 if notstrip:
363 nodestr = b', '.join(sorted(short(repo[n].node()) for n in notstrip))
363 nodestr = b', '.join(sorted(short(repo[n].node()) for n in notstrip))
364 ui.warn(
364 ui.warn(
365 _(b'warning: orphaned descendants detected, not stripping %s\n')
365 _(b'warning: orphaned descendants detected, not stripping %s\n')
366 % nodestr
366 % nodestr
367 )
367 )
368 return [c.node() for c in repo.set(b'roots(%ld)', tostrip)]
368 return [c.node() for c in repo.set(b'roots(%ld)', tostrip)]
369
369
370
370
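A worked example of the formula in the comments of safestriproots(), on a hypothetical history where revision 4 branches off revision 2 (internal-phase changesets are ignored here):

    # history: 0 - 1 - 2 - 3      nodes = {2, 3}
    #                  \
    #                   4         (a descendant of 2 that is *not* in nodes)
    #
    # affected = descendants(roots({2, 3})) = descendants(2) = {2, 3, 4}
    # orphaned = affected - wanted          = {4}
    # tostrip  = wanted - ancestors(orphaned)
    #          = {2, 3} - {0, 1, 2, 4}      = {3}
    #
    # revision 2 is left alone (the warning names it) and roots({3}) = {3}
    # is what the function returns.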
371 class stripcallback(object):
371 class stripcallback(object):
372 """used as a transaction postclose callback"""
372 """used as a transaction postclose callback"""
373
373
374 def __init__(self, ui, repo, backup, topic):
374 def __init__(self, ui, repo, backup, topic):
375 self.ui = ui
375 self.ui = ui
376 self.repo = repo
376 self.repo = repo
377 self.backup = backup
377 self.backup = backup
378 self.topic = topic or b'backup'
378 self.topic = topic or b'backup'
379 self.nodelist = []
379 self.nodelist = []
380
380
381 def addnodes(self, nodes):
381 def addnodes(self, nodes):
382 self.nodelist.extend(nodes)
382 self.nodelist.extend(nodes)
383
383
384 def __call__(self, tr):
384 def __call__(self, tr):
385 roots = safestriproots(self.ui, self.repo, self.nodelist)
385 roots = safestriproots(self.ui, self.repo, self.nodelist)
386 if roots:
386 if roots:
387 strip(self.ui, self.repo, roots, self.backup, self.topic)
387 strip(self.ui, self.repo, roots, self.backup, self.topic)
388
388
389
389
390 def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
390 def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
391 """like strip, but works inside transaction and won't strip irreverent revs
391 """like strip, but works inside transaction and won't strip irreverent revs
392
392
393 nodelist must explicitly contain all descendants. Otherwise a warning will
393 nodelist must explicitly contain all descendants. Otherwise a warning will
394 be printed that some nodes are not stripped.
394 be printed that some nodes are not stripped.
395
395
396 Will do a backup if `backup` is True. The last non-None "topic" will be
396 Will do a backup if `backup` is True. The last non-None "topic" will be
397 used as the backup topic name. The default backup topic name is "backup".
397 used as the backup topic name. The default backup topic name is "backup".
398 """
398 """
399 tr = repo.currenttransaction()
399 tr = repo.currenttransaction()
400 if not tr:
400 if not tr:
401 nodes = safestriproots(ui, repo, nodelist)
401 nodes = safestriproots(ui, repo, nodelist)
402 return strip(ui, repo, nodes, backup=backup, topic=topic)
402 return strip(ui, repo, nodes, backup=backup, topic=topic)
403 # transaction postclose callbacks are called in alphabetical order.
403 # transaction postclose callbacks are called in alphabetical order.
404 # use '\xff' as prefix so we are likely to be called last.
404 # use '\xff' as prefix so we are likely to be called last.
405 callback = tr.getpostclose(b'\xffstrip')
405 callback = tr.getpostclose(b'\xffstrip')
406 if callback is None:
406 if callback is None:
407 callback = stripcallback(ui, repo, backup=backup, topic=topic)
407 callback = stripcallback(ui, repo, backup=backup, topic=topic)
408 tr.addpostclose(b'\xffstrip', callback)
408 tr.addpostclose(b'\xffstrip', callback)
409 if topic:
409 if topic:
410 callback.topic = topic
410 callback.topic = topic
411 callback.addnodes(nodelist)
411 callback.addnodes(nodelist)
412
412
413
413
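A hedged sketch of how a history-rewriting command could use delayedstrip() from inside its own transaction; the transaction name and node list are hypothetical.

    with repo.wlock(), repo.lock(), repo.transaction(b'rewrite') as tr:
        # ... create the replacement commits here ...
        replaced = [repo[b'old-head'].node()]     # hypothetical nodes to discard
        delayedstrip(ui, repo, replaced, topic=b'rewrite')
    # the actual strip runs in the b'\xffstrip' postclose callback, i.e. only
    # after the transaction has written the new commits to disk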
414 def stripmanifest(repo, striprev, tr, files):
414 def stripmanifest(repo, striprev, tr, files):
415 for revlog in manifestrevlogs(repo):
415 for revlog in manifestrevlogs(repo):
416 revlog.strip(striprev, tr)
416 revlog.strip(striprev, tr)
417
417
418
418
419 def manifestrevlogs(repo):
419 def manifestrevlogs(repo):
420 yield repo.manifestlog.getstorage(b'')
420 yield repo.manifestlog.getstorage(b'')
421 if b'treemanifest' in repo.requirements:
421 if b'treemanifest' in repo.requirements:
422 # This logic is safe if treemanifest isn't enabled, but also
422 # This logic is safe if treemanifest isn't enabled, but also
423 # pointless, so we skip it if treemanifest isn't enabled.
423 # pointless, so we skip it if treemanifest isn't enabled.
424 for unencoded, encoded, size in repo.store.datafiles():
424 for unencoded, encoded, size in repo.store.datafiles():
425 if unencoded.startswith(b'meta/') and unencoded.endswith(
425 if unencoded.startswith(b'meta/') and unencoded.endswith(
426 b'00manifest.i'
426 b'00manifest.i'
427 ):
427 ):
428 dir = unencoded[5:-12]
428 dir = unencoded[5:-12]
429 yield repo.manifestlog.getstorage(dir)
429 yield repo.manifestlog.getstorage(dir)
430
430
431
431
432 def rebuildfncache(ui, repo):
432 def rebuildfncache(ui, repo):
433 """Rebuilds the fncache file from repo history.
433 """Rebuilds the fncache file from repo history.
434
434
435 Missing entries will be added. Extra entries will be removed.
435 Missing entries will be added. Extra entries will be removed.
436 """
436 """
437 repo = repo.unfiltered()
437 repo = repo.unfiltered()
438
438
439 if b'fncache' not in repo.requirements:
439 if b'fncache' not in repo.requirements:
440 ui.warn(
440 ui.warn(
441 _(
441 _(
442 b'(not rebuilding fncache because repository does not '
442 b'(not rebuilding fncache because repository does not '
443 b'support fncache)\n'
443 b'support fncache)\n'
444 )
444 )
445 )
445 )
446 return
446 return
447
447
448 with repo.lock():
448 with repo.lock():
449 fnc = repo.store.fncache
449 fnc = repo.store.fncache
450 fnc.ensureloaded(warn=ui.warn)
450 fnc.ensureloaded(warn=ui.warn)
451
451
452 oldentries = set(fnc.entries)
452 oldentries = set(fnc.entries)
453 newentries = set()
453 newentries = set()
454 seenfiles = set()
454 seenfiles = set()
455
455
456 progress = ui.makeprogress(
456 progress = ui.makeprogress(
457 _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
457 _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
458 )
458 )
459 for rev in repo:
459 for rev in repo:
460 progress.update(rev)
460 progress.update(rev)
461
461
462 ctx = repo[rev]
462 ctx = repo[rev]
463 for f in ctx.files():
463 for f in ctx.files():
464 # This is to minimize I/O.
464 # This is to minimize I/O.
465 if f in seenfiles:
465 if f in seenfiles:
466 continue
466 continue
467 seenfiles.add(f)
467 seenfiles.add(f)
468
468
469 i = b'data/%s.i' % f
469 i = b'data/%s.i' % f
470 d = b'data/%s.d' % f
470 d = b'data/%s.d' % f
471
471
472 if repo.store._exists(i):
472 if repo.store._exists(i):
473 newentries.add(i)
473 newentries.add(i)
474 if repo.store._exists(d):
474 if repo.store._exists(d):
475 newentries.add(d)
475 newentries.add(d)
476
476
477 progress.complete()
477 progress.complete()
478
478
479 if b'treemanifest' in repo.requirements:
479 if b'treemanifest' in repo.requirements:
480 # This logic is safe if treemanifest isn't enabled, but also
480 # This logic is safe if treemanifest isn't enabled, but also
481 # pointless, so we skip it if treemanifest isn't enabled.
481 # pointless, so we skip it if treemanifest isn't enabled.
482 for dir in pathutil.dirs(seenfiles):
482 for dir in pathutil.dirs(seenfiles):
483 i = b'meta/%s/00manifest.i' % dir
483 i = b'meta/%s/00manifest.i' % dir
484 d = b'meta/%s/00manifest.d' % dir
484 d = b'meta/%s/00manifest.d' % dir
485
485
486 if repo.store._exists(i):
486 if repo.store._exists(i):
487 newentries.add(i)
487 newentries.add(i)
488 if repo.store._exists(d):
488 if repo.store._exists(d):
489 newentries.add(d)
489 newentries.add(d)
490
490
491 addcount = len(newentries - oldentries)
491 addcount = len(newentries - oldentries)
492 removecount = len(oldentries - newentries)
492 removecount = len(oldentries - newentries)
493 for p in sorted(oldentries - newentries):
493 for p in sorted(oldentries - newentries):
494 ui.write(_(b'removing %s\n') % p)
494 ui.write(_(b'removing %s\n') % p)
495 for p in sorted(newentries - oldentries):
495 for p in sorted(newentries - oldentries):
496 ui.write(_(b'adding %s\n') % p)
496 ui.write(_(b'adding %s\n') % p)
497
497
498 if addcount or removecount:
498 if addcount or removecount:
499 ui.write(
499 ui.write(
500 _(b'%d items added, %d removed from fncache\n')
500 _(b'%d items added, %d removed from fncache\n')
501 % (addcount, removecount)
501 % (addcount, removecount)
502 )
502 )
503 fnc.entries = newentries
503 fnc.entries = newentries
504 fnc._dirty = True
504 fnc._dirty = True
505
505
506 with repo.transaction(b'fncache') as tr:
506 with repo.transaction(b'fncache') as tr:
507 fnc.write(tr)
507 fnc.write(tr)
508 else:
508 else:
509 ui.write(_(b'fncache already up to date\n'))
509 ui.write(_(b'fncache already up to date\n'))
510
510
511
511
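A minimal sketch of exposing rebuildfncache() as a command, roughly the way the built-in `hg debugrebuildfncache` does; the command name used here is hypothetical and the registration is simplified.

    from mercurial import registrar

    cmdtable = {}
    command = registrar.command(cmdtable)

    @command(b'myrebuildfncache', [], b'')
    def myrebuildfncache(ui, repo):
        """rebuild the fncache file from repository history"""
        rebuildfncache(ui, repo)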
512 def deleteobsmarkers(obsstore, indices):
512 def deleteobsmarkers(obsstore, indices):
513 """Delete some obsmarkers from obsstore and return how many were deleted
513 """Delete some obsmarkers from obsstore and return how many were deleted
514
514
515 'indices' is a list of ints which are the indices
515 'indices' is a list of ints which are the indices
516 of the markers to be deleted.
516 of the markers to be deleted.
517
517
518 Every invocation of this function completely rewrites the obsstore file,
518 Every invocation of this function completely rewrites the obsstore file,
519 skipping the markers we want to be removed. The new temporary file is
519 skipping the markers we want to be removed. The new temporary file is
520 created, remaining markers are written there and on .close() this file
520 created, remaining markers are written there and on .close() this file
521 gets atomically renamed to obsstore, thus guaranteeing consistency."""
521 gets atomically renamed to obsstore, thus guaranteeing consistency."""
522 if not indices:
522 if not indices:
523 # we don't want to rewrite the obsstore with the same content
523 # we don't want to rewrite the obsstore with the same content
524 return
524 return
525
525
526 left = []
526 left = []
527 current = obsstore._all
527 current = obsstore._all
528 n = 0
528 n = 0
529 for i, m in enumerate(current):
529 for i, m in enumerate(current):
530 if i in indices:
530 if i in indices:
531 n += 1
531 n += 1
532 continue
532 continue
533 left.append(m)
533 left.append(m)
534
534
535 newobsstorefile = obsstore.svfs(b'obsstore', b'w', atomictemp=True)
535 newobsstorefile = obsstore.svfs(b'obsstore', b'w', atomictemp=True)
536 for bytes in obsolete.encodemarkers(left, True, obsstore._version):
536 for bytes in obsolete.encodemarkers(left, True, obsstore._version):
537 newobsstorefile.write(bytes)
537 newobsstorefile.write(bytes)
538 newobsstorefile.close()
538 newobsstorefile.close()
539 return n
539 return n
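A short sketch of building the 'indices' argument for deleteobsmarkers(), mirroring how strip() computes stripobsidx above; 'unwanted' is a hypothetical collection of marker tuples.

    indices = [i for i, m in enumerate(repo.obsstore) if m in unwanted]
    if indices:
        n = deleteobsmarkers(repo.obsstore, indices)
        repo.ui.note(b'%d obsolescence markers deleted\n' % n)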