path: pass `path` to `peer` in infinite push...
marmoute
r50608:6f89a1d7 default
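
The functional change shown in this diff is in `_push()`: the `hg.peer()` call is now given the `path` object returned by `urlutil.get_push_paths()` rather than its location string `destpath` (marked with -/+ below).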
@@ -1,1385 +1,1385 @@
# Infinite push
#
# Copyright 2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
""" store some pushes in a remote blob store on the server (EXPERIMENTAL)

    IMPORTANT: if you use this extension, please contact
    mercurial-devel@mercurial-scm.org ASAP. This extension is believed to
    be unused and barring learning of users of this functionality, we will
    delete this code at the end of 2020.

    [infinitepush]
    # Server-side and client-side option. Pattern of the infinitepush bookmark
    branchpattern = PATTERN

    # Server or client
    server = False

    # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
    indextype = disk

    # Server-side option. Used only if indextype=sql.
    # Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
    sqlhost = IP:PORT:DB_NAME:USER:PASSWORD

    # Server-side option. Used only if indextype=disk.
    # Filesystem path to the index store
    indexpath = PATH

    # Server-side option. Possible values: 'disk' or 'external'
    # Fails if not set
    storetype = disk

    # Server-side option.
    # Path to the binary that will save bundle to the bundlestore
    # Formatted cmd line will be passed to it (see `put_args`)
    put_binary = put

    # Server-side option. Used only if storetype=external.
    # Format cmd-line string for put binary. Placeholder: {filename}
    put_args = {filename}

    # Server-side option.
    # Path to the binary that gets the bundle from the bundlestore.
    # Formatted cmd line will be passed to it (see `get_args`)
    get_binary = get

    # Server-side option. Used only if storetype=external.
    # Format cmd-line string for get binary. Placeholders: {filename} {handle}
    get_args = {filename} {handle}

    # Server-side option
    logfile = FILE

    # Server-side option
    loglevel = DEBUG

    # Server-side option. Used only if indextype=sql.
    # Sets mysql wait_timeout option.
    waittimeout = 300

    # Server-side option. Used only if indextype=sql.
    # Sets mysql innodb_lock_wait_timeout option.
    locktimeout = 120

    # Server-side option. Used only if indextype=sql.
    # Name of the repository
    reponame = ''

    # Client-side option. Used by --list-remote option. List of remote scratch
    # patterns to list if no patterns are specified.
    defaultremotepatterns = ['*']

    # Instructs infinitepush to forward all received bundle2 parts to the
    # bundle for storage. Defaults to False.
    storeallparts = True

    # Routes each incoming push to the bundlestore. Defaults to False.
    pushtobundlestore = True

    [remotenames]
    # Client-side option
    # This option should be set only if remotenames extension is enabled.
    # Whether remote bookmarks are tracked by remotenames extension.
    bookmarks = True
"""


import collections
import contextlib
import functools
import logging
import os
import random
import re
import socket
import subprocess
import time

from mercurial.node import (
    bin,
    hex,
)

from mercurial.i18n import _

from mercurial.pycompat import (
    getattr,
    open,
)

from mercurial.utils import (
    procutil,
    stringutil,
    urlutil,
)

from mercurial import (
    bundle2,
    changegroup,
    commands,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    hg,
    localrepo,
    phases,
    pushkey,
    pycompat,
    registrar,
    util,
    wireprototypes,
    wireprotov1peer,
    wireprotov1server,
)

from . import (
    bundleparts,
    common,
)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

configitem(
    b'infinitepush',
    b'server',
    default=False,
)
configitem(
    b'infinitepush',
    b'storetype',
    default=b'',
)
configitem(
    b'infinitepush',
    b'indextype',
    default=b'',
)
configitem(
    b'infinitepush',
    b'indexpath',
    default=b'',
)
configitem(
    b'infinitepush',
    b'storeallparts',
    default=False,
)
configitem(
    b'infinitepush',
    b'reponame',
    default=b'',
)
configitem(
    b'scratchbranch',
    b'storepath',
    default=b'',
)
configitem(
    b'infinitepush',
    b'branchpattern',
    default=b'',
)
configitem(
    b'infinitepush',
    b'pushtobundlestore',
    default=False,
)
configitem(
    b'experimental',
    b'server-bundlestore-bookmark',
    default=b'',
)
configitem(
    b'experimental',
    b'infinitepush-scratchpush',
    default=False,
)

experimental = b'experimental'
configbookmark = b'server-bundlestore-bookmark'
configscratchpush = b'infinitepush-scratchpush'

scratchbranchparttype = bundleparts.scratchbranchparttype
revsetpredicate = registrar.revsetpredicate()
templatekeyword = registrar.templatekeyword()
_scratchbranchmatcher = lambda x: False
_maybehash = re.compile('^[a-f0-9]+$').search


def _buildexternalbundlestore(ui):
    put_args = ui.configlist(b'infinitepush', b'put_args', [])
    put_binary = ui.config(b'infinitepush', b'put_binary')
    if not put_binary:
        raise error.Abort(b'put binary is not specified')
    get_args = ui.configlist(b'infinitepush', b'get_args', [])
    get_binary = ui.config(b'infinitepush', b'get_binary')
    if not get_binary:
        raise error.Abort(b'get binary is not specified')
    from . import store

    return store.externalbundlestore(put_binary, put_args, get_binary, get_args)


def _buildsqlindex(ui):
    sqlhost = ui.config(b'infinitepush', b'sqlhost')
    if not sqlhost:
        raise error.Abort(_(b'please set infinitepush.sqlhost'))
    host, port, db, user, password = sqlhost.split(b':')
    reponame = ui.config(b'infinitepush', b'reponame')
    if not reponame:
        raise error.Abort(_(b'please set infinitepush.reponame'))

    logfile = ui.config(b'infinitepush', b'logfile', b'')
    waittimeout = ui.configint(b'infinitepush', b'waittimeout', 300)
    locktimeout = ui.configint(b'infinitepush', b'locktimeout', 120)
    from . import sqlindexapi

    return sqlindexapi.sqlindexapi(
        reponame,
        host,
        port,
        db,
        user,
        password,
        logfile,
        _getloglevel(ui),
        waittimeout=waittimeout,
        locktimeout=locktimeout,
    )
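
# Illustrative example (not from the original source): with
#   sqlhost = 127.0.0.1:3306:hgbundles:hguser:secret
# the split above yields host=127.0.0.1, port=3306, db=hgbundles,
# user=hguser, password=secret. Note that a password containing b':'
# would break the five-way split.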


def _getloglevel(ui):
    loglevel = ui.config(b'infinitepush', b'loglevel', b'DEBUG')
    numeric_loglevel = getattr(logging, loglevel.upper(), None)
    if not isinstance(numeric_loglevel, int):
        raise error.Abort(_(b'invalid log level %s') % loglevel)
    return numeric_loglevel
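
# Illustrative example (not from the original source): loglevel = DEBUG
# resolves to logging.DEBUG (10); a value with no logging counterpart,
# such as VERBOSE, raises Abort ("invalid log level"). The getattr used
# here is pycompat's wrapper, which accepts the bytes attribute name.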


def _tryhoist(ui, remotebookmark):
    """returns a bookmark with the hoisted part removed

    The remotenames extension has a 'hoist' config that allows using remote
    bookmarks without specifying the remote path. For example, 'hg update
    master' works as well as 'hg update remote/master'. We want to allow
    the same in infinitepush.
    """

    if common.isremotebooksenabled(ui):
        hoist = ui.config(b'remotenames', b'hoistedpeer') + b'/'
        if remotebookmark.startswith(hoist):
            return remotebookmark[len(hoist) :]
    return remotebookmark
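
# Illustrative example (not from the original source): with remote
# bookmarks enabled and remotenames.hoistedpeer = default,
# _tryhoist(ui, b'default/master') returns b'master', while
# b'other/master' comes back unchanged.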


class bundlestore:
    def __init__(self, repo):
        self._repo = repo
        storetype = self._repo.ui.config(b'infinitepush', b'storetype')
        if storetype == b'disk':
            from . import store

            self.store = store.filebundlestore(self._repo.ui, self._repo)
        elif storetype == b'external':
            self.store = _buildexternalbundlestore(self._repo.ui)
        else:
            raise error.Abort(
                _(b'unknown infinitepush store type specified %s') % storetype
            )

        indextype = self._repo.ui.config(b'infinitepush', b'indextype')
        if indextype == b'disk':
            from . import fileindexapi

            self.index = fileindexapi.fileindexapi(self._repo)
        elif indextype == b'sql':
            self.index = _buildsqlindex(self._repo.ui)
        else:
            raise error.Abort(
                _(b'unknown infinitepush index type specified %s') % indextype
            )


def _isserver(ui):
    return ui.configbool(b'infinitepush', b'server')


def reposetup(ui, repo):
    if _isserver(ui) and repo.local():
        repo.bundlestore = bundlestore(repo)


def extsetup(ui):
    commonsetup(ui)
    if _isserver(ui):
        serverextsetup(ui)
    else:
        clientextsetup(ui)


def commonsetup(ui):
    wireprotov1server.commands[b'listkeyspatterns'] = (
        wireprotolistkeyspatterns,
        b'namespace patterns',
    )
    scratchbranchpat = ui.config(b'infinitepush', b'branchpattern')
    if scratchbranchpat:
        global _scratchbranchmatcher
        kind, pat, _scratchbranchmatcher = stringutil.stringmatcher(
            scratchbranchpat
        )


def serverextsetup(ui):
    origpushkeyhandler = bundle2.parthandlermapping[b'pushkey']

    def newpushkeyhandler(*args, **kwargs):
        bundle2pushkey(origpushkeyhandler, *args, **kwargs)

    newpushkeyhandler.params = origpushkeyhandler.params
    bundle2.parthandlermapping[b'pushkey'] = newpushkeyhandler

    orighandlephasehandler = bundle2.parthandlermapping[b'phase-heads']
    newphaseheadshandler = lambda *args, **kwargs: bundle2handlephases(
        orighandlephasehandler, *args, **kwargs
    )
    newphaseheadshandler.params = orighandlephasehandler.params
    bundle2.parthandlermapping[b'phase-heads'] = newphaseheadshandler

    extensions.wrapfunction(
        localrepo.localrepository, b'listkeys', localrepolistkeys
    )
    wireprotov1server.commands[b'lookup'] = (
        _lookupwrap(wireprotov1server.commands[b'lookup'][0]),
        b'key',
    )
    extensions.wrapfunction(exchange, b'getbundlechunks', getbundlechunks)

    extensions.wrapfunction(bundle2, b'processparts', processparts)


def clientextsetup(ui):
    entry = extensions.wrapcommand(commands.table, b'push', _push)

    entry[1].append(
        (
            b'',
            b'bundle-store',
            None,
            _(b'force push to go to bundle store (EXPERIMENTAL)'),
        )
    )

    extensions.wrapcommand(commands.table, b'pull', _pull)

    extensions.wrapfunction(discovery, b'checkheads', _checkheads)

    wireprotov1peer.wirepeer.listkeyspatterns = listkeyspatterns

    partorder = exchange.b2partsgenorder
    index = partorder.index(b'changeset')
    partorder.insert(
        index, partorder.pop(partorder.index(scratchbranchparttype))
    )


def _checkheads(orig, pushop):
    if pushop.ui.configbool(experimental, configscratchpush, False):
        return
    return orig(pushop)


def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
    patterns = wireprototypes.decodelist(patterns)
    d = repo.listkeys(encoding.tolocal(namespace), patterns).items()
    return pushkey.encodekeys(d)


def localrepolistkeys(orig, self, namespace, patterns=None):
    if namespace == b'bookmarks' and patterns:
        index = self.bundlestore.index
        results = {}
        bookmarks = orig(self, namespace)
        for pattern in patterns:
            results.update(index.getbookmarks(pattern))
            if pattern.endswith(b'*'):
                pattern = b're:^' + pattern[:-1] + b'.*'
            kind, pat, matcher = stringutil.stringmatcher(pattern)
            for bookmark, node in bookmarks.items():
                if matcher(bookmark):
                    results[bookmark] = node
        return results
    else:
        return orig(self, namespace)
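
# Illustrative example (not from the original source): a pattern like
# b'infinitepush/*' is rewritten above to b're:^infinitepush/.*', so the
# results combine index bookmarks with any matching local bookmarks.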


@wireprotov1peer.batchable
def listkeyspatterns(self, namespace, patterns):
    if not self.capable(b'pushkey'):
        return {}, None
    self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)

    def decode(d):
        self.ui.debug(
            b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
        )
        return pushkey.decodekeys(d)

    return {
        b'namespace': encoding.fromlocal(namespace),
        b'patterns': wireprototypes.encodelist(patterns),
    }, decode


def _readbundlerevs(bundlerepo):
    return list(bundlerepo.revs(b'bundle()'))


def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
    """Tells remotefilelog to include all changed files in the changegroup

    By default remotefilelog doesn't include file content in the changegroup,
    but we need to include it when fetching from the bundlestore.
    """
    changedfiles = set()
    cl = bundlerepo.changelog
    for r in bundlerevs:
        # [3] means changed files
        changedfiles.update(cl.read(r)[3])
    if not changedfiles:
        return bundlecaps

    changedfiles = b'\0'.join(changedfiles)
    newcaps = []
    appended = False
    for cap in bundlecaps or []:
        if cap.startswith(b'excludepattern='):
            newcaps.append(b'\0'.join((cap, changedfiles)))
            appended = True
        else:
            newcaps.append(cap)
    if not appended:
        # excludepattern cap not found; just append it
        newcaps.append(b'excludepattern=' + changedfiles)

    return newcaps
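
# Illustrative example (not from the original source): for changed files
# file1 and file2, an existing cap b'excludepattern=foo' becomes
# b'excludepattern=foo\0file1\0file2'; if no such cap is present,
# b'excludepattern=file1\0file2' is appended as a new cap.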


def _rebundle(bundlerepo, bundleroots, unknownhead):
    """
    A bundle may include more revisions than the user requested; for example,
    the user may ask for one revision while the bundle also contains its
    descendants. This function filters out all revisions the user did not
    request.
    """
    parts = []

    version = b'02'
    outgoing = discovery.outgoing(
        bundlerepo, commonheads=bundleroots, ancestorsof=[unknownhead]
    )
    cgstream = changegroup.makestream(bundlerepo, outgoing, version, b'pull')
    cgstream = util.chunkbuffer(cgstream).read()
    cgpart = bundle2.bundlepart(b'changegroup', data=cgstream)
    cgpart.addparam(b'version', version)
    parts.append(cgpart)

    return parts


def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
    cl = bundlerepo.changelog
    bundleroots = []
    for rev in bundlerevs:
        node = cl.node(rev)
        parents = cl.parents(node)
        for parent in parents:
            # include all revs that exist in the main repo
            # to make sure that bundle may apply client-side
            if parent in oldrepo:
                bundleroots.append(parent)
    return bundleroots


def _needsrebundling(head, bundlerepo):
    bundleheads = list(bundlerepo.revs(b'heads(bundle())'))
    return not (
        len(bundleheads) == 1 and bundlerepo[bundleheads[0]].node() == head
    )
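
# Illustrative note (not from the original source): rebundling is skipped
# only when the stored bundle has exactly one head and that head is the
# one being requested; any other shape falls through to _rebundle(),
# which regenerates a changegroup limited to the requested head.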


def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
    """generates the bundle that will be sent to the user

    returns a tuple with the raw bundle string and the bundle type
    """
    parts = []
    if not _needsrebundling(head, bundlerepo):
        with util.posixfile(bundlefile, b"rb") as f:
            unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
            if isinstance(unbundler, changegroup.cg1unpacker):
                part = bundle2.bundlepart(
                    b'changegroup', data=unbundler._stream.read()
                )
                part.addparam(b'version', b'01')
                parts.append(part)
            elif isinstance(unbundler, bundle2.unbundle20):
                haschangegroup = False
                for part in unbundler.iterparts():
                    if part.type == b'changegroup':
                        haschangegroup = True
                    newpart = bundle2.bundlepart(part.type, data=part.read())
                    for key, value in part.params.items():
                        newpart.addparam(key, value)
                    parts.append(newpart)

                if not haschangegroup:
                    raise error.Abort(
                        b'unexpected bundle without changegroup part, '
                        + b'head: %s' % hex(head),
                        hint=b'report to administrator',
                    )
            else:
                raise error.Abort(b'unknown bundle type')
    else:
        parts = _rebundle(bundlerepo, bundleroots, head)

    return parts


def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
    heads = heads or []
    # newheads are parents of roots of scratch bundles that were requested
    newphases = {}
    scratchbundles = []
    newheads = []
    scratchheads = []
    nodestobundle = {}
    allbundlestocleanup = []
    try:
        for head in heads:
            if not repo.changelog.index.has_node(head):
                if head not in nodestobundle:
                    newbundlefile = common.downloadbundle(repo, head)
                    bundlepath = b"bundle:%s+%s" % (repo.root, newbundlefile)
                    bundlerepo = hg.repository(repo.ui, bundlepath)

                    allbundlestocleanup.append((bundlerepo, newbundlefile))
                    bundlerevs = set(_readbundlerevs(bundlerepo))
                    bundlecaps = _includefilelogstobundle(
                        bundlecaps, bundlerepo, bundlerevs, repo.ui
                    )
                    cl = bundlerepo.changelog
                    bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
                    for rev in bundlerevs:
                        node = cl.node(rev)
                        newphases[hex(node)] = str(phases.draft)
                        nodestobundle[node] = (
                            bundlerepo,
                            bundleroots,
                            newbundlefile,
                        )

                scratchbundles.append(
                    _generateoutputparts(head, *nodestobundle[head])
                )
                newheads.extend(bundleroots)
                scratchheads.append(head)
    finally:
        for bundlerepo, bundlefile in allbundlestocleanup:
            bundlerepo.close()
            try:
                os.unlink(bundlefile)
            except (IOError, OSError):
                # if we can't clean up the file then just ignore the error,
                # no need to fail
                pass

    pullfrombundlestore = bool(scratchbundles)
    wrappedchangegrouppart = False
    wrappedlistkeys = False
    oldchangegrouppart = exchange.getbundle2partsmapping[b'changegroup']
    try:

        def _changegrouppart(bundler, *args, **kwargs):
            # Order is important here. First add non-scratch part
            # and only then add parts with scratch bundles because
            # non-scratch part contains parents of roots of scratch bundles.
            result = oldchangegrouppart(bundler, *args, **kwargs)
            for bundle in scratchbundles:
                for part in bundle:
                    bundler.addpart(part)
            return result

        exchange.getbundle2partsmapping[b'changegroup'] = _changegrouppart
        wrappedchangegrouppart = True

        def _listkeys(orig, self, namespace):
            origvalues = orig(self, namespace)
            if namespace == b'phases' and pullfrombundlestore:
                if origvalues.get(b'publishing') == b'True':
                    # Make repo non-publishing to preserve draft phase
                    del origvalues[b'publishing']
                origvalues.update(newphases)
            return origvalues

        extensions.wrapfunction(
            localrepo.localrepository, b'listkeys', _listkeys
        )
        wrappedlistkeys = True
        heads = list((set(newheads) | set(heads)) - set(scratchheads))
        result = orig(
            repo, source, heads=heads, bundlecaps=bundlecaps, **kwargs
        )
    finally:
        if wrappedchangegrouppart:
            exchange.getbundle2partsmapping[b'changegroup'] = oldchangegrouppart
        if wrappedlistkeys:
            extensions.unwrapfunction(
                localrepo.localrepository, b'listkeys', _listkeys
            )
    return result


def _lookupwrap(orig):
    def _lookup(repo, proto, key):
        localkey = encoding.tolocal(key)

        if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
            scratchnode = repo.bundlestore.index.getnode(localkey)
            if scratchnode:
                return b"%d %s\n" % (1, scratchnode)
            else:
                return b"%d %s\n" % (
                    0,
                    b'scratch branch %s not found' % localkey,
                )
        else:
            try:
                r = hex(repo.lookup(localkey))
                return b"%d %s\n" % (1, r)
            except Exception as inst:
                if repo.bundlestore.index.getbundle(localkey):
                    return b"%d %s\n" % (1, localkey)
                else:
                    r = stringutil.forcebytestr(inst)
                    return b"%d %s\n" % (0, r)

    return _lookup
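
# Illustrative note (not from the original source): like the wrapped
# `lookup` wire command, _lookup answers b"1 <value>\n" on success
# (value being a hex node or the scratch bookmark key itself) and
# b"0 <error message>\n" on failure.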


def _pull(orig, ui, repo, source=b"default", **opts):
    opts = pycompat.byteskwargs(opts)
    # Copy paste from `pull` command
    source, branches = urlutil.get_unique_pull_path(
        b"infinite-push's pull",
        repo,
        ui,
        source,
        default_branches=opts.get(b'branch'),
    )

    scratchbookmarks = {}
    unfi = repo.unfiltered()
    unknownnodes = []
    for rev in opts.get(b'rev', []):
        if rev not in unfi:
            unknownnodes.append(rev)
    if opts.get(b'bookmark'):
        bookmarks = []
        revs = opts.get(b'rev') or []
        for bookmark in opts.get(b'bookmark'):
            if _scratchbranchmatcher(bookmark):
                # rev is not known yet
                # it will be fetched with listkeyspatterns next
                scratchbookmarks[bookmark] = b'REVTOFETCH'
            else:
                bookmarks.append(bookmark)

        if scratchbookmarks:
            other = hg.peer(repo, opts, source)
            try:
                fetchedbookmarks = other.listkeyspatterns(
                    b'bookmarks', patterns=scratchbookmarks
                )
                for bookmark in scratchbookmarks:
                    if bookmark not in fetchedbookmarks:
                        raise error.Abort(
                            b'remote bookmark %s not found!' % bookmark
                        )
                    scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
                    revs.append(fetchedbookmarks[bookmark])
            finally:
                other.close()
        opts[b'bookmark'] = bookmarks
        opts[b'rev'] = revs

    if scratchbookmarks or unknownnodes:
        # Set anyincoming to True
        extensions.wrapfunction(
            discovery, b'findcommonincoming', _findcommonincoming
        )
    try:
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save them before the pull and restore them
        # after.
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
        result = orig(ui, repo, source, **pycompat.strkwargs(opts))
        # TODO(stash): race condition is possible
        # if scratch bookmarks was updated right after orig.
        # But that's unlikely and shouldn't be harmful.
        if common.isremotebooksenabled(ui):
            remotescratchbookmarks.update(scratchbookmarks)
            _saveremotebookmarks(repo, remotescratchbookmarks, source)
        else:
            _savelocalbookmarks(repo, scratchbookmarks)
        return result
    finally:
        if scratchbookmarks:
            extensions.unwrapfunction(discovery, b'findcommonincoming')


def _readscratchremotebookmarks(ui, repo, other):
    if common.isremotebooksenabled(ui):
        remotenamesext = extensions.find(b'remotenames')
        remotepath = remotenamesext.activepath(repo.ui, other)
        result = {}
        # Let's refresh remotenames to make sure we have them up to date.
        # It seems that `repo.names['remotebookmarks']` may return stale
        # bookmarks, which results in deleting scratch bookmarks; our best
        # guess at a fix is to use `clearnames()`.
        repo._remotenames.clearnames()
        for remotebookmark in repo.names[b'remotebookmarks'].listnames(repo):
            path, bookname = remotenamesext.splitremotename(remotebookmark)
            if path == remotepath and _scratchbranchmatcher(bookname):
                nodes = repo.names[b'remotebookmarks'].nodes(
                    repo, remotebookmark
                )
                if nodes:
                    result[bookname] = hex(nodes[0])
        return result
    else:
        return {}


def _saveremotebookmarks(repo, newbookmarks, remote):
    remotenamesext = extensions.find(b'remotenames')
    remotepath = remotenamesext.activepath(repo.ui, remote)
    branches = collections.defaultdict(list)
    bookmarks = {}
    remotenames = remotenamesext.readremotenames(repo)
    for hexnode, nametype, remote, rname in remotenames:
        if remote != remotepath:
            continue
        if nametype == b'bookmarks':
            if rname in newbookmarks:
                # This can happen if a normal bookmark matches the scratch
                # branch pattern. In this case just use the current
                # bookmark node
                del newbookmarks[rname]
            bookmarks[rname] = hexnode
        elif nametype == b'branches':
            # saveremotenames expects 20 byte binary nodes for branches
            branches[rname].append(bin(hexnode))

    for bookmark, hexnode in newbookmarks.items():
        bookmarks[bookmark] = hexnode
    remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)


def _savelocalbookmarks(repo, bookmarks):
    if not bookmarks:
        return
    with repo.wlock(), repo.lock(), repo.transaction(b'bookmark') as tr:
        changes = []
        for scratchbook, node in bookmarks.items():
            changectx = repo[node]
            changes.append((scratchbook, changectx.node()))
        repo._bookmarks.applychanges(repo, tr, changes)


def _findcommonincoming(orig, *args, **kwargs):
    common, inc, remoteheads = orig(*args, **kwargs)
    return common, True, remoteheads


def _push(orig, ui, repo, *dests, **opts):
    opts = pycompat.byteskwargs(opts)
    bookmark = opts.get(b'bookmark')
    # we only support pushing one infinitepush bookmark at once
    if len(bookmark) == 1:
        bookmark = bookmark[0]
    else:
        bookmark = b''

    oldphasemove = None
    overrides = {(experimental, configbookmark): bookmark}

    with ui.configoverride(overrides, b'infinitepush'):
        scratchpush = opts.get(b'bundle_store')
        if _scratchbranchmatcher(bookmark):
            scratchpush = True
            # bundle2 can be sent back after push (for example, bundle2
            # containing `pushkey` part to update bookmarks)
            ui.setconfig(experimental, b'bundle2.pushback', True)

        if scratchpush:
            # this is an infinitepush; we don't want the bookmark to be
            # applied, rather it should be stored in the bundlestore
            opts[b'bookmark'] = []
            ui.setconfig(experimental, configscratchpush, True)
            oldphasemove = extensions.wrapfunction(
                exchange, b'_localphasemove', _phasemove
            )

        paths = list(urlutil.get_push_paths(repo, ui, dests))
        if len(paths) > 1:
            msg = _(b'cannot push to multiple path with infinitepush')
            raise error.Abort(msg)

        path = paths[0]
        destpath = path.loc
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save them before the push and restore them
        # after.
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
        result = orig(ui, repo, *dests, **pycompat.strkwargs(opts))
        if common.isremotebooksenabled(ui):
            if bookmark and scratchpush:
-               other = hg.peer(repo, opts, destpath)
+               other = hg.peer(repo, opts, path)
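                # The change above passes the urlutil path object itself to
                # hg.peer(); previously its location string (destpath, i.e.
                # path.loc from a few lines up) was passed instead.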
860 try:
860 try:
861 fetchedbookmarks = other.listkeyspatterns(
861 fetchedbookmarks = other.listkeyspatterns(
862 b'bookmarks', patterns=[bookmark]
862 b'bookmarks', patterns=[bookmark]
863 )
863 )
864 remotescratchbookmarks.update(fetchedbookmarks)
864 remotescratchbookmarks.update(fetchedbookmarks)
865 finally:
865 finally:
866 other.close()
866 other.close()
867 _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
867 _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
868 if oldphasemove:
868 if oldphasemove:
869 exchange._localphasemove = oldphasemove
869 exchange._localphasemove = oldphasemove
870 return result
870 return result
871
871
872
872
873 def _deleteinfinitepushbookmarks(ui, repo, path, names):
873 def _deleteinfinitepushbookmarks(ui, repo, path, names):
874 """Prune remote names by removing the bookmarks we don't want anymore,
874 """Prune remote names by removing the bookmarks we don't want anymore,
875 then writing the result back to disk
875 then writing the result back to disk
876 """
876 """
877 remotenamesext = extensions.find(b'remotenames')
877 remotenamesext = extensions.find(b'remotenames')
878
878
879 # remotename format is:
879 # remotename format is:
880 # (node, nametype ("branches" or "bookmarks"), remote, name)
880 # (node, nametype ("branches" or "bookmarks"), remote, name)
881 nametype_idx = 1
881 nametype_idx = 1
882 remote_idx = 2
882 remote_idx = 2
883 name_idx = 3
883 name_idx = 3
884 remotenames = [
884 remotenames = [
885 remotename
885 remotename
886 for remotename in remotenamesext.readremotenames(repo)
886 for remotename in remotenamesext.readremotenames(repo)
887 if remotename[remote_idx] == path
887 if remotename[remote_idx] == path
888 ]
888 ]
889 remote_bm_names = [
889 remote_bm_names = [
890 remotename[name_idx]
890 remotename[name_idx]
891 for remotename in remotenames
891 for remotename in remotenames
892 if remotename[nametype_idx] == b"bookmarks"
892 if remotename[nametype_idx] == b"bookmarks"
893 ]
893 ]
894
894
895 for name in names:
895 for name in names:
896 if name not in remote_bm_names:
896 if name not in remote_bm_names:
897 raise error.Abort(
897 raise error.Abort(
898 _(
898 _(
899 b"infinitepush bookmark '{}' does not exist "
899 b"infinitepush bookmark '{}' does not exist "
900 b"in path '{}'"
900 b"in path '{}'"
901 ).format(name, path)
901 ).format(name, path)
902 )
902 )
903
903
904 bookmarks = {}
904 bookmarks = {}
905 branches = collections.defaultdict(list)
905 branches = collections.defaultdict(list)
906 for node, nametype, remote, name in remotenames:
906 for node, nametype, remote, name in remotenames:
907 if nametype == b"bookmarks" and name not in names:
907 if nametype == b"bookmarks" and name not in names:
908 bookmarks[name] = node
908 bookmarks[name] = node
909 elif nametype == b"branches":
909 elif nametype == b"branches":
910 # saveremotenames wants binary nodes for branches
910 # saveremotenames wants binary nodes for branches
911 branches[name].append(bin(node))
911 branches[name].append(bin(node))
912
912
913 remotenamesext.saveremotenames(repo, path, branches, bookmarks)
913 remotenamesext.saveremotenames(repo, path, branches, bookmarks)
914
914
915
915
916 def _phasemove(orig, pushop, nodes, phase=phases.public):
916 def _phasemove(orig, pushop, nodes, phase=phases.public):
917 """prevent commits from being marked public
917 """prevent commits from being marked public
918
918
919 Since these are going to a scratch branch, they aren't really being
919 Since these are going to a scratch branch, they aren't really being
920 published."""
920 published."""
921
921
922 if phase != phases.public:
922 if phase != phases.public:
923 orig(pushop, nodes, phase)
923 orig(pushop, nodes, phase)
924
924
925
925
@exchange.b2partsgenerator(scratchbranchparttype)
def partgen(pushop, bundler):
    bookmark = pushop.ui.config(experimental, configbookmark)
    scratchpush = pushop.ui.configbool(experimental, configscratchpush)
    if b'changesets' in pushop.stepsdone or not scratchpush:
        return

    if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
        return

    pushop.stepsdone.add(b'changesets')
    if not pushop.outgoing.missing:
        pushop.ui.status(_(b'no changes found\n'))
        pushop.cgresult = 0
        return

    # This parameter tells the server that the following bundle is an
    # infinitepush. This lets it switch the part processing to our
    # infinitepush code path.
    bundler.addparam(b"infinitepush", b"True")

    scratchparts = bundleparts.getscratchbranchparts(
        pushop.repo, pushop.remote, pushop.outgoing, pushop.ui, bookmark
    )

    for scratchpart in scratchparts:
        bundler.addpart(scratchpart)

    def handlereply(op):
        # server either succeeds or aborts; no code to read
        pushop.cgresult = 1

    return handlereply


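# Advertise the scratchbranch part type in the server's bundle2 capabilities,
# so that clients (see the capability check in `partgen` above) know they may
# send it.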
bundle2.capabilities[bundleparts.scratchbranchparttype] = ()


def _getrevs(bundle, oldnode, force, bookmark):
    """extracts and validates the revs to be imported"""
    revs = [bundle[r] for r in bundle.revs(b'sort(bundle())')]

    # new bookmark
    if oldnode is None:
        return revs

    # Fast forward update
    if oldnode in bundle and list(bundle.set(b'bundle() & %s::', oldnode)):
        return revs

    return revs


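# Usage sketch for `logservicecall` (mirrors the call sites in `storebundle`
# below; `logger` is the partial built by `_getorcreateinfinitepushlogger`):
#
#     with logservicecall(logger, b'bundlestore', bundlesize=len(data)):
#         key = store.write(data)
#
# A b'start' event is logged on entry, then either a b'success' or a
# b'failure' event with the elapsed time in milliseconds.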
@contextlib.contextmanager
def logservicecall(logger, service, **kwargs):
    start = time.time()
    logger(service, eventtype=b'start', **kwargs)
    try:
        yield
        logger(
            service,
            eventtype=b'success',
            elapsedms=(time.time() - start) * 1000,
            **kwargs
        )
    except Exception as e:
        logger(
            service,
            eventtype=b'failure',
            elapsedms=(time.time() - start) * 1000,
            errormsg=stringutil.forcebytestr(e),
            **kwargs
        )
        raise


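# The logger built below is a functools.partial over ui.log, so every event
# automatically carries the user, requestid, hostname and reponame fields in
# addition to whatever keyword arguments the call site passes.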
def _getorcreateinfinitepushlogger(op):
    logger = op.records[b'infinitepushlogger']
    if not logger:
        ui = op.repo.ui
        try:
            username = procutil.getuser()
        except Exception:
            username = b'unknown'
        # Generate a random request id to be able to find all logged entries
        # for the same request. Since the requestid is pseudo-randomly
        # generated, it may not be unique, but we assume that
        # (hostname, username, requestid) is.
        random.seed()
        requestid = random.randint(0, 2000000000)
        hostname = socket.gethostname()
        logger = functools.partial(
            ui.log,
            b'infinitepush',
            user=username,
            requestid=requestid,
            hostname=hostname,
            reponame=ui.config(b'infinitepush', b'reponame'),
        )
        op.records.add(b'infinitepushlogger', logger)
    else:
        logger = logger[0]
    return logger


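# `storetobundlestore` takes over when the server is configured to route
# every push into the bundlestore (see the check in `processparts` below).
# Illustrative hgrc sketch for that mode:
#
#   [infinitepush]
#   pushtobundlestore = True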
def storetobundlestore(orig, repo, op, unbundler):
    """stores the incoming bundle from a push command in the bundlestore
    instead of applying it to the revlogs"""

    repo.ui.status(_(b"storing changesets on the bundlestore\n"))
    bundler = bundle2.bundle20(repo.ui)

    # process each part and store it in the bundler
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            bundlepart = None
            if part.type == b'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            else:
                bundlepart = bundle2.bundlepart(part.type, data=part.read())
                for key, value in part.params.items():
                    bundlepart.addparam(key, value)

                # Certain parts require a response
                if part.type in (b'pushkey', b'changegroup'):
                    if op.reply is not None:
                        rpart = op.reply.newpart(b'reply:%s' % part.type)
                        rpart.addparam(
                            b'in-reply-to', b'%d' % part.id, mandatory=False
                        )
                        rpart.addparam(b'return', b'1', mandatory=False)

                op.records.add(
                    part.type,
                    {
                        b'return': 1,
                    },
                )
            if bundlepart:
                bundler.addpart(bundlepart)

    # store the bundle in the bundlestore
    buf = util.chunkbuffer(bundler.getchunks())
    fd, bundlefile = pycompat.mkstemp()
    try:
        try:
            fp = os.fdopen(fd, 'wb')
            fp.write(buf.read())
        finally:
            fp.close()
        storebundle(op, {}, bundlefile)
    finally:
        try:
            os.unlink(bundlefile)
        except Exception:
            # we would rather see the original exception
            pass


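# `processparts` wraps bundle2's server-side part processing (note the `orig`
# first argument): ordinary pushes fall through to `orig`, while infinitepush
# bundles (marked with the bundle-level b'infinitepush' parameter added in
# `partgen` above) are diverted into the bundlestore.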
def processparts(orig, repo, op, unbundler):

    # make sure we don't wrap processparts in case of `hg unbundle`
    if op.source == b'unbundle':
        return orig(repo, op, unbundler)

    # this server routes each push to the bundle store
    if repo.ui.configbool(b'infinitepush', b'pushtobundlestore'):
        return storetobundlestore(orig, repo, op, unbundler)

    if unbundler.params.get(b'infinitepush') != b'True':
        return orig(repo, op, unbundler)

    handleallparts = repo.ui.configbool(b'infinitepush', b'storeallparts')

    bundler = bundle2.bundle20(repo.ui)
    cgparams = None
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            bundlepart = None
            if part.type == b'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            elif part.type == bundleparts.scratchbranchparttype:
                # Scratch branch parts need to be converted to normal
                # changegroup parts, and the extra parameters stored for later
                # when we upload to the store. Eventually those parameters will
                # be put on the actual bundle instead of this part, then we can
                # send a vanilla changegroup instead of the scratchbranch part.
                cgversion = part.params.get(b'cgversion', b'01')
                bundlepart = bundle2.bundlepart(
                    b'changegroup', data=part.read()
                )
                bundlepart.addparam(b'version', cgversion)
                cgparams = part.params

                # If we're not dumping all parts into the new bundle, we need to
                # alert the future pushkey and phase-heads handler to skip
                # the part.
                if not handleallparts:
                    op.records.add(
                        scratchbranchparttype + b'_skippushkey', True
                    )
                    op.records.add(
                        scratchbranchparttype + b'_skipphaseheads', True
                    )
            else:
                if handleallparts:
                    # Ideally we would not process any parts, and instead just
                    # forward them to the bundle for storage, but since this
                    # differs from previous behavior, we need to put it behind a
                    # config flag for incremental rollout.
                    bundlepart = bundle2.bundlepart(part.type, data=part.read())
                    for key, value in part.params.items():
                        bundlepart.addparam(key, value)

                    # Certain parts require a response
                    if part.type == b'pushkey':
                        if op.reply is not None:
                            rpart = op.reply.newpart(b'reply:pushkey')
                            rpart.addparam(
                                b'in-reply-to', b'%d' % part.id, mandatory=False
                            )
                            rpart.addparam(b'return', b'1', mandatory=False)
                else:
                    bundle2._processpart(op, part)

                if handleallparts:
                    op.records.add(
                        part.type,
                        {
                            b'return': 1,
                        },
                    )
            if bundlepart:
                bundler.addpart(bundlepart)

    # If commits were sent, store them
    if cgparams:
        buf = util.chunkbuffer(bundler.getchunks())
        fd, bundlefile = pycompat.mkstemp()
        try:
            try:
                fp = os.fdopen(fd, 'wb')
                fp.write(buf.read())
            finally:
                fp.close()
            storebundle(op, cgparams, bundlefile)
        finally:
            try:
                os.unlink(bundlefile)
            except Exception:
                # we would rather see the original exception
                pass


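# Illustrative server-side hgrc sketch for the `storeallparts` knob consulted
# above; when it is set, non-scratch parts are copied into the stored bundle
# instead of being processed:
#
#   [infinitepush]
#   storeallparts = True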
def storebundle(op, params, bundlefile):
    log = _getorcreateinfinitepushlogger(op)
    parthandlerstart = time.time()
    log(scratchbranchparttype, eventtype=b'start')
    index = op.repo.bundlestore.index
    store = op.repo.bundlestore.store
    op.records.add(scratchbranchparttype + b'_skippushkey', True)

    bundle = None
    try:  # guards bundle
        bundlepath = b"bundle:%s+%s" % (op.repo.root, bundlefile)
        bundle = hg.repository(op.repo.ui, bundlepath)

        bookmark = params.get(b'bookmark')
        bookprevnode = params.get(b'bookprevnode', b'')
        force = params.get(b'force')

        if bookmark:
            oldnode = index.getnode(bookmark)
        else:
            oldnode = None
        bundleheads = bundle.revs(b'heads(bundle())')
        if bookmark and len(bundleheads) > 1:
            raise error.Abort(
                _(b'cannot push more than one head to a scratch branch')
            )

        revs = _getrevs(bundle, oldnode, force, bookmark)

        # Notify the user of what is being pushed
        plural = b's' if len(revs) > 1 else b''
        op.repo.ui.warn(_(b"pushing %d commit%s:\n") % (len(revs), plural))
        maxoutput = 10
        for i in range(0, min(len(revs), maxoutput)):
            firstline = bundle[revs[i]].description().split(b'\n')[0][:50]
            op.repo.ui.warn(b"    %s  %s\n" % (revs[i], firstline))

        # use > maxoutput (not > maxoutput + 1) so that the head rev is not
        # silently omitted when there is exactly one more rev than maxoutput
        if len(revs) > maxoutput:
            op.repo.ui.warn(b"    ...\n")
            firstline = bundle[revs[-1]].description().split(b'\n')[0][:50]
            op.repo.ui.warn(b"    %s  %s\n" % (revs[-1], firstline))

        nodesctx = [bundle[rev] for rev in revs]
        inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
        if bundleheads:
            newheadscount = sum(not inindex(rev) for rev in bundleheads)
        else:
            newheadscount = 0
        # If there's a bookmark specified, there should be only one head,
        # so we choose the last node, which will be that head.
        # If a bug or malicious client allows there to be a bookmark
        # with multiple heads, we will place the bookmark on the last head.
        bookmarknode = nodesctx[-1].hex() if nodesctx else None
        key = None
        if newheadscount:
            with open(bundlefile, 'rb') as f:
                bundledata = f.read()
            with logservicecall(
                log, b'bundlestore', bundlesize=len(bundledata)
            ):
                bundlesizelimit = 100 * 1024 * 1024  # 100 MB
                if len(bundledata) > bundlesizelimit:
                    error_msg = (
                        b'bundle is too big: %d bytes. '
                        + b'max allowed size is 100 MB'
                    )
                    raise error.Abort(error_msg % (len(bundledata),))
                key = store.write(bundledata)

        with logservicecall(log, b'index', newheadscount=newheadscount), index:
            if key:
                index.addbundle(key, nodesctx)
            if bookmark:
                index.addbookmark(bookmark, bookmarknode)
                _maybeaddpushbackpart(
                    op, bookmark, bookmarknode, bookprevnode, params
                )
        log(
            scratchbranchparttype,
            eventtype=b'success',
            elapsedms=(time.time() - parthandlerstart) * 1000,
        )

    except Exception as e:
        log(
            scratchbranchparttype,
            eventtype=b'failure',
            elapsedms=(time.time() - parthandlerstart) * 1000,
            errormsg=stringutil.forcebytestr(e),
        )
        raise
    finally:
        if bundle:
            bundle.close()


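# Server-side handler for the scratchbranch part. The advertised parameters
# correspond to what the client sends: `bookmark` and `bookprevnode` name the
# scratch bookmark and its previous position, `force` requests a
# non-fast-forward move, `pushbackbookmarks` asks for a bookmark update to be
# pushed back, and `cgversion` records the changegroup version of the payload.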
@bundle2.parthandler(
    scratchbranchparttype,
    (
        b'bookmark',
        b'bookprevnode',
        b'force',
        b'pushbackbookmarks',
        b'cgversion',
    ),
)
def bundle2scratchbranch(op, part):
    '''unbundle a bundle2 part containing a changegroup to store'''

    bundler = bundle2.bundle20(op.repo.ui)
    cgversion = part.params.get(b'cgversion', b'01')
    cgpart = bundle2.bundlepart(b'changegroup', data=part.read())
    cgpart.addparam(b'version', cgversion)
    bundler.addpart(cgpart)
    buf = util.chunkbuffer(bundler.getchunks())

    fd, bundlefile = pycompat.mkstemp()
    try:
        try:
            fp = os.fdopen(fd, 'wb')
            fp.write(buf.read())
        finally:
            fp.close()
        storebundle(op, part.params, bundlefile)
    finally:
        try:
            os.unlink(bundlefile)
        except FileNotFoundError:
            pass

    return 1


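# If the client advertised the b'pushback' reply capability and asked for it
# via the `pushbackbookmarks` param, answer with a pushkey part; applying that
# reply part moves the client's local remote-bookmark to the new scratch head.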
def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
    if params.get(b'pushbackbookmarks'):
        if op.reply and b'pushback' in op.reply.capabilities:
            params = {
                b'namespace': b'bookmarks',
                b'key': bookmark,
                b'new': newnode,
                b'old': oldnode,
            }
            op.reply.newpart(b'pushkey', mandatoryparams=params.items())


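# The two wrappers below are assumed to be installed over bundle2's b'pushkey'
# and b'phase-heads' part handlers by this extension's setup code (not part of
# this hunk); a minimal sketch of that wiring:
#
#     orig = bundle2.parthandlermapping[b'pushkey']
#     newhandler = functools.partial(bundle2pushkey, orig)
#     newhandler.params = orig.params
#     bundle2.parthandlermapping[b'pushkey'] = newhandler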
def bundle2pushkey(orig, op, part):
    """Wrapper of bundle2.handlepushkey()

    The only goal is to skip calling the original function if the flag is
    set. It's set if an infinitepush push is happening.
    """
    if op.records[scratchbranchparttype + b'_skippushkey']:
        if op.reply is not None:
            rpart = op.reply.newpart(b'reply:pushkey')
            rpart.addparam(b'in-reply-to', b'%d' % part.id, mandatory=False)
            rpart.addparam(b'return', b'1', mandatory=False)
        return 1

    return orig(op, part)


def bundle2handlephases(orig, op, part):
    """Wrapper of bundle2.handlephases()

    The only goal is to skip calling the original function if the flag is
    set. It's set if an infinitepush push is happening.
    """

    if op.records[scratchbranchparttype + b'_skipphaseheads']:
        return

    return orig(op, part)


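# `_asyncsavemetadata` below fires and forgets a child hg process; for two
# nodes the spawned command line looks roughly like this (illustrative;
# <root>, <node1> and <node2> stand for the actual values):
#
#   hg debugfillinfinitepushmetadata -R <root> --node <node1> --node <node2>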
def _asyncsavemetadata(root, nodes):
    """starts a separate process that fills metadata for the nodes

    This function creates a separate process and doesn't wait for its
    completion. This was done to avoid slowing down pushes.
    """

    maxnodes = 50
    if len(nodes) > maxnodes:
        return
    nodesargs = []
    for node in nodes:
        nodesargs.append(b'--node')
        nodesargs.append(node)
    with open(os.devnull, 'w+b') as devnull:
        cmdline = [
            util.hgexecutable(),
            b'debugfillinfinitepushmetadata',
            b'-R',
            root,
        ] + nodesargs
        # The process will run in the background. We don't care about the
        # return code.
        subprocess.Popen(
            pycompat.rapply(procutil.tonativestr, cmdline),
            close_fds=True,
            shell=False,
            stdin=devnull,
            stdout=devnull,
            stderr=devnull,
        )