infinitepush: mark extension as likely to be deleted...
Augie Fackler - r43381:e5d53562 default
@@ -1,1339 +1,1344 b''
# Infinite push
#
# Copyright 2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
""" store some pushes in a remote blob store on the server (EXPERIMENTAL)

    IMPORTANT: if you use this extension, please contact
    mercurial-devel@mercurial-scm.org ASAP. This extension is believed to
    be unused and barring learning of users of this functionality, we will
    delete this code at the end of 2020.

    [infinitepush]
    # Server-side and client-side option. Pattern of the infinitepush bookmark
    branchpattern = PATTERN

    # Server or client
    server = False

    # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
    indextype = disk

    # Server-side option. Used only if indextype=sql.
    # Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
    sqlhost = IP:PORT:DB_NAME:USER:PASSWORD

    # Server-side option. Used only if indextype=disk.
    # Filesystem path to the index store
    indexpath = PATH

    # Server-side option. Possible values: 'disk' or 'external'
    # Fails if not set
    storetype = disk

    # Server-side option.
    # Path to the binary that will save bundles to the bundlestore
    # Formatted cmd line will be passed to it (see `put_args`)
    put_binary = put

    # Server-side option. Used only if storetype=external.
    # Format cmd-line string for put binary. Placeholder: {filename}
    put_args = {filename}

    # Server-side option.
    # Path to the binary that gets bundles from the bundlestore.
    # Formatted cmd line will be passed to it (see `get_args`)
    get_binary = get

    # Server-side option. Used only if storetype=external.
    # Format cmd-line string for get binary. Placeholders: {filename} {handle}
    get_args = {filename} {handle}

    # Server-side option
    logfile = FILE

    # Server-side option
    loglevel = DEBUG

    # Server-side option. Used only if indextype=sql.
    # Sets mysql wait_timeout option.
    waittimeout = 300

    # Server-side option. Used only if indextype=sql.
    # Sets mysql innodb_lock_wait_timeout option.
    locktimeout = 120

    # Server-side option. Used only if indextype=sql.
    # Name of the repository
    reponame = ''

    # Client-side option. Used by --list-remote option. List of remote scratch
    # patterns to list if no patterns are specified.
    defaultremotepatterns = ['*']

    # Instructs infinitepush to forward all received bundle2 parts to the
    # bundle for storage. Defaults to False.
    storeallparts = True

    # Routes each incoming push to the bundlestore. Defaults to False.
    pushtobundlestore = True

    [remotenames]
    # Client-side option
    # This option should be set only if remotenames extension is enabled.
    # Whether remote bookmarks are tracked by remotenames extension.
    bookmarks = True
"""

from __future__ import absolute_import

import collections
import contextlib
import errno
import functools
import logging
import os
import random
import re
import socket
import subprocess
import time

from mercurial.node import (
    bin,
    hex,
)

from mercurial.i18n import _

from mercurial.pycompat import (
    getattr,
    open,
)

from mercurial.utils import (
    procutil,
    stringutil,
)

from mercurial import (
    bundle2,
    changegroup,
    commands,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    hg,
    localrepo,
    phases,
    pushkey,
    pycompat,
    registrar,
    util,
    wireprototypes,
    wireprotov1peer,
    wireprotov1server,
)

from . import (
    bundleparts,
    common,
)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

configitem(
    b'infinitepush', b'server', default=False,
)
configitem(
    b'infinitepush', b'storetype', default=b'',
)
configitem(
    b'infinitepush', b'indextype', default=b'',
)
configitem(
    b'infinitepush', b'indexpath', default=b'',
)
configitem(
    b'infinitepush', b'storeallparts', default=False,
)
configitem(
    b'infinitepush', b'reponame', default=b'',
)
configitem(
    b'scratchbranch', b'storepath', default=b'',
)
configitem(
    b'infinitepush', b'branchpattern', default=b'',
)
configitem(
    b'infinitepush', b'pushtobundlestore', default=False,
)
configitem(
    b'experimental', b'server-bundlestore-bookmark', default=b'',
)
configitem(
    b'experimental', b'infinitepush-scratchpush', default=False,
)

experimental = b'experimental'
configbookmark = b'server-bundlestore-bookmark'
configscratchpush = b'infinitepush-scratchpush'

scratchbranchparttype = bundleparts.scratchbranchparttype
revsetpredicate = registrar.revsetpredicate()
templatekeyword = registrar.templatekeyword()
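# Placeholder matcher: commonsetup() replaces it with a stringmatcher built
# from the configured infinitepush.branchpattern, so until setup runs no
# name is treated as a scratch branch.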
_scratchbranchmatcher = lambda x: False
_maybehash = re.compile(r'^[a-f0-9]+$').search


def _buildexternalbundlestore(ui):
    put_args = ui.configlist(b'infinitepush', b'put_args', [])
    put_binary = ui.config(b'infinitepush', b'put_binary')
    if not put_binary:
        raise error.Abort(b'put binary is not specified')
    get_args = ui.configlist(b'infinitepush', b'get_args', [])
    get_binary = ui.config(b'infinitepush', b'get_binary')
    if not get_binary:
        raise error.Abort(b'get binary is not specified')
    from . import store

    return store.externalbundlestore(put_binary, put_args, get_binary, get_args)


def _buildsqlindex(ui):
    sqlhost = ui.config(b'infinitepush', b'sqlhost')
    if not sqlhost:
        raise error.Abort(_(b'please set infinitepush.sqlhost'))
    host, port, db, user, password = sqlhost.split(b':')
    reponame = ui.config(b'infinitepush', b'reponame')
    if not reponame:
        raise error.Abort(_(b'please set infinitepush.reponame'))

    logfile = ui.config(b'infinitepush', b'logfile', b'')
    waittimeout = ui.configint(b'infinitepush', b'waittimeout', 300)
    locktimeout = ui.configint(b'infinitepush', b'locktimeout', 120)
    from . import sqlindexapi

    return sqlindexapi.sqlindexapi(
        reponame,
        host,
        port,
        db,
        user,
        password,
        logfile,
        _getloglevel(ui),
        waittimeout=waittimeout,
        locktimeout=locktimeout,
    )


def _getloglevel(ui):
    loglevel = ui.config(b'infinitepush', b'loglevel', b'DEBUG')
    numeric_loglevel = getattr(logging, loglevel.upper(), None)
    if not isinstance(numeric_loglevel, int):
        raise error.Abort(_(b'invalid log level %s') % loglevel)
    return numeric_loglevel


def _tryhoist(ui, remotebookmark):
    '''returns a bookmark with the hoisted part removed

    The remotenames extension has a 'hoist' config that allows using remote
    bookmarks without specifying a remote path. For example, 'hg update master'
    works as well as 'hg update remote/master'. We want to allow the same in
    infinitepush.
    '''
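
    # e.g. with remotenames.hoistedpeer=remote, b'remote/master' hoists
    # to b'master'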
    if common.isremotebooksenabled(ui):
        hoist = ui.config(b'remotenames', b'hoistedpeer') + b'/'
        if remotebookmark.startswith(hoist):
            return remotebookmark[len(hoist) :]
    return remotebookmark


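# A bundlestore pairs a store with an index: the store holds raw bundle
# contents (on disk or behind external put/get binaries), while the index
# (file- or SQL-backed) maps scratch bookmarks and nodes to those bundles.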
class bundlestore(object):
    def __init__(self, repo):
        self._repo = repo
        storetype = self._repo.ui.config(b'infinitepush', b'storetype')
        if storetype == b'disk':
            from . import store

            self.store = store.filebundlestore(self._repo.ui, self._repo)
        elif storetype == b'external':
            self.store = _buildexternalbundlestore(self._repo.ui)
        else:
            raise error.Abort(
                _(b'unknown infinitepush store type specified %s') % storetype
            )

        indextype = self._repo.ui.config(b'infinitepush', b'indextype')
        if indextype == b'disk':
            from . import fileindexapi

            self.index = fileindexapi.fileindexapi(self._repo)
        elif indextype == b'sql':
            self.index = _buildsqlindex(self._repo.ui)
        else:
            raise error.Abort(
                _(b'unknown infinitepush index type specified %s') % indextype
            )


def _isserver(ui):
    return ui.configbool(b'infinitepush', b'server')


def reposetup(ui, repo):
    if _isserver(ui) and repo.local():
        repo.bundlestore = bundlestore(repo)


def extsetup(ui):
    commonsetup(ui)
    if _isserver(ui):
        serverextsetup(ui)
    else:
        clientextsetup(ui)


def commonsetup(ui):
    wireprotov1server.commands[b'listkeyspatterns'] = (
        wireprotolistkeyspatterns,
        b'namespace patterns',
    )
    scratchbranchpat = ui.config(b'infinitepush', b'branchpattern')
    if scratchbranchpat:
        global _scratchbranchmatcher
        kind, pat, _scratchbranchmatcher = stringutil.stringmatcher(
            scratchbranchpat
        )


def serverextsetup(ui):
    origpushkeyhandler = bundle2.parthandlermapping[b'pushkey']

    def newpushkeyhandler(*args, **kwargs):
        bundle2pushkey(origpushkeyhandler, *args, **kwargs)

    newpushkeyhandler.params = origpushkeyhandler.params
    bundle2.parthandlermapping[b'pushkey'] = newpushkeyhandler

    orighandlephasehandler = bundle2.parthandlermapping[b'phase-heads']
    newphaseheadshandler = lambda *args, **kwargs: bundle2handlephases(
        orighandlephasehandler, *args, **kwargs
    )
    newphaseheadshandler.params = orighandlephasehandler.params
    bundle2.parthandlermapping[b'phase-heads'] = newphaseheadshandler

    extensions.wrapfunction(
        localrepo.localrepository, b'listkeys', localrepolistkeys
    )
    wireprotov1server.commands[b'lookup'] = (
        _lookupwrap(wireprotov1server.commands[b'lookup'][0]),
        b'key',
    )
    extensions.wrapfunction(exchange, b'getbundlechunks', getbundlechunks)

    extensions.wrapfunction(bundle2, b'processparts', processparts)


def clientextsetup(ui):
    entry = extensions.wrapcommand(commands.table, b'push', _push)

    entry[1].append(
        (
            b'',
            b'bundle-store',
            None,
            _(b'force push to go to bundle store (EXPERIMENTAL)'),
        )
    )

    extensions.wrapcommand(commands.table, b'pull', _pull)

    extensions.wrapfunction(discovery, b'checkheads', _checkheads)

    wireprotov1peer.wirepeer.listkeyspatterns = listkeyspatterns

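    # Generate the scratchbranch part before the changeset part: for scratch
    # pushes, partgen() below marks the 'changesets' step as done, which
    # keeps the regular changeset part from being generated.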
    partorder = exchange.b2partsgenorder
    index = partorder.index(b'changeset')
    partorder.insert(
        index, partorder.pop(partorder.index(scratchbranchparttype))
    )


def _checkheads(orig, pushop):
    if pushop.ui.configbool(experimental, configscratchpush, False):
        return
    return orig(pushop)


def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
    patterns = wireprototypes.decodelist(patterns)
    d = pycompat.iteritems(repo.listkeys(encoding.tolocal(namespace), patterns))
    return pushkey.encodekeys(d)


def localrepolistkeys(orig, self, namespace, patterns=None):
    if namespace == b'bookmarks' and patterns:
        index = self.bundlestore.index
        results = {}
        bookmarks = orig(self, namespace)
        for pattern in patterns:
            results.update(index.getbookmarks(pattern))
            if pattern.endswith(b'*'):
                pattern = b're:^' + pattern[:-1] + b'.*'
            kind, pat, matcher = stringutil.stringmatcher(pattern)
            for bookmark, node in pycompat.iteritems(bookmarks):
                if matcher(bookmark):
                    results[bookmark] = node
        return results
    else:
        return orig(self, namespace)


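# @batchable methods yield their wire arguments together with a future;
# once the (possibly batched) response arrives, f.value holds the raw
# reply that is decoded below.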
@wireprotov1peer.batchable
def listkeyspatterns(self, namespace, patterns):
    if not self.capable(b'pushkey'):
        yield {}, None
    f = wireprotov1peer.future()
    self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
    yield {
        b'namespace': encoding.fromlocal(namespace),
        b'patterns': wireprototypes.encodelist(patterns),
    }, f
    d = f.value
    self.ui.debug(
        b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
    )
    yield pushkey.decodekeys(d)


def _readbundlerevs(bundlerepo):
    return list(bundlerepo.revs(b'bundle()'))


def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
    '''Tells remotefilelog to include all changed files in the changegroup

    By default remotefilelog doesn't include file content in the changegroup.
    But we need to include it if we are fetching from the bundlestore.
    '''
    changedfiles = set()
    cl = bundlerepo.changelog
    for r in bundlerevs:
        # [3] means changed files
        changedfiles.update(cl.read(r)[3])
    if not changedfiles:
        return bundlecaps

    changedfiles = b'\0'.join(changedfiles)
    newcaps = []
    appended = False
    for cap in bundlecaps or []:
        if cap.startswith(b'excludepattern='):
            newcaps.append(b'\0'.join((cap, changedfiles)))
            appended = True
        else:
            newcaps.append(cap)
    if not appended:
        # No excludepattern cap was found; just append it
        newcaps.append(b'excludepattern=' + changedfiles)

    return newcaps


def _rebundle(bundlerepo, bundleroots, unknownhead):
    '''
    A bundle may include more revisions than the user requested. For example,
    if the user asks for one revision the bundle may also contain its
    descendants. This function filters out all revisions the user did not
    request.
    '''
    parts = []

    version = b'02'
    outgoing = discovery.outgoing(
        bundlerepo, commonheads=bundleroots, missingheads=[unknownhead]
    )
    cgstream = changegroup.makestream(bundlerepo, outgoing, version, b'pull')
    cgstream = util.chunkbuffer(cgstream).read()
    cgpart = bundle2.bundlepart(b'changegroup', data=cgstream)
    cgpart.addparam(b'version', version)
    parts.append(cgpart)

    return parts


def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
    cl = bundlerepo.changelog
    bundleroots = []
    for rev in bundlerevs:
        node = cl.node(rev)
        parents = cl.parents(node)
        for parent in parents:
            # include all revs that exist in the main repo
            # to make sure that bundle may apply client-side
            if parent in oldrepo:
                bundleroots.append(parent)
    return bundleroots


def _needsrebundling(head, bundlerepo):
    bundleheads = list(bundlerepo.revs(b'heads(bundle())'))
    return not (
        len(bundleheads) == 1 and bundlerepo[bundleheads[0]].node() == head
    )


def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
    '''generates the bundle that will be sent to the user

    returns a list of bundle2 parts
    '''
    parts = []
    if not _needsrebundling(head, bundlerepo):
        with util.posixfile(bundlefile, b"rb") as f:
            unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
            if isinstance(unbundler, changegroup.cg1unpacker):
                part = bundle2.bundlepart(
                    b'changegroup', data=unbundler._stream.read()
                )
                part.addparam(b'version', b'01')
                parts.append(part)
            elif isinstance(unbundler, bundle2.unbundle20):
                haschangegroup = False
                for part in unbundler.iterparts():
                    if part.type == b'changegroup':
                        haschangegroup = True
                    newpart = bundle2.bundlepart(part.type, data=part.read())
                    for key, value in pycompat.iteritems(part.params):
                        newpart.addparam(key, value)
                    parts.append(newpart)

                if not haschangegroup:
                    raise error.Abort(
                        b'unexpected bundle without changegroup part, '
                        + b'head: %s' % hex(head),
                        hint=b'report to administrator',
                    )
            else:
                raise error.Abort(b'unknown bundle type')
    else:
        parts = _rebundle(bundlerepo, bundleroots, head)

    return parts


def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
    heads = heads or []
    # newheads are parents of roots of scratch bundles that were requested
    newphases = {}
    scratchbundles = []
    newheads = []
    scratchheads = []
    nodestobundle = {}
    allbundlestocleanup = []
    try:
        for head in heads:
            if head not in repo.changelog.nodemap:
                if head not in nodestobundle:
                    newbundlefile = common.downloadbundle(repo, head)
                    bundlepath = b"bundle:%s+%s" % (repo.root, newbundlefile)
                    bundlerepo = hg.repository(repo.ui, bundlepath)

                    allbundlestocleanup.append((bundlerepo, newbundlefile))
                    bundlerevs = set(_readbundlerevs(bundlerepo))
                    bundlecaps = _includefilelogstobundle(
                        bundlecaps, bundlerepo, bundlerevs, repo.ui
                    )
                    cl = bundlerepo.changelog
                    bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
                    for rev in bundlerevs:
                        node = cl.node(rev)
                        newphases[hex(node)] = str(phases.draft)
                        nodestobundle[node] = (
                            bundlerepo,
                            bundleroots,
                            newbundlefile,
                        )

                scratchbundles.append(
                    _generateoutputparts(head, *nodestobundle[head])
                )
                newheads.extend(bundleroots)
                scratchheads.append(head)
    finally:
        for bundlerepo, bundlefile in allbundlestocleanup:
            bundlerepo.close()
            try:
                os.unlink(bundlefile)
            except (IOError, OSError):
                # if we can't cleanup the file then just ignore the error,
                # no need to fail
                pass

    pullfrombundlestore = bool(scratchbundles)
    wrappedchangegrouppart = False
    wrappedlistkeys = False
    oldchangegrouppart = exchange.getbundle2partsmapping[b'changegroup']
    try:

        def _changegrouppart(bundler, *args, **kwargs):
            # Order is important here. First add non-scratch part
            # and only then add parts with scratch bundles because
            # non-scratch part contains parents of roots of scratch bundles.
            result = oldchangegrouppart(bundler, *args, **kwargs)
            for bundle in scratchbundles:
                for part in bundle:
                    bundler.addpart(part)
            return result

        exchange.getbundle2partsmapping[b'changegroup'] = _changegrouppart
        wrappedchangegrouppart = True

        def _listkeys(orig, self, namespace):
            origvalues = orig(self, namespace)
            if namespace == b'phases' and pullfrombundlestore:
                if origvalues.get(b'publishing') == b'True':
                    # Make repo non-publishing to preserve draft phase
                    del origvalues[b'publishing']
                origvalues.update(newphases)
            return origvalues

        extensions.wrapfunction(
            localrepo.localrepository, b'listkeys', _listkeys
        )
        wrappedlistkeys = True
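        # Answer scratch heads from the stored bundles: ask orig() only for
        # the remaining heads plus the scratch bundles' root parents
        # (newheads), so the client receives connected history.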
        heads = list((set(newheads) | set(heads)) - set(scratchheads))
        result = orig(
            repo, source, heads=heads, bundlecaps=bundlecaps, **kwargs
        )
    finally:
        if wrappedchangegrouppart:
            exchange.getbundle2partsmapping[b'changegroup'] = oldchangegrouppart
        if wrappedlistkeys:
            extensions.unwrapfunction(
                localrepo.localrepository, b'listkeys', _listkeys
            )
    return result


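# wireproto 'lookup' replies are b'<success> <payload>\n'; this wrapper first
# consults the bundlestore index for scratch bookmarks and for nodes that
# only exist in stored bundles.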
def _lookupwrap(orig):
    def _lookup(repo, proto, key):
        localkey = encoding.tolocal(key)

        if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
            scratchnode = repo.bundlestore.index.getnode(localkey)
            if scratchnode:
                return b"%d %s\n" % (1, scratchnode)
            else:
                return b"%d %s\n" % (
                    0,
                    b'scratch branch %s not found' % localkey,
                )
        else:
            try:
                r = hex(repo.lookup(localkey))
                return b"%d %s\n" % (1, r)
            except Exception as inst:
                if repo.bundlestore.index.getbundle(localkey):
                    return b"%d %s\n" % (1, localkey)
                else:
                    r = stringutil.forcebytestr(inst)
                    return b"%d %s\n" % (0, r)

    return _lookup


def _pull(orig, ui, repo, source=b"default", **opts):
    opts = pycompat.byteskwargs(opts)
    # Copy paste from `pull` command
    source, branches = hg.parseurl(ui.expandpath(source), opts.get(b'branch'))

    scratchbookmarks = {}
    unfi = repo.unfiltered()
    unknownnodes = []
    for rev in opts.get(b'rev', []):
        if rev not in unfi:
            unknownnodes.append(rev)
    if opts.get(b'bookmark'):
        bookmarks = []
        revs = opts.get(b'rev') or []
        for bookmark in opts.get(b'bookmark'):
            if _scratchbranchmatcher(bookmark):
                # rev is not known yet
                # it will be fetched with listkeyspatterns next
                scratchbookmarks[bookmark] = b'REVTOFETCH'
            else:
                bookmarks.append(bookmark)

        if scratchbookmarks:
            other = hg.peer(repo, opts, source)
            fetchedbookmarks = other.listkeyspatterns(
                b'bookmarks', patterns=scratchbookmarks
            )
            for bookmark in scratchbookmarks:
                if bookmark not in fetchedbookmarks:
                    raise error.Abort(
                        b'remote bookmark %s not found!' % bookmark
                    )
                scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
                revs.append(fetchedbookmarks[bookmark])
        opts[b'bookmark'] = bookmarks
        opts[b'rev'] = revs

    if scratchbookmarks or unknownnodes:
        # Set anyincoming to True
        extensions.wrapfunction(
            discovery, b'findcommonincoming', _findcommonincoming
        )
    try:
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save them before the pull and restore them
        # afterwards.
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
        result = orig(ui, repo, source, **pycompat.strkwargs(opts))
        # TODO(stash): race condition is possible
        # if scratch bookmarks was updated right after orig.
        # But that's unlikely and shouldn't be harmful.
        if common.isremotebooksenabled(ui):
            remotescratchbookmarks.update(scratchbookmarks)
            _saveremotebookmarks(repo, remotescratchbookmarks, source)
        else:
            _savelocalbookmarks(repo, scratchbookmarks)
        return result
    finally:
        if scratchbookmarks:
            extensions.unwrapfunction(discovery, b'findcommonincoming')


def _readscratchremotebookmarks(ui, repo, other):
    if common.isremotebooksenabled(ui):
        remotenamesext = extensions.find(b'remotenames')
        remotepath = remotenamesext.activepath(repo.ui, other)
        result = {}
        # Let's refresh remotenames to make sure we have it up to date
        # Seems that `repo.names['remotebookmarks']` may return stale bookmarks
        # and it results in deleting scratch bookmarks. Our best guess how to
        # fix it is to use `clearnames()`
        repo._remotenames.clearnames()
        for remotebookmark in repo.names[b'remotebookmarks'].listnames(repo):
            path, bookname = remotenamesext.splitremotename(remotebookmark)
            if path == remotepath and _scratchbranchmatcher(bookname):
                nodes = repo.names[b'remotebookmarks'].nodes(
                    repo, remotebookmark
                )
                if nodes:
                    result[bookname] = hex(nodes[0])
        return result
    else:
        return {}


def _saveremotebookmarks(repo, newbookmarks, remote):
    remotenamesext = extensions.find(b'remotenames')
    remotepath = remotenamesext.activepath(repo.ui, remote)
    branches = collections.defaultdict(list)
    bookmarks = {}
    remotenames = remotenamesext.readremotenames(repo)
    for hexnode, nametype, remote, rname in remotenames:
        if remote != remotepath:
            continue
        if nametype == b'bookmarks':
            if rname in newbookmarks:
                # This can happen if a normal bookmark matches the scratch
                # branch pattern. In this case just use the current
                # bookmark node
                del newbookmarks[rname]
            bookmarks[rname] = hexnode
        elif nametype == b'branches':
            # saveremotenames expects 20 byte binary nodes for branches
            branches[rname].append(bin(hexnode))

    for bookmark, hexnode in pycompat.iteritems(newbookmarks):
        bookmarks[bookmark] = hexnode
    remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)


def _savelocalbookmarks(repo, bookmarks):
    if not bookmarks:
        return
    with repo.wlock(), repo.lock(), repo.transaction(b'bookmark') as tr:
        changes = []
        for scratchbook, node in pycompat.iteritems(bookmarks):
            changectx = repo[node]
            changes.append((scratchbook, changectx.node()))
        repo._bookmarks.applychanges(repo, tr, changes)


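# Wrapped around discovery while pulling scratch revs: the server does not
# advertise them, so pretend something is incoming to keep the pull going.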
776 def _findcommonincoming(orig, *args, **kwargs):
781 def _findcommonincoming(orig, *args, **kwargs):
777 common, inc, remoteheads = orig(*args, **kwargs)
782 common, inc, remoteheads = orig(*args, **kwargs)
778 return common, True, remoteheads
783 return common, True, remoteheads
779
784
780
785
781 def _push(orig, ui, repo, dest=None, *args, **opts):
786 def _push(orig, ui, repo, dest=None, *args, **opts):
782 opts = pycompat.byteskwargs(opts)
787 opts = pycompat.byteskwargs(opts)
783 bookmark = opts.get(b'bookmark')
788 bookmark = opts.get(b'bookmark')
784 # we only support pushing one infinitepush bookmark at once
789 # we only support pushing one infinitepush bookmark at once
785 if len(bookmark) == 1:
790 if len(bookmark) == 1:
786 bookmark = bookmark[0]
791 bookmark = bookmark[0]
787 else:
792 else:
788 bookmark = b''
793 bookmark = b''
789
794
790 oldphasemove = None
795 oldphasemove = None
791 overrides = {(experimental, configbookmark): bookmark}
796 overrides = {(experimental, configbookmark): bookmark}
792
797
793 with ui.configoverride(overrides, b'infinitepush'):
798 with ui.configoverride(overrides, b'infinitepush'):
794 scratchpush = opts.get(b'bundle_store')
799 scratchpush = opts.get(b'bundle_store')
795 if _scratchbranchmatcher(bookmark):
800 if _scratchbranchmatcher(bookmark):
796 scratchpush = True
801 scratchpush = True
797 # bundle2 can be sent back after push (for example, bundle2
802 # bundle2 can be sent back after push (for example, bundle2
798 # containing `pushkey` part to update bookmarks)
803 # containing `pushkey` part to update bookmarks)
799 ui.setconfig(experimental, b'bundle2.pushback', True)
804 ui.setconfig(experimental, b'bundle2.pushback', True)
800
805
801 if scratchpush:
806 if scratchpush:
802 # this is an infinitepush, we don't want the bookmark to be applied
807 # this is an infinitepush, we don't want the bookmark to be applied
803 # rather that should be stored in the bundlestore
808 # rather that should be stored in the bundlestore
804 opts[b'bookmark'] = []
809 opts[b'bookmark'] = []
805 ui.setconfig(experimental, configscratchpush, True)
810 ui.setconfig(experimental, configscratchpush, True)
806 oldphasemove = extensions.wrapfunction(
811 oldphasemove = extensions.wrapfunction(
807 exchange, b'_localphasemove', _phasemove
812 exchange, b'_localphasemove', _phasemove
808 )
813 )
809 # Copy-paste from `push` command
814 # Copy-paste from `push` command
810 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
815 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
811 if not path:
816 if not path:
812 raise error.Abort(
817 raise error.Abort(
813 _(b'default repository not configured!'),
818 _(b'default repository not configured!'),
814 hint=_(b"see 'hg help config.paths'"),
819 hint=_(b"see 'hg help config.paths'"),
815 )
820 )
816 destpath = path.pushloc or path.loc
821 destpath = path.pushloc or path.loc
817 # Remote scratch bookmarks will be deleted because remotenames doesn't
822 # Remote scratch bookmarks will be deleted because remotenames doesn't
818 # know about them. Let's save it before push and restore after
823 # know about them. Let's save it before push and restore after
819 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
824 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
820 result = orig(ui, repo, dest, *args, **pycompat.strkwargs(opts))
825 result = orig(ui, repo, dest, *args, **pycompat.strkwargs(opts))
821 if common.isremotebooksenabled(ui):
826 if common.isremotebooksenabled(ui):
822 if bookmark and scratchpush:
827 if bookmark and scratchpush:
823 other = hg.peer(repo, opts, destpath)
828 other = hg.peer(repo, opts, destpath)
824 fetchedbookmarks = other.listkeyspatterns(
829 fetchedbookmarks = other.listkeyspatterns(
825 b'bookmarks', patterns=[bookmark]
830 b'bookmarks', patterns=[bookmark]
826 )
831 )
827 remotescratchbookmarks.update(fetchedbookmarks)
832 remotescratchbookmarks.update(fetchedbookmarks)
828 _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
833 _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
829 if oldphasemove:
834 if oldphasemove:
830 exchange._localphasemove = oldphasemove
835 exchange._localphasemove = oldphasemove
831 return result
836 return result
832
837
833
838
834 def _deleteinfinitepushbookmarks(ui, repo, path, names):
839 def _deleteinfinitepushbookmarks(ui, repo, path, names):
835 """Prune remote names by removing the bookmarks we don't want anymore,
840 """Prune remote names by removing the bookmarks we don't want anymore,
836 then writing the result back to disk
841 then writing the result back to disk
837 """
842 """
838 remotenamesext = extensions.find(b'remotenames')
843 remotenamesext = extensions.find(b'remotenames')
839
844
840 # remotename format is:
845 # remotename format is:
841 # (node, nametype ("branches" or "bookmarks"), remote, name)
846 # (node, nametype ("branches" or "bookmarks"), remote, name)
842 nametype_idx = 1
847 nametype_idx = 1
843 remote_idx = 2
848 remote_idx = 2
844 name_idx = 3
849 name_idx = 3
845 remotenames = [
850 remotenames = [
846 remotename
851 remotename
847 for remotename in remotenamesext.readremotenames(repo)
852 for remotename in remotenamesext.readremotenames(repo)
848 if remotename[remote_idx] == path
853 if remotename[remote_idx] == path
849 ]
854 ]
850 remote_bm_names = [
855 remote_bm_names = [
851 remotename[name_idx]
856 remotename[name_idx]
852 for remotename in remotenames
857 for remotename in remotenames
853 if remotename[nametype_idx] == b"bookmarks"
858 if remotename[nametype_idx] == b"bookmarks"
854 ]
859 ]
855
860
856 for name in names:
861 for name in names:
857 if name not in remote_bm_names:
862 if name not in remote_bm_names:
858 raise error.Abort(
863 raise error.Abort(
859 _(
864 _(
860 b"infinitepush bookmark '{}' does not exist "
865 b"infinitepush bookmark '{}' does not exist "
861 b"in path '{}'"
866 b"in path '{}'"
862 ).format(name, path)
867 ).format(name, path)
863 )
868 )
864
869
865 bookmarks = {}
870 bookmarks = {}
866 branches = collections.defaultdict(list)
871 branches = collections.defaultdict(list)
867 for node, nametype, remote, name in remotenames:
872 for node, nametype, remote, name in remotenames:
868 if nametype == b"bookmarks" and name not in names:
873 if nametype == b"bookmarks" and name not in names:
869 bookmarks[name] = node
874 bookmarks[name] = node
870 elif nametype == b"branches":
875 elif nametype == b"branches":
871 # saveremotenames wants binary nodes for branches
876 # saveremotenames wants binary nodes for branches
872 branches[name].append(bin(node))
877 branches[name].append(bin(node))
873
878
874 remotenamesext.saveremotenames(repo, path, branches, bookmarks)
879 remotenamesext.saveremotenames(repo, path, branches, bookmarks)
875
880
876
881
877 def _phasemove(orig, pushop, nodes, phase=phases.public):
882 def _phasemove(orig, pushop, nodes, phase=phases.public):
878 """prevent commits from being marked public
883 """prevent commits from being marked public
879
884
880 Since these are going to a scratch branch, they aren't really being
885 Since these are going to a scratch branch, they aren't really being
881 published."""
886 published."""
882
887
883 if phase != phases.public:
888 if phase != phases.public:
884 orig(pushop, nodes, phase)
889 orig(pushop, nodes, phase)
885
890
886
891
887 @exchange.b2partsgenerator(scratchbranchparttype)
892 @exchange.b2partsgenerator(scratchbranchparttype)
888 def partgen(pushop, bundler):
893 def partgen(pushop, bundler):
889 bookmark = pushop.ui.config(experimental, configbookmark)
894 bookmark = pushop.ui.config(experimental, configbookmark)
890 scratchpush = pushop.ui.configbool(experimental, configscratchpush)
895 scratchpush = pushop.ui.configbool(experimental, configscratchpush)
891 if b'changesets' in pushop.stepsdone or not scratchpush:
896 if b'changesets' in pushop.stepsdone or not scratchpush:
892 return
897 return
893
898
894 if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
899 if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
895 return
900 return
896
901
897 pushop.stepsdone.add(b'changesets')
902 pushop.stepsdone.add(b'changesets')
898 if not pushop.outgoing.missing:
903 if not pushop.outgoing.missing:
899 pushop.ui.status(_(b'no changes found\n'))
904 pushop.ui.status(_(b'no changes found\n'))
900 pushop.cgresult = 0
905 pushop.cgresult = 0
901 return
906 return
902
907
903 # This parameter tells the server that the following bundle is an
908 # This parameter tells the server that the following bundle is an
904 # infinitepush. This let's it switch the part processing to our infinitepush
909 # infinitepush. This let's it switch the part processing to our infinitepush
    # code path.
    bundler.addparam(b"infinitepush", b"True")

    scratchparts = bundleparts.getscratchbranchparts(
        pushop.repo, pushop.remote, pushop.outgoing, pushop.ui, bookmark
    )

    for scratchpart in scratchparts:
        bundler.addpart(scratchpart)

    def handlereply(op):
        # server either succeeds or aborts; no code to read
        pushop.cgresult = 1

    return handlereply


bundle2.capabilities[bundleparts.scratchbranchparttype] = ()
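
# A hedged usage sketch (not part of the extension): partgen above only emits
# a scratch part when the experimental scratch-push options it reads are set.
# Assuming the config names `experimental.server-bundlestore-bookmark` and
# `experimental.infinitepush-scratchpush` bound to `configbookmark` and
# `configscratchpush` earlier in this file, a client-side scratch push looks
# roughly like:
#
#   hg push default -r . \
#       --config experimental.server-bundlestore-bookmark=scratch/mywork \
#       --config experimental.infinitepush-scratchpush=True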
923
928
924
929
925 def _getrevs(bundle, oldnode, force, bookmark):
930 def _getrevs(bundle, oldnode, force, bookmark):
926 b'extracts and validates the revs to be imported'
931 b'extracts and validates the revs to be imported'
927 revs = [bundle[r] for r in bundle.revs(b'sort(bundle())')]
932 revs = [bundle[r] for r in bundle.revs(b'sort(bundle())')]
928
933
929 # new bookmark
934 # new bookmark
930 if oldnode is None:
935 if oldnode is None:
931 return revs
936 return revs
932
937
933 # Fast forward update
938 # Fast forward update
934 if oldnode in bundle and list(bundle.set(b'bundle() & %s::', oldnode)):
939 if oldnode in bundle and list(bundle.set(b'bundle() & %s::', oldnode)):
935 return revs
940 return revs
936
941
937 return revs
942 return revs
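
# Worked example (illustrative): if the incoming bundle contains A -> B and
# the bookmark previously pointed at A, then oldnode == A is present in the
# bundle and `bundle() & A::` is non-empty, so the push counts as a
# fast-forward. Note the trailing unconditional `return revs`: as written,
# non-fast-forward updates are accepted too, and `force` is never consulted.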


@contextlib.contextmanager
def logservicecall(logger, service, **kwargs):
    start = time.time()
    logger(service, eventtype=b'start', **kwargs)
    try:
        yield
        logger(
            service,
            eventtype=b'success',
            elapsedms=(time.time() - start) * 1000,
            **kwargs
        )
    except Exception as e:
        logger(
            service,
            eventtype=b'failure',
            elapsedms=(time.time() - start) * 1000,
            errormsg=str(e),
            **kwargs
        )
        raise
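
# A minimal usage sketch: storebundle below wraps its store and index
# operations in this context manager, so paired start/success (or failure)
# events carry the elapsed time in milliseconds, e.g.
#
#   with logservicecall(log, b'bundlestore', bundlesize=len(bundledata)):
#       key = store.write(bundledata)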


def _getorcreateinfinitepushlogger(op):
    logger = op.records[b'infinitepushlogger']
    if not logger:
        ui = op.repo.ui
        try:
            username = procutil.getuser()
        except Exception:
            username = b'unknown'
        # Generate random request id to be able to find all logged entries
        # for the same request. Since requestid is pseudo-generated it may
        # not be unique, but we assume that (hostname, username, requestid)
        # is unique.
        random.seed()
        requestid = random.randint(0, 2000000000)
        hostname = socket.gethostname()
        logger = functools.partial(
            ui.log,
            b'infinitepush',
            user=username,
            requestid=requestid,
            hostname=hostname,
            reponame=ui.config(b'infinitepush', b'reponame'),
        )
        op.records.add(b'infinitepushlogger', logger)
    else:
        logger = logger[0]
    return logger
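
# Usage sketch: the cached partial pins ui.log's event name to
# b'infinitepush', so callers pass a service name plus keyword metadata:
#
#   log = _getorcreateinfinitepushlogger(op)
#   log(scratchbranchparttype, eventtype=b'start')
#
# Every entry from one push then shares the same (hostname, username,
# requestid) triple, which is what makes the pseudo-random requestid
# useful for finding related log entries.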


def storetobundlestore(orig, repo, op, unbundler):
993 """stores the incoming bundle coming from push command to the bundlestore
998 """stores the incoming bundle coming from push command to the bundlestore
994 instead of applying on the revlogs"""
999 instead of applying on the revlogs"""

    repo.ui.status(_(b"storing changesets on the bundlestore\n"))
    bundler = bundle2.bundle20(repo.ui)

    # processing each part and storing it in bundler
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            bundlepart = None
            if part.type == b'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            else:
                bundlepart = bundle2.bundlepart(part.type, data=part.read())
                for key, value in pycompat.iteritems(part.params):
                    bundlepart.addparam(key, value)

                # Certain parts require a response
                if part.type in (b'pushkey', b'changegroup'):
                    if op.reply is not None:
                        rpart = op.reply.newpart(b'reply:%s' % part.type)
                        rpart.addparam(
                            b'in-reply-to', b'%d' % part.id, mandatory=False
                        )
                        rpart.addparam(b'return', b'1', mandatory=False)

            op.records.add(part.type, {b'return': 1,})
            if bundlepart:
                bundler.addpart(bundlepart)

    # storing the bundle in the bundlestore
    buf = util.chunkbuffer(bundler.getchunks())
    fd, bundlefile = pycompat.mkstemp()
    try:
        try:
            fp = os.fdopen(fd, r'wb')
            fp.write(buf.read())
        finally:
            fp.close()
        storebundle(op, {}, bundlefile)
    finally:
        try:
            os.unlink(bundlefile)
        except Exception:
            # we would rather see the original exception
            pass


def processparts(orig, repo, op, unbundler):

    # make sure we don't wrap processparts in case of `hg unbundle`
    if op.source == b'unbundle':
        return orig(repo, op, unbundler)

    # this server routes each push to bundle store
    if repo.ui.configbool(b'infinitepush', b'pushtobundlestore'):
        return storetobundlestore(orig, repo, op, unbundler)

    if unbundler.params.get(b'infinitepush') != b'True':
        return orig(repo, op, unbundler)

    handleallparts = repo.ui.configbool(b'infinitepush', b'storeallparts')

    bundler = bundle2.bundle20(repo.ui)
    cgparams = None
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            bundlepart = None
            if part.type == b'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            elif part.type == bundleparts.scratchbranchparttype:
                # Scratch branch parts need to be converted to normal
                # changegroup parts, and the extra parameters stored for later
                # when we upload to the store. Eventually those parameters will
                # be put on the actual bundle instead of this part, then we can
                # send a vanilla changegroup instead of the scratchbranch part.
                cgversion = part.params.get(b'cgversion', b'01')
                bundlepart = bundle2.bundlepart(
                    b'changegroup', data=part.read()
                )
                bundlepart.addparam(b'version', cgversion)
                cgparams = part.params

                # If we're not dumping all parts into the new bundle, we need to
                # alert the future pushkey and phase-heads handler to skip
                # the part.
                if not handleallparts:
                    op.records.add(
                        scratchbranchparttype + b'_skippushkey', True
                    )
                    op.records.add(
                        scratchbranchparttype + b'_skipphaseheads', True
                    )
            else:
                if handleallparts:
                    # Ideally we would not process any parts, and instead just
                    # forward them to the bundle for storage, but since this
                    # differs from previous behavior, we need to put it behind a
                    # config flag for incremental rollout.
                    bundlepart = bundle2.bundlepart(part.type, data=part.read())
                    for key, value in pycompat.iteritems(part.params):
                        bundlepart.addparam(key, value)

                    # Certain parts require a response
                    if part.type == b'pushkey':
                        if op.reply is not None:
                            rpart = op.reply.newpart(b'reply:pushkey')
                            rpart.addparam(
                                b'in-reply-to', str(part.id), mandatory=False
                            )
                            rpart.addparam(b'return', b'1', mandatory=False)
                else:
                    bundle2._processpart(op, part)

            if handleallparts:
                op.records.add(part.type, {b'return': 1,})
            if bundlepart:
                bundler.addpart(bundlepart)

    # If commits were sent, store them
    if cgparams:
        buf = util.chunkbuffer(bundler.getchunks())
        fd, bundlefile = pycompat.mkstemp()
        try:
            try:
                fp = os.fdopen(fd, r'wb')
                fp.write(buf.read())
            finally:
                fp.close()
            storebundle(op, cgparams, bundlefile)
        finally:
            try:
                os.unlink(bundlefile)
            except Exception:
                # we would rather see the original exception
                pass
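
# Configuration sketch for the routing above, using the two knobs that
# processparts reads (both in the [infinitepush] section):
#
#   [infinitepush]
#   # send every push straight to the bundlestore instead of the revlogs
#   pushtobundlestore = True
#   # also mirror non-changegroup parts into the stored bundle
#   storeallparts = True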


def storebundle(op, params, bundlefile):
    log = _getorcreateinfinitepushlogger(op)
    parthandlerstart = time.time()
    log(scratchbranchparttype, eventtype=b'start')
    index = op.repo.bundlestore.index
    store = op.repo.bundlestore.store
    op.records.add(scratchbranchparttype + b'_skippushkey', True)

    bundle = None
    try:  # guards bundle
        bundlepath = b"bundle:%s+%s" % (op.repo.root, bundlefile)
        bundle = hg.repository(op.repo.ui, bundlepath)

        bookmark = params.get(b'bookmark')
        bookprevnode = params.get(b'bookprevnode', b'')
        force = params.get(b'force')

        if bookmark:
            oldnode = index.getnode(bookmark)
        else:
            oldnode = None
        bundleheads = bundle.revs(b'heads(bundle())')
        if bookmark and len(bundleheads) > 1:
            raise error.Abort(
                _(b'cannot push more than one head to a scratch branch')
            )

        revs = _getrevs(bundle, oldnode, force, bookmark)

        # Notify the user of what is being pushed
        plural = b's' if len(revs) > 1 else b''
        op.repo.ui.warn(_(b"pushing %d commit%s:\n") % (len(revs), plural))
        maxoutput = 10
        for i in range(0, min(len(revs), maxoutput)):
            firstline = bundle[revs[i]].description().split(b'\n')[0][:50]
            op.repo.ui.warn(b"    %s  %s\n" % (revs[i], firstline))

        if len(revs) > maxoutput + 1:
            op.repo.ui.warn(b"    ...\n")
            firstline = bundle[revs[-1]].description().split(b'\n')[0][:50]
            op.repo.ui.warn(b"    %s  %s\n" % (revs[-1], firstline))

        nodesctx = [bundle[rev] for rev in revs]
        inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
        if bundleheads:
            newheadscount = sum(not inindex(rev) for rev in bundleheads)
        else:
            newheadscount = 0
        # If there's a bookmark specified, there should be only one head,
        # so we choose the last node, which will be that head.
        # If a bug or malicious client allows there to be a bookmark
        # with multiple heads, we will place the bookmark on the last head.
        bookmarknode = nodesctx[-1].hex() if nodesctx else None
        key = None
        if newheadscount:
            with open(bundlefile, b'rb') as f:
                bundledata = f.read()
                with logservicecall(
                    log, b'bundlestore', bundlesize=len(bundledata)
                ):
                    bundlesizelimit = 100 * 1024 * 1024  # 100 MB
                    if len(bundledata) > bundlesizelimit:
                        error_msg = (
                            b'bundle is too big: %d bytes. '
                            + b'max allowed size is 100 MB'
                        )
                        raise error.Abort(error_msg % (len(bundledata),))
                    key = store.write(bundledata)

        with logservicecall(log, b'index', newheadscount=newheadscount), index:
            if key:
                index.addbundle(key, nodesctx)
            if bookmark:
                index.addbookmark(bookmark, bookmarknode)
                _maybeaddpushbackpart(
                    op, bookmark, bookmarknode, bookprevnode, params
                )
        log(
            scratchbranchparttype,
            eventtype=b'success',
            elapsedms=(time.time() - parthandlerstart) * 1000,
        )

    except Exception as e:
        log(
            scratchbranchparttype,
            eventtype=b'failure',
            elapsedms=(time.time() - parthandlerstart) * 1000,
            errormsg=str(e),
        )
        raise
    finally:
        if bundle:
            bundle.close()
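
# Retrieval sketch (an assumption about the store/index interfaces used
# above, not code from this extension): the key returned by store.write()
# is recorded per node via index.addbundle(), so a later lookup is roughly
#
#   key = index.getbundle(ctx.hex())   # node hex -> bundle key, or None
#   bundledata = store.read(key)       # key -> raw bundle bytes, if the
#                                      # store offers a read() matching write()
#
# which is how scratch pulls would locate the stored bundle for a node.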


@bundle2.parthandler(
    scratchbranchparttype,
    (
        b'bookmark',
        b'bookprevnode',
        b'force',
        b'pushbackbookmarks',
        b'cgversion',
    ),
)
def bundle2scratchbranch(op, part):
    '''unbundle a bundle2 part containing a changegroup to store'''

    bundler = bundle2.bundle20(op.repo.ui)
    cgversion = part.params.get(b'cgversion', b'01')
    cgpart = bundle2.bundlepart(b'changegroup', data=part.read())
    cgpart.addparam(b'version', cgversion)
    bundler.addpart(cgpart)
    buf = util.chunkbuffer(bundler.getchunks())

    fd, bundlefile = pycompat.mkstemp()
    try:
        try:
            fp = os.fdopen(fd, r'wb')
            fp.write(buf.read())
        finally:
            fp.close()
        storebundle(op, part.params, bundlefile)
    finally:
        try:
            os.unlink(bundlefile)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

    return 1


def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
    if params.get(b'pushbackbookmarks'):
        if op.reply and b'pushback' in op.reply.capabilities:
            params = {
                b'namespace': b'bookmarks',
                b'key': bookmark,
                b'new': newnode,
                b'old': oldnode,
            }
            op.reply.newpart(
                b'pushkey', mandatoryparams=pycompat.iteritems(params)
            )


def bundle2pushkey(orig, op, part):
    '''Wrapper of bundle2.handlepushkey()

    The only goal is to skip calling the original function if the flag is
    set. It's set when an infinitepush push is happening.
    '''
    if op.records[scratchbranchparttype + b'_skippushkey']:
        if op.reply is not None:
            rpart = op.reply.newpart(b'reply:pushkey')
            rpart.addparam(b'in-reply-to', str(part.id), mandatory=False)
            rpart.addparam(b'return', b'1', mandatory=False)
        return 1

    return orig(op, part)


def bundle2handlephases(orig, op, part):
    '''Wrapper of bundle2.handlephases()

    The only goal is to skip calling the original function if the flag is
    set. It's set when an infinitepush push is happening.
    '''

    if op.records[scratchbranchparttype + b'_skipphaseheads']:
        return

    return orig(op, part)


def _asyncsavemetadata(root, nodes):
    '''starts a separate process that fills in metadata for the nodes

    This function creates a separate process and doesn't wait for its
    completion. This was done to avoid slowing down pushes.
    '''

    maxnodes = 50
    if len(nodes) > maxnodes:
        return
    nodesargs = []
    for node in nodes:
        nodesargs.append(b'--node')
        nodesargs.append(node)
    with open(os.devnull, b'w+b') as devnull:
        cmdline = [
            util.hgexecutable(),
            b'debugfillinfinitepushmetadata',
            b'-R',
            root,
        ] + nodesargs
        # Process will run in background. We don't care about the return code
        subprocess.Popen(
            pycompat.rapply(procutil.tonativestr, cmdline),
            close_fds=True,
            shell=False,
            stdin=devnull,
            stdout=devnull,
            stderr=devnull,
        )
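
# Illustrative command line: for two nodes this spawns, detached in the
# background, something equivalent to
#
#   hg debugfillinfinitepushmetadata -R <root> --node <hex1> --node <hex2>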