infinitepush: use bundleoperation.source instead of hacking on tr...
Pulkit Goyal, r37256:40ee0af0 (default)
# Infinite push
#
# Copyright 2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
""" store some pushes in a remote blob store on the server (EXPERIMENTAL)

    [infinitepush]
    # Server-side and client-side option. Pattern of the infinitepush bookmark
    branchpattern = PATTERN

    # Server or client
    server = False

    # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
    indextype = disk

    # Server-side option. Used only if indextype=sql.
    # Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
    sqlhost = IP:PORT:DB_NAME:USER:PASSWORD

    # Server-side option. Used only if indextype=disk.
    # Filesystem path to the index store
    indexpath = PATH

    # Server-side option. Possible values: 'disk' or 'external'
    # Fails if not set
    storetype = disk

    # Server-side option.
    # Path to the binary that will save a bundle to the bundlestore
    # Formatted cmd line will be passed to it (see `put_args`)
    put_binary = put

    # Server-side option. Used only if storetype=external.
    # Format cmd-line string for put binary. Placeholder: {filename}
    put_args = {filename}

    # Server-side option.
    # Path to the binary that gets a bundle from the bundlestore.
    # Formatted cmd line will be passed to it (see `get_args`)
    get_binary = get

    # Server-side option. Used only if storetype=external.
    # Format cmd-line string for get binary. Placeholders: {filename} {handle}
    get_args = {filename} {handle}

    # Server-side option
    logfile = FILE

    # Server-side option
    loglevel = DEBUG

    # Server-side option. Used only if indextype=sql.
    # Sets mysql wait_timeout option.
    waittimeout = 300

    # Server-side option. Used only if indextype=sql.
    # Sets mysql innodb_lock_wait_timeout option.
    locktimeout = 120

    # Server-side option. Used only if indextype=sql.
    # Name of the repository
    reponame = ''

    # Client-side option. Used by --list-remote option. List of remote scratch
    # patterns to list if no patterns are specified.
    defaultremotepatterns = ['*']

    # Instructs infinitepush to forward all received bundle2 parts to the
    # bundle for storage. Defaults to False.
    storeallparts = True

    # routes each incoming push to the bundlestore. defaults to False
    pushtobundlestore = True

    [remotenames]
    # Client-side option
    # This option should be set only if remotenames extension is enabled.
    # Whether remote bookmarks are tracked by remotenames extension.
    bookmarks = True
"""

from __future__ import absolute_import

import collections
import contextlib
import errno
import functools
import logging
import os
import random
import re
import socket
import subprocess
import tempfile
import time

from mercurial.node import (
    bin,
    hex,
)

from mercurial.i18n import _

from mercurial.utils import (
    procutil,
    stringutil,
)

from mercurial import (
    bundle2,
    changegroup,
    commands,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    hg,
    localrepo,
    peer,
    phases,
    pushkey,
    registrar,
    util,
    wireproto,
)

from . import (
    bundleparts,
    common,
)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

configitem('infinitepush', 'server',
    default=False,
)
configitem('infinitepush', 'storetype',
    default='',
)
configitem('infinitepush', 'indextype',
    default='',
)
configitem('infinitepush', 'indexpath',
    default='',
)
configitem('infinitepush', 'storeallparts',
    default=False,
)
configitem('infinitepush', 'reponame',
    default='',
)
configitem('scratchbranch', 'storepath',
    default='',
)
configitem('infinitepush', 'branchpattern',
    default='',
)
configitem('infinitepush', 'pushtobundlestore',
    default=False,
)
configitem('experimental', 'server-bundlestore-bookmark',
    default='',
)
configitem('experimental', 'infinitepush-scratchpush',
    default=False,
)

experimental = 'experimental'
configbookmark = 'server-bundlestore-bookmark'
configscratchpush = 'infinitepush-scratchpush'

scratchbranchparttype = bundleparts.scratchbranchparttype
revsetpredicate = registrar.revsetpredicate()
templatekeyword = registrar.templatekeyword()
_scratchbranchmatcher = lambda x: False
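# matches strings made only of lowercase hex characters, i.e. strings that
# could plausibly be (a prefix of) a changeset hash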
_maybehash = re.compile(r'^[a-f0-9]+$').search

def _buildexternalbundlestore(ui):
    put_args = ui.configlist('infinitepush', 'put_args', [])
    put_binary = ui.config('infinitepush', 'put_binary')
    if not put_binary:
        raise error.Abort('put binary is not specified')
    get_args = ui.configlist('infinitepush', 'get_args', [])
    get_binary = ui.config('infinitepush', 'get_binary')
    if not get_binary:
        raise error.Abort('get binary is not specified')
    from . import store
    return store.externalbundlestore(put_binary, put_args, get_binary, get_args)

def _buildsqlindex(ui):
    sqlhost = ui.config('infinitepush', 'sqlhost')
    if not sqlhost:
        raise error.Abort(_('please set infinitepush.sqlhost'))
    host, port, db, user, password = sqlhost.split(':')
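    # e.g. sqlhost = '127.0.0.1:3306:infinitepush:ipuser:secret'
    # (hypothetical values; the five colon-separated fields are unpacked above)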
    reponame = ui.config('infinitepush', 'reponame')
    if not reponame:
        raise error.Abort(_('please set infinitepush.reponame'))

    logfile = ui.config('infinitepush', 'logfile', '')
    waittimeout = ui.configint('infinitepush', 'waittimeout', 300)
    locktimeout = ui.configint('infinitepush', 'locktimeout', 120)
    from . import sqlindexapi
    return sqlindexapi.sqlindexapi(
        reponame, host, port, db, user, password,
        logfile, _getloglevel(ui), waittimeout=waittimeout,
        locktimeout=locktimeout)

def _getloglevel(ui):
    loglevel = ui.config('infinitepush', 'loglevel', 'DEBUG')
    numeric_loglevel = getattr(logging, loglevel.upper(), None)
    if not isinstance(numeric_loglevel, int):
        raise error.Abort(_('invalid log level %s') % loglevel)
    return numeric_loglevel

def _tryhoist(ui, remotebookmark):
    '''returns a bookmark with the hoisted part removed

    The remotenames extension has a 'hoist' config that allows remote
    bookmarks to be used without specifying the remote path. For example,
    'hg update master' works as well as 'hg update remote/master'. We want
    to allow the same in infinitepush.
    '''

    if common.isremotebooksenabled(ui):
        hoist = ui.config('remotenames', 'hoistedpeer') + '/'
        if remotebookmark.startswith(hoist):
            return remotebookmark[len(hoist):]
    return remotebookmark

class bundlestore(object):
    def __init__(self, repo):
        self._repo = repo
        storetype = self._repo.ui.config('infinitepush', 'storetype')
        if storetype == 'disk':
            from . import store
            self.store = store.filebundlestore(self._repo.ui, self._repo)
        elif storetype == 'external':
            self.store = _buildexternalbundlestore(self._repo.ui)
        else:
            raise error.Abort(
                _('unknown infinitepush store type specified %s') % storetype)

        indextype = self._repo.ui.config('infinitepush', 'indextype')
        if indextype == 'disk':
            from . import fileindexapi
            self.index = fileindexapi.fileindexapi(self._repo)
        elif indextype == 'sql':
            self.index = _buildsqlindex(self._repo.ui)
        else:
            raise error.Abort(
                _('unknown infinitepush index type specified %s') % indextype)

def _isserver(ui):
    return ui.configbool('infinitepush', 'server')

def reposetup(ui, repo):
    if _isserver(ui) and repo.local():
        repo.bundlestore = bundlestore(repo)

def extsetup(ui):
    commonsetup(ui)
    if _isserver(ui):
        serverextsetup(ui)
    else:
        clientextsetup(ui)

def commonsetup(ui):
    wireproto.commands['listkeyspatterns'] = (
        wireprotolistkeyspatterns, 'namespace patterns')
    scratchbranchpat = ui.config('infinitepush', 'branchpattern')
    if scratchbranchpat:
        global _scratchbranchmatcher
        kind, pat, _scratchbranchmatcher = \
            stringutil.stringmatcher(scratchbranchpat)

def serverextsetup(ui):
    origpushkeyhandler = bundle2.parthandlermapping['pushkey']

    def newpushkeyhandler(*args, **kwargs):
        bundle2pushkey(origpushkeyhandler, *args, **kwargs)
    newpushkeyhandler.params = origpushkeyhandler.params
    bundle2.parthandlermapping['pushkey'] = newpushkeyhandler

    orighandlephasehandler = bundle2.parthandlermapping['phase-heads']
    newphaseheadshandler = lambda *args, **kwargs: \
        bundle2handlephases(orighandlephasehandler, *args, **kwargs)
    newphaseheadshandler.params = orighandlephasehandler.params
    bundle2.parthandlermapping['phase-heads'] = newphaseheadshandler

    extensions.wrapfunction(localrepo.localrepository, 'listkeys',
                            localrepolistkeys)
    wireproto.commands['lookup'] = (
        _lookupwrap(wireproto.commands['lookup'][0]), 'key')
    extensions.wrapfunction(exchange, 'getbundlechunks', getbundlechunks)

    extensions.wrapfunction(bundle2, 'processparts', processparts)

def clientextsetup(ui):
    entry = extensions.wrapcommand(commands.table, 'push', _push)

    entry[1].append(
        ('', 'bundle-store', None,
         _('force push to go to bundle store (EXPERIMENTAL)')))

    extensions.wrapcommand(commands.table, 'pull', _pull)

    extensions.wrapfunction(discovery, 'checkheads', _checkheads)

    wireproto.wirepeer.listkeyspatterns = listkeyspatterns

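    # Move the scratch part generator ahead of the regular 'changeset' part
    # generator: partgen() below marks the 'changesets' step as done for
    # scratch pushes, which keeps the regular changegroup part from being
    # generated as well.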
    partorder = exchange.b2partsgenorder
    index = partorder.index('changeset')
    partorder.insert(
        index, partorder.pop(partorder.index(scratchbranchparttype)))

def _checkheads(orig, pushop):
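    # scratch changesets are stored in the bundlestore and never become real
    # heads in the destination repo, so the usual new-head checks don't apply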
    if pushop.ui.configbool(experimental, configscratchpush, False):
        return
    return orig(pushop)

def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
    patterns = wireproto.decodelist(patterns)
    d = repo.listkeys(encoding.tolocal(namespace), patterns).iteritems()
    return pushkey.encodekeys(d)

def localrepolistkeys(orig, self, namespace, patterns=None):
    if namespace == 'bookmarks' and patterns:
        index = self.bundlestore.index
        results = {}
        bookmarks = orig(self, namespace)
        for pattern in patterns:
            results.update(index.getbookmarks(pattern))
            if pattern.endswith('*'):
                pattern = 're:^' + pattern[:-1] + '.*'
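                # e.g. a glob like 'scratch/*' (hypothetical pattern) becomes
                # 're:^scratch/.*' before being matched against local names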
            kind, pat, matcher = stringutil.stringmatcher(pattern)
            for bookmark, node in bookmarks.iteritems():
                if matcher(bookmark):
                    results[bookmark] = node
        return results
    else:
        return orig(self, namespace)

@peer.batchable
def listkeyspatterns(self, namespace, patterns):
    if not self.capable('pushkey'):
        yield {}, None
    f = peer.future()
    self.ui.debug('preparing listkeys for "%s" with pattern "%s"\n' %
                  (namespace, patterns))
    yield {
        'namespace': encoding.fromlocal(namespace),
        'patterns': wireproto.encodelist(patterns)
    }, f
    d = f.value
    self.ui.debug('received listkey for "%s": %i bytes\n'
                  % (namespace, len(d)))
    yield pushkey.decodekeys(d)

def _readbundlerevs(bundlerepo):
    return list(bundlerepo.revs('bundle()'))

def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
    '''Tells remotefilelog to include all changed files in the changegroup

    By default remotefilelog doesn't include file content in the changegroup.
    But we need to include it if we are fetching from the bundlestore.
    '''
    changedfiles = set()
    cl = bundlerepo.changelog
    for r in bundlerevs:
        # [3] means changed files
        changedfiles.update(cl.read(r)[3])
    if not changedfiles:
        return bundlecaps

    changedfiles = '\0'.join(changedfiles)
    newcaps = []
    appended = False
    for cap in (bundlecaps or []):
        if cap.startswith('excludepattern='):
            newcaps.append('\0'.join((cap, changedfiles)))
            appended = True
        else:
            newcaps.append(cap)
    if not appended:
        # excludepattern cap was not found, so just append it
        newcaps.append('excludepattern=' + changedfiles)

    return newcaps

def _rebundle(bundlerepo, bundleroots, unknownhead):
    '''
    A bundle may include more revisions than the user requested. For example,
    if the user asks for one revision but the bundle also contains its
    descendants. This function filters out all revisions that the user did
    not request.
    '''
    parts = []

    version = '02'
    outgoing = discovery.outgoing(bundlerepo, commonheads=bundleroots,
                                  missingheads=[unknownhead])
    cgstream = changegroup.makestream(bundlerepo, outgoing, version, 'pull')
    cgstream = util.chunkbuffer(cgstream).read()
    cgpart = bundle2.bundlepart('changegroup', data=cgstream)
    cgpart.addparam('version', version)
    parts.append(cgpart)

    return parts

def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
    cl = bundlerepo.changelog
    bundleroots = []
    for rev in bundlerevs:
        node = cl.node(rev)
        parents = cl.parents(node)
        for parent in parents:
            # include all revs that exist in the main repo
            # to make sure that the bundle can be applied client-side
            if parent in oldrepo:
                bundleroots.append(parent)
    return bundleroots

def _needsrebundling(head, bundlerepo):
    bundleheads = list(bundlerepo.revs('heads(bundle())'))
    return not (len(bundleheads) == 1 and
                bundlerepo[bundleheads[0]].node() == head)

def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
    '''generates the bundle that will be sent to the user

    returns a list of bundle2 parts
    '''
    parts = []
    if not _needsrebundling(head, bundlerepo):
        with util.posixfile(bundlefile, "rb") as f:
            unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
            if isinstance(unbundler, changegroup.cg1unpacker):
                part = bundle2.bundlepart('changegroup',
                                          data=unbundler._stream.read())
                part.addparam('version', '01')
                parts.append(part)
            elif isinstance(unbundler, bundle2.unbundle20):
                haschangegroup = False
                for part in unbundler.iterparts():
                    if part.type == 'changegroup':
                        haschangegroup = True
                    newpart = bundle2.bundlepart(part.type, data=part.read())
                    for key, value in part.params.iteritems():
                        newpart.addparam(key, value)
                    parts.append(newpart)

                if not haschangegroup:
                    raise error.Abort(
                        'unexpected bundle without changegroup part, ' +
                        'head: %s' % hex(head),
                        hint='report to administrator')
            else:
                raise error.Abort('unknown bundle type')
    else:
        parts = _rebundle(bundlerepo, bundleroots, head)

    return parts

def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
    heads = heads or []
    # newheads are parents of roots of scratch bundles that were requested
    newphases = {}
    scratchbundles = []
    newheads = []
    scratchheads = []
    nodestobundle = {}
    allbundlestocleanup = []
    try:
        for head in heads:
            if head not in repo.changelog.nodemap:
                if head not in nodestobundle:
                    newbundlefile = common.downloadbundle(repo, head)
                    bundlepath = "bundle:%s+%s" % (repo.root, newbundlefile)
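                    # "bundle:<repo-root>+<bundle-file>" is the url form that
                    # lets hg.repository() open an overlay bundle repo on top
                    # of the local repo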
                    bundlerepo = hg.repository(repo.ui, bundlepath)

                    allbundlestocleanup.append((bundlerepo, newbundlefile))
                    bundlerevs = set(_readbundlerevs(bundlerepo))
                    bundlecaps = _includefilelogstobundle(
                        bundlecaps, bundlerepo, bundlerevs, repo.ui)
                    cl = bundlerepo.changelog
                    bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
                    for rev in bundlerevs:
                        node = cl.node(rev)
                        newphases[hex(node)] = str(phases.draft)
                        nodestobundle[node] = (bundlerepo, bundleroots,
                                               newbundlefile)

                scratchbundles.append(
                    _generateoutputparts(head, *nodestobundle[head]))
                newheads.extend(bundleroots)
                scratchheads.append(head)
    finally:
        for bundlerepo, bundlefile in allbundlestocleanup:
            bundlerepo.close()
            try:
                os.unlink(bundlefile)
            except (IOError, OSError):
                # if we can't clean up the file then just ignore the error,
                # no need to fail
                pass

    pullfrombundlestore = bool(scratchbundles)
    wrappedchangegrouppart = False
    wrappedlistkeys = False
    oldchangegrouppart = exchange.getbundle2partsmapping['changegroup']
    try:
        def _changegrouppart(bundler, *args, **kwargs):
            # Order is important here. First add non-scratch part
            # and only then add parts with scratch bundles because
            # non-scratch part contains parents of roots of scratch bundles.
            result = oldchangegrouppart(bundler, *args, **kwargs)
            for bundle in scratchbundles:
                for part in bundle:
                    bundler.addpart(part)
            return result

        exchange.getbundle2partsmapping['changegroup'] = _changegrouppart
        wrappedchangegrouppart = True

        def _listkeys(orig, self, namespace):
            origvalues = orig(self, namespace)
            if namespace == 'phases' and pullfrombundlestore:
                if origvalues.get('publishing') == 'True':
                    # Make repo non-publishing to preserve draft phase
                    del origvalues['publishing']
                origvalues.update(newphases)
            return origvalues

        extensions.wrapfunction(localrepo.localrepository, 'listkeys',
                                _listkeys)
        wrappedlistkeys = True
        heads = list((set(newheads) | set(heads)) - set(scratchheads))
        result = orig(repo, source, heads=heads,
                      bundlecaps=bundlecaps, **kwargs)
    finally:
        if wrappedchangegrouppart:
            exchange.getbundle2partsmapping['changegroup'] = oldchangegrouppart
        if wrappedlistkeys:
            extensions.unwrapfunction(localrepo.localrepository, 'listkeys',
                                      _listkeys)
    return result

def _lookupwrap(orig):
    def _lookup(repo, proto, key):
        localkey = encoding.tolocal(key)

        if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
            scratchnode = repo.bundlestore.index.getnode(localkey)
            if scratchnode:
                return "%s %s\n" % (1, scratchnode)
            else:
                return "%s %s\n" % (0, 'scratch branch %s not found' % localkey)
        else:
            try:
                r = hex(repo.lookup(localkey))
                return "%s %s\n" % (1, r)
            except Exception as inst:
                if repo.bundlestore.index.getbundle(localkey):
                    return "%s %s\n" % (1, localkey)
                else:
                    r = str(inst)
                    return "%s %s\n" % (0, r)
    return _lookup

def _pull(orig, ui, repo, source="default", **opts):
    # Copy paste from `pull` command
    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))

    scratchbookmarks = {}
    unfi = repo.unfiltered()
    unknownnodes = []
    for rev in opts.get('rev', []):
        if rev not in unfi:
            unknownnodes.append(rev)
    if opts.get('bookmark'):
        bookmarks = []
        revs = opts.get('rev') or []
        for bookmark in opts.get('bookmark'):
            if _scratchbranchmatcher(bookmark):
                # rev is not known yet
                # it will be fetched with listkeyspatterns next
                scratchbookmarks[bookmark] = 'REVTOFETCH'
            else:
                bookmarks.append(bookmark)

        if scratchbookmarks:
            other = hg.peer(repo, opts, source)
            fetchedbookmarks = other.listkeyspatterns(
                'bookmarks', patterns=scratchbookmarks)
            for bookmark in scratchbookmarks:
                if bookmark not in fetchedbookmarks:
                    raise error.Abort('remote bookmark %s not found!' %
                                      bookmark)
                scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
                revs.append(fetchedbookmarks[bookmark])
        opts['bookmark'] = bookmarks
        opts['rev'] = revs

    if scratchbookmarks or unknownnodes:
        # Set anyincoming to True
        extensions.wrapfunction(discovery, 'findcommonincoming',
                                _findcommonincoming)
    try:
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save them before pull and restore them after
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
        result = orig(ui, repo, source, **opts)
        # TODO(stash): race condition is possible
        # if scratch bookmarks were updated right after orig.
        # But that's unlikely and shouldn't be harmful.
        if common.isremotebooksenabled(ui):
            remotescratchbookmarks.update(scratchbookmarks)
            _saveremotebookmarks(repo, remotescratchbookmarks, source)
        else:
            _savelocalbookmarks(repo, scratchbookmarks)
        return result
    finally:
        if scratchbookmarks:
            extensions.unwrapfunction(discovery, 'findcommonincoming')

def _readscratchremotebookmarks(ui, repo, other):
    if common.isremotebooksenabled(ui):
        remotenamesext = extensions.find('remotenames')
        remotepath = remotenamesext.activepath(repo.ui, other)
        result = {}
        # Let's refresh remotenames to make sure they are up to date.
        # It seems that `repo.names['remotebookmarks']` may return stale
        # bookmarks, which results in deleting scratch bookmarks. Our best
        # guess for how to fix it is to use `clearnames()`
        repo._remotenames.clearnames()
        for remotebookmark in repo.names['remotebookmarks'].listnames(repo):
            path, bookname = remotenamesext.splitremotename(remotebookmark)
            if path == remotepath and _scratchbranchmatcher(bookname):
                nodes = repo.names['remotebookmarks'].nodes(repo,
                                                            remotebookmark)
                if nodes:
                    result[bookname] = hex(nodes[0])
        return result
    else:
        return {}

def _saveremotebookmarks(repo, newbookmarks, remote):
    remotenamesext = extensions.find('remotenames')
    remotepath = remotenamesext.activepath(repo.ui, remote)
    branches = collections.defaultdict(list)
    bookmarks = {}
    remotenames = remotenamesext.readremotenames(repo)
    for hexnode, nametype, remote, rname in remotenames:
        if remote != remotepath:
            continue
        if nametype == 'bookmarks':
            if rname in newbookmarks:
                # It's possible that a normal bookmark matches the scratch
                # branch pattern. In this case just use the current bookmark
                # node
                del newbookmarks[rname]
            bookmarks[rname] = hexnode
        elif nametype == 'branches':
            # saveremotenames expects 20 byte binary nodes for branches
            branches[rname].append(bin(hexnode))

    for bookmark, hexnode in newbookmarks.iteritems():
        bookmarks[bookmark] = hexnode
    remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)

def _savelocalbookmarks(repo, bookmarks):
    if not bookmarks:
        return
    with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr:
        changes = []
        for scratchbook, node in bookmarks.iteritems():
            changectx = repo[node]
            changes.append((scratchbook, changectx.node()))
        repo._bookmarks.applychanges(repo, tr, changes)

def _findcommonincoming(orig, *args, **kwargs):
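    # force 'anyincoming' to True: scratch nodes live in the bundlestore, so
    # plain discovery against the server repo would often report nothing new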
    common, inc, remoteheads = orig(*args, **kwargs)
    return common, True, remoteheads

def _push(orig, ui, repo, dest=None, *args, **opts):

    bookmark = opts.get('bookmark')
    # we only support pushing one infinitepush bookmark at a time
    if len(bookmark) == 1:
        bookmark = bookmark[0]
    else:
        bookmark = ''

    oldphasemove = None
    overrides = {(experimental, configbookmark): bookmark}

    with ui.configoverride(overrides, 'infinitepush'):
        scratchpush = opts.get('bundle_store')
        if _scratchbranchmatcher(bookmark):
            scratchpush = True
            # bundle2 can be sent back after push (for example, a bundle2
            # containing a `pushkey` part to update bookmarks)
            ui.setconfig(experimental, 'bundle2.pushback', True)

        if scratchpush:
            # this is an infinitepush; we don't want the bookmark to be
            # applied, rather it should be stored in the bundlestore
            opts['bookmark'] = []
            ui.setconfig(experimental, configscratchpush, True)
            oldphasemove = extensions.wrapfunction(exchange,
                                                   '_localphasemove',
                                                   _phasemove)
        # Copy-paste from `push` command
        path = ui.paths.getpath(dest, default=('default-push', 'default'))
        if not path:
            raise error.Abort(_('default repository not configured!'),
                              hint=_("see 'hg help config.paths'"))
        destpath = path.pushloc or path.loc
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save them before push and restore them after
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
        result = orig(ui, repo, dest, *args, **opts)
        if common.isremotebooksenabled(ui):
            if bookmark and scratchpush:
                other = hg.peer(repo, opts, destpath)
                fetchedbookmarks = other.listkeyspatterns('bookmarks',
                                                          patterns=[bookmark])
                remotescratchbookmarks.update(fetchedbookmarks)
            _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
    if oldphasemove:
        exchange._localphasemove = oldphasemove
    return result

def _deleteinfinitepushbookmarks(ui, repo, path, names):
    """Prune remote names by removing the bookmarks we don't want anymore,
    then writing the result back to disk
    """
    remotenamesext = extensions.find('remotenames')

    # remotename format is:
    # (node, nametype ("branches" or "bookmarks"), remote, name)
    nametype_idx = 1
    remote_idx = 2
    name_idx = 3
    remotenames = [remotename for remotename in \
                   remotenamesext.readremotenames(repo) \
                   if remotename[remote_idx] == path]
    remote_bm_names = [remotename[name_idx] for remotename in \
                       remotenames if remotename[nametype_idx] == "bookmarks"]

    for name in names:
        if name not in remote_bm_names:
            raise error.Abort(_("infinitepush bookmark '{}' does not exist "
                                "in path '{}'").format(name, path))

    bookmarks = {}
    branches = collections.defaultdict(list)
    for node, nametype, remote, name in remotenames:
        if nametype == "bookmarks" and name not in names:
            bookmarks[name] = node
        elif nametype == "branches":
            # saveremotenames wants binary nodes for branches
            branches[name].append(bin(node))

    remotenamesext.saveremotenames(repo, path, branches, bookmarks)

def _phasemove(orig, pushop, nodes, phase=phases.public):
    """prevent commits from being marked public

    Since these are going to a scratch branch, they aren't really being
    published."""

    if phase != phases.public:
        orig(pushop, nodes, phase)

@exchange.b2partsgenerator(scratchbranchparttype)
def partgen(pushop, bundler):
    bookmark = pushop.ui.config(experimental, configbookmark)
    scratchpush = pushop.ui.configbool(experimental, configscratchpush)
    if 'changesets' in pushop.stepsdone or not scratchpush:
        return

    if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
        return

    pushop.stepsdone.add('changesets')
    if not pushop.outgoing.missing:
        pushop.ui.status(_('no changes found\n'))
        pushop.cgresult = 0
        return

    # This parameter tells the server that the following bundle is an
    # infinitepush. This lets it switch the part processing to our
    # infinitepush code path.
    bundler.addparam("infinitepush", "True")

    scratchparts = bundleparts.getscratchbranchparts(pushop.repo,
                                                     pushop.remote,
                                                     pushop.outgoing,
                                                     pushop.ui,
                                                     bookmark)

    for scratchpart in scratchparts:
        bundler.addpart(scratchpart)

    def handlereply(op):
        # server either succeeds or aborts; no code to read
        pushop.cgresult = 1

    return handlereply

bundle2.capabilities[bundleparts.scratchbranchparttype] = ()
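# advertising the scratch part type in the bundle2 capabilities is what lets
# partgen() above decide whether the remote can accept scratch parts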
824
824
825 def _getrevs(bundle, oldnode, force, bookmark):
825 def _getrevs(bundle, oldnode, force, bookmark):
826 'extracts and validates the revs to be imported'
826 'extracts and validates the revs to be imported'
827 revs = [bundle[r] for r in bundle.revs('sort(bundle())')]
827 revs = [bundle[r] for r in bundle.revs('sort(bundle())')]
828
828
829 # new bookmark
829 # new bookmark
830 if oldnode is None:
830 if oldnode is None:
831 return revs
831 return revs
832
832
833 # Fast forward update
833 # Fast forward update
834 if oldnode in bundle and list(bundle.set('bundle() & %s::', oldnode)):
834 if oldnode in bundle and list(bundle.set('bundle() & %s::', oldnode)):
835 return revs
835 return revs
836
836
837 return revs
837 return revs
838
838
@contextlib.contextmanager
def logservicecall(logger, service, **kwargs):
    start = time.time()
    logger(service, eventtype='start', **kwargs)
    try:
        yield
        logger(service, eventtype='success',
               elapsedms=(time.time() - start) * 1000, **kwargs)
    except Exception as e:
        logger(service, eventtype='failure',
               elapsedms=(time.time() - start) * 1000, errormsg=str(e),
               **kwargs)
        raise

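# Example usage (a sketch; `log` would come from
# _getorcreateinfinitepushlogger below and `data` is hypothetical):
#
#   with logservicecall(log, 'bundlestore', bundlesize=len(data)):
#       key = store.write(data)
#
# One 'start' event is logged on entry, then either 'success' or 'failure'
# (with errormsg) on exit, each carrying elapsedms.
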
def _getorcreateinfinitepushlogger(op):
    logger = op.records['infinitepushlogger']
    if not logger:
        ui = op.repo.ui
        try:
            username = procutil.getuser()
        except Exception:
            username = 'unknown'
        # Generate random request id to be able to find all logged entries
        # for the same request. Since requestid is pseudo-generated it may
        # not be unique, but we assume that (hostname, username, requestid)
        # is unique.
        random.seed()
        requestid = random.randint(0, 2000000000)
        hostname = socket.gethostname()
        logger = functools.partial(ui.log, 'infinitepush', user=username,
                                   requestid=requestid, hostname=hostname,
                                   reponame=ui.config('infinitepush',
                                                      'reponame'))
        op.records.add('infinitepushlogger', logger)
    else:
        logger = logger[0]
    return logger

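# The returned logger is a functools.partial around ui.log, so a call like
# (illustrative):
#
#   log = _getorcreateinfinitepushlogger(op)
#   log(scratchbranchparttype, eventtype='start')
#
# is recorded together with the user, requestid, hostname and reponame
# keys bound above.
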
def storetobundlestore(orig, repo, op, unbundler):
    """stores the incoming bundle coming from a push command in the
    bundlestore instead of applying it to the revlogs"""

    repo.ui.status(_("storing changesets on the bundlestore\n"))
    bundler = bundle2.bundle20(repo.ui)

    # processing each part and storing it in bundler
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            bundlepart = None
            if part.type == 'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            else:
                bundlepart = bundle2.bundlepart(part.type, data=part.read())
                for key, value in part.params.iteritems():
                    bundlepart.addparam(key, value)

                # Certain parts require a response
                if part.type in ('pushkey', 'changegroup'):
                    if op.reply is not None:
                        rpart = op.reply.newpart('reply:%s' % part.type)
                        rpart.addparam('in-reply-to', str(part.id),
                                       mandatory=False)
                        rpart.addparam('return', '1', mandatory=False)

            op.records.add(part.type, {
                'return': 1,
            })
            if bundlepart:
                bundler.addpart(bundlepart)

    # storing the bundle in the bundlestore
    buf = util.chunkbuffer(bundler.getchunks())
    fd, bundlefile = tempfile.mkstemp()
    try:
        try:
            fp = os.fdopen(fd, 'wb')
            fp.write(buf.read())
        finally:
            fp.close()
        storebundle(op, {}, bundlefile)
    finally:
        try:
            os.unlink(bundlefile)
        except Exception:
            # we would rather see the original exception
            pass

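# The "write the bundle chunks to a temporary file, hand the path to
# storebundle(), unlink" dance above also appears in processparts() and
# bundle2scratchbranch() below. A minimal sketch of a shared helper (shown
# for illustration only; this extension does not actually define or use it):

def _writetempbundle_sketch(chunks):
    '''write bundle2 chunks to a temporary file and return its path

    The caller is responsible for unlinking the returned path.
    '''
    fd, bundlefile = tempfile.mkstemp()
    fp = os.fdopen(fd, 'wb')
    try:
        for chunk in chunks:
            fp.write(chunk)
    finally:
        fp.close()
    return bundlefile
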
def processparts(orig, repo, op, unbundler):

    # make sure we don't wrap processparts in case of `hg unbundle`
    if op.source == 'unbundle':
        return orig(repo, op, unbundler)

    # this server routes each push to bundle store
    if repo.ui.configbool('infinitepush', 'pushtobundlestore'):
        return storetobundlestore(orig, repo, op, unbundler)

    if unbundler.params.get('infinitepush') != 'True':
        return orig(repo, op, unbundler)

    handleallparts = repo.ui.configbool('infinitepush', 'storeallparts')

    bundler = bundle2.bundle20(repo.ui)
    cgparams = None
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            bundlepart = None
            if part.type == 'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            elif part.type == bundleparts.scratchbranchparttype:
                # Scratch branch parts need to be converted to normal
                # changegroup parts, and the extra parameters stored for later
                # when we upload to the store. Eventually those parameters will
                # be put on the actual bundle instead of this part, then we can
                # send a vanilla changegroup instead of the scratchbranch part.
                cgversion = part.params.get('cgversion', '01')
                bundlepart = bundle2.bundlepart('changegroup',
                                                data=part.read())
                bundlepart.addparam('version', cgversion)
                cgparams = part.params

                # If we're not dumping all parts into the new bundle, we need
                # to alert the future pushkey and phase-heads handler to skip
                # the part.
                if not handleallparts:
                    op.records.add(scratchbranchparttype + '_skippushkey',
                                   True)
                    op.records.add(scratchbranchparttype + '_skipphaseheads',
                                   True)
            else:
                if handleallparts:
                    # Ideally we would not process any parts, and instead just
                    # forward them to the bundle for storage, but since this
                    # differs from previous behavior, we need to put it behind
                    # a config flag for incremental rollout.
                    bundlepart = bundle2.bundlepart(part.type,
                                                    data=part.read())
                    for key, value in part.params.iteritems():
                        bundlepart.addparam(key, value)

                    # Certain parts require a response
                    if part.type == 'pushkey':
                        if op.reply is not None:
                            rpart = op.reply.newpart('reply:pushkey')
                            rpart.addparam('in-reply-to', str(part.id),
                                           mandatory=False)
                            rpart.addparam('return', '1', mandatory=False)
                else:
                    bundle2._processpart(op, part)

            if handleallparts:
                op.records.add(part.type, {
                    'return': 1,
                })
            if bundlepart:
                bundler.addpart(bundlepart)

    # If commits were sent, store them
    if cgparams:
        buf = util.chunkbuffer(bundler.getchunks())
        fd, bundlefile = tempfile.mkstemp()
        try:
            try:
                fp = os.fdopen(fd, 'wb')
                fp.write(buf.read())
            finally:
                fp.close()
            storebundle(op, cgparams, bundlefile)
        finally:
            try:
                os.unlink(bundlefile)
            except Exception:
                # we would rather see the original exception
                pass

def storebundle(op, params, bundlefile):
    log = _getorcreateinfinitepushlogger(op)
    parthandlerstart = time.time()
    log(scratchbranchparttype, eventtype='start')
    index = op.repo.bundlestore.index
    store = op.repo.bundlestore.store
    op.records.add(scratchbranchparttype + '_skippushkey', True)

    bundle = None
    try:  # guards bundle
        bundlepath = "bundle:%s+%s" % (op.repo.root, bundlefile)
        bundle = hg.repository(op.repo.ui, bundlepath)

        bookmark = params.get('bookmark')
        bookprevnode = params.get('bookprevnode', '')
        force = params.get('force')

        if bookmark:
            oldnode = index.getnode(bookmark)
        else:
            oldnode = None
        bundleheads = bundle.revs('heads(bundle())')
        if bookmark and len(bundleheads) > 1:
            raise error.Abort(
                _('cannot push more than one head to a scratch branch'))

        revs = _getrevs(bundle, oldnode, force, bookmark)

        # Notify the user of what is being pushed
        plural = 's' if len(revs) > 1 else ''
        op.repo.ui.warn(_("pushing %s commit%s:\n") % (len(revs), plural))
        maxoutput = 10
        for i in range(0, min(len(revs), maxoutput)):
            firstline = bundle[revs[i]].description().split('\n')[0][:50]
            op.repo.ui.warn(("    %s  %s\n") % (revs[i], firstline))

        if len(revs) > maxoutput + 1:
            op.repo.ui.warn(("    ...\n"))
            firstline = bundle[revs[-1]].description().split('\n')[0][:50]
            op.repo.ui.warn(("    %s  %s\n") % (revs[-1], firstline))

        nodesctx = [bundle[rev] for rev in revs]
        inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
        if bundleheads:
            newheadscount = sum(not inindex(rev) for rev in bundleheads)
        else:
            newheadscount = 0
        # If there's a bookmark specified, there should be only one head,
        # so we choose the last node, which will be that head.
        # If a bug or malicious client allows there to be a bookmark
        # with multiple heads, we will place the bookmark on the last head.
        bookmarknode = nodesctx[-1].hex() if nodesctx else None
        key = None
        if newheadscount:
            with open(bundlefile, 'r') as f:
                bundledata = f.read()
                with logservicecall(log, 'bundlestore',
                                    bundlesize=len(bundledata)):
                    bundlesizelimit = 100 * 1024 * 1024  # 100 MB
                    if len(bundledata) > bundlesizelimit:
                        error_msg = ('bundle is too big: %d bytes. ' +
                                     'max allowed size is 100 MB')
                        raise error.Abort(error_msg % (len(bundledata),))
                    key = store.write(bundledata)

        with logservicecall(log, 'index', newheadscount=newheadscount), index:
            if key:
                index.addbundle(key, nodesctx)
            if bookmark:
                index.addbookmark(bookmark, bookmarknode)
                _maybeaddpushbackpart(op, bookmark, bookmarknode,
                                      bookprevnode, params)
        log(scratchbranchparttype, eventtype='success',
            elapsedms=(time.time() - parthandlerstart) * 1000)

    except Exception as e:
        log(scratchbranchparttype, eventtype='failure',
            elapsedms=(time.time() - parthandlerstart) * 1000,
            errormsg=str(e))
        raise
    finally:
        if bundle:
            bundle.close()

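# Illustrative store/index interaction, matching the calls above (the
# bookmark name and node are made up):
#
#   key = store.write(bundledata)            # opaque handle into blob store
#   index.addbundle(key, nodesctx)           # map each node to that handle
#   index.addbookmark('scratch/foo', node)   # scratch bookmark -> node hex
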
@bundle2.parthandler(scratchbranchparttype,
                     ('bookmark', 'bookprevnode', 'force',
                      'pushbackbookmarks', 'cgversion'))
def bundle2scratchbranch(op, part):
    '''unbundle a bundle2 part containing a changegroup to store'''

    bundler = bundle2.bundle20(op.repo.ui)
    cgversion = part.params.get('cgversion', '01')
    cgpart = bundle2.bundlepart('changegroup', data=part.read())
    cgpart.addparam('version', cgversion)
    bundler.addpart(cgpart)
    buf = util.chunkbuffer(bundler.getchunks())

    fd, bundlefile = tempfile.mkstemp()
    try:
        try:
            fp = os.fdopen(fd, 'wb')
            fp.write(buf.read())
        finally:
            fp.close()
        storebundle(op, part.params, bundlefile)
    finally:
        try:
            os.unlink(bundlefile)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

    return 1

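# A scratch part arrives with its metadata as part parameters, e.g.
# (illustrative values):
#
#   bookmark=scratch/foo bookprevnode=<40-hex> force= cgversion=02
#
# storebundle() receives them via the `params` argument and uses `bookmark`
# and `bookprevnode` to update the index and build the pushback reply.
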
def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
    if params.get('pushbackbookmarks'):
        if op.reply and 'pushback' in op.reply.capabilities:
            params = {
                'namespace': 'bookmarks',
                'key': bookmark,
                'new': newnode,
                'old': oldnode,
            }
            op.reply.newpart('pushkey', mandatoryparams=params.iteritems())

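# When the client advertised the 'pushback' capability, the reply part built
# above is an ordinary 'pushkey' part, e.g. (illustrative values):
#
#   namespace=bookmarks key=scratch/foo old=<oldhex> new=<newhex>
#
# which the client applies to move its local bookmark to the new node.
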
def bundle2pushkey(orig, op, part):
    '''Wrapper of bundle2.handlepushkey()

    The only goal is to skip calling the original function if the skip
    flag is set. It is set while an infinitepush push is in progress.
    '''
    if op.records[scratchbranchparttype + '_skippushkey']:
        if op.reply is not None:
            rpart = op.reply.newpart('reply:pushkey')
            rpart.addparam('in-reply-to', str(part.id), mandatory=False)
            rpart.addparam('return', '1', mandatory=False)
        return 1

    return orig(op, part)

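# Both this wrapper and bundle2handlephases below consult the skip records
# set earlier by processparts()/storebundle(), e.g.:
#
#   op.records.add(scratchbranchparttype + '_skippushkey', True)
#
# so during an infinitepush push the pushkey/phase-heads parts are
# acknowledged but never applied to the server repo.
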
def bundle2handlephases(orig, op, part):
    '''Wrapper of bundle2.handlephases()

    The only goal is to skip calling the original function if the skip
    flag is set. It is set while an infinitepush push is in progress.
    '''

    if op.records[scratchbranchparttype + '_skipphaseheads']:
        return

    return orig(op, part)

def _asyncsavemetadata(root, nodes):
    '''starts a separate process that fills metadata for the nodes

    This function creates a separate process and doesn't wait for its
    completion. This was done to avoid slowing down pushes.
    '''

    maxnodes = 50
    if len(nodes) > maxnodes:
        return
    nodesargs = []
    for node in nodes:
        nodesargs.append('--node')
        nodesargs.append(node)
    with open(os.devnull, 'w+b') as devnull:
        cmdline = [util.hgexecutable(), 'debugfillinfinitepushmetadata',
                   '-R', root] + nodesargs
        # Process will run in background. We don't care about the return code
        subprocess.Popen(cmdline, close_fds=True, shell=False,
                         stdin=devnull, stdout=devnull, stderr=devnull)
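
# The spawned command line looks like (illustrative path and node values):
#
#   hg debugfillinfinitepushmetadata -R /path/to/repo \
#       --node <hex1> --node <hex2>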