##// END OF EJS Templates
py3: use pycompat.{strkwargs|byteskwargs} in infinitepush...
Pulkit Goyal -
r37595:e7eea858 default
parent child Browse files
Show More
@@ -1,1184 +1,1186 b''
1 1 # Infinite push
2 2 #
3 3 # Copyright 2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """ store some pushes in a remote blob store on the server (EXPERIMENTAL)
8 8
9 9 [infinitepush]
10 10 # Server-side and client-side option. Pattern of the infinitepush bookmark
11 11 branchpattern = PATTERN
12 12
13 13 # Server or client
14 14 server = False
15 15
16 16 # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
17 17 indextype = disk
18 18
19 19 # Server-side option. Used only if indextype=sql.
20 20 # Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
21 21 sqlhost = IP:PORT:DB_NAME:USER:PASSWORD
22 22
23 23 # Server-side option. Used only if indextype=disk.
24 24 # Filesystem path to the index store
25 25 indexpath = PATH
26 26
27 27 # Server-side option. Possible values: 'disk' or 'external'
28 28 # Fails if not set
29 29 storetype = disk
30 30
31 31 # Server-side option.
32 32 # Path to the binary that will save bundle to the bundlestore
33 33 # Formatted cmd line will be passed to it (see `put_args`)
34 34 put_binary = put
35 35
# Server-side option. Used only if storetype=external.
# Format cmd-line string for put binary. Placeholder: {filename}
put_args = {filename}
39 39
40 40 # Server-side option.
41 41 # Path to the binary that get bundle from the bundlestore.
42 42 # Formatted cmd line will be passed to it (see `get_args`)
43 43 get_binary = get
44 44
# Server-side option. Used only if storetype=external.
# Format cmd-line string for get binary. Placeholders: {filename} {handle}
get_args = {filename} {handle}
48 48
49 49 # Server-side option
logfile = FILE
51 51
52 52 # Server-side option
53 53 loglevel = DEBUG
54 54
55 55 # Server-side option. Used only if indextype=sql.
56 56 # Sets mysql wait_timeout option.
57 57 waittimeout = 300
58 58
59 59 # Server-side option. Used only if indextype=sql.
60 60 # Sets mysql innodb_lock_wait_timeout option.
61 61 locktimeout = 120
62 62
63 63 # Server-side option. Used only if indextype=sql.
64 64 # Name of the repository
65 65 reponame = ''
66 66
67 67 # Client-side option. Used by --list-remote option. List of remote scratch
68 68 # patterns to list if no patterns are specified.
69 69 defaultremotepatterns = ['*']
70 70
71 71 # Instructs infinitepush to forward all received bundle2 parts to the
72 72 # bundle for storage. Defaults to False.
73 73 storeallparts = True
74 74
75 75 # routes each incoming push to the bundlestore. defaults to False
76 76 pushtobundlestore = True
77 77
78 78 [remotenames]
79 79 # Client-side option
80 80 # This option should be set only if remotenames extension is enabled.
81 81 # Whether remote bookmarks are tracked by remotenames extension.
82 82 bookmarks = True
83 83 """
84 84
85 85 from __future__ import absolute_import
86 86
87 87 import collections
88 88 import contextlib
89 89 import errno
90 90 import functools
91 91 import logging
92 92 import os
93 93 import random
94 94 import re
95 95 import socket
96 96 import subprocess
97 97 import tempfile
98 98 import time
99 99
100 100 from mercurial.node import (
101 101 bin,
102 102 hex,
103 103 )
104 104
105 105 from mercurial.i18n import _
106 106
107 107 from mercurial.utils import (
108 108 procutil,
109 109 stringutil,
110 110 )
111 111
112 112 from mercurial import (
113 113 bundle2,
114 114 changegroup,
115 115 commands,
116 116 discovery,
117 117 encoding,
118 118 error,
119 119 exchange,
120 120 extensions,
121 121 hg,
122 122 localrepo,
123 123 peer,
124 124 phases,
125 125 pushkey,
126 pycompat,
126 127 registrar,
127 128 util,
128 129 wireproto,
129 130 )
130 131
131 132 from . import (
132 133 bundleparts,
133 134 common,
134 135 )
135 136
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

# Registry of the config options this extension understands, populated
# through the registrar so `hg config` can validate section/name pairs.
configtable = {}
configitem = registrar.configitem(configtable)

# Whether this repo acts as an infinitepush server.
configitem('infinitepush', 'server',
    default=False,
)
# Bundle storage backend: 'disk' or 'external' (see module docstring).
configitem('infinitepush', 'storetype',
    default='',
)
# Bookmark/bundle index backend: 'disk' or 'sql'.
configitem('infinitepush', 'indextype',
    default='',
)
# Filesystem location of the disk index.
configitem('infinitepush', 'indexpath',
    default='',
)
# Forward every received bundle2 part into the stored bundle.
configitem('infinitepush', 'storeallparts',
    default=False,
)
# Repository name, required by the sql index.
configitem('infinitepush', 'reponame',
    default='',
)
configitem('scratchbranch', 'storepath',
    default='',
)
# Pattern deciding which bookmarks are treated as scratch bookmarks.
configitem('infinitepush', 'branchpattern',
    default='',
)
# Route every incoming push into the bundlestore instead of the revlogs.
configitem('infinitepush', 'pushtobundlestore',
    default=False,
)
configitem('experimental', 'server-bundlestore-bookmark',
    default='',
)
configitem('experimental', 'infinitepush-scratchpush',
    default=False,
)

# Shorthands for the experimental config knobs used throughout this file.
experimental = 'experimental'
configbookmark = 'server-bundlestore-bookmark'
configscratchpush = 'infinitepush-scratchpush'

scratchbranchparttype = bundleparts.scratchbranchparttype
revsetpredicate = registrar.revsetpredicate()
templatekeyword = registrar.templatekeyword()
# Replaced by a real matcher in commonsetup() once branchpattern is read;
# until then nothing is considered a scratch branch.
_scratchbranchmatcher = lambda x: False
_maybehash = re.compile(r'^[a-f0-9]+$').search
188 189
def _buildexternalbundlestore(ui):
    """Construct the 'external' bundle store from config.

    Reads the put/get binaries and their argument templates from the
    [infinitepush] section; aborts when either binary is unconfigured.
    """
    putargs = ui.configlist('infinitepush', 'put_args', [])
    putbinary = ui.config('infinitepush', 'put_binary')
    if not putbinary:
        raise error.Abort('put binary is not specified')
    getargs = ui.configlist('infinitepush', 'get_args', [])
    getbinary = ui.config('infinitepush', 'get_binary')
    if not getbinary:
        raise error.Abort('get binary is not specified')
    # imported lazily: only servers using the external store need it
    from . import store
    return store.externalbundlestore(putbinary, putargs, getbinary, getargs)
200 201
def _buildsqlindex(ui):
    """Construct the SQL-backed infinitepush index from config.

    Requires infinitepush.sqlhost ('IP:PORT:DB_NAME:USER:PASSWORD') and
    infinitepush.reponame; aborts if either is missing.
    """
    sqlhost = ui.config('infinitepush', 'sqlhost')
    if not sqlhost:
        raise error.Abort(_('please set infinitepush.sqlhost'))
    host, port, db, user, password = sqlhost.split(':')
    reponame = ui.config('infinitepush', 'reponame')
    if not reponame:
        raise error.Abort(_('please set infinitepush.reponame'))

    logfile = ui.config('infinitepush', 'logfile', '')
    waittimeout = ui.configint('infinitepush', 'waittimeout', 300)
    locktimeout = ui.configint('infinitepush', 'locktimeout', 120)
    # imported lazily so disk-index setups don't pull in mysql dependencies
    from . import sqlindexapi
    return sqlindexapi.sqlindexapi(
        reponame, host, port, db, user, password,
        logfile, _getloglevel(ui), waittimeout=waittimeout,
        locktimeout=locktimeout)
218 219
219 220 def _getloglevel(ui):
220 221 loglevel = ui.config('infinitepush', 'loglevel', 'DEBUG')
221 222 numeric_loglevel = getattr(logging, loglevel.upper(), None)
222 223 if not isinstance(numeric_loglevel, int):
223 224 raise error.Abort(_('invalid log level %s') % loglevel)
224 225 return numeric_loglevel
225 226
def _tryhoist(ui, remotebookmark):
    '''returns a bookmarks with hoisted part removed

    Remotenames extension has a 'hoist' config that allows to use remote
    bookmarks without specifying remote path. For example, 'hg update master'
    works as well as 'hg update remote/master'. We want to allow the same in
    infinitepush.
    '''
    if not common.isremotebooksenabled(ui):
        return remotebookmark
    hoist = ui.config('remotenames', 'hoistedpeer') + '/'
    if remotebookmark.startswith(hoist):
        return remotebookmark[len(hoist):]
    return remotebookmark
240 241
class bundlestore(object):
    """Server-side pair of blob store and index.

    ``store`` holds the raw bundle contents; ``index`` maps bookmarks and
    nodes onto stored bundles.  Backends are chosen via config:
    infinitepush.storetype ('disk'/'external') and
    infinitepush.indextype ('disk'/'sql'); anything else aborts.
    """
    def __init__(self, repo):
        self._repo = repo
        storetype = self._repo.ui.config('infinitepush', 'storetype')
        if storetype == 'disk':
            # imported lazily; only needed when this repo is a server
            from . import store
            self.store = store.filebundlestore(self._repo.ui, self._repo)
        elif storetype == 'external':
            self.store = _buildexternalbundlestore(self._repo.ui)
        else:
            raise error.Abort(
                _('unknown infinitepush store type specified %s') % storetype)

        indextype = self._repo.ui.config('infinitepush', 'indextype')
        if indextype == 'disk':
            from . import fileindexapi
            self.index = fileindexapi.fileindexapi(self._repo)
        elif indextype == 'sql':
            self.index = _buildsqlindex(self._repo.ui)
        else:
            raise error.Abort(
                _('unknown infinitepush index type specified %s') % indextype)
263 264
264 265 def _isserver(ui):
265 266 return ui.configbool('infinitepush', 'server')
266 267
def reposetup(ui, repo):
    """Attach a bundlestore to local repositories when acting as a server."""
    if not (_isserver(ui) and repo.local()):
        return
    repo.bundlestore = bundlestore(repo)
270 271
def extsetup(ui):
    """Extension entry point: shared setup, then the role-specific half."""
    commonsetup(ui)
    rolesetup = serverextsetup if _isserver(ui) else clientextsetup
    rolesetup(ui)
277 278
def commonsetup(ui):
    """Setup shared by infinitepush clients and servers.

    Registers the 'listkeyspatterns' wire protocol command and compiles
    infinitepush.branchpattern into the module-global scratch matcher.
    """
    wireproto.commands['listkeyspatterns'] = (
        wireprotolistkeyspatterns, 'namespace patterns')
    scratchbranchpat = ui.config('infinitepush', 'branchpattern')
    if scratchbranchpat:
        global _scratchbranchmatcher
        kind, pat, _scratchbranchmatcher = \
            stringutil.stringmatcher(scratchbranchpat)
286 287
def serverextsetup(ui):
    """Server-side setup: intercept pushkey/phase parts, lookups and
    getbundle so scratch data is served from the bundlestore.
    """
    origpushkeyhandler = bundle2.parthandlermapping['pushkey']

    def newpushkeyhandler(*args, **kwargs):
        bundle2pushkey(origpushkeyhandler, *args, **kwargs)
    # replacement part handlers must advertise the same params
    newpushkeyhandler.params = origpushkeyhandler.params
    bundle2.parthandlermapping['pushkey'] = newpushkeyhandler

    orighandlephasehandler = bundle2.parthandlermapping['phase-heads']
    newphaseheadshandler = lambda *args, **kwargs: \
        bundle2handlephases(orighandlephasehandler, *args, **kwargs)
    newphaseheadshandler.params = orighandlephasehandler.params
    bundle2.parthandlermapping['phase-heads'] = newphaseheadshandler

    extensions.wrapfunction(localrepo.localrepository, 'listkeys',
                            localrepolistkeys)
    wireproto.commands['lookup'] = (
        _lookupwrap(wireproto.commands['lookup'][0]), 'key')
    extensions.wrapfunction(exchange, 'getbundlechunks', getbundlechunks)

    extensions.wrapfunction(bundle2, 'processparts', processparts)
308 309
def clientextsetup(ui):
    """Client-side setup: wrap push/pull/checkheads and bookmark listing."""
    entry = extensions.wrapcommand(commands.table, 'push', _push)

    entry[1].append(
        ('', 'bundle-store', None,
         _('force push to go to bundle store (EXPERIMENTAL)')))

    extensions.wrapcommand(commands.table, 'pull', _pull)

    extensions.wrapfunction(discovery, 'checkheads', _checkheads)

    wireproto.wirepeer.listkeyspatterns = listkeyspatterns

    # move the scratchbranch part in front of the changeset part so the
    # server handles scratch data before the regular changegroup
    partorder = exchange.b2partsgenorder
    index = partorder.index('changeset')
    partorder.insert(
        index, partorder.pop(partorder.index(scratchbranchparttype)))
326 327
def _checkheads(orig, pushop):
    """Skip the new-heads check for scratch pushes; otherwise defer to orig."""
    isscratchpush = pushop.ui.configbool(experimental, configscratchpush, False)
    if isscratchpush:
        return
    return orig(pushop)
331 332
def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
    """Wire protocol handler for 'listkeyspatterns'.

    Decodes the requested patterns, asks the repo for the matching keys in
    *namespace* and returns them pushkey-encoded.
    """
    patterns = wireproto.decodelist(patterns)
    # dict.items() instead of the Python 2-only iteritems(): this module is
    # being ported to Python 3 (see the pycompat usage elsewhere in the file)
    d = repo.listkeys(encoding.tolocal(namespace), patterns).items()
    return pushkey.encodekeys(d)
336 337
def localrepolistkeys(orig, self, namespace, patterns=None):
    """listkeys() wrapper that also understands bookmark patterns.

    For the 'bookmarks' namespace with patterns, merge scratch bookmarks
    from the bundlestore index with regular bookmarks matching the same
    patterns; otherwise behave exactly like *orig*.
    """
    if namespace == 'bookmarks' and patterns:
        index = self.bundlestore.index
        results = {}
        bookmarks = orig(self, namespace)
        for pattern in patterns:
            results.update(index.getbookmarks(pattern))
            if pattern.endswith('*'):
                pattern = 're:^' + pattern[:-1] + '.*'
            kind, pat, matcher = stringutil.stringmatcher(pattern)
            # dict.items() for Python 3 compatibility (iteritems is py2-only)
            for bookmark, node in bookmarks.items():
                if matcher(bookmark):
                    results[bookmark] = node
        return results
    else:
        return orig(self, namespace)
353 354
@peer.batchable
def listkeyspatterns(self, namespace, patterns):
    """Client-side wire command: listkeys restricted to *patterns*.

    Written as a peer.batchable generator: the first yield either short-
    circuits with an empty result (server lacks pushkey) or sends the
    encoded arguments with a future; the final yield decodes the reply.
    """
    if not self.capable('pushkey'):
        yield {}, None
    f = peer.future()
    self.ui.debug('preparing listkeys for "%s" with pattern "%s"\n' %
                  (namespace, patterns))
    yield {
        'namespace': encoding.fromlocal(namespace),
        'patterns': wireproto.encodelist(patterns)
    }, f
    d = f.value
    self.ui.debug('received listkey for "%s": %i bytes\n'
                  % (namespace, len(d)))
    yield pushkey.decodekeys(d)
369 370
370 371 def _readbundlerevs(bundlerepo):
371 372 return list(bundlerepo.revs('bundle()'))
372 373
373 374 def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
374 375 '''Tells remotefilelog to include all changed files to the changegroup
375 376
376 377 By default remotefilelog doesn't include file content to the changegroup.
377 378 But we need to include it if we are fetching from bundlestore.
378 379 '''
379 380 changedfiles = set()
380 381 cl = bundlerepo.changelog
381 382 for r in bundlerevs:
382 383 # [3] means changed files
383 384 changedfiles.update(cl.read(r)[3])
384 385 if not changedfiles:
385 386 return bundlecaps
386 387
387 388 changedfiles = '\0'.join(changedfiles)
388 389 newcaps = []
389 390 appended = False
390 391 for cap in (bundlecaps or []):
391 392 if cap.startswith('excludepattern='):
392 393 newcaps.append('\0'.join((cap, changedfiles)))
393 394 appended = True
394 395 else:
395 396 newcaps.append(cap)
396 397 if not appended:
397 398 # Not found excludepattern cap. Just append it
398 399 newcaps.append('excludepattern=' + changedfiles)
399 400
400 401 return newcaps
401 402
def _rebundle(bundlerepo, bundleroots, unknownhead):
    '''
    A bundle may include more revisions than the user requested. For example,
    if the user asks for one revision the bundle may also contain its
    descendants. This function filters out all revisions that the user did
    not request, producing a fresh changegroup part covering only the path
    from *bundleroots* to *unknownhead*.
    '''
    parts = []

    version = '02'
    outgoing = discovery.outgoing(bundlerepo, commonheads=bundleroots,
                                  missingheads=[unknownhead])
    cgstream = changegroup.makestream(bundlerepo, outgoing, version, 'pull')
    # materialize the stream so it can be wrapped into a bundle2 part
    cgstream = util.chunkbuffer(cgstream).read()
    cgpart = bundle2.bundlepart('changegroup', data=cgstream)
    cgpart.addparam('version', version)
    parts.append(cgpart)

    return parts
420 421
421 422 def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
422 423 cl = bundlerepo.changelog
423 424 bundleroots = []
424 425 for rev in bundlerevs:
425 426 node = cl.node(rev)
426 427 parents = cl.parents(node)
427 428 for parent in parents:
428 429 # include all revs that exist in the main repo
429 430 # to make sure that bundle may apply client-side
430 431 if parent in oldrepo:
431 432 bundleroots.append(parent)
432 433 return bundleroots
433 434
434 435 def _needsrebundling(head, bundlerepo):
435 436 bundleheads = list(bundlerepo.revs('heads(bundle())'))
436 437 return not (len(bundleheads) == 1 and
437 438 bundlerepo[bundleheads[0]].node() == head)
438 439
def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
    '''generates bundle that will be send to the user

    returns tuple with raw bundle string and bundle type
    '''
    parts = []
    if not _needsrebundling(head, bundlerepo):
        # stored bundle matches the request exactly: replay its parts as-is
        with util.posixfile(bundlefile, "rb") as f:
            unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
            if isinstance(unbundler, changegroup.cg1unpacker):
                part = bundle2.bundlepart('changegroup',
                                          data=unbundler._stream.read())
                part.addparam('version', '01')
                parts.append(part)
            elif isinstance(unbundler, bundle2.unbundle20):
                haschangegroup = False
                for part in unbundler.iterparts():
                    if part.type == 'changegroup':
                        haschangegroup = True
                    newpart = bundle2.bundlepart(part.type, data=part.read())
                    # dict.items() for Python 3 compatibility (iteritems is
                    # py2-only; this module is being ported to py3)
                    for key, value in part.params.items():
                        newpart.addparam(key, value)
                    parts.append(newpart)

                if not haschangegroup:
                    raise error.Abort(
                        'unexpected bundle without changegroup part, ' +
                        'head: %s' % hex(head),
                        hint='report to administrator')
            else:
                raise error.Abort('unknown bundle type')
    else:
        parts = _rebundle(bundlerepo, bundleroots, head)

    return parts
474 475
def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
    """getbundle wrapper that serves scratch heads from the bundlestore.

    Heads unknown to the local changelog are downloaded from the
    bundlestore, rebundled as needed, and appended after the regular
    changegroup part; their commits are reported with draft phase.
    """
    heads = heads or []
    # newheads are parents of roots of scratch bundles that were requested
    newphases = {}
    scratchbundles = []
    newheads = []
    scratchheads = []
    nodestobundle = {}
    allbundlestocleanup = []
    try:
        for head in heads:
            if head not in repo.changelog.nodemap:
                if head not in nodestobundle:
                    # fetch the stored bundle containing this head and open
                    # it as an overlay repo
                    newbundlefile = common.downloadbundle(repo, head)
                    bundlepath = "bundle:%s+%s" % (repo.root, newbundlefile)
                    bundlerepo = hg.repository(repo.ui, bundlepath)

                    allbundlestocleanup.append((bundlerepo, newbundlefile))
                    bundlerevs = set(_readbundlerevs(bundlerepo))
                    bundlecaps = _includefilelogstobundle(
                        bundlecaps, bundlerepo, bundlerevs, repo.ui)
                    cl = bundlerepo.changelog
                    bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
                    for rev in bundlerevs:
                        node = cl.node(rev)
                        newphases[hex(node)] = str(phases.draft)
                        nodestobundle[node] = (bundlerepo, bundleroots,
                                               newbundlefile)

                scratchbundles.append(
                    _generateoutputparts(head, *nodestobundle[head]))
                newheads.extend(bundleroots)
                scratchheads.append(head)
    finally:
        for bundlerepo, bundlefile in allbundlestocleanup:
            bundlerepo.close()
            try:
                os.unlink(bundlefile)
            except (IOError, OSError):
                # if we can't cleanup the file then just ignore the error,
                # no need to fail
                pass

    pullfrombundlestore = bool(scratchbundles)
    wrappedchangegrouppart = False
    wrappedlistkeys = False
    oldchangegrouppart = exchange.getbundle2partsmapping['changegroup']
    try:
        def _changegrouppart(bundler, *args, **kwargs):
            # Order is important here. First add non-scratch part
            # and only then add parts with scratch bundles because
            # non-scratch part contains parents of roots of scratch bundles.
            result = oldchangegrouppart(bundler, *args, **kwargs)
            for bundle in scratchbundles:
                for part in bundle:
                    bundler.addpart(part)
            return result

        exchange.getbundle2partsmapping['changegroup'] = _changegrouppart
        wrappedchangegrouppart = True

        def _listkeys(orig, self, namespace):
            origvalues = orig(self, namespace)
            if namespace == 'phases' and pullfrombundlestore:
                if origvalues.get('publishing') == 'True':
                    # Make repo non-publishing to preserve draft phase
                    del origvalues['publishing']
                origvalues.update(newphases)
            return origvalues

        extensions.wrapfunction(localrepo.localrepository, 'listkeys',
                                _listkeys)
        wrappedlistkeys = True
        # serve the scratch roots through the normal part, but not the
        # scratch heads themselves -- those come from the stored bundles
        heads = list((set(newheads) | set(heads)) - set(scratchheads))
        result = orig(repo, source, heads=heads,
                      bundlecaps=bundlecaps, **kwargs)
    finally:
        # always restore the wrapped hooks, even if orig() raised
        if wrappedchangegrouppart:
            exchange.getbundle2partsmapping['changegroup'] = oldchangegrouppart
        if wrappedlistkeys:
            extensions.unwrapfunction(localrepo.localrepository, 'listkeys',
                                      _listkeys)
    return result
558 559
def _lookupwrap(orig):
    """Wrap the 'lookup' wire command to resolve scratch names.

    Scratch bookmarks go through the bundlestore index; keys that fail a
    regular lookup are also tried against stored bundles before the
    failure is reported.
    """
    def _lookup(repo, proto, key):
        localkey = encoding.tolocal(key)

        # NOTE(review): isinstance(..., str) matches bytes only on py2; under
        # py3 wire keys are bytes -- confirm against the py3 porting series.
        if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
            scratchnode = repo.bundlestore.index.getnode(localkey)
            if scratchnode:
                return "%s %s\n" % (1, scratchnode)
            else:
                return "%s %s\n" % (0, 'scratch branch %s not found' % localkey)
        else:
            try:
                r = hex(repo.lookup(localkey))
                return "%s %s\n" % (1, r)
            except Exception as inst:
                # the key may name a whole stored bundle rather than a rev
                if repo.bundlestore.index.getbundle(localkey):
                    return "%s %s\n" % (1, localkey)
                else:
                    r = str(inst)
                    return "%s %s\n" % (0, r)
    return _lookup
580 581
def _pull(orig, ui, repo, source="default", **opts):
    """pull wrapper that resolves and restores scratch bookmarks.

    Scratch bookmarks are resolved to nodes via listkeyspatterns before
    delegating to the regular pull, then re-saved afterwards because the
    remotenames extension would otherwise drop them.
    """
    # kwargs arrive with str keys on py3; use bytes keys internally
    opts = pycompat.byteskwargs(opts)
    # Copy paste from `pull` command
    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))

    scratchbookmarks = {}
    unfi = repo.unfiltered()
    unknownnodes = []
    for rev in opts.get('rev', []):
        if rev not in unfi:
            unknownnodes.append(rev)
    if opts.get('bookmark'):
        bookmarks = []
        revs = opts.get('rev') or []
        for bookmark in opts.get('bookmark'):
            if _scratchbranchmatcher(bookmark):
                # rev is not known yet
                # it will be fetched with listkeyspatterns next
                scratchbookmarks[bookmark] = 'REVTOFETCH'
            else:
                bookmarks.append(bookmark)

        if scratchbookmarks:
            other = hg.peer(repo, opts, source)
            fetchedbookmarks = other.listkeyspatterns(
                'bookmarks', patterns=scratchbookmarks)
            for bookmark in scratchbookmarks:
                if bookmark not in fetchedbookmarks:
                    raise error.Abort('remote bookmark %s not found!' %
                                      bookmark)
                scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
                revs.append(fetchedbookmarks[bookmark])
        opts['bookmark'] = bookmarks
        opts['rev'] = revs

    if scratchbookmarks or unknownnodes:
        # Set anyincoming to True
        extensions.wrapfunction(discovery, 'findcommonincoming',
                                _findcommonincoming)
    try:
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save it before pull and restore after
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
        # the wrapped command expects str-keyed kwargs back
        result = orig(ui, repo, source, **pycompat.strkwargs(opts))
        # TODO(stash): race condition is possible
        # if scratch bookmarks was updated right after orig.
        # But that's unlikely and shouldn't be harmful.
        if common.isremotebooksenabled(ui):
            remotescratchbookmarks.update(scratchbookmarks)
            _saveremotebookmarks(repo, remotescratchbookmarks, source)
        else:
            _savelocalbookmarks(repo, scratchbookmarks)
        return result
    finally:
        if scratchbookmarks:
            extensions.unwrapfunction(discovery, 'findcommonincoming')
636 638
def _readscratchremotebookmarks(ui, repo, other):
    """Return {name: hexnode} of scratch bookmarks tracked for peer *other*.

    Returns an empty dict when the remotenames extension is disabled.
    """
    if common.isremotebooksenabled(ui):
        remotenamesext = extensions.find('remotenames')
        remotepath = remotenamesext.activepath(repo.ui, other)
        result = {}
        # Let's refresh remotenames to make sure we have it up to date
        # Seems that `repo.names['remotebookmarks']` may return stale bookmarks
        # and it results in deleting scratch bookmarks. Our best guess how to
        # fix it is to use `clearnames()`
        repo._remotenames.clearnames()
        for remotebookmark in repo.names['remotebookmarks'].listnames(repo):
            path, bookname = remotenamesext.splitremotename(remotebookmark)
            if path == remotepath and _scratchbranchmatcher(bookname):
                nodes = repo.names['remotebookmarks'].nodes(repo,
                                                            remotebookmark)
                if nodes:
                    result[bookname] = hex(nodes[0])
        return result
    else:
        return {}
657 659
def _saveremotebookmarks(repo, newbookmarks, remote):
    """Write *newbookmarks* into the remotenames store for *remote*.

    Existing remote bookmarks and branches for the path are preserved; a
    real bookmark that happens to match the scratch pattern wins over the
    scratch entry of the same name.
    """
    remotenamesext = extensions.find('remotenames')
    remotepath = remotenamesext.activepath(repo.ui, remote)
    branches = collections.defaultdict(list)
    bookmarks = {}
    remotenames = remotenamesext.readremotenames(repo)
    for hexnode, nametype, remote, rname in remotenames:
        if remote != remotepath:
            continue
        if nametype == 'bookmarks':
            if rname in newbookmarks:
                # It's possible if we have a normal bookmark that matches
                # scratch branch pattern. In this case just use the current
                # bookmark node
                del newbookmarks[rname]
            bookmarks[rname] = hexnode
        elif nametype == 'branches':
            # saveremotenames expects 20 byte binary nodes for branches
            branches[rname].append(bin(hexnode))

    # dict.items() for Python 3 compatibility (iteritems is py2-only)
    for bookmark, hexnode in newbookmarks.items():
        bookmarks[bookmark] = hexnode
    remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)
681 683
682 684 def _savelocalbookmarks(repo, bookmarks):
683 685 if not bookmarks:
684 686 return
685 687 with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr:
686 688 changes = []
687 689 for scratchbook, node in bookmarks.iteritems():
688 690 changectx = repo[node]
689 691 changes.append((scratchbook, changectx.node()))
690 692 repo._bookmarks.applychanges(repo, tr, changes)
691 693
692 694 def _findcommonincoming(orig, *args, **kwargs):
693 695 common, inc, remoteheads = orig(*args, **kwargs)
694 696 return common, True, remoteheads
695 697
def _push(orig, ui, repo, dest=None, *args, **opts):
    """push wrapper that routes scratch-bookmark pushes to the bundlestore.

    Mirrors _pull for py3: kwargs are normalized with pycompat.byteskwargs
    on entry (so the bytes-literal keys below keep working) and converted
    back with strkwargs when delegating to *orig*.
    """
    opts = pycompat.byteskwargs(opts)

    bookmark = opts.get('bookmark')
    # we only support pushing one infinitepush bookmark at once
    if len(bookmark) == 1:
        bookmark = bookmark[0]
    else:
        bookmark = ''

    oldphasemove = None
    overrides = {(experimental, configbookmark): bookmark}

    with ui.configoverride(overrides, 'infinitepush'):
        scratchpush = opts.get('bundle_store')
        if _scratchbranchmatcher(bookmark):
            scratchpush = True
            # bundle2 can be sent back after push (for example, bundle2
            # containing `pushkey` part to update bookmarks)
            ui.setconfig(experimental, 'bundle2.pushback', True)

        if scratchpush:
            # this is an infinitepush, we don't want the bookmark to be applied
            # rather that should be stored in the bundlestore
            opts['bookmark'] = []
            ui.setconfig(experimental, configscratchpush, True)
            oldphasemove = extensions.wrapfunction(exchange,
                                                   '_localphasemove',
                                                   _phasemove)
        # Copy-paste from `push` command
        path = ui.paths.getpath(dest, default=('default-push', 'default'))
        if not path:
            raise error.Abort(_('default repository not configured!'),
                              hint=_("see 'hg help config.paths'"))
        destpath = path.pushloc or path.loc
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save it before push and restore after
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
        result = orig(ui, repo, dest, *args, **pycompat.strkwargs(opts))
        if common.isremotebooksenabled(ui):
            if bookmark and scratchpush:
                other = hg.peer(repo, opts, destpath)
                fetchedbookmarks = other.listkeyspatterns('bookmarks',
                                                          patterns=[bookmark])
                remotescratchbookmarks.update(fetchedbookmarks)
            _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
    if oldphasemove:
        exchange._localphasemove = oldphasemove
    return result
744 746
def _deleteinfinitepushbookmarks(ui, repo, path, names):
    """Prune remote names by removing the bookmarks we don't want anymore,
    then writing the result back to disk
    """
    remotenamesext = extensions.find('remotenames')

    # remotename format is:
    # (node, nametype ("branches" or "bookmarks"), remote, name)
    nametype_idx = 1
    remote_idx = 2
    name_idx = 3
    remotenames = [remotename for remotename in \
                   remotenamesext.readremotenames(repo) \
                   if remotename[remote_idx] == path]
    remote_bm_names = [remotename[name_idx] for remotename in \
                       remotenames if remotename[nametype_idx] == "bookmarks"]

    # every requested name must actually exist for this path
    for name in names:
        if name not in remote_bm_names:
            raise error.Abort(_("infinitepush bookmark '{}' does not exist "
                                "in path '{}'").format(name, path))

    # keep everything except the bookmarks being deleted
    bookmarks = {}
    branches = collections.defaultdict(list)
    for node, nametype, remote, name in remotenames:
        if nametype == "bookmarks" and name not in names:
            bookmarks[name] = node
        elif nametype == "branches":
            # saveremotenames wants binary nodes for branches
            branches[name].append(bin(node))

    remotenamesext.saveremotenames(repo, path, branches, bookmarks)
777 779
def _phasemove(orig, pushop, nodes, phase=phases.public):
    """prevent commits from being marked public

    Since these are going to a scratch branch, they aren't really being
    published."""
    if phase == phases.public:
        return
    orig(pushop, nodes, phase)
786 788
@exchange.b2partsgenerator(scratchbranchparttype)
def partgen(pushop, bundler):
    """bundle2 part generator emitting scratch-branch parts on push.

    Only active for scratch pushes against servers advertising the
    scratchbranch capability; marks the 'changesets' step done so the
    regular changegroup generator is skipped.
    """
    bookmark = pushop.ui.config(experimental, configbookmark)
    scratchpush = pushop.ui.configbool(experimental, configscratchpush)
    if 'changesets' in pushop.stepsdone or not scratchpush:
        return

    if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
        return

    pushop.stepsdone.add('changesets')
    if not pushop.outgoing.missing:
        pushop.ui.status(_('no changes found\n'))
        pushop.cgresult = 0
        return

    # This parameter tells the server that the following bundle is an
    # infinitepush. This let's it switch the part processing to our infinitepush
    # code path.
    bundler.addparam("infinitepush", "True")

    scratchparts = bundleparts.getscratchbranchparts(pushop.repo,
                                                     pushop.remote,
                                                     pushop.outgoing,
                                                     pushop.ui,
                                                     bookmark)

    for scratchpart in scratchparts:
        bundler.addpart(scratchpart)

    def handlereply(op):
        # server either succeeds or aborts; no code to read
        pushop.cgresult = 1

    return handlereply
822 824
# Advertise the scratchbranch part type so clients know they may send it.
bundle2.capabilities[bundleparts.scratchbranchparttype] = ()
824 826
def _getrevs(bundle, oldnode, force, bookmark):
    'extracts and validates the revs to be imported'
    revs = [bundle[r] for r in bundle.revs('sort(bundle())')]

    # new bookmark
    if oldnode is None:
        return revs

    # Fast forward update
    if oldnode in bundle and list(bundle.set('bundle() & %s::', oldnode)):
        return revs

    # NOTE(review): every path above and this fallthrough return the same
    # list -- any non-fast-forward validation appears to have been stripped
    # out, and `force`/`bookmark` are currently unused. Confirm intentional.
    return revs
838 840
@contextlib.contextmanager
def logservicecall(logger, service, **kwargs):
    """Context manager logging start/success/failure events for *service*.

    Success and failure events carry the elapsed time in milliseconds;
    failures also carry the stringified exception before it is re-raised.
    """
    begints = time.time()
    logger(service, eventtype='start', **kwargs)
    try:
        yield
        logger(service, eventtype='success',
               elapsedms=(time.time() - begints) * 1000, **kwargs)
    except Exception as e:
        logger(service, eventtype='failure',
               elapsedms=(time.time() - begints) * 1000, errormsg=str(e),
               **kwargs)
        raise
852 854
853 855 def _getorcreateinfinitepushlogger(op):
854 856 logger = op.records['infinitepushlogger']
855 857 if not logger:
856 858 ui = op.repo.ui
857 859 try:
858 860 username = procutil.getuser()
859 861 except Exception:
860 862 username = 'unknown'
861 863 # Generate random request id to be able to find all logged entries
862 864 # for the same request. Since requestid is pseudo-generated it may
863 865 # not be unique, but we assume that (hostname, username, requestid)
864 866 # is unique.
865 867 random.seed()
866 868 requestid = random.randint(0, 2000000000)
867 869 hostname = socket.gethostname()
868 870 logger = functools.partial(ui.log, 'infinitepush', user=username,
869 871 requestid=requestid, hostname=hostname,
870 872 reponame=ui.config('infinitepush',
871 873 'reponame'))
872 874 op.records.add('infinitepushlogger', logger)
873 875 else:
874 876 logger = logger[0]
875 877 return logger
876 878
def storetobundlestore(orig, repo, op, unbundler):
    """stores the incoming bundle coming from push command to the bundlestore
    instead of applying on the revlogs

    Every incoming part is copied verbatim into a fresh bundle2 bundle;
    only 'replycaps' parts are processed in place (they configure
    op.reply).  'pushkey' and 'changegroup' parts additionally get a
    success reply part so the pushing client sees a normal push result.
    The rebuilt bundle is written to a temp file, handed to
    storebundle(), and the temp file is removed afterwards.
    """

    repo.ui.status(_("storing changesets on the bundlestore\n"))
    bundler = bundle2.bundle20(repo.ui)

    # processing each part and storing it in bundler
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            bundlepart = None
            if part.type == 'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            else:
                # Copy the part (type, payload, params) into the bundle
                # that will be stored.
                # NOTE(review): part.params.iteritems() is Python 2 only;
                # py3 needs items()/pycompat -- confirm porting plan.
                bundlepart = bundle2.bundlepart(part.type, data=part.read())
                for key, value in part.params.iteritems():
                    bundlepart.addparam(key, value)

                # Certain parts require a response
                if part.type in ('pushkey', 'changegroup'):
                    if op.reply is not None:
                        rpart = op.reply.newpart('reply:%s' % part.type)
                        rpart.addparam('in-reply-to', str(part.id),
                                       mandatory=False)
                        rpart.addparam('return', '1', mandatory=False)

                # Record a success result so later push machinery treats
                # the part as handled.
                op.records.add(part.type, {
                    'return': 1,
                })
            if bundlepart:
                bundler.addpart(bundlepart)

    # storing the bundle in the bundlestore
    buf = util.chunkbuffer(bundler.getchunks())
    fd, bundlefile = tempfile.mkstemp()
    try:
        try:
            # NOTE(review): if os.fdopen() itself raises, 'fp' is unbound
            # and the finally clause raises NameError -- confirm/harden.
            fp = os.fdopen(fd, 'wb')
            fp.write(buf.read())
        finally:
            fp.close()
        storebundle(op, {}, bundlefile)
    finally:
        try:
            os.unlink(bundlefile)
        except Exception:
            # we would rather see the original exception
            pass
926 928
def processparts(orig, repo, op, unbundler):
    """Route infinitepush pushes to the bundlestore instead of the revlogs.

    Wrapper around bundle2 part processing:
      - 'hg unbundle' and bundles without the 'infinitepush' bundle
        parameter go to the original implementation untouched;
      - with 'infinitepush.pushtobundlestore' set, the whole push is
        diverted to storetobundlestore();
      - otherwise the scratchbranch part is converted to a plain
        changegroup part, collected into a new bundle, and stored via
        storebundle().  With 'infinitepush.storeallparts' every other
        part is stored alongside it; without it the remaining parts are
        processed normally and the pushkey/phase-heads handlers are
        flagged to skip their work.
    """

    # make sure we don't wrap processparts in case of `hg unbundle`
    if op.source == 'unbundle':
        return orig(repo, op, unbundler)

    # this server routes each push to bundle store
    if repo.ui.configbool('infinitepush', 'pushtobundlestore'):
        return storetobundlestore(orig, repo, op, unbundler)

    if unbundler.params.get('infinitepush') != 'True':
        return orig(repo, op, unbundler)

    handleallparts = repo.ui.configbool('infinitepush', 'storeallparts')

    bundler = bundle2.bundle20(repo.ui)
    # cgparams stays None unless a scratchbranch part was seen; it also
    # serves as the "commits were pushed" marker below.
    cgparams = None
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            bundlepart = None
            if part.type == 'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            elif part.type == bundleparts.scratchbranchparttype:
                # Scratch branch parts need to be converted to normal
                # changegroup parts, and the extra parameters stored for later
                # when we upload to the store. Eventually those parameters will
                # be put on the actual bundle instead of this part, then we can
                # send a vanilla changegroup instead of the scratchbranch part.
                cgversion = part.params.get('cgversion', '01')
                bundlepart = bundle2.bundlepart('changegroup', data=part.read())
                bundlepart.addparam('version', cgversion)
                cgparams = part.params

                # If we're not dumping all parts into the new bundle, we need to
                # alert the future pushkey and phase-heads handler to skip
                # the part.
                if not handleallparts:
                    op.records.add(scratchbranchparttype + '_skippushkey', True)
                    op.records.add(scratchbranchparttype + '_skipphaseheads',
                                   True)
            else:
                if handleallparts:
                    # Ideally we would not process any parts, and instead just
                    # forward them to the bundle for storage, but since this
                    # differs from previous behavior, we need to put it behind a
                    # config flag for incremental rollout.
                    # NOTE(review): iteritems() is Python 2 only -- needs
                    # items()/pycompat for py3.
                    bundlepart = bundle2.bundlepart(part.type, data=part.read())
                    for key, value in part.params.iteritems():
                        bundlepart.addparam(key, value)

                    # Certain parts require a response
                    if part.type == 'pushkey':
                        if op.reply is not None:
                            rpart = op.reply.newpart('reply:pushkey')
                            rpart.addparam('in-reply-to', str(part.id),
                                           mandatory=False)
                            rpart.addparam('return', '1', mandatory=False)
                else:
                    bundle2._processpart(op, part)

                if handleallparts:
                    op.records.add(part.type, {
                        'return': 1,
                    })
            if bundlepart:
                bundler.addpart(bundlepart)

    # If commits were sent, store them
    if cgparams:
        buf = util.chunkbuffer(bundler.getchunks())
        fd, bundlefile = tempfile.mkstemp()
        try:
            try:
                # NOTE(review): if os.fdopen() raises, 'fp' is unbound and
                # the finally clause raises NameError -- confirm/harden.
                fp = os.fdopen(fd, 'wb')
                fp.write(buf.read())
            finally:
                fp.close()
            storebundle(op, cgparams, bundlefile)
        finally:
            try:
                os.unlink(bundlefile)
            except Exception:
                # we would rather see the original exception
                pass
1012 1014
def storebundle(op, params, bundlefile):
    """Import a pushed scratch bundle into the bundlestore and index.

    'params' are the scratchbranch part parameters (bookmark,
    bookprevnode, force, ...); 'bundlefile' is the path of a bundle file
    containing the pushed changegroup.  The raw bundle bytes are written
    to the store and the new heads (plus the optional bookmark) are
    recorded in the index; nothing is applied to the repo's revlogs.

    Raises error.Abort when a bookmarked push has multiple heads or the
    bundle exceeds the size limit.
    """
    log = _getorcreateinfinitepushlogger(op)
    parthandlerstart = time.time()
    log(scratchbranchparttype, eventtype='start')
    index = op.repo.bundlestore.index
    store = op.repo.bundlestore.store
    op.records.add(scratchbranchparttype + '_skippushkey', True)

    bundle = None
    try:  # guards bundle
        bundlepath = "bundle:%s+%s" % (op.repo.root, bundlefile)
        bundle = hg.repository(op.repo.ui, bundlepath)

        bookmark = params.get('bookmark')
        bookprevnode = params.get('bookprevnode', '')
        force = params.get('force')

        if bookmark:
            oldnode = index.getnode(bookmark)
        else:
            oldnode = None
        bundleheads = bundle.revs('heads(bundle())')
        if bookmark and len(bundleheads) > 1:
            raise error.Abort(
                _('cannot push more than one head to a scratch branch'))

        revs = _getrevs(bundle, oldnode, force, bookmark)

        # Notify the user of what is being pushed
        plural = 's' if len(revs) > 1 else ''
        op.repo.ui.warn(_("pushing %s commit%s:\n") % (len(revs), plural))
        maxoutput = 10
        for i in range(0, min(len(revs), maxoutput)):
            firstline = bundle[revs[i]].description().split('\n')[0][:50]
            op.repo.ui.warn(("    %s  %s\n") % (revs[i], firstline))

        # NOTE(review): when len(revs) == maxoutput + 1 the last commit is
        # silently omitted (no '...' either) -- confirm this is intended.
        if len(revs) > maxoutput + 1:
            op.repo.ui.warn(("    ...\n"))
            firstline = bundle[revs[-1]].description().split('\n')[0][:50]
            op.repo.ui.warn(("    %s  %s\n") % (revs[-1], firstline))

        nodesctx = [bundle[rev] for rev in revs]
        inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
        if bundleheads:
            newheadscount = sum(not inindex(rev) for rev in bundleheads)
        else:
            newheadscount = 0
        # If there's a bookmark specified, there should be only one head,
        # so we choose the last node, which will be that head.
        # If a bug or malicious client allows there to be a bookmark
        # with multiple heads, we will place the bookmark on the last head.
        bookmarknode = nodesctx[-1].hex() if nodesctx else None
        key = None
        if newheadscount:
            # Open in binary mode: the bundle was written with 'wb' and
            # its payload is raw bytes, not text (text mode would corrupt
            # it on Windows and break under Python 3).
            with open(bundlefile, 'rb') as f:
                bundledata = f.read()
                with logservicecall(log, 'bundlestore',
                                    bundlesize=len(bundledata)):
                    bundlesizelimit = 100 * 1024 * 1024  # 100 MB
                    if len(bundledata) > bundlesizelimit:
                        error_msg = ('bundle is too big: %d bytes. ' +
                                     'max allowed size is 100 MB')
                        raise error.Abort(error_msg % (len(bundledata),))
                    key = store.write(bundledata)

        with logservicecall(log, 'index', newheadscount=newheadscount), index:
            if key:
                index.addbundle(key, nodesctx)
            if bookmark:
                index.addbookmark(bookmark, bookmarknode)
                _maybeaddpushbackpart(op, bookmark, bookmarknode,
                                      bookprevnode, params)
        log(scratchbranchparttype, eventtype='success',
            elapsedms=(time.time() - parthandlerstart) * 1000)

    except Exception as e:
        log(scratchbranchparttype, eventtype='failure',
            elapsedms=(time.time() - parthandlerstart) * 1000,
            errormsg=str(e))
        raise
    finally:
        if bundle:
            bundle.close()
1096 1098
@bundle2.parthandler(scratchbranchparttype,
                     ('bookmark', 'bookprevnode', 'force',
                      'pushbackbookmarks', 'cgversion'))
def bundle2scratchbranch(op, part):
    '''unbundle a bundle2 part containing a changegroup to store'''

    # Re-wrap the scratch part's payload as a plain changegroup part so it
    # can be written out as a standalone bundle.
    wrapper = bundle2.bundle20(op.repo.ui)
    changegroup = bundle2.bundlepart('changegroup', data=part.read())
    changegroup.addparam('version', part.params.get('cgversion', '01'))
    wrapper.addpart(changegroup)
    payload = util.chunkbuffer(wrapper.getchunks())

    fd, tmpname = tempfile.mkstemp()
    try:
        try:
            handle = os.fdopen(fd, 'wb')
            handle.write(payload.read())
        finally:
            handle.close()
        storebundle(op, part.params, tmpname)
    finally:
        try:
            os.unlink(tmpname)
        except OSError as err:
            # An already-deleted temp file is fine; anything else is real.
            if err.errno != errno.ENOENT:
                raise

    return 1
1126 1128
1127 1129 def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
1128 1130 if params.get('pushbackbookmarks'):
1129 1131 if op.reply and 'pushback' in op.reply.capabilities:
1130 1132 params = {
1131 1133 'namespace': 'bookmarks',
1132 1134 'key': bookmark,
1133 1135 'new': newnode,
1134 1136 'old': oldnode,
1135 1137 }
1136 1138 op.reply.newpart('pushkey', mandatoryparams=params.iteritems())
1137 1139
def bundle2pushkey(orig, op, part):
    '''Wrapper of bundle2.handlepushkey()

    The only goal is to skip calling the original function if flag is set.
    It's set if infinitepush push is happening.
    '''
    if not op.records[scratchbranchparttype + '_skippushkey']:
        return orig(op, part)

    # Pretend success so the pushing client treats the pushkey as applied.
    if op.reply is not None:
        reply = op.reply.newpart('reply:pushkey')
        reply.addparam('in-reply-to', str(part.id), mandatory=False)
        reply.addparam('return', '1', mandatory=False)
    return 1
1152 1154
def bundle2handlephases(orig, op, part):
    '''Wrapper of bundle2.handlephases()

    The only goal is to skip calling the original function if flag is set.
    It's set if infinitepush push is happening.
    '''
    skipping = op.records[scratchbranchparttype + '_skipphaseheads']
    return None if skipping else orig(op, part)
1164 1166
1165 1167 def _asyncsavemetadata(root, nodes):
1166 1168 '''starts a separate process that fills metadata for the nodes
1167 1169
1168 1170 This function creates a separate process and doesn't wait for it's
1169 1171 completion. This was done to avoid slowing down pushes
1170 1172 '''
1171 1173
1172 1174 maxnodes = 50
1173 1175 if len(nodes) > maxnodes:
1174 1176 return
1175 1177 nodesargs = []
1176 1178 for node in nodes:
1177 1179 nodesargs.append('--node')
1178 1180 nodesargs.append(node)
1179 1181 with open(os.devnull, 'w+b') as devnull:
1180 1182 cmdline = [util.hgexecutable(), 'debugfillinfinitepushmetadata',
1181 1183 '-R', root] + nodesargs
1182 1184 # Process will run in background. We don't care about the return code
1183 1185 subprocess.Popen(cmdline, close_fds=True, shell=False,
1184 1186 stdin=devnull, stdout=devnull, stderr=devnull)
General Comments 0
You need to be logged in to leave comments. Login now