##// END OF EJS Templates
py3: use pycompat.fsencode() to convert tempfile name to bytes...
Pulkit Goyal -
r38099:ca1cf9b3 default
parent child Browse files
Show More
@@ -1,1187 +1,1188 b''
1 1 # Infinite push
2 2 #
3 3 # Copyright 2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """ store some pushes in a remote blob store on the server (EXPERIMENTAL)
8 8
9 9 [infinitepush]
10 10 # Server-side and client-side option. Pattern of the infinitepush bookmark
11 11 branchpattern = PATTERN
12 12
13 13 # Server or client
14 14 server = False
15 15
16 16 # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
17 17 indextype = disk
18 18
19 19 # Server-side option. Used only if indextype=sql.
20 20 # Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
21 21 sqlhost = IP:PORT:DB_NAME:USER:PASSWORD
22 22
23 23 # Server-side option. Used only if indextype=disk.
24 24 # Filesystem path to the index store
25 25 indexpath = PATH
26 26
27 27 # Server-side option. Possible values: 'disk' or 'external'
28 28 # Fails if not set
29 29 storetype = disk
30 30
31 31 # Server-side option.
32 32 # Path to the binary that will save bundle to the bundlestore
33 33 # Formatted cmd line will be passed to it (see `put_args`)
34 34 put_binary = put
35 35
# Server-side option. Used only if storetype=external.
# Format cmd-line string for put binary. Placeholder: {filename}
put_args = {filename}
39 39
40 40 # Server-side option.
41 41 # Path to the binary that get bundle from the bundlestore.
42 42 # Formatted cmd line will be passed to it (see `get_args`)
43 43 get_binary = get
44 44
# Server-side option. Used only if storetype=external.
# Format cmd-line string for get binary. Placeholders: {filename} {handle}
get_args = {filename} {handle}
48 48
49 49 # Server-side option
logfile = FILE
51 51
52 52 # Server-side option
53 53 loglevel = DEBUG
54 54
55 55 # Server-side option. Used only if indextype=sql.
56 56 # Sets mysql wait_timeout option.
57 57 waittimeout = 300
58 58
59 59 # Server-side option. Used only if indextype=sql.
60 60 # Sets mysql innodb_lock_wait_timeout option.
61 61 locktimeout = 120
62 62
63 63 # Server-side option. Used only if indextype=sql.
64 64 # Name of the repository
65 65 reponame = ''
66 66
67 67 # Client-side option. Used by --list-remote option. List of remote scratch
68 68 # patterns to list if no patterns are specified.
69 69 defaultremotepatterns = ['*']
70 70
71 71 # Instructs infinitepush to forward all received bundle2 parts to the
72 72 # bundle for storage. Defaults to False.
73 73 storeallparts = True
74 74
75 75 # routes each incoming push to the bundlestore. defaults to False
76 76 pushtobundlestore = True
77 77
78 78 [remotenames]
79 79 # Client-side option
80 80 # This option should be set only if remotenames extension is enabled.
81 81 # Whether remote bookmarks are tracked by remotenames extension.
82 82 bookmarks = True
83 83 """
84 84
85 85 from __future__ import absolute_import
86 86
87 87 import collections
88 88 import contextlib
89 89 import errno
90 90 import functools
91 91 import logging
92 92 import os
93 93 import random
94 94 import re
95 95 import socket
96 96 import subprocess
97 97 import tempfile
98 98 import time
99 99
100 100 from mercurial.node import (
101 101 bin,
102 102 hex,
103 103 )
104 104
105 105 from mercurial.i18n import _
106 106
107 107 from mercurial.utils import (
108 108 procutil,
109 109 stringutil,
110 110 )
111 111
112 112 from mercurial import (
113 113 bundle2,
114 114 changegroup,
115 115 commands,
116 116 discovery,
117 117 encoding,
118 118 error,
119 119 exchange,
120 120 extensions,
121 121 hg,
122 122 localrepo,
123 123 phases,
124 124 pushkey,
125 125 pycompat,
126 126 registrar,
127 127 util,
128 128 wireprototypes,
129 129 wireprotov1peer,
130 130 wireprotov1server,
131 131 )
132 132
133 133 from . import (
134 134 bundleparts,
135 135 common,
136 136 )
137 137
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

# Register every config option this extension reads; see the module
# docstring for what each option means.
configitem('infinitepush', 'server',
    default=False,
)
configitem('infinitepush', 'storetype',
    default='',
)
configitem('infinitepush', 'indextype',
    default='',
)
configitem('infinitepush', 'indexpath',
    default='',
)
configitem('infinitepush', 'storeallparts',
    default=False,
)
configitem('infinitepush', 'reponame',
    default='',
)
configitem('scratchbranch', 'storepath',
    default='',
)
configitem('infinitepush', 'branchpattern',
    default='',
)
configitem('infinitepush', 'pushtobundlestore',
    default=False,
)
configitem('experimental', 'server-bundlestore-bookmark',
    default='',
)
configitem('experimental', 'infinitepush-scratchpush',
    default=False,
)

# Frequently used config section/option names, kept as constants.
experimental = 'experimental'
configbookmark = 'server-bundlestore-bookmark'
configscratchpush = 'infinitepush-scratchpush'

scratchbranchparttype = bundleparts.scratchbranchparttype
revsetpredicate = registrar.revsetpredicate()
templatekeyword = registrar.templatekeyword()
# Replaced in commonsetup() by a matcher compiled from
# infinitepush.branchpattern; until then nothing matches as scratch.
_scratchbranchmatcher = lambda x: False
_maybehash = re.compile(r'^[a-f0-9]+$').search
190 190
def _buildexternalbundlestore(ui):
    """Build a store.externalbundlestore from the configured binaries.

    Reads infinitepush.put_binary/put_args and get_binary/get_args;
    aborts when either binary is not configured.
    """
    putbinary = ui.config('infinitepush', 'put_binary')
    if not putbinary:
        raise error.Abort('put binary is not specified')
    getbinary = ui.config('infinitepush', 'get_binary')
    if not getbinary:
        raise error.Abort('get binary is not specified')
    putargs = ui.configlist('infinitepush', 'put_args', [])
    getargs = ui.configlist('infinitepush', 'get_args', [])
    from . import store
    return store.externalbundlestore(putbinary, putargs, getbinary, getargs)
202 202
def _buildsqlindex(ui):
    """Build the sql-backed index from infinitepush.* configuration.

    Requires infinitepush.sqlhost (IP:PORT:DB_NAME:USER:PASSWORD) and
    infinitepush.reponame; aborts when either is missing.
    """
    sqlhost = ui.config('infinitepush', 'sqlhost')
    if not sqlhost:
        raise error.Abort(_('please set infinitepush.sqlhost'))
    host, port, db, user, password = sqlhost.split(':')
    reponame = ui.config('infinitepush', 'reponame')
    if not reponame:
        raise error.Abort(_('please set infinitepush.reponame'))

    logfile = ui.config('infinitepush', 'logfile', '')
    from . import sqlindexapi
    return sqlindexapi.sqlindexapi(
        reponame, host, port, db, user, password,
        logfile, _getloglevel(ui),
        waittimeout=ui.configint('infinitepush', 'waittimeout', 300),
        locktimeout=ui.configint('infinitepush', 'locktimeout', 120))
220 220
221 221 def _getloglevel(ui):
222 222 loglevel = ui.config('infinitepush', 'loglevel', 'DEBUG')
223 223 numeric_loglevel = getattr(logging, loglevel.upper(), None)
224 224 if not isinstance(numeric_loglevel, int):
225 225 raise error.Abort(_('invalid log level %s') % loglevel)
226 226 return numeric_loglevel
227 227
def _tryhoist(ui, remotebookmark):
    '''returns a bookmarks with hoisted part removed

    Remotenames extension has a 'hoist' config that allows to use remote
    bookmarks without specifying remote path. For example, 'hg update master'
    works as well as 'hg update remote/master'. We want to allow the same in
    infinitepush.
    '''
    if not common.isremotebooksenabled(ui):
        return remotebookmark
    prefix = ui.config('remotenames', 'hoistedpeer') + '/'
    if remotebookmark.startswith(prefix):
        return remotebookmark[len(prefix):]
    return remotebookmark
242 242
class bundlestore(object):
    """Pairs a bundle payload store with a metadata index for a repo.

    The concrete backends are selected by the infinitepush.storetype
    ('disk' or 'external') and infinitepush.indextype ('disk' or 'sql')
    config options; any other value aborts.
    """
    def __init__(self, repo):
        self._repo = repo
        ui = self._repo.ui

        storetype = ui.config('infinitepush', 'storetype')
        if storetype == 'disk':
            from . import store
            self.store = store.filebundlestore(ui, self._repo)
        elif storetype == 'external':
            self.store = _buildexternalbundlestore(ui)
        else:
            raise error.Abort(
                _('unknown infinitepush store type specified %s') % storetype)

        indextype = ui.config('infinitepush', 'indextype')
        if indextype == 'disk':
            from . import fileindexapi
            self.index = fileindexapi.fileindexapi(self._repo)
        elif indextype == 'sql':
            self.index = _buildsqlindex(ui)
        else:
            raise error.Abort(
                _('unknown infinitepush index type specified %s') % indextype)
265 265
266 266 def _isserver(ui):
267 267 return ui.configbool('infinitepush', 'server')
268 268
def reposetup(ui, repo):
    """Attach a bundlestore to local repositories when running server-side."""
    if not _isserver(ui):
        return
    if repo.local():
        repo.bundlestore = bundlestore(repo)
272 272
def extsetup(ui):
    """Extension setup: shared wiring plus server- or client-only wiring."""
    commonsetup(ui)
    setup = serverextsetup if _isserver(ui) else clientextsetup
    setup(ui)
279 279
def commonsetup(ui):
    # Runs on both client and server: register the wire protocol command
    # that lets a peer list remote bookmarks matching given patterns.
    wireprotov1server.commands['listkeyspatterns'] = (
        wireprotolistkeyspatterns, 'namespace patterns')
    # Compile infinitepush.branchpattern into the module-level matcher that
    # decides whether a bookmark name targets the bundlestore.
    scratchbranchpat = ui.config('infinitepush', 'branchpattern')
    if scratchbranchpat:
        global _scratchbranchmatcher
        kind, pat, _scratchbranchmatcher = \
            stringutil.stringmatcher(scratchbranchpat)
288 288
def serverextsetup(ui):
    # Wrap the bundle2 'pushkey' part handler so bookmark updates can be
    # diverted to the infinitepush index (see bundle2pushkey, defined
    # later in this file).
    origpushkeyhandler = bundle2.parthandlermapping['pushkey']

    def newpushkeyhandler(*args, **kwargs):
        bundle2pushkey(origpushkeyhandler, *args, **kwargs)
    # The bundle2 dispatcher reads .params off the handler; preserve them.
    newpushkeyhandler.params = origpushkeyhandler.params
    bundle2.parthandlermapping['pushkey'] = newpushkeyhandler

    # Same wrapping for 'phase-heads' (see bundle2handlephases, later).
    orighandlephasehandler = bundle2.parthandlermapping['phase-heads']
    newphaseheadshandler = lambda *args, **kwargs: \
        bundle2handlephases(orighandlephasehandler, *args, **kwargs)
    newphaseheadshandler.params = orighandlephasehandler.params
    bundle2.parthandlermapping['phase-heads'] = newphaseheadshandler

    # Serve scratch bookmarks through listkeys, resolve scratch names in
    # 'lookup', and splice bundlestore content into getbundle responses.
    extensions.wrapfunction(localrepo.localrepository, 'listkeys',
                            localrepolistkeys)
    wireprotov1server.commands['lookup'] = (
        _lookupwrap(wireprotov1server.commands['lookup'][0]), 'key')
    extensions.wrapfunction(exchange, 'getbundlechunks', getbundlechunks)

    extensions.wrapfunction(bundle2, 'processparts', processparts)
310 310
def clientextsetup(ui):
    # Add a --bundle-store flag to push so users can force a scratch push.
    entry = extensions.wrapcommand(commands.table, 'push', _push)

    entry[1].append(
        ('', 'bundle-store', None,
         _('force push to go to bundle store (EXPERIMENTAL)')))

    extensions.wrapcommand(commands.table, 'pull', _pull)

    # Scratch pushes skip the new-remote-heads check (see _checkheads).
    extensions.wrapfunction(discovery, 'checkheads', _checkheads)

    # Give peers the pattern-restricted listkeys method used by _pull.
    wireprotov1peer.wirepeer.listkeyspatterns = listkeyspatterns

    # Move the scratch part generator in front of 'changeset' so the
    # scratch part is generated (and sent) before the regular changegroup.
    partorder = exchange.b2partsgenorder
    index = partorder.index('changeset')
    partorder.insert(
        index, partorder.pop(partorder.index(scratchbranchparttype)))
328 328
def _checkheads(orig, pushop):
    """Skip discovery's head check for scratch pushes; defer otherwise."""
    isscratch = pushop.ui.configbool(experimental, configscratchpush, False)
    if isscratch:
        return
    return orig(pushop)
333 333
def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
    # Wire protocol handler: listkeys restricted to the given patterns.
    # ``patterns`` arrives wire-encoded; the result is pushkey-encoded.
    patterns = wireprototypes.decodelist(patterns)
    d = repo.listkeys(encoding.tolocal(namespace), patterns).iteritems()
    return pushkey.encodekeys(d)
338 338
def localrepolistkeys(orig, self, namespace, patterns=None):
    """listkeys wrapper that merges scratch bookmarks from the index.

    For the 'bookmarks' namespace with patterns, returns index-stored
    scratch bookmarks matching each pattern plus any regular bookmarks
    that match; every other call defers to the original implementation.
    """
    if namespace == 'bookmarks' and patterns:
        index = self.bundlestore.index
        results = {}
        bookmarks = orig(self, namespace)
        for pattern in patterns:
            results.update(index.getbookmarks(pattern))
            if pattern.endswith('*'):
                # Rewrite the trailing-glob form into an equivalent regex
                # for matching the regular bookmarks below.
                pattern = 're:^' + pattern[:-1] + '.*'
            kind, pat, matcher = stringutil.stringmatcher(pattern)
            for bookmark, node in bookmarks.iteritems():
                if matcher(bookmark):
                    results[bookmark] = node
        return results
    else:
        return orig(self, namespace)
355 355
@wireprotov1peer.batchable
def listkeyspatterns(self, namespace, patterns):
    # Peer method: listkeys limited to the given patterns.
    #
    # Follows the @batchable generator protocol: the first yield provides
    # (encoded args, future); yielding a None future means the peer lacks
    # the capability and the paired value ({}) is returned directly.  The
    # final yield produces the decoded result.
    if not self.capable('pushkey'):
        yield {}, None
    f = wireprotov1peer.future()
    self.ui.debug('preparing listkeys for "%s" with pattern "%s"\n' %
                  (namespace, patterns))
    yield {
        'namespace': encoding.fromlocal(namespace),
        'patterns': wireprototypes.encodelist(patterns)
    }, f
    d = f.value
    self.ui.debug('received listkey for "%s": %i bytes\n'
                  % (namespace, len(d)))
    yield pushkey.decodekeys(d)
371 371
372 372 def _readbundlerevs(bundlerepo):
373 373 return list(bundlerepo.revs('bundle()'))
374 374
375 375 def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
376 376 '''Tells remotefilelog to include all changed files to the changegroup
377 377
378 378 By default remotefilelog doesn't include file content to the changegroup.
379 379 But we need to include it if we are fetching from bundlestore.
380 380 '''
381 381 changedfiles = set()
382 382 cl = bundlerepo.changelog
383 383 for r in bundlerevs:
384 384 # [3] means changed files
385 385 changedfiles.update(cl.read(r)[3])
386 386 if not changedfiles:
387 387 return bundlecaps
388 388
389 389 changedfiles = '\0'.join(changedfiles)
390 390 newcaps = []
391 391 appended = False
392 392 for cap in (bundlecaps or []):
393 393 if cap.startswith('excludepattern='):
394 394 newcaps.append('\0'.join((cap, changedfiles)))
395 395 appended = True
396 396 else:
397 397 newcaps.append(cap)
398 398 if not appended:
399 399 # Not found excludepattern cap. Just append it
400 400 newcaps.append('excludepattern=' + changedfiles)
401 401
402 402 return newcaps
403 403
def _rebundle(bundlerepo, bundleroots, unknownhead):
    '''
    A bundle may include more revisions than the user requested. For
    example, if the user asks for one revision the bundle may also contain
    its descendants. This function filters out the revisions the user did
    not request by regenerating a changegroup limited to ``unknownhead``.
    '''
    parts = []

    version = '02'
    # bundleroots already exist in the main repo, so treat them as common.
    outgoing = discovery.outgoing(bundlerepo, commonheads=bundleroots,
                                  missingheads=[unknownhead])
    cgstream = changegroup.makestream(bundlerepo, outgoing, version, 'pull')
    cgstream = util.chunkbuffer(cgstream).read()
    cgpart = bundle2.bundlepart('changegroup', data=cgstream)
    cgpart.addparam('version', version)
    parts.append(cgpart)

    return parts
422 422
423 423 def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
424 424 cl = bundlerepo.changelog
425 425 bundleroots = []
426 426 for rev in bundlerevs:
427 427 node = cl.node(rev)
428 428 parents = cl.parents(node)
429 429 for parent in parents:
430 430 # include all revs that exist in the main repo
431 431 # to make sure that bundle may apply client-side
432 432 if parent in oldrepo:
433 433 bundleroots.append(parent)
434 434 return bundleroots
435 435
436 436 def _needsrebundling(head, bundlerepo):
437 437 bundleheads = list(bundlerepo.revs('heads(bundle())'))
438 438 return not (len(bundleheads) == 1 and
439 439 bundlerepo[bundleheads[0]].node() == head)
440 440
def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
    '''generates bundle parts that will be sent to the user

    Returns a list of bundle2 parts: the stored bundle's parts when it can
    be reused verbatim, otherwise a freshly generated changegroup part.
    '''
    parts = []
    if not _needsrebundling(head, bundlerepo):
        # The stored bundle already has exactly the requested head: reuse
        # its bytes instead of regenerating a changegroup.
        with util.posixfile(bundlefile, "rb") as f:
            unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
            if isinstance(unbundler, changegroup.cg1unpacker):
                part = bundle2.bundlepart('changegroup',
                                          data=unbundler._stream.read())
                part.addparam('version', '01')
                parts.append(part)
            elif isinstance(unbundler, bundle2.unbundle20):
                # Copy every part of the stored bundle2 file, preserving
                # per-part parameters.
                haschangegroup = False
                for part in unbundler.iterparts():
                    if part.type == 'changegroup':
                        haschangegroup = True
                    newpart = bundle2.bundlepart(part.type, data=part.read())
                    for key, value in part.params.iteritems():
                        newpart.addparam(key, value)
                    parts.append(newpart)

                if not haschangegroup:
                    raise error.Abort(
                        'unexpected bundle without changegroup part, ' +
                        'head: %s' % hex(head),
                        hint='report to administrator')
            else:
                raise error.Abort('unknown bundle type')
    else:
        parts = _rebundle(bundlerepo, bundleroots, head)

    return parts
476 476
def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
    """getbundle wrapper that serves scratch heads from the bundlestore.

    Requested heads unknown to the changelog are looked up in the
    bundlestore: their bundles are downloaded, opened as overlay repos,
    turned into extra bundle2 parts, and spliced in after the regular
    changegroup part.  Scratch changesets are reported as draft through a
    temporarily wrapped listkeys.
    """
    heads = heads or []
    # newheads are parents of roots of scratch bundles that were requested
    newphases = {}
    scratchbundles = []
    newheads = []
    scratchheads = []
    nodestobundle = {}
    allbundlestocleanup = []
    try:
        for head in heads:
            if head not in repo.changelog.nodemap:
                if head not in nodestobundle:
                    # Fetch the bundle containing this head and open it as
                    # an overlay repository on top of the main repo.
                    newbundlefile = common.downloadbundle(repo, head)
                    bundlepath = "bundle:%s+%s" % (repo.root, newbundlefile)
                    bundlerepo = hg.repository(repo.ui, bundlepath)

                    allbundlestocleanup.append((bundlerepo, newbundlefile))
                    bundlerevs = set(_readbundlerevs(bundlerepo))
                    bundlecaps = _includefilelogstobundle(
                        bundlecaps, bundlerepo, bundlerevs, repo.ui)
                    cl = bundlerepo.changelog
                    bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
                    for rev in bundlerevs:
                        node = cl.node(rev)
                        # Scratch changesets must stay draft on the client.
                        newphases[hex(node)] = str(phases.draft)
                        nodestobundle[node] = (bundlerepo, bundleroots,
                                               newbundlefile)

                scratchbundles.append(
                    _generateoutputparts(head, *nodestobundle[head]))
                newheads.extend(bundleroots)
                scratchheads.append(head)
    finally:
        for bundlerepo, bundlefile in allbundlestocleanup:
            bundlerepo.close()
            try:
                os.unlink(bundlefile)
            except (IOError, OSError):
                # if we can't cleanup the file then just ignore the error,
                # no need to fail
                pass

    pullfrombundlestore = bool(scratchbundles)
    wrappedchangegrouppart = False
    wrappedlistkeys = False
    oldchangegrouppart = exchange.getbundle2partsmapping['changegroup']
    try:
        def _changegrouppart(bundler, *args, **kwargs):
            # Order is important here. First add non-scratch part
            # and only then add parts with scratch bundles because
            # non-scratch part contains parents of roots of scratch bundles.
            result = oldchangegrouppart(bundler, *args, **kwargs)
            for bundle in scratchbundles:
                for part in bundle:
                    bundler.addpart(part)
            return result

        exchange.getbundle2partsmapping['changegroup'] = _changegrouppart
        wrappedchangegrouppart = True

        def _listkeys(orig, self, namespace):
            # Overlay the draft phases of scratch changesets onto the
            # phases listkeys reply for the duration of this getbundle.
            origvalues = orig(self, namespace)
            if namespace == 'phases' and pullfrombundlestore:
                if origvalues.get('publishing') == 'True':
                    # Make repo non-publishing to preserve draft phase
                    del origvalues['publishing']
                origvalues.update(newphases)
            return origvalues

        extensions.wrapfunction(localrepo.localrepository, 'listkeys',
                                _listkeys)
        wrappedlistkeys = True
        # Request the scratch roots' parents from the original
        # implementation instead of the scratch heads themselves.
        heads = list((set(newheads) | set(heads)) - set(scratchheads))
        result = orig(repo, source, heads=heads,
                      bundlecaps=bundlecaps, **kwargs)
    finally:
        if wrappedchangegrouppart:
            exchange.getbundle2partsmapping['changegroup'] = oldchangegrouppart
        if wrappedlistkeys:
            extensions.unwrapfunction(localrepo.localrepository, 'listkeys',
                                      _listkeys)
    return result
560 560
def _lookupwrap(orig):
    # Wrap the 'lookup' wire command so scratch bookmarks and nodes stored
    # only in the bundlestore can be resolved.  Replies follow the lookup
    # wire format: "1 <value>\n" on success, "0 <error>\n" on failure.
    def _lookup(repo, proto, key):
        localkey = encoding.tolocal(key)

        # NOTE(review): checking ``isinstance(localkey, str)`` is a py2
        # idiom -- on py3 wire keys are bytes, so this branch may never
        # trigger there; confirm against the py3 porting work this file
        # is part of.
        if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
            scratchnode = repo.bundlestore.index.getnode(localkey)
            if scratchnode:
                return "%d %s\n" % (1, scratchnode)
            else:
                return "%d %s\n" % (0, 'scratch branch %s not found' % localkey)
        else:
            try:
                r = hex(repo.lookup(localkey))
                return "%d %s\n" % (1, r)
            except Exception as inst:
                # Not in the changelog: a bare scratch node is still a hit
                # if the index has a bundle for it.
                if repo.bundlestore.index.getbundle(localkey):
                    return "%d %s\n" % (1, localkey)
                else:
                    r = stringutil.forcebytestr(inst)
                    return "%d %s\n" % (0, r)
    return _lookup
582 582
def _pull(orig, ui, repo, source="default", **opts):
    """pull wrapper that resolves scratch bookmarks and revs via the server.

    Scratch bookmarks are translated to nodes with listkeyspatterns before
    delegating to the original pull; remote scratch bookmarks are saved
    around the pull because remotenames would otherwise delete them.
    """
    opts = pycompat.byteskwargs(opts)
    # Copy paste from `pull` command
    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))

    scratchbookmarks = {}
    unfi = repo.unfiltered()
    unknownnodes = []
    # Revs not present even in the unfiltered repo must come from the
    # bundlestore, so discovery has to be forced below.
    for rev in opts.get('rev', []):
        if rev not in unfi:
            unknownnodes.append(rev)
    if opts.get('bookmark'):
        bookmarks = []
        revs = opts.get('rev') or []
        for bookmark in opts.get('bookmark'):
            if _scratchbranchmatcher(bookmark):
                # rev is not known yet
                # it will be fetched with listkeyspatterns next
                scratchbookmarks[bookmark] = 'REVTOFETCH'
            else:
                bookmarks.append(bookmark)

        if scratchbookmarks:
            # Ask the server for the node of each scratch bookmark.
            other = hg.peer(repo, opts, source)
            fetchedbookmarks = other.listkeyspatterns(
                'bookmarks', patterns=scratchbookmarks)
            for bookmark in scratchbookmarks:
                if bookmark not in fetchedbookmarks:
                    raise error.Abort('remote bookmark %s not found!' %
                                      bookmark)
                scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
                revs.append(fetchedbookmarks[bookmark])
        # Pull the resolved scratch nodes as plain revs; only non-scratch
        # bookmarks go through the regular bookmark machinery.
        opts['bookmark'] = bookmarks
        opts['rev'] = revs

    if scratchbookmarks or unknownnodes:
        # Set anyincoming to True
        extensions.wrapfunction(discovery, 'findcommonincoming',
                                _findcommonincoming)
    try:
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save it before pull and restore after
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
        result = orig(ui, repo, source, **pycompat.strkwargs(opts))
        # TODO(stash): race condition is possible
        # if scratch bookmarks was updated right after orig.
        # But that's unlikely and shouldn't be harmful.
        if common.isremotebooksenabled(ui):
            remotescratchbookmarks.update(scratchbookmarks)
            _saveremotebookmarks(repo, remotescratchbookmarks, source)
        else:
            _savelocalbookmarks(repo, scratchbookmarks)
        return result
    finally:
        if scratchbookmarks:
            extensions.unwrapfunction(discovery, 'findcommonincoming')
639 639
def _readscratchremotebookmarks(ui, repo, other):
    """Return {name: hexnode} for scratch bookmarks tracked for ``other``.

    Returns an empty dict when the remotenames extension is not enabled.
    """
    if common.isremotebooksenabled(ui):
        remotenamesext = extensions.find('remotenames')
        remotepath = remotenamesext.activepath(repo.ui, other)
        result = {}
        # Let's refresh remotenames to make sure we have it up to date
        # Seems that `repo.names['remotebookmarks']` may return stale bookmarks
        # and it results in deleting scratch bookmarks. Our best guess how to
        # fix it is to use `clearnames()`
        repo._remotenames.clearnames()
        for remotebookmark in repo.names['remotebookmarks'].listnames(repo):
            path, bookname = remotenamesext.splitremotename(remotebookmark)
            if path == remotepath and _scratchbranchmatcher(bookname):
                nodes = repo.names['remotebookmarks'].nodes(repo,
                                                            remotebookmark)
                if nodes:
                    result[bookname] = hex(nodes[0])
        return result
    else:
        return {}
660 660
def _saveremotebookmarks(repo, newbookmarks, remote):
    """Merge ``newbookmarks`` into remotenames' stored state for ``remote``.

    Existing bookmarks and branches for the remote are preserved; an
    existing bookmark with the same name wins over the new value.
    """
    remotenamesext = extensions.find('remotenames')
    remotepath = remotenamesext.activepath(repo.ui, remote)
    branches = collections.defaultdict(list)
    bookmarks = {}
    remotenames = remotenamesext.readremotenames(repo)
    for hexnode, nametype, remote, rname in remotenames:
        if remote != remotepath:
            continue
        if nametype == 'bookmarks':
            if rname in newbookmarks:
                # It's possible if we have a normal bookmark that matches
                # scratch branch pattern. In this case just use the current
                # bookmark node
                del newbookmarks[rname]
            bookmarks[rname] = hexnode
        elif nametype == 'branches':
            # saveremotenames expects 20 byte binary nodes for branches
            branches[rname].append(bin(hexnode))

    for bookmark, hexnode in newbookmarks.iteritems():
        bookmarks[bookmark] = hexnode
    remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)
684 684
def _savelocalbookmarks(repo, bookmarks):
    """Apply the given {name: node} mapping as local bookmarks.

    No-op for an empty mapping; otherwise runs inside the repo's locks in
    a single 'bookmark' transaction.
    """
    if not bookmarks:
        return
    with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr:
        changes = [(name, repo[node].node())
                   for name, node in bookmarks.iteritems()]
        repo._bookmarks.applychanges(repo, tr, changes)
694 694
695 695 def _findcommonincoming(orig, *args, **kwargs):
696 696 common, inc, remoteheads = orig(*args, **kwargs)
697 697 return common, True, remoteheads
698 698
def _push(orig, ui, repo, dest=None, *args, **opts):
    """push wrapper that routes scratch-bookmark pushes to the bundlestore.

    A push is treated as a scratch push when --bundle-store is given or
    the (single) pushed bookmark matches the scratch branch pattern.  The
    bookmark name is forwarded via config so partgen can embed it, and
    remote scratch bookmarks are saved/restored around the push.
    """
    bookmark = opts.get(r'bookmark')
    # we only support pushing one infinitepush bookmark at once
    if len(bookmark) == 1:
        bookmark = bookmark[0]
    else:
        bookmark = ''

    oldphasemove = None
    overrides = {(experimental, configbookmark): bookmark}

    with ui.configoverride(overrides, 'infinitepush'):
        scratchpush = opts.get('bundle_store')
        if _scratchbranchmatcher(bookmark):
            scratchpush = True
            # bundle2 can be sent back after push (for example, bundle2
            # containing `pushkey` part to update bookmarks)
            ui.setconfig(experimental, 'bundle2.pushback', True)

        if scratchpush:
            # this is an infinitepush, we don't want the bookmark to be applied
            # rather that should be stored in the bundlestore
            opts[r'bookmark'] = []
            ui.setconfig(experimental, configscratchpush, True)
            # Keep scratch changesets draft: see _phasemove.
            oldphasemove = extensions.wrapfunction(exchange,
                                                  '_localphasemove',
                                                  _phasemove)
        # Copy-paste from `push` command
        path = ui.paths.getpath(dest, default=('default-push', 'default'))
        if not path:
            raise error.Abort(_('default repository not configured!'),
                              hint=_("see 'hg help config.paths'"))
        destpath = path.pushloc or path.loc
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save it before push and restore after
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
        result = orig(ui, repo, dest, *args, **opts)
        if common.isremotebooksenabled(ui):
            if bookmark and scratchpush:
                # Learn the node the server assigned to the pushed bookmark.
                other = hg.peer(repo, opts, destpath)
                fetchedbookmarks = other.listkeyspatterns('bookmarks',
                                                          patterns=[bookmark])
                remotescratchbookmarks.update(fetchedbookmarks)
            _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
    if oldphasemove:
        exchange._localphasemove = oldphasemove
    return result
747 747
def _deleteinfinitepushbookmarks(ui, repo, path, names):
    """Prune remote names by removing the bookmarks we don't want anymore,
    then writing the result back to disk
    """
    remotenamesext = extensions.find('remotenames')

    # remotename format is:
    # (node, nametype ("branches" or "bookmarks"), remote, name)
    nametype_idx = 1
    remote_idx = 2
    name_idx = 3
    remotenames = [remotename for remotename in \
                   remotenamesext.readremotenames(repo) \
                   if remotename[remote_idx] == path]
    remote_bm_names = [remotename[name_idx] for remotename in \
                       remotenames if remotename[nametype_idx] == "bookmarks"]

    # Refuse to delete names that are not actually tracked for this path.
    for name in names:
        if name not in remote_bm_names:
            raise error.Abort(_("infinitepush bookmark '{}' does not exist "
                                "in path '{}'").format(name, path))

    # Rebuild the stored state, keeping everything except the bookmarks
    # being deleted.
    bookmarks = {}
    branches = collections.defaultdict(list)
    for node, nametype, remote, name in remotenames:
        if nametype == "bookmarks" and name not in names:
            bookmarks[name] = node
        elif nametype == "branches":
            # saveremotenames wants binary nodes for branches
            branches[name].append(bin(node))

    remotenamesext.saveremotenames(repo, path, branches, bookmarks)
780 780
def _phasemove(orig, pushop, nodes, phase=phases.public):
    """prevent commits from being marked public

    Since these are going to a scratch branch, they aren't really being
    published."""
    if phase == phases.public:
        return
    orig(pushop, nodes, phase)
789 789
@exchange.b2partsgenerator(scratchbranchparttype)
def partgen(pushop, bundler):
    # bundle2 part generator for scratch pushes: replaces the regular
    # 'changesets' step with a scratch-branch part when
    # experimental.infinitepush-scratchpush is set and the server
    # advertises the capability.
    bookmark = pushop.ui.config(experimental, configbookmark)
    scratchpush = pushop.ui.configbool(experimental, configscratchpush)
    if 'changesets' in pushop.stepsdone or not scratchpush:
        return

    if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
        return

    # Claim the changesets step so the regular changegroup part is skipped.
    pushop.stepsdone.add('changesets')
    if not pushop.outgoing.missing:
        pushop.ui.status(_('no changes found\n'))
        pushop.cgresult = 0
        return

    # This parameter tells the server that the following bundle is an
    # infinitepush. This let's it switch the part processing to our infinitepush
    # code path.
    bundler.addparam("infinitepush", "True")

    scratchparts = bundleparts.getscratchbranchparts(pushop.repo,
                                                     pushop.remote,
                                                     pushop.outgoing,
                                                     pushop.ui,
                                                     bookmark)

    for scratchpart in scratchparts:
        bundler.addpart(scratchpart)

    def handlereply(op):
        # server either succeeds or aborts; no code to read
        pushop.cgresult = 1

    return handlereply

# Advertise the scratch part in bundle2 capabilities so clients know the
# server accepts it.
bundle2.capabilities[bundleparts.scratchbranchparttype] = ()
827 827
def _getrevs(bundle, oldnode, force, bookmark):
    'extracts and validates the revs to be imported'
    revs = [bundle[r] for r in bundle.revs('sort(bundle())')]

    # new bookmark
    if oldnode is None:
        return revs

    # Fast forward update
    if oldnode in bundle and list(bundle.set('bundle() & %s::', oldnode)):
        return revs

    # NOTE(review): every path returns ``revs``, so the fast-forward check
    # above has no effect and ``force`` is never consulted.  It looks like
    # a non-fast-forward rejection step is missing here -- confirm intent.
    return revs
841 841
@contextlib.contextmanager
def logservicecall(logger, service, **kwargs):
    """Context manager logging start/success/failure of a service call.

    Elapsed time is reported in milliseconds; a failure is logged and the
    exception re-raised.
    """
    begin = time.time()
    elapsedms = lambda: (time.time() - begin) * 1000
    logger(service, eventtype='start', **kwargs)
    try:
        yield
        logger(service, eventtype='success', elapsedms=elapsedms(),
               **kwargs)
    except Exception as exc:
        logger(service, eventtype='failure', elapsedms=elapsedms(),
               errormsg=str(exc), **kwargs)
        raise
855 855
def _getorcreateinfinitepushlogger(op):
    """Return the memoized infinitepush logger for this bundle operation.

    The first call builds a ui.log partial tagged with user, hostname,
    reponame and a pseudo-random request id, and caches it in op.records;
    subsequent calls return the cached instance.
    """
    cached = op.records['infinitepushlogger']
    if cached:
        return cached[0]

    ui = op.repo.ui
    try:
        username = procutil.getuser()
    except Exception:
        username = 'unknown'
    # Generate random request id to be able to find all logged entries
    # for the same request. Since requestid is pseudo-generated it may
    # not be unique, but we assume that (hostname, username, requestid)
    # is unique.
    random.seed()
    requestid = random.randint(0, 2000000000)
    logger = functools.partial(ui.log, 'infinitepush',
                               user=username,
                               requestid=requestid,
                               hostname=socket.gethostname(),
                               reponame=ui.config('infinitepush',
                                                  'reponame'))
    op.records.add('infinitepushlogger', logger)
    return logger
879 879
def storetobundlestore(orig, repo, op, unbundler):
    """stores the incoming bundle coming from push command to the bundlestore
    instead of applying on the revlogs"""

    repo.ui.status(_("storing changesets on the bundlestore\n"))
    bundler = bundle2.bundle20(repo.ui)

    # processing each part and storing it in bundler
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            bundlepart = None
            if part.type == 'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            else:
                # Copy the part (type, payload and params) into the bundle
                # that will be written to the store.
                bundlepart = bundle2.bundlepart(part.type, data=part.read())
                for key, value in part.params.iteritems():
                    bundlepart.addparam(key, value)

                # Certain parts require a response
                if part.type in ('pushkey', 'changegroup'):
                    if op.reply is not None:
                        rpart = op.reply.newpart('reply:%s' % part.type)
                        rpart.addparam('in-reply-to', str(part.id),
                                       mandatory=False)
                        rpart.addparam('return', '1', mandatory=False)

            # record success for every part so the client sees the push as
            # fully handled even though nothing touched the revlogs
            op.records.add(part.type, {
                'return': 1,
            })
            if bundlepart:
                bundler.addpart(bundlepart)

    # storing the bundle in the bundlestore
    buf = util.chunkbuffer(bundler.getchunks())
    fd, bundlefile = tempfile.mkstemp()
    try:
        try:
            fp = os.fdopen(fd, r'wb')
            fp.write(buf.read())
        finally:
            fp.close()
        storebundle(op, {}, bundlefile)
    finally:
        try:
            os.unlink(bundlefile)
        except Exception:
            # we would rather see the original exception
            pass
929 929
def processparts(orig, repo, op, unbundler):
    # Server-side entry point that decides whether an incoming push goes to
    # the revlogs (via the wrapped `orig`) or into the bundlestore.

    # make sure we don't wrap processparts in case of `hg unbundle`
    if op.source == 'unbundle':
        return orig(repo, op, unbundler)

    # this server routes each push to bundle store
    if repo.ui.configbool('infinitepush', 'pushtobundlestore'):
        return storetobundlestore(orig, repo, op, unbundler)

    # only bundles flagged by the client (see partgen) take the scratch path
    if unbundler.params.get('infinitepush') != 'True':
        return orig(repo, op, unbundler)

    handleallparts = repo.ui.configbool('infinitepush', 'storeallparts')

    bundler = bundle2.bundle20(repo.ui)
    cgparams = None
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            bundlepart = None
            if part.type == 'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            elif part.type == bundleparts.scratchbranchparttype:
                # Scratch branch parts need to be converted to normal
                # changegroup parts, and the extra parameters stored for later
                # when we upload to the store. Eventually those parameters will
                # be put on the actual bundle instead of this part, then we can
                # send a vanilla changegroup instead of the scratchbranch part.
                cgversion = part.params.get('cgversion', '01')
                bundlepart = bundle2.bundlepart('changegroup', data=part.read())
                bundlepart.addparam('version', cgversion)
                cgparams = part.params

                # If we're not dumping all parts into the new bundle, we need to
                # alert the future pushkey and phase-heads handler to skip
                # the part.
                if not handleallparts:
                    op.records.add(scratchbranchparttype + '_skippushkey', True)
                    op.records.add(scratchbranchparttype + '_skipphaseheads',
                                   True)
            else:
                if handleallparts:
                    # Ideally we would not process any parts, and instead just
                    # forward them to the bundle for storage, but since this
                    # differs from previous behavior, we need to put it behind a
                    # config flag for incremental rollout.
                    bundlepart = bundle2.bundlepart(part.type, data=part.read())
                    for key, value in part.params.iteritems():
                        bundlepart.addparam(key, value)

                    # Certain parts require a response
                    if part.type == 'pushkey':
                        if op.reply is not None:
                            rpart = op.reply.newpart('reply:pushkey')
                            rpart.addparam('in-reply-to', str(part.id),
                                           mandatory=False)
                            rpart.addparam('return', '1', mandatory=False)
                else:
                    bundle2._processpart(op, part)

            if handleallparts:
                op.records.add(part.type, {
                    'return': 1,
                })
            if bundlepart:
                bundler.addpart(bundlepart)

    # If commits were sent, store them
    if cgparams:
        buf = util.chunkbuffer(bundler.getchunks())
        fd, bundlefile = tempfile.mkstemp()
        try:
            try:
                fp = os.fdopen(fd, r'wb')
                fp.write(buf.read())
            finally:
                fp.close()
            storebundle(op, cgparams, bundlefile)
        finally:
            try:
                os.unlink(bundlefile)
            except Exception:
                # we would rather see the original exception
                pass
1015 1015
def storebundle(op, params, bundlefile):
    """Validate the pushed bundle in *bundlefile* and persist it.

    Opens the temp file as a bundle repository, checks the scratch-branch
    constraints (at most one head when a bookmark is given), writes the raw
    bundle bytes to the store and registers nodes/bookmark in the index.
    Raises error.Abort on validation failure; logs start/success/failure.

    Note: the diff artifact that left both the old one-line `bundlepath`
    assignment and its replacement in place is resolved here to the
    committed version, which converts the (unicode on py3) tempfile name
    to bytes with pycompat.fsencode before building the bundle path.
    """
    log = _getorcreateinfinitepushlogger(op)
    parthandlerstart = time.time()
    log(scratchbranchparttype, eventtype='start')
    index = op.repo.bundlestore.index
    store = op.repo.bundlestore.store
    # the bookmark (if any) is written to the index below, not via pushkey
    op.records.add(scratchbranchparttype + '_skippushkey', True)

    bundle = None
    try: # guards bundle
        # tempfile.mkstemp returns a str on py3; Mercurial paths are bytes
        bundlepath = "bundle:%s+%s" % (op.repo.root,
                                       pycompat.fsencode(bundlefile))
        bundle = hg.repository(op.repo.ui, bundlepath)

        bookmark = params.get('bookmark')
        bookprevnode = params.get('bookprevnode', '')
        force = params.get('force')

        if bookmark:
            oldnode = index.getnode(bookmark)
        else:
            oldnode = None
        bundleheads = bundle.revs('heads(bundle())')
        if bookmark and len(bundleheads) > 1:
            raise error.Abort(
                _('cannot push more than one head to a scratch branch'))

        revs = _getrevs(bundle, oldnode, force, bookmark)

        # Notify the user of what is being pushed
        plural = 's' if len(revs) > 1 else ''
        op.repo.ui.warn(_("pushing %d commit%s:\n") % (len(revs), plural))
        maxoutput = 10
        for i in range(0, min(len(revs), maxoutput)):
            firstline = bundle[revs[i]].description().split('\n')[0][:50]
            op.repo.ui.warn(("    %s  %s\n") % (revs[i], firstline))

        if len(revs) > maxoutput + 1:
            op.repo.ui.warn(("    ...\n"))
            firstline = bundle[revs[-1]].description().split('\n')[0][:50]
            op.repo.ui.warn(("    %s  %s\n") % (revs[-1], firstline))

        nodesctx = [bundle[rev] for rev in revs]
        inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
        if bundleheads:
            newheadscount = sum(not inindex(rev) for rev in bundleheads)
        else:
            newheadscount = 0
        # If there's a bookmark specified, there should be only one head,
        # so we choose the last node, which will be that head.
        # If a bug or malicious client allows there to be a bookmark
        # with multiple heads, we will place the bookmark on the last head.
        bookmarknode = nodesctx[-1].hex() if nodesctx else None
        key = None
        if newheadscount:
            with open(bundlefile, 'rb') as f:
                bundledata = f.read()
                with logservicecall(log, 'bundlestore',
                                    bundlesize=len(bundledata)):
                    bundlesizelimit = 100 * 1024 * 1024 # 100 MB
                    if len(bundledata) > bundlesizelimit:
                        error_msg = ('bundle is too big: %d bytes. ' +
                                     'max allowed size is 100 MB')
                        raise error.Abort(error_msg % (len(bundledata),))
                    key = store.write(bundledata)

        with logservicecall(log, 'index', newheadscount=newheadscount), index:
            if key:
                index.addbundle(key, nodesctx)
            if bookmark:
                index.addbookmark(bookmark, bookmarknode)
                _maybeaddpushbackpart(op, bookmark, bookmarknode,
                                      bookprevnode, params)
        log(scratchbranchparttype, eventtype='success',
            elapsedms=(time.time() - parthandlerstart) * 1000)

    except Exception as e:
        log(scratchbranchparttype, eventtype='failure',
            elapsedms=(time.time() - parthandlerstart) * 1000,
            errormsg=str(e))
        raise
    finally:
        if bundle:
            bundle.close()
1099 1100
@bundle2.parthandler(scratchbranchparttype,
                     ('bookmark', 'bookprevnode', 'force',
                      'pushbackbookmarks', 'cgversion'))
def bundle2scratchbranch(op, part):
    '''unbundle a bundle2 part containing a changegroup to store'''

    bundler = bundle2.bundle20(op.repo.ui)
    cgversion = part.params.get('cgversion', '01')
    # Re-wrap the payload as a plain changegroup part so the stored bundle
    # can later be read back as an ordinary bundle.
    cgpart = bundle2.bundlepart('changegroup', data=part.read())
    cgpart.addparam('version', cgversion)
    bundler.addpart(cgpart)
    buf = util.chunkbuffer(bundler.getchunks())

    # Spool the rebuilt bundle to a temp file; storebundle reads it back.
    fd, bundlefile = tempfile.mkstemp()
    try:
        try:
            fp = os.fdopen(fd, r'wb')
            fp.write(buf.read())
        finally:
            fp.close()
        storebundle(op, part.params, bundlefile)
    finally:
        try:
            os.unlink(bundlefile)
        except OSError as e:
            # the file may already be gone; anything else is a real error
            if e.errno != errno.ENOENT:
                raise

    # part handlers return 1 to report the part as consumed successfully
    return 1
1129 1130
def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
    """Attach a pushkey part to the reply so the client moves its bookmark.

    Only done when the client requested it ('pushbackbookmarks' param) and
    advertises the 'pushback' reply capability.
    """
    if not params.get('pushbackbookmarks'):
        return
    if not (op.reply and 'pushback' in op.reply.capabilities):
        return
    pushkeyargs = {
        'namespace': 'bookmarks',
        'key': bookmark,
        'new': newnode,
        'old': oldnode,
    }
    op.reply.newpart('pushkey', mandatoryparams=pushkeyargs.iteritems())
1140 1141
def bundle2pushkey(orig, op, part):
    '''Wrapper of bundle2.handlepushkey()

    The only goal is to skip calling the original function if flag is set.
    It's set if infinitepush push is happening.
    '''
    if not op.records[scratchbranchparttype + '_skippushkey']:
        return orig(op, part)

    # Fake a successful pushkey reply so the client is satisfied even
    # though the key was never written to the repo.
    if op.reply is not None:
        reply = op.reply.newpart('reply:pushkey')
        reply.addparam('in-reply-to', str(part.id), mandatory=False)
        reply.addparam('return', '1', mandatory=False)
    return 1
1155 1156
def bundle2handlephases(orig, op, part):
    '''Wrapper of bundle2.handlephases()

    The only goal is to skip calling the original function if flag is set.
    It's set if infinitepush push is happening.
    '''
    skip = op.records[scratchbranchparttype + '_skipphaseheads']
    if skip:
        return
    return orig(op, part)
1167 1168
def _asyncsavemetadata(root, nodes):
    '''starts a separate process that fills metadata for the nodes

    This function creates a separate process and doesn't wait for it's
    completion. This was done to avoid slowing down pushes
    '''

    # skip entirely for big pushes to keep cost bounded
    maxnodes = 50
    if len(nodes) > maxnodes:
        return
    nodesargs = []
    for node in nodes:
        nodesargs.append('--node')
        nodesargs.append(node)
    with open(os.devnull, 'w+b') as devnull:
        cmdline = [util.hgexecutable(), 'debugfillinfinitepushmetadata',
                   '-R', root] + nodesargs
        # Process will run in background. We don't care about the return code
        subprocess.Popen(cmdline, close_fds=True, shell=False,
                         stdin=devnull, stdout=devnull, stderr=devnull)
General Comments 0
You need to be logged in to leave comments. Login now