infinitepush: remove pycompat.iteritems()...
Gregory Szorc
r49770:78911231 default
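This changeset is part of the wider removal of Python 2 compatibility shims: with Python 2 support gone, `pycompat.iteritems(d)` is equivalent to `d.items()`, so the shim can be dropped at its call sites. The single call site touched in this file is in `wireprotolistkeyspatterns` (line 408 below):

    # before: Python 2/3 compatibility shim
    d = pycompat.iteritems(repo.listkeys(encoding.tolocal(namespace), patterns))
    # after: plain dict method, Python 3 only
    d = repo.listkeys(encoding.tolocal(namespace), patterns).items()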
@@ -1,1387 +1,1387 @@
1 1 # Infinite push
2 2 #
3 3 # Copyright 2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """ store some pushes in a remote blob store on the server (EXPERIMENTAL)
8 8
9 9 IMPORTANT: if you use this extension, please contact
10 10 mercurial-devel@mercurial-scm.org ASAP. This extension is believed to
11 11 be unused and barring learning of users of this functionality, we will
12 12 delete this code at the end of 2020.
13 13
14 14 [infinitepush]
15 15 # Server-side and client-side option. Pattern of the infinitepush bookmark
16 16 branchpattern = PATTERN
17 17
18 18 # Server or client
19 19 server = False
20 20
21 21 # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
22 22 indextype = disk
23 23
24 24 # Server-side option. Used only if indextype=sql.
25 25 # Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
26 26 sqlhost = IP:PORT:DB_NAME:USER:PASSWORD
27 27
28 28 # Server-side option. Used only if indextype=disk.
29 29 # Filesystem path to the index store
30 30 indexpath = PATH
31 31
32 32 # Server-side option. Possible values: 'disk' or 'external'
33 33 # Fails if not set
34 34 storetype = disk
35 35
36 36 # Server-side option.
37 37 # Path to the binary that will save bundles to the bundlestore
38 38 # Formatted cmd line will be passed to it (see `put_args`)
39 39 put_binary = put
40 40
41 41 # Server-side option. Used only if storetype=external.
42 42 # Format cmd-line string for put binary. Placeholder: {filename}
43 43 put_args = {filename}
44 44
45 45 # Server-side option.
46 46 # Path to the binary that gets bundles from the bundlestore.
47 47 # Formatted cmd line will be passed to it (see `get_args`)
48 48 get_binary = get
49 49
50 50 # Server-side option. Used only if storetype=external.
51 51 # Format cmd-line string for get binary. Placeholders: {filename} {handle}
52 52 get_args = {filename} {handle}
53 53
54 54 # Server-side option
55 55 logfile = FILE
56 56
57 57 # Server-side option
58 58 loglevel = DEBUG
59 59
60 60 # Server-side option. Used only if indextype=sql.
61 61 # Sets mysql wait_timeout option.
62 62 waittimeout = 300
63 63
64 64 # Server-side option. Used only if indextype=sql.
65 65 # Sets mysql innodb_lock_wait_timeout option.
66 66 locktimeout = 120
67 67
68 68 # Server-side option. Used only if indextype=sql.
69 69 # Name of the repository
70 70 reponame = ''
71 71
72 72 # Client-side option. Used by --list-remote option. List of remote scratch
73 73 # patterns to list if no patterns are specified.
74 74 defaultremotepatterns = ['*']
75 75
76 76 # Instructs infinitepush to forward all received bundle2 parts to the
77 77 # bundle for storage. Defaults to False.
78 78 storeallparts = True
79 79
80 80 # Routes each incoming push to the bundlestore. Defaults to False
81 81 pushtobundlestore = True
82 82
83 83 [remotenames]
84 84 # Client-side option
85 85 # This option should be set only if remotenames extension is enabled.
86 86 # Whether remote bookmarks are tracked by remotenames extension.
87 87 bookmarks = True
88 88 """
89 89
90 90
91 91 import collections
92 92 import contextlib
93 93 import errno
94 94 import functools
95 95 import logging
96 96 import os
97 97 import random
98 98 import re
99 99 import socket
100 100 import subprocess
101 101 import time
102 102
103 103 from mercurial.node import (
104 104 bin,
105 105 hex,
106 106 )
107 107
108 108 from mercurial.i18n import _
109 109
110 110 from mercurial.pycompat import (
111 111 getattr,
112 112 open,
113 113 )
114 114
115 115 from mercurial.utils import (
116 116 procutil,
117 117 stringutil,
118 118 urlutil,
119 119 )
120 120
121 121 from mercurial import (
122 122 bundle2,
123 123 changegroup,
124 124 commands,
125 125 discovery,
126 126 encoding,
127 127 error,
128 128 exchange,
129 129 extensions,
130 130 hg,
131 131 localrepo,
132 132 phases,
133 133 pushkey,
134 134 pycompat,
135 135 registrar,
136 136 util,
137 137 wireprototypes,
138 138 wireprotov1peer,
139 139 wireprotov1server,
140 140 )
141 141
142 142 from . import (
143 143 bundleparts,
144 144 common,
145 145 )
146 146
147 147 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
148 148 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
149 149 # be specifying the version(s) of Mercurial they are tested with, or
150 150 # leave the attribute unspecified.
151 151 testedwith = b'ships-with-hg-core'
152 152
153 153 configtable = {}
154 154 configitem = registrar.configitem(configtable)
155 155
156 156 configitem(
157 157 b'infinitepush',
158 158 b'server',
159 159 default=False,
160 160 )
161 161 configitem(
162 162 b'infinitepush',
163 163 b'storetype',
164 164 default=b'',
165 165 )
166 166 configitem(
167 167 b'infinitepush',
168 168 b'indextype',
169 169 default=b'',
170 170 )
171 171 configitem(
172 172 b'infinitepush',
173 173 b'indexpath',
174 174 default=b'',
175 175 )
176 176 configitem(
177 177 b'infinitepush',
178 178 b'storeallparts',
179 179 default=False,
180 180 )
181 181 configitem(
182 182 b'infinitepush',
183 183 b'reponame',
184 184 default=b'',
185 185 )
186 186 configitem(
187 187 b'scratchbranch',
188 188 b'storepath',
189 189 default=b'',
190 190 )
191 191 configitem(
192 192 b'infinitepush',
193 193 b'branchpattern',
194 194 default=b'',
195 195 )
196 196 configitem(
197 197 b'infinitepush',
198 198 b'pushtobundlestore',
199 199 default=False,
200 200 )
201 201 configitem(
202 202 b'experimental',
203 203 b'server-bundlestore-bookmark',
204 204 default=b'',
205 205 )
206 206 configitem(
207 207 b'experimental',
208 208 b'infinitepush-scratchpush',
209 209 default=False,
210 210 )
211 211
212 212 experimental = b'experimental'
213 213 configbookmark = b'server-bundlestore-bookmark'
214 214 configscratchpush = b'infinitepush-scratchpush'
215 215
216 216 scratchbranchparttype = bundleparts.scratchbranchparttype
217 217 revsetpredicate = registrar.revsetpredicate()
218 218 templatekeyword = registrar.templatekeyword()
219 219 _scratchbranchmatcher = lambda x: False
220 220 _maybehash = re.compile('^[a-f0-9]+$').search
221 221
222 222
223 223 def _buildexternalbundlestore(ui):
224 224 put_args = ui.configlist(b'infinitepush', b'put_args', [])
225 225 put_binary = ui.config(b'infinitepush', b'put_binary')
226 226 if not put_binary:
227 227 raise error.Abort(b'put binary is not specified')
228 228 get_args = ui.configlist(b'infinitepush', b'get_args', [])
229 229 get_binary = ui.config(b'infinitepush', b'get_binary')
230 230 if not get_binary:
231 231 raise error.Abort(b'get binary is not specified')
232 232 from . import store
233 233
234 234 return store.externalbundlestore(put_binary, put_args, get_binary, get_args)
235 235
236 236
237 237 def _buildsqlindex(ui):
238 238 sqlhost = ui.config(b'infinitepush', b'sqlhost')
239 239 if not sqlhost:
240 240 raise error.Abort(_(b'please set infinitepush.sqlhost'))
241 241 host, port, db, user, password = sqlhost.split(b':')
242 242 reponame = ui.config(b'infinitepush', b'reponame')
243 243 if not reponame:
244 244 raise error.Abort(_(b'please set infinitepush.reponame'))
245 245
246 246 logfile = ui.config(b'infinitepush', b'logfile', b'')
247 247 waittimeout = ui.configint(b'infinitepush', b'waittimeout', 300)
248 248 locktimeout = ui.configint(b'infinitepush', b'locktimeout', 120)
249 249 from . import sqlindexapi
250 250
251 251 return sqlindexapi.sqlindexapi(
252 252 reponame,
253 253 host,
254 254 port,
255 255 db,
256 256 user,
257 257 password,
258 258 logfile,
259 259 _getloglevel(ui),
260 260 waittimeout=waittimeout,
261 261 locktimeout=locktimeout,
262 262 )
263 263
264 264
265 265 def _getloglevel(ui):
266 266 loglevel = ui.config(b'infinitepush', b'loglevel', b'DEBUG')
267 267 numeric_loglevel = getattr(logging, loglevel.upper(), None)
268 268 if not isinstance(numeric_loglevel, int):
269 269 raise error.Abort(_(b'invalid log level %s') % loglevel)
270 270 return numeric_loglevel
271 271
272 272
273 273 def _tryhoist(ui, remotebookmark):
274 274 """returns a bookmarks with hoisted part removed
275 275
276 276 The remotenames extension has a 'hoist' config that allows using remote
277 277 bookmarks without specifying a remote path. For example, 'hg update master'
278 278 works as well as 'hg update remote/master'. We want to allow the same in
279 279 infinitepush.
280 280 """
281 281
282 282 if common.isremotebooksenabled(ui):
283 283 hoist = ui.config(b'remotenames', b'hoistedpeer') + b'/'
284 284 if remotebookmark.startswith(hoist):
285 285 return remotebookmark[len(hoist) :]
286 286 return remotebookmark
287 287
288 288
289 289 class bundlestore(object):
290 290 def __init__(self, repo):
291 291 self._repo = repo
292 292 storetype = self._repo.ui.config(b'infinitepush', b'storetype')
293 293 if storetype == b'disk':
294 294 from . import store
295 295
296 296 self.store = store.filebundlestore(self._repo.ui, self._repo)
297 297 elif storetype == b'external':
298 298 self.store = _buildexternalbundlestore(self._repo.ui)
299 299 else:
300 300 raise error.Abort(
301 301 _(b'unknown infinitepush store type specified %s') % storetype
302 302 )
303 303
304 304 indextype = self._repo.ui.config(b'infinitepush', b'indextype')
305 305 if indextype == b'disk':
306 306 from . import fileindexapi
307 307
308 308 self.index = fileindexapi.fileindexapi(self._repo)
309 309 elif indextype == b'sql':
310 310 self.index = _buildsqlindex(self._repo.ui)
311 311 else:
312 312 raise error.Abort(
313 313 _(b'unknown infinitepush index type specified %s') % indextype
314 314 )
315 315
316 316
317 317 def _isserver(ui):
318 318 return ui.configbool(b'infinitepush', b'server')
319 319
320 320
321 321 def reposetup(ui, repo):
322 322 if _isserver(ui) and repo.local():
323 323 repo.bundlestore = bundlestore(repo)
324 324
325 325
326 326 def extsetup(ui):
327 327 commonsetup(ui)
328 328 if _isserver(ui):
329 329 serverextsetup(ui)
330 330 else:
331 331 clientextsetup(ui)
332 332
333 333
334 334 def commonsetup(ui):
335 335 wireprotov1server.commands[b'listkeyspatterns'] = (
336 336 wireprotolistkeyspatterns,
337 337 b'namespace patterns',
338 338 )
339 339 scratchbranchpat = ui.config(b'infinitepush', b'branchpattern')
340 340 if scratchbranchpat:
341 341 global _scratchbranchmatcher
342 342 kind, pat, _scratchbranchmatcher = stringutil.stringmatcher(
343 343 scratchbranchpat
344 344 )
345 345
346 346
347 347 def serverextsetup(ui):
348 348 origpushkeyhandler = bundle2.parthandlermapping[b'pushkey']
349 349
350 350 def newpushkeyhandler(*args, **kwargs):
351 351 bundle2pushkey(origpushkeyhandler, *args, **kwargs)
352 352
353 353 newpushkeyhandler.params = origpushkeyhandler.params
354 354 bundle2.parthandlermapping[b'pushkey'] = newpushkeyhandler
355 355
356 356 orighandlephasehandler = bundle2.parthandlermapping[b'phase-heads']
357 357 newphaseheadshandler = lambda *args, **kwargs: bundle2handlephases(
358 358 orighandlephasehandler, *args, **kwargs
359 359 )
360 360 newphaseheadshandler.params = orighandlephasehandler.params
361 361 bundle2.parthandlermapping[b'phase-heads'] = newphaseheadshandler
362 362
363 363 extensions.wrapfunction(
364 364 localrepo.localrepository, b'listkeys', localrepolistkeys
365 365 )
366 366 wireprotov1server.commands[b'lookup'] = (
367 367 _lookupwrap(wireprotov1server.commands[b'lookup'][0]),
368 368 b'key',
369 369 )
370 370 extensions.wrapfunction(exchange, b'getbundlechunks', getbundlechunks)
371 371
372 372 extensions.wrapfunction(bundle2, b'processparts', processparts)
373 373
374 374
375 375 def clientextsetup(ui):
376 376 entry = extensions.wrapcommand(commands.table, b'push', _push)
377 377
378 378 entry[1].append(
379 379 (
380 380 b'',
381 381 b'bundle-store',
382 382 None,
383 383 _(b'force push to go to bundle store (EXPERIMENTAL)'),
384 384 )
385 385 )
386 386
387 387 extensions.wrapcommand(commands.table, b'pull', _pull)
388 388
389 389 extensions.wrapfunction(discovery, b'checkheads', _checkheads)
390 390
391 391 wireprotov1peer.wirepeer.listkeyspatterns = listkeyspatterns
392 392
393 393 partorder = exchange.b2partsgenorder
394 394 index = partorder.index(b'changeset')
395 395 partorder.insert(
396 396 index, partorder.pop(partorder.index(scratchbranchparttype))
397 397 )
398 398
399 399
400 400 def _checkheads(orig, pushop):
401 401 if pushop.ui.configbool(experimental, configscratchpush, False):
402 402 return
403 403 return orig(pushop)
404 404
405 405
406 406 def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
407 407 patterns = wireprototypes.decodelist(patterns)
408 - d = pycompat.iteritems(repo.listkeys(encoding.tolocal(namespace), patterns))
408 + d = repo.listkeys(encoding.tolocal(namespace), patterns).items()
409 409 return pushkey.encodekeys(d)
410 410
411 411
412 412 def localrepolistkeys(orig, self, namespace, patterns=None):
413 413 if namespace == b'bookmarks' and patterns:
414 414 index = self.bundlestore.index
415 415 results = {}
416 416 bookmarks = orig(self, namespace)
417 417 for pattern in patterns:
418 418 results.update(index.getbookmarks(pattern))
419 419 if pattern.endswith(b'*'):
420 420 pattern = b're:^' + pattern[:-1] + b'.*'
421 421 kind, pat, matcher = stringutil.stringmatcher(pattern)
422 422 for bookmark, node in bookmarks.items():
423 423 if matcher(bookmark):
424 424 results[bookmark] = node
425 425 return results
426 426 else:
427 427 return orig(self, namespace)
428 428
429 429
430 430 @wireprotov1peer.batchable
431 431 def listkeyspatterns(self, namespace, patterns):
432 432 if not self.capable(b'pushkey'):
433 433 return {}, None
434 434 self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
435 435
436 436 def decode(d):
437 437 self.ui.debug(
438 438 b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
439 439 )
440 440 return pushkey.decodekeys(d)
441 441
442 442 return {
443 443 b'namespace': encoding.fromlocal(namespace),
444 444 b'patterns': wireprototypes.encodelist(patterns),
445 445 }, decode
446 446
447 447
448 448 def _readbundlerevs(bundlerepo):
449 449 return list(bundlerepo.revs(b'bundle()'))
450 450
451 451
452 452 def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
453 453 """Tells remotefilelog to include all changed files to the changegroup
454 454
455 455 By default remotefilelog doesn't include file content to the changegroup.
456 456 But we need to include it if we are fetching from bundlestore.
457 457 """
458 458 changedfiles = set()
459 459 cl = bundlerepo.changelog
460 460 for r in bundlerevs:
461 461 # [3] means changed files
462 462 changedfiles.update(cl.read(r)[3])
463 463 if not changedfiles:
464 464 return bundlecaps
465 465
466 466 changedfiles = b'\0'.join(changedfiles)
467 467 newcaps = []
468 468 appended = False
469 469 for cap in bundlecaps or []:
470 470 if cap.startswith(b'excludepattern='):
471 471 newcaps.append(b'\0'.join((cap, changedfiles)))
472 472 appended = True
473 473 else:
474 474 newcaps.append(cap)
475 475 if not appended:
476 476 # No excludepattern cap was found. Just append one
477 477 newcaps.append(b'excludepattern=' + changedfiles)
478 478
479 479 return newcaps
480 480
481 481
482 482 def _rebundle(bundlerepo, bundleroots, unknownhead):
483 483 """
484 484 A bundle may include more revisions than the user requested; for example,
485 485 if the user asks for one revision, the bundle may also contain its descendants.
486 486 This function filters out all revisions that the user did not request.
487 487 """
488 488 parts = []
489 489
490 490 version = b'02'
491 491 outgoing = discovery.outgoing(
492 492 bundlerepo, commonheads=bundleroots, ancestorsof=[unknownhead]
493 493 )
494 494 cgstream = changegroup.makestream(bundlerepo, outgoing, version, b'pull')
495 495 cgstream = util.chunkbuffer(cgstream).read()
496 496 cgpart = bundle2.bundlepart(b'changegroup', data=cgstream)
497 497 cgpart.addparam(b'version', version)
498 498 parts.append(cgpart)
499 499
500 500 return parts
501 501
502 502
503 503 def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
504 504 cl = bundlerepo.changelog
505 505 bundleroots = []
506 506 for rev in bundlerevs:
507 507 node = cl.node(rev)
508 508 parents = cl.parents(node)
509 509 for parent in parents:
510 510 # include all revs that exist in the main repo
511 511 # to make sure that the bundle can apply client-side
512 512 if parent in oldrepo:
513 513 bundleroots.append(parent)
514 514 return bundleroots
515 515
516 516
517 517 def _needsrebundling(head, bundlerepo):
518 518 bundleheads = list(bundlerepo.revs(b'heads(bundle())'))
519 519 return not (
520 520 len(bundleheads) == 1 and bundlerepo[bundleheads[0]].node() == head
521 521 )
522 522
523 523
524 524 def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
525 525 """generates bundle that will be send to the user
526 526
527 527 returns tuple with raw bundle string and bundle type
528 528 """
529 529 parts = []
530 530 if not _needsrebundling(head, bundlerepo):
531 531 with util.posixfile(bundlefile, b"rb") as f:
532 532 unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
533 533 if isinstance(unbundler, changegroup.cg1unpacker):
534 534 part = bundle2.bundlepart(
535 535 b'changegroup', data=unbundler._stream.read()
536 536 )
537 537 part.addparam(b'version', b'01')
538 538 parts.append(part)
539 539 elif isinstance(unbundler, bundle2.unbundle20):
540 540 haschangegroup = False
541 541 for part in unbundler.iterparts():
542 542 if part.type == b'changegroup':
543 543 haschangegroup = True
544 544 newpart = bundle2.bundlepart(part.type, data=part.read())
545 545 for key, value in part.params.items():
546 546 newpart.addparam(key, value)
547 547 parts.append(newpart)
548 548
549 549 if not haschangegroup:
550 550 raise error.Abort(
551 551 b'unexpected bundle without changegroup part, '
552 552 + b'head: %s' % hex(head),
553 553 hint=b'report to administrator',
554 554 )
555 555 else:
556 556 raise error.Abort(b'unknown bundle type')
557 557 else:
558 558 parts = _rebundle(bundlerepo, bundleroots, head)
559 559
560 560 return parts
561 561
562 562
563 563 def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
564 564 heads = heads or []
565 565 # newheads are parents of roots of scratch bundles that were requested
566 566 newphases = {}
567 567 scratchbundles = []
568 568 newheads = []
569 569 scratchheads = []
570 570 nodestobundle = {}
571 571 allbundlestocleanup = []
572 572 try:
573 573 for head in heads:
574 574 if not repo.changelog.index.has_node(head):
575 575 if head not in nodestobundle:
576 576 newbundlefile = common.downloadbundle(repo, head)
577 577 bundlepath = b"bundle:%s+%s" % (repo.root, newbundlefile)
578 578 bundlerepo = hg.repository(repo.ui, bundlepath)
579 579
580 580 allbundlestocleanup.append((bundlerepo, newbundlefile))
581 581 bundlerevs = set(_readbundlerevs(bundlerepo))
582 582 bundlecaps = _includefilelogstobundle(
583 583 bundlecaps, bundlerepo, bundlerevs, repo.ui
584 584 )
585 585 cl = bundlerepo.changelog
586 586 bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
587 587 for rev in bundlerevs:
588 588 node = cl.node(rev)
589 589 newphases[hex(node)] = str(phases.draft)
590 590 nodestobundle[node] = (
591 591 bundlerepo,
592 592 bundleroots,
593 593 newbundlefile,
594 594 )
595 595
596 596 scratchbundles.append(
597 597 _generateoutputparts(head, *nodestobundle[head])
598 598 )
599 599 newheads.extend(bundleroots)
600 600 scratchheads.append(head)
601 601 finally:
602 602 for bundlerepo, bundlefile in allbundlestocleanup:
603 603 bundlerepo.close()
604 604 try:
605 605 os.unlink(bundlefile)
606 606 except (IOError, OSError):
607 607 # if we can't clean up the file then just ignore the error,
608 608 # no need to fail
609 609 pass
610 610
611 611 pullfrombundlestore = bool(scratchbundles)
612 612 wrappedchangegrouppart = False
613 613 wrappedlistkeys = False
614 614 oldchangegrouppart = exchange.getbundle2partsmapping[b'changegroup']
615 615 try:
616 616
617 617 def _changegrouppart(bundler, *args, **kwargs):
618 618 # Order is important here. First add non-scratch part
619 619 # and only then add parts with scratch bundles because
620 620 # non-scratch part contains parents of roots of scratch bundles.
621 621 result = oldchangegrouppart(bundler, *args, **kwargs)
622 622 for bundle in scratchbundles:
623 623 for part in bundle:
624 624 bundler.addpart(part)
625 625 return result
626 626
627 627 exchange.getbundle2partsmapping[b'changegroup'] = _changegrouppart
628 628 wrappedchangegrouppart = True
629 629
630 630 def _listkeys(orig, self, namespace):
631 631 origvalues = orig(self, namespace)
632 632 if namespace == b'phases' and pullfrombundlestore:
633 633 if origvalues.get(b'publishing') == b'True':
634 634 # Make repo non-publishing to preserve draft phase
635 635 del origvalues[b'publishing']
636 636 origvalues.update(newphases)
637 637 return origvalues
638 638
639 639 extensions.wrapfunction(
640 640 localrepo.localrepository, b'listkeys', _listkeys
641 641 )
642 642 wrappedlistkeys = True
643 643 heads = list((set(newheads) | set(heads)) - set(scratchheads))
644 644 result = orig(
645 645 repo, source, heads=heads, bundlecaps=bundlecaps, **kwargs
646 646 )
647 647 finally:
648 648 if wrappedchangegrouppart:
649 649 exchange.getbundle2partsmapping[b'changegroup'] = oldchangegrouppart
650 650 if wrappedlistkeys:
651 651 extensions.unwrapfunction(
652 652 localrepo.localrepository, b'listkeys', _listkeys
653 653 )
654 654 return result
655 655
656 656
657 657 def _lookupwrap(orig):
658 658 def _lookup(repo, proto, key):
659 659 localkey = encoding.tolocal(key)
660 660
661 661 if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
662 662 scratchnode = repo.bundlestore.index.getnode(localkey)
663 663 if scratchnode:
664 664 return b"%d %s\n" % (1, scratchnode)
665 665 else:
666 666 return b"%d %s\n" % (
667 667 0,
668 668 b'scratch branch %s not found' % localkey,
669 669 )
670 670 else:
671 671 try:
672 672 r = hex(repo.lookup(localkey))
673 673 return b"%d %s\n" % (1, r)
674 674 except Exception as inst:
675 675 if repo.bundlestore.index.getbundle(localkey):
676 676 return b"%d %s\n" % (1, localkey)
677 677 else:
678 678 r = stringutil.forcebytestr(inst)
679 679 return b"%d %s\n" % (0, r)
680 680
681 681 return _lookup
682 682
683 683
684 684 def _pull(orig, ui, repo, source=b"default", **opts):
685 685 opts = pycompat.byteskwargs(opts)
686 686 # Copied from the `pull` command
687 687 source, branches = urlutil.get_unique_pull_path(
688 688 b"infinite-push's pull",
689 689 repo,
690 690 ui,
691 691 source,
692 692 default_branches=opts.get(b'branch'),
693 693 )
694 694
695 695 scratchbookmarks = {}
696 696 unfi = repo.unfiltered()
697 697 unknownnodes = []
698 698 for rev in opts.get(b'rev', []):
699 699 if rev not in unfi:
700 700 unknownnodes.append(rev)
701 701 if opts.get(b'bookmark'):
702 702 bookmarks = []
703 703 revs = opts.get(b'rev') or []
704 704 for bookmark in opts.get(b'bookmark'):
705 705 if _scratchbranchmatcher(bookmark):
706 706 # rev is not known yet
707 707 # it will be fetched with listkeyspatterns next
708 708 scratchbookmarks[bookmark] = b'REVTOFETCH'
709 709 else:
710 710 bookmarks.append(bookmark)
711 711
712 712 if scratchbookmarks:
713 713 other = hg.peer(repo, opts, source)
714 714 try:
715 715 fetchedbookmarks = other.listkeyspatterns(
716 716 b'bookmarks', patterns=scratchbookmarks
717 717 )
718 718 for bookmark in scratchbookmarks:
719 719 if bookmark not in fetchedbookmarks:
720 720 raise error.Abort(
721 721 b'remote bookmark %s not found!' % bookmark
722 722 )
723 723 scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
724 724 revs.append(fetchedbookmarks[bookmark])
725 725 finally:
726 726 other.close()
727 727 opts[b'bookmark'] = bookmarks
728 728 opts[b'rev'] = revs
729 729
730 730 if scratchbookmarks or unknownnodes:
731 731 # Set anyincoming to True
732 732 extensions.wrapfunction(
733 733 discovery, b'findcommonincoming', _findcommonincoming
734 734 )
735 735 try:
736 736 # Remote scratch bookmarks will be deleted because remotenames doesn't
737 737 # know about them. Let's save them before the pull and restore them after
738 738 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
739 739 result = orig(ui, repo, source, **pycompat.strkwargs(opts))
740 740 # TODO(stash): race condition is possible
741 741 # if scratch bookmarks were updated right after orig.
742 742 # But that's unlikely and shouldn't be harmful.
743 743 if common.isremotebooksenabled(ui):
744 744 remotescratchbookmarks.update(scratchbookmarks)
745 745 _saveremotebookmarks(repo, remotescratchbookmarks, source)
746 746 else:
747 747 _savelocalbookmarks(repo, scratchbookmarks)
748 748 return result
749 749 finally:
750 750 if scratchbookmarks:
751 751 extensions.unwrapfunction(discovery, b'findcommonincoming')
752 752
753 753
754 754 def _readscratchremotebookmarks(ui, repo, other):
755 755 if common.isremotebooksenabled(ui):
756 756 remotenamesext = extensions.find(b'remotenames')
757 757 remotepath = remotenamesext.activepath(repo.ui, other)
758 758 result = {}
759 759 # Let's refresh remotenames to make sure they are up to date.
760 760 # It seems that `repo.names['remotebookmarks']` may return stale bookmarks,
761 761 # which results in deleting scratch bookmarks. Our best guess at how to
762 762 # fix it is to use `clearnames()`
763 763 repo._remotenames.clearnames()
764 764 for remotebookmark in repo.names[b'remotebookmarks'].listnames(repo):
765 765 path, bookname = remotenamesext.splitremotename(remotebookmark)
766 766 if path == remotepath and _scratchbranchmatcher(bookname):
767 767 nodes = repo.names[b'remotebookmarks'].nodes(
768 768 repo, remotebookmark
769 769 )
770 770 if nodes:
771 771 result[bookname] = hex(nodes[0])
772 772 return result
773 773 else:
774 774 return {}
775 775
776 776
777 777 def _saveremotebookmarks(repo, newbookmarks, remote):
778 778 remotenamesext = extensions.find(b'remotenames')
779 779 remotepath = remotenamesext.activepath(repo.ui, remote)
780 780 branches = collections.defaultdict(list)
781 781 bookmarks = {}
782 782 remotenames = remotenamesext.readremotenames(repo)
783 783 for hexnode, nametype, remote, rname in remotenames:
784 784 if remote != remotepath:
785 785 continue
786 786 if nametype == b'bookmarks':
787 787 if rname in newbookmarks:
788 788 # This can happen if a normal bookmark matches the
789 789 # scratch branch pattern. In that case just use the current
790 790 # bookmark node
791 791 del newbookmarks[rname]
792 792 bookmarks[rname] = hexnode
793 793 elif nametype == b'branches':
794 794 # saveremotenames expects 20 byte binary nodes for branches
795 795 branches[rname].append(bin(hexnode))
796 796
797 797 for bookmark, hexnode in newbookmarks.items():
798 798 bookmarks[bookmark] = hexnode
799 799 remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)
800 800
801 801
802 802 def _savelocalbookmarks(repo, bookmarks):
803 803 if not bookmarks:
804 804 return
805 805 with repo.wlock(), repo.lock(), repo.transaction(b'bookmark') as tr:
806 806 changes = []
807 807 for scratchbook, node in bookmarks.items():
808 808 changectx = repo[node]
809 809 changes.append((scratchbook, changectx.node()))
810 810 repo._bookmarks.applychanges(repo, tr, changes)
811 811
812 812
813 813 def _findcommonincoming(orig, *args, **kwargs):
814 814 common, inc, remoteheads = orig(*args, **kwargs)
815 815 return common, True, remoteheads
816 816
817 817
818 818 def _push(orig, ui, repo, *dests, **opts):
819 819 opts = pycompat.byteskwargs(opts)
820 820 bookmark = opts.get(b'bookmark')
821 821 # we only support pushing one infinitepush bookmark at a time
822 822 if len(bookmark) == 1:
823 823 bookmark = bookmark[0]
824 824 else:
825 825 bookmark = b''
826 826
827 827 oldphasemove = None
828 828 overrides = {(experimental, configbookmark): bookmark}
829 829
830 830 with ui.configoverride(overrides, b'infinitepush'):
831 831 scratchpush = opts.get(b'bundle_store')
832 832 if _scratchbranchmatcher(bookmark):
833 833 scratchpush = True
834 834 # bundle2 can be sent back after push (for example, bundle2
835 835 # containing `pushkey` part to update bookmarks)
836 836 ui.setconfig(experimental, b'bundle2.pushback', True)
837 837
838 838 if scratchpush:
839 839 # this is an infinitepush; we don't want the bookmark to be applied,
840 840 # rather it should be stored in the bundlestore
841 841 opts[b'bookmark'] = []
842 842 ui.setconfig(experimental, configscratchpush, True)
843 843 oldphasemove = extensions.wrapfunction(
844 844 exchange, b'_localphasemove', _phasemove
845 845 )
846 846
847 847 paths = list(urlutil.get_push_paths(repo, ui, dests))
848 848 if len(paths) > 1:
849 849 msg = _(b'cannot push to multiple paths with infinitepush')
850 850 raise error.Abort(msg)
851 851
852 852 path = paths[0]
853 853 destpath = path.pushloc or path.loc
854 854 # Remote scratch bookmarks will be deleted because remotenames doesn't
855 855 # know about them. Let's save them before the push and restore them after
856 856 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
857 857 result = orig(ui, repo, *dests, **pycompat.strkwargs(opts))
858 858 if common.isremotebooksenabled(ui):
859 859 if bookmark and scratchpush:
860 860 other = hg.peer(repo, opts, destpath)
861 861 try:
862 862 fetchedbookmarks = other.listkeyspatterns(
863 863 b'bookmarks', patterns=[bookmark]
864 864 )
865 865 remotescratchbookmarks.update(fetchedbookmarks)
866 866 finally:
867 867 other.close()
868 868 _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
869 869 if oldphasemove:
870 870 exchange._localphasemove = oldphasemove
871 871 return result
872 872
873 873
874 874 def _deleteinfinitepushbookmarks(ui, repo, path, names):
875 875 """Prune remote names by removing the bookmarks we don't want anymore,
876 876 then writing the result back to disk
877 877 """
878 878 remotenamesext = extensions.find(b'remotenames')
879 879
880 880 # remotename format is:
881 881 # (node, nametype ("branches" or "bookmarks"), remote, name)
882 882 nametype_idx = 1
883 883 remote_idx = 2
884 884 name_idx = 3
885 885 remotenames = [
886 886 remotename
887 887 for remotename in remotenamesext.readremotenames(repo)
888 888 if remotename[remote_idx] == path
889 889 ]
890 890 remote_bm_names = [
891 891 remotename[name_idx]
892 892 for remotename in remotenames
893 893 if remotename[nametype_idx] == b"bookmarks"
894 894 ]
895 895
896 896 for name in names:
897 897 if name not in remote_bm_names:
898 898 raise error.Abort(
899 899 _(
900 900 b"infinitepush bookmark '{}' does not exist "
901 901 b"in path '{}'"
902 902 ).format(name, path)
903 903 )
904 904
905 905 bookmarks = {}
906 906 branches = collections.defaultdict(list)
907 907 for node, nametype, remote, name in remotenames:
908 908 if nametype == b"bookmarks" and name not in names:
909 909 bookmarks[name] = node
910 910 elif nametype == b"branches":
911 911 # saveremotenames wants binary nodes for branches
912 912 branches[name].append(bin(node))
913 913
914 914 remotenamesext.saveremotenames(repo, path, branches, bookmarks)
915 915
916 916
917 917 def _phasemove(orig, pushop, nodes, phase=phases.public):
918 918 """prevent commits from being marked public
919 919
920 920 Since these are going to a scratch branch, they aren't really being
921 921 published."""
922 922
923 923 if phase != phases.public:
924 924 orig(pushop, nodes, phase)
925 925
926 926
927 927 @exchange.b2partsgenerator(scratchbranchparttype)
928 928 def partgen(pushop, bundler):
929 929 bookmark = pushop.ui.config(experimental, configbookmark)
930 930 scratchpush = pushop.ui.configbool(experimental, configscratchpush)
931 931 if b'changesets' in pushop.stepsdone or not scratchpush:
932 932 return
933 933
934 934 if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
935 935 return
936 936
937 937 pushop.stepsdone.add(b'changesets')
938 938 if not pushop.outgoing.missing:
939 939 pushop.ui.status(_(b'no changes found\n'))
940 940 pushop.cgresult = 0
941 941 return
942 942
943 943 # This parameter tells the server that the following bundle is an
944 944 # infinitepush. This lets it switch the part processing to our infinitepush
945 945 # code path.
946 946 bundler.addparam(b"infinitepush", b"True")
947 947
948 948 scratchparts = bundleparts.getscratchbranchparts(
949 949 pushop.repo, pushop.remote, pushop.outgoing, pushop.ui, bookmark
950 950 )
951 951
952 952 for scratchpart in scratchparts:
953 953 bundler.addpart(scratchpart)
954 954
955 955 def handlereply(op):
956 956 # server either succeeds or aborts; no code to read
957 957 pushop.cgresult = 1
958 958
959 959 return handlereply
960 960
961 961
962 962 bundle2.capabilities[bundleparts.scratchbranchparttype] = ()
963 963
964 964
965 965 def _getrevs(bundle, oldnode, force, bookmark):
966 966 b'extracts and validates the revs to be imported'
967 967 revs = [bundle[r] for r in bundle.revs(b'sort(bundle())')]
968 968
969 969 # new bookmark
970 970 if oldnode is None:
971 971 return revs
972 972
973 973 # Fast forward update
974 974 if oldnode in bundle and list(bundle.set(b'bundle() & %s::', oldnode)):
975 975 return revs
976 976
977 977 return revs
978 978
979 979
980 980 @contextlib.contextmanager
981 981 def logservicecall(logger, service, **kwargs):
982 982 start = time.time()
983 983 logger(service, eventtype=b'start', **kwargs)
984 984 try:
985 985 yield
986 986 logger(
987 987 service,
988 988 eventtype=b'success',
989 989 elapsedms=(time.time() - start) * 1000,
990 990 **kwargs
991 991 )
992 992 except Exception as e:
993 993 logger(
994 994 service,
995 995 eventtype=b'failure',
996 996 elapsedms=(time.time() - start) * 1000,
997 997 errormsg=stringutil.forcebytestr(e),
998 998 **kwargs
999 999 )
1000 1000 raise
1001 1001
1002 1002
1003 1003 def _getorcreateinfinitepushlogger(op):
1004 1004 logger = op.records[b'infinitepushlogger']
1005 1005 if not logger:
1006 1006 ui = op.repo.ui
1007 1007 try:
1008 1008 username = procutil.getuser()
1009 1009 except Exception:
1010 1010 username = b'unknown'
1011 1011 # Generate random request id to be able to find all logged entries
1012 1012 # for the same request. Since requestid is pseudo-generated it may
1013 1013 # not be unique, but we assume that (hostname, username, requestid)
1014 1014 # is unique.
1015 1015 random.seed()
1016 1016 requestid = random.randint(0, 2000000000)
1017 1017 hostname = socket.gethostname()
1018 1018 logger = functools.partial(
1019 1019 ui.log,
1020 1020 b'infinitepush',
1021 1021 user=username,
1022 1022 requestid=requestid,
1023 1023 hostname=hostname,
1024 1024 reponame=ui.config(b'infinitepush', b'reponame'),
1025 1025 )
1026 1026 op.records.add(b'infinitepushlogger', logger)
1027 1027 else:
1028 1028 logger = logger[0]
1029 1029 return logger
1030 1030
1031 1031
1032 1032 def storetobundlestore(orig, repo, op, unbundler):
1033 1033 """stores the incoming bundle coming from push command to the bundlestore
1034 1034 instead of applying on the revlogs"""
1035 1035
1036 1036 repo.ui.status(_(b"storing changesets on the bundlestore\n"))
1037 1037 bundler = bundle2.bundle20(repo.ui)
1038 1038
1039 1039 # processing each part and storing it in bundler
1040 1040 with bundle2.partiterator(repo, op, unbundler) as parts:
1041 1041 for part in parts:
1042 1042 bundlepart = None
1043 1043 if part.type == b'replycaps':
1044 1044 # This configures the current operation to allow reply parts.
1045 1045 bundle2._processpart(op, part)
1046 1046 else:
1047 1047 bundlepart = bundle2.bundlepart(part.type, data=part.read())
1048 1048 for key, value in part.params.items():
1049 1049 bundlepart.addparam(key, value)
1050 1050
1051 1051 # Certain parts require a response
1052 1052 if part.type in (b'pushkey', b'changegroup'):
1053 1053 if op.reply is not None:
1054 1054 rpart = op.reply.newpart(b'reply:%s' % part.type)
1055 1055 rpart.addparam(
1056 1056 b'in-reply-to', b'%d' % part.id, mandatory=False
1057 1057 )
1058 1058 rpart.addparam(b'return', b'1', mandatory=False)
1059 1059
1060 1060 op.records.add(
1061 1061 part.type,
1062 1062 {
1063 1063 b'return': 1,
1064 1064 },
1065 1065 )
1066 1066 if bundlepart:
1067 1067 bundler.addpart(bundlepart)
1068 1068
1069 1069 # storing the bundle in the bundlestore
1070 1070 buf = util.chunkbuffer(bundler.getchunks())
1071 1071 fd, bundlefile = pycompat.mkstemp()
1072 1072 try:
1073 1073 try:
1074 1074 fp = os.fdopen(fd, 'wb')
1075 1075 fp.write(buf.read())
1076 1076 finally:
1077 1077 fp.close()
1078 1078 storebundle(op, {}, bundlefile)
1079 1079 finally:
1080 1080 try:
1081 1081 os.unlink(bundlefile)
1082 1082 except Exception:
1083 1083 # we would rather see the original exception
1084 1084 pass
1085 1085
1086 1086
1087 1087 def processparts(orig, repo, op, unbundler):
1088 1088
1089 1089 # make sure we don't wrap processparts in case of `hg unbundle`
1090 1090 if op.source == b'unbundle':
1091 1091 return orig(repo, op, unbundler)
1092 1092
1093 1093 # this server routes each push to bundle store
1094 1094 if repo.ui.configbool(b'infinitepush', b'pushtobundlestore'):
1095 1095 return storetobundlestore(orig, repo, op, unbundler)
1096 1096
1097 1097 if unbundler.params.get(b'infinitepush') != b'True':
1098 1098 return orig(repo, op, unbundler)
1099 1099
1100 1100 handleallparts = repo.ui.configbool(b'infinitepush', b'storeallparts')
1101 1101
1102 1102 bundler = bundle2.bundle20(repo.ui)
1103 1103 cgparams = None
1104 1104 with bundle2.partiterator(repo, op, unbundler) as parts:
1105 1105 for part in parts:
1106 1106 bundlepart = None
1107 1107 if part.type == b'replycaps':
1108 1108 # This configures the current operation to allow reply parts.
1109 1109 bundle2._processpart(op, part)
1110 1110 elif part.type == bundleparts.scratchbranchparttype:
1111 1111 # Scratch branch parts need to be converted to normal
1112 1112 # changegroup parts, and the extra parameters stored for later
1113 1113 # when we upload to the store. Eventually those parameters will
1114 1114 # be put on the actual bundle instead of this part, then we can
1115 1115 # send a vanilla changegroup instead of the scratchbranch part.
1116 1116 cgversion = part.params.get(b'cgversion', b'01')
1117 1117 bundlepart = bundle2.bundlepart(
1118 1118 b'changegroup', data=part.read()
1119 1119 )
1120 1120 bundlepart.addparam(b'version', cgversion)
1121 1121 cgparams = part.params
1122 1122
1123 1123 # If we're not dumping all parts into the new bundle, we need to
1124 1124 # alert the future pushkey and phase-heads handler to skip
1125 1125 # the part.
1126 1126 if not handleallparts:
1127 1127 op.records.add(
1128 1128 scratchbranchparttype + b'_skippushkey', True
1129 1129 )
1130 1130 op.records.add(
1131 1131 scratchbranchparttype + b'_skipphaseheads', True
1132 1132 )
1133 1133 else:
1134 1134 if handleallparts:
1135 1135 # Ideally we would not process any parts, and instead just
1136 1136 # forward them to the bundle for storage, but since this
1137 1137 # differs from previous behavior, we need to put it behind a
1138 1138 # config flag for incremental rollout.
1139 1139 bundlepart = bundle2.bundlepart(part.type, data=part.read())
1140 1140 for key, value in part.params.items():
1141 1141 bundlepart.addparam(key, value)
1142 1142
1143 1143 # Certain parts require a response
1144 1144 if part.type == b'pushkey':
1145 1145 if op.reply is not None:
1146 1146 rpart = op.reply.newpart(b'reply:pushkey')
1147 1147 rpart.addparam(
1148 1148 b'in-reply-to', str(part.id), mandatory=False
1149 1149 )
1150 1150 rpart.addparam(b'return', b'1', mandatory=False)
1151 1151 else:
1152 1152 bundle2._processpart(op, part)
1153 1153
1154 1154 if handleallparts:
1155 1155 op.records.add(
1156 1156 part.type,
1157 1157 {
1158 1158 b'return': 1,
1159 1159 },
1160 1160 )
1161 1161 if bundlepart:
1162 1162 bundler.addpart(bundlepart)
1163 1163
1164 1164 # If commits were sent, store them
1165 1165 if cgparams:
1166 1166 buf = util.chunkbuffer(bundler.getchunks())
1167 1167 fd, bundlefile = pycompat.mkstemp()
1168 1168 try:
1169 1169 try:
1170 1170 fp = os.fdopen(fd, 'wb')
1171 1171 fp.write(buf.read())
1172 1172 finally:
1173 1173 fp.close()
1174 1174 storebundle(op, cgparams, bundlefile)
1175 1175 finally:
1176 1176 try:
1177 1177 os.unlink(bundlefile)
1178 1178 except Exception:
1179 1179 # we would rather see the original exception
1180 1180 pass
1181 1181
1182 1182
1183 1183 def storebundle(op, params, bundlefile):
1184 1184 log = _getorcreateinfinitepushlogger(op)
1185 1185 parthandlerstart = time.time()
1186 1186 log(scratchbranchparttype, eventtype=b'start')
1187 1187 index = op.repo.bundlestore.index
1188 1188 store = op.repo.bundlestore.store
1189 1189 op.records.add(scratchbranchparttype + b'_skippushkey', True)
1190 1190
1191 1191 bundle = None
1192 1192 try: # guards bundle
1193 1193 bundlepath = b"bundle:%s+%s" % (op.repo.root, bundlefile)
1194 1194 bundle = hg.repository(op.repo.ui, bundlepath)
1195 1195
1196 1196 bookmark = params.get(b'bookmark')
1197 1197 bookprevnode = params.get(b'bookprevnode', b'')
1198 1198 force = params.get(b'force')
1199 1199
1200 1200 if bookmark:
1201 1201 oldnode = index.getnode(bookmark)
1202 1202 else:
1203 1203 oldnode = None
1204 1204 bundleheads = bundle.revs(b'heads(bundle())')
1205 1205 if bookmark and len(bundleheads) > 1:
1206 1206 raise error.Abort(
1207 1207 _(b'cannot push more than one head to a scratch branch')
1208 1208 )
1209 1209
1210 1210 revs = _getrevs(bundle, oldnode, force, bookmark)
1211 1211
1212 1212 # Notify the user of what is being pushed
1213 1213 plural = b's' if len(revs) > 1 else b''
1214 1214 op.repo.ui.warn(_(b"pushing %d commit%s:\n") % (len(revs), plural))
1215 1215 maxoutput = 10
1216 1216 for i in range(0, min(len(revs), maxoutput)):
1217 1217 firstline = bundle[revs[i]].description().split(b'\n')[0][:50]
1218 1218 op.repo.ui.warn(b" %s %s\n" % (revs[i], firstline))
1219 1219
1220 1220 if len(revs) > maxoutput + 1:
1221 1221 op.repo.ui.warn(b" ...\n")
1222 1222 firstline = bundle[revs[-1]].description().split(b'\n')[0][:50]
1223 1223 op.repo.ui.warn(b" %s %s\n" % (revs[-1], firstline))
1224 1224
1225 1225 nodesctx = [bundle[rev] for rev in revs]
1226 1226 inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
1227 1227 if bundleheads:
1228 1228 newheadscount = sum(not inindex(rev) for rev in bundleheads)
1229 1229 else:
1230 1230 newheadscount = 0
1231 1231 # If there's a bookmark specified, there should be only one head,
1232 1232 # so we choose the last node, which will be that head.
1233 1233 # If a bug or malicious client allows there to be a bookmark
1234 1234 # with multiple heads, we will place the bookmark on the last head.
1235 1235 bookmarknode = nodesctx[-1].hex() if nodesctx else None
1236 1236 key = None
1237 1237 if newheadscount:
1238 1238 with open(bundlefile, b'rb') as f:
1239 1239 bundledata = f.read()
1240 1240 with logservicecall(
1241 1241 log, b'bundlestore', bundlesize=len(bundledata)
1242 1242 ):
1243 1243 bundlesizelimit = 100 * 1024 * 1024 # 100 MB
1244 1244 if len(bundledata) > bundlesizelimit:
1245 1245 error_msg = (
1246 1246 b'bundle is too big: %d bytes. '
1247 1247 + b'max allowed size is 100 MB'
1248 1248 )
1249 1249 raise error.Abort(error_msg % (len(bundledata),))
1250 1250 key = store.write(bundledata)
1251 1251
1252 1252 with logservicecall(log, b'index', newheadscount=newheadscount), index:
1253 1253 if key:
1254 1254 index.addbundle(key, nodesctx)
1255 1255 if bookmark:
1256 1256 index.addbookmark(bookmark, bookmarknode)
1257 1257 _maybeaddpushbackpart(
1258 1258 op, bookmark, bookmarknode, bookprevnode, params
1259 1259 )
1260 1260 log(
1261 1261 scratchbranchparttype,
1262 1262 eventtype=b'success',
1263 1263 elapsedms=(time.time() - parthandlerstart) * 1000,
1264 1264 )
1265 1265
1266 1266 except Exception as e:
1267 1267 log(
1268 1268 scratchbranchparttype,
1269 1269 eventtype=b'failure',
1270 1270 elapsedms=(time.time() - parthandlerstart) * 1000,
1271 1271 errormsg=stringutil.forcebytestr(e),
1272 1272 )
1273 1273 raise
1274 1274 finally:
1275 1275 if bundle:
1276 1276 bundle.close()
1277 1277
1278 1278
1279 1279 @bundle2.parthandler(
1280 1280 scratchbranchparttype,
1281 1281 (
1282 1282 b'bookmark',
1283 1283 b'bookprevnode',
1284 1284 b'force',
1285 1285 b'pushbackbookmarks',
1286 1286 b'cgversion',
1287 1287 ),
1288 1288 )
1289 1289 def bundle2scratchbranch(op, part):
1290 1290 '''unbundle a bundle2 part containing a changegroup to store'''
1291 1291
1292 1292 bundler = bundle2.bundle20(op.repo.ui)
1293 1293 cgversion = part.params.get(b'cgversion', b'01')
1294 1294 cgpart = bundle2.bundlepart(b'changegroup', data=part.read())
1295 1295 cgpart.addparam(b'version', cgversion)
1296 1296 bundler.addpart(cgpart)
1297 1297 buf = util.chunkbuffer(bundler.getchunks())
1298 1298
1299 1299 fd, bundlefile = pycompat.mkstemp()
1300 1300 try:
1301 1301 try:
1302 1302 fp = os.fdopen(fd, 'wb')
1303 1303 fp.write(buf.read())
1304 1304 finally:
1305 1305 fp.close()
1306 1306 storebundle(op, part.params, bundlefile)
1307 1307 finally:
1308 1308 try:
1309 1309 os.unlink(bundlefile)
1310 1310 except OSError as e:
1311 1311 if e.errno != errno.ENOENT:
1312 1312 raise
1313 1313
1314 1314 return 1
1315 1315
1316 1316
1317 1317 def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
1318 1318 if params.get(b'pushbackbookmarks'):
1319 1319 if op.reply and b'pushback' in op.reply.capabilities:
1320 1320 params = {
1321 1321 b'namespace': b'bookmarks',
1322 1322 b'key': bookmark,
1323 1323 b'new': newnode,
1324 1324 b'old': oldnode,
1325 1325 }
1326 1326 op.reply.newpart(b'pushkey', mandatoryparams=params.items())
1327 1327
1328 1328
1329 1329 def bundle2pushkey(orig, op, part):
1330 1330 """Wrapper of bundle2.handlepushkey()
1331 1331
1332 1332 The only goal is to skip calling the original function if the flag is set.
1333 1333 It's set when an infinitepush push is happening.
1334 1334 """
1335 1335 if op.records[scratchbranchparttype + b'_skippushkey']:
1336 1336 if op.reply is not None:
1337 1337 rpart = op.reply.newpart(b'reply:pushkey')
1338 1338 rpart.addparam(b'in-reply-to', str(part.id), mandatory=False)
1339 1339 rpart.addparam(b'return', b'1', mandatory=False)
1340 1340 return 1
1341 1341
1342 1342 return orig(op, part)
1343 1343
1344 1344
1345 1345 def bundle2handlephases(orig, op, part):
1346 1346 """Wrapper of bundle2.handlephases()
1347 1347
1348 1348 The only goal is to skip calling the original function if the flag is set.
1349 1349 It's set when an infinitepush push is happening.
1350 1350 """
1351 1351
1352 1352 if op.records[scratchbranchparttype + b'_skipphaseheads']:
1353 1353 return
1354 1354
1355 1355 return orig(op, part)
1356 1356
1357 1357
1358 1358 def _asyncsavemetadata(root, nodes):
1359 1359 """starts a separate process that fills metadata for the nodes
1360 1360
1361 1361 This function creates a separate process and doesn't wait for its
1362 1362 completion. This was done to avoid slowing down pushes.
1363 1363 """
1364 1364
1365 1365 maxnodes = 50
1366 1366 if len(nodes) > maxnodes:
1367 1367 return
1368 1368 nodesargs = []
1369 1369 for node in nodes:
1370 1370 nodesargs.append(b'--node')
1371 1371 nodesargs.append(node)
1372 1372 with open(os.devnull, b'w+b') as devnull:
1373 1373 cmdline = [
1374 1374 util.hgexecutable(),
1375 1375 b'debugfillinfinitepushmetadata',
1376 1376 b'-R',
1377 1377 root,
1378 1378 ] + nodesargs
1379 1379 # The process will run in the background. We don't care about the return code
1380 1380 subprocess.Popen(
1381 1381 pycompat.rapply(procutil.tonativestr, cmdline),
1382 1382 close_fds=True,
1383 1383 shell=False,
1384 1384 stdin=devnull,
1385 1385 stdout=devnull,
1386 1386 stderr=devnull,
1387 1387 )
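For reference, a minimal server-side configuration assembled from the options documented in the module docstring above; the index path and branch pattern are illustrative values, not defaults:

    [infinitepush]
    # act as an infinitepush server
    server = True
    # store bundles on local disk
    storetype = disk
    # keep the bookmark/node index on disk as well
    indextype = disk
    # illustrative path; any writable directory works
    indexpath = /var/hg/infinitepush/index
    # illustrative pattern; bookmarks matching it are routed to the bundlestore
    branchpattern = re:scratch/.+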