infinitepush: use `get_unique_pull_path`...
marmoute
r47705:30ee1224 default
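In short: `_pull` previously resolved its pull source with `ui.expandpath` plus `urlutil.parseurl`; this commit switches it to the `urlutil.get_unique_pull_path` helper, which performs path expansion and default-branch resolution in one call. A minimal before/after sketch of the changed call, extracted from the hunk below:

    # before
    source, branches = urlutil.parseurl(
        ui.expandpath(source), opts.get(b'branch')
    )

    # after
    source, branches = urlutil.get_unique_pull_path(
        b"infinite-push's pull",
        repo,
        ui,
        source,
        default_branches=opts.get(b'branch'),
    )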
@@ -1,1385 +1,1389
1 1 # Infinite push
2 2 #
3 3 # Copyright 2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """ store some pushes in a remote blob store on the server (EXPERIMENTAL)
8 8
9 9 IMPORTANT: if you use this extension, please contact
10 10 mercurial-devel@mercurial-scm.org ASAP. This extension is believed to
11 11 be unused and barring learning of users of this functionality, we will
12 12 delete this code at the end of 2020.
13 13
14 14 [infinitepush]
15 15 # Server-side and client-side option. Pattern of the infinitepush bookmark
16 16 branchpattern = PATTERN
17 17
18 18 # Server or client
19 19 server = False
20 20
21 21 # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
22 22 indextype = disk
23 23
24 24 # Server-side option. Used only if indextype=sql.
25 25 # Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
26 26 sqlhost = IP:PORT:DB_NAME:USER:PASSWORD
27 27
28 28 # Server-side option. Used only if indextype=disk.
29 29 # Filesystem path to the index store
30 30 indexpath = PATH
31 31
32 32 # Server-side option. Possible values: 'disk' or 'external'
33 33 # Fails if not set
34 34 storetype = disk
35 35
36 36 # Server-side option.
37 37 # Path to the binary that will save the bundle to the bundlestore
38 38 # Formatted cmd line will be passed to it (see `put_args`)
39 39 put_binary = put
40 40
41 41 # Server-side option. Used only if storetype=external.
42 42 # Format cmd-line string for put binary. Placeholder: {filename}
43 43 put_args = {filename}
44 44
45 45 # Server-side option.
46 46 # Path to the binary that gets the bundle from the bundlestore.
47 47 # Formatted cmd line will be passed to it (see `get_args`)
48 48 get_binary = get
49 49
50 50 # Server-side option. Used only if storetype=external.
51 51 # Format cmd-line string for get binary. Placeholders: {filename} {handle}
52 52 get_args = {filename} {handle}
53 53
54 54 # Server-side option
55 55 logfile = FILE
56 56
57 57 # Server-side option
58 58 loglevel = DEBUG
59 59
60 60 # Server-side option. Used only if indextype=sql.
61 61 # Sets mysql wait_timeout option.
62 62 waittimeout = 300
63 63
64 64 # Server-side option. Used only if indextype=sql.
65 65 # Sets mysql innodb_lock_wait_timeout option.
66 66 locktimeout = 120
67 67
68 68 # Server-side option. Used only if indextype=sql.
69 69 # Name of the repository
70 70 reponame = ''
71 71
72 72 # Client-side option. Used by --list-remote option. List of remote scratch
73 73 # patterns to list if no patterns are specified.
74 74 defaultremotepatterns = ['*']
75 75
76 76 # Instructs infinitepush to forward all received bundle2 parts to the
77 77 # bundle for storage. Defaults to False.
78 78 storeallparts = True
79 79
80 80 # Routes each incoming push to the bundlestore. Defaults to False
81 81 pushtobundlestore = True
82 82
83 83 [remotenames]
84 84 # Client-side option
85 85 # This option should be set only if remotenames extension is enabled.
86 86 # Whether remote bookmarks are tracked by remotenames extension.
87 87 bookmarks = True
88 88 """
89 89
90 90 from __future__ import absolute_import
91 91
92 92 import collections
93 93 import contextlib
94 94 import errno
95 95 import functools
96 96 import logging
97 97 import os
98 98 import random
99 99 import re
100 100 import socket
101 101 import subprocess
102 102 import time
103 103
104 104 from mercurial.node import (
105 105 bin,
106 106 hex,
107 107 )
108 108
109 109 from mercurial.i18n import _
110 110
111 111 from mercurial.pycompat import (
112 112 getattr,
113 113 open,
114 114 )
115 115
116 116 from mercurial.utils import (
117 117 procutil,
118 118 stringutil,
119 119 urlutil,
120 120 )
121 121
122 122 from mercurial import (
123 123 bundle2,
124 124 changegroup,
125 125 commands,
126 126 discovery,
127 127 encoding,
128 128 error,
129 129 exchange,
130 130 extensions,
131 131 hg,
132 132 localrepo,
133 133 phases,
134 134 pushkey,
135 135 pycompat,
136 136 registrar,
137 137 util,
138 138 wireprototypes,
139 139 wireprotov1peer,
140 140 wireprotov1server,
141 141 )
142 142
143 143 from . import (
144 144 bundleparts,
145 145 common,
146 146 )
147 147
148 148 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
149 149 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
150 150 # be specifying the version(s) of Mercurial they are tested with, or
151 151 # leave the attribute unspecified.
152 152 testedwith = b'ships-with-hg-core'
153 153
154 154 configtable = {}
155 155 configitem = registrar.configitem(configtable)
156 156
157 157 configitem(
158 158 b'infinitepush',
159 159 b'server',
160 160 default=False,
161 161 )
162 162 configitem(
163 163 b'infinitepush',
164 164 b'storetype',
165 165 default=b'',
166 166 )
167 167 configitem(
168 168 b'infinitepush',
169 169 b'indextype',
170 170 default=b'',
171 171 )
172 172 configitem(
173 173 b'infinitepush',
174 174 b'indexpath',
175 175 default=b'',
176 176 )
177 177 configitem(
178 178 b'infinitepush',
179 179 b'storeallparts',
180 180 default=False,
181 181 )
182 182 configitem(
183 183 b'infinitepush',
184 184 b'reponame',
185 185 default=b'',
186 186 )
187 187 configitem(
188 188 b'scratchbranch',
189 189 b'storepath',
190 190 default=b'',
191 191 )
192 192 configitem(
193 193 b'infinitepush',
194 194 b'branchpattern',
195 195 default=b'',
196 196 )
197 197 configitem(
198 198 b'infinitepush',
199 199 b'pushtobundlestore',
200 200 default=False,
201 201 )
202 202 configitem(
203 203 b'experimental',
204 204 b'server-bundlestore-bookmark',
205 205 default=b'',
206 206 )
207 207 configitem(
208 208 b'experimental',
209 209 b'infinitepush-scratchpush',
210 210 default=False,
211 211 )
212 212
213 213 experimental = b'experimental'
214 214 configbookmark = b'server-bundlestore-bookmark'
215 215 configscratchpush = b'infinitepush-scratchpush'
216 216
217 217 scratchbranchparttype = bundleparts.scratchbranchparttype
218 218 revsetpredicate = registrar.revsetpredicate()
219 219 templatekeyword = registrar.templatekeyword()
220 220 _scratchbranchmatcher = lambda x: False
221 221 _maybehash = re.compile('^[a-f0-9]+$').search
222 222
223 223
224 224 def _buildexternalbundlestore(ui):
225 225 put_args = ui.configlist(b'infinitepush', b'put_args', [])
226 226 put_binary = ui.config(b'infinitepush', b'put_binary')
227 227 if not put_binary:
228 228 raise error.Abort(b'put binary is not specified')
229 229 get_args = ui.configlist(b'infinitepush', b'get_args', [])
230 230 get_binary = ui.config(b'infinitepush', b'get_binary')
231 231 if not get_binary:
232 232 raise error.Abort(b'get binary is not specified')
233 233 from . import store
234 234
235 235 return store.externalbundlestore(put_binary, put_args, get_binary, get_args)
236 236
237 237
238 238 def _buildsqlindex(ui):
239 239 sqlhost = ui.config(b'infinitepush', b'sqlhost')
240 240 if not sqlhost:
241 241 raise error.Abort(_(b'please set infinitepush.sqlhost'))
242 242 host, port, db, user, password = sqlhost.split(b':')
243 243 reponame = ui.config(b'infinitepush', b'reponame')
244 244 if not reponame:
245 245 raise error.Abort(_(b'please set infinitepush.reponame'))
246 246
247 247 logfile = ui.config(b'infinitepush', b'logfile', b'')
248 248 waittimeout = ui.configint(b'infinitepush', b'waittimeout', 300)
249 249 locktimeout = ui.configint(b'infinitepush', b'locktimeout', 120)
250 250 from . import sqlindexapi
251 251
252 252 return sqlindexapi.sqlindexapi(
253 253 reponame,
254 254 host,
255 255 port,
256 256 db,
257 257 user,
258 258 password,
259 259 logfile,
260 260 _getloglevel(ui),
261 261 waittimeout=waittimeout,
262 262 locktimeout=locktimeout,
263 263 )
264 264
265 265
266 266 def _getloglevel(ui):
267 267 loglevel = ui.config(b'infinitepush', b'loglevel', b'DEBUG')
268 268 numeric_loglevel = getattr(logging, loglevel.upper(), None)
269 269 if not isinstance(numeric_loglevel, int):
270 270 raise error.Abort(_(b'invalid log level %s') % loglevel)
271 271 return numeric_loglevel
272 272
273 273
274 274 def _tryhoist(ui, remotebookmark):
275 275 """returns a bookmarks with hoisted part removed
276 276
277 277 The remotenames extension has a 'hoist' config that allows using remote
278 278 bookmarks without specifying the remote path. For example, 'hg update master'
279 279 works as well as 'hg update remote/master'. We want to allow the same in
280 280 infinitepush.
281 281 """
282 282
283 283 if common.isremotebooksenabled(ui):
284 284 hoist = ui.config(b'remotenames', b'hoistedpeer') + b'/'
285 285 if remotebookmark.startswith(hoist):
286 286 return remotebookmark[len(hoist) :]
287 287 return remotebookmark
288 288
289 289
290 290 class bundlestore(object):
291 291 def __init__(self, repo):
292 292 self._repo = repo
293 293 storetype = self._repo.ui.config(b'infinitepush', b'storetype')
294 294 if storetype == b'disk':
295 295 from . import store
296 296
297 297 self.store = store.filebundlestore(self._repo.ui, self._repo)
298 298 elif storetype == b'external':
299 299 self.store = _buildexternalbundlestore(self._repo.ui)
300 300 else:
301 301 raise error.Abort(
302 302 _(b'unknown infinitepush store type specified %s') % storetype
303 303 )
304 304
305 305 indextype = self._repo.ui.config(b'infinitepush', b'indextype')
306 306 if indextype == b'disk':
307 307 from . import fileindexapi
308 308
309 309 self.index = fileindexapi.fileindexapi(self._repo)
310 310 elif indextype == b'sql':
311 311 self.index = _buildsqlindex(self._repo.ui)
312 312 else:
313 313 raise error.Abort(
314 314 _(b'unknown infinitepush index type specified %s') % indextype
315 315 )
316 316
317 317
318 318 def _isserver(ui):
319 319 return ui.configbool(b'infinitepush', b'server')
320 320
321 321
322 322 def reposetup(ui, repo):
323 323 if _isserver(ui) and repo.local():
324 324 repo.bundlestore = bundlestore(repo)
325 325
326 326
327 327 def extsetup(ui):
328 328 commonsetup(ui)
329 329 if _isserver(ui):
330 330 serverextsetup(ui)
331 331 else:
332 332 clientextsetup(ui)
333 333
334 334
335 335 def commonsetup(ui):
336 336 wireprotov1server.commands[b'listkeyspatterns'] = (
337 337 wireprotolistkeyspatterns,
338 338 b'namespace patterns',
339 339 )
340 340 scratchbranchpat = ui.config(b'infinitepush', b'branchpattern')
341 341 if scratchbranchpat:
342 342 global _scratchbranchmatcher
343 343 kind, pat, _scratchbranchmatcher = stringutil.stringmatcher(
344 344 scratchbranchpat
345 345 )
346 346
347 347
348 348 def serverextsetup(ui):
349 349 origpushkeyhandler = bundle2.parthandlermapping[b'pushkey']
350 350
351 351 def newpushkeyhandler(*args, **kwargs):
352 352 bundle2pushkey(origpushkeyhandler, *args, **kwargs)
353 353
354 354 newpushkeyhandler.params = origpushkeyhandler.params
355 355 bundle2.parthandlermapping[b'pushkey'] = newpushkeyhandler
356 356
357 357 orighandlephasehandler = bundle2.parthandlermapping[b'phase-heads']
358 358 newphaseheadshandler = lambda *args, **kwargs: bundle2handlephases(
359 359 orighandlephasehandler, *args, **kwargs
360 360 )
361 361 newphaseheadshandler.params = orighandlephasehandler.params
362 362 bundle2.parthandlermapping[b'phase-heads'] = newphaseheadshandler
363 363
364 364 extensions.wrapfunction(
365 365 localrepo.localrepository, b'listkeys', localrepolistkeys
366 366 )
367 367 wireprotov1server.commands[b'lookup'] = (
368 368 _lookupwrap(wireprotov1server.commands[b'lookup'][0]),
369 369 b'key',
370 370 )
371 371 extensions.wrapfunction(exchange, b'getbundlechunks', getbundlechunks)
372 372
373 373 extensions.wrapfunction(bundle2, b'processparts', processparts)
374 374
375 375
376 376 def clientextsetup(ui):
377 377 entry = extensions.wrapcommand(commands.table, b'push', _push)
378 378
379 379 entry[1].append(
380 380 (
381 381 b'',
382 382 b'bundle-store',
383 383 None,
384 384 _(b'force push to go to bundle store (EXPERIMENTAL)'),
385 385 )
386 386 )
387 387
388 388 extensions.wrapcommand(commands.table, b'pull', _pull)
389 389
390 390 extensions.wrapfunction(discovery, b'checkheads', _checkheads)
391 391
392 392 wireprotov1peer.wirepeer.listkeyspatterns = listkeyspatterns
393 393
394 394 partorder = exchange.b2partsgenorder
395 395 index = partorder.index(b'changeset')
396 396 partorder.insert(
397 397 index, partorder.pop(partorder.index(scratchbranchparttype))
398 398 )
399 399
400 400
401 401 def _checkheads(orig, pushop):
402 402 if pushop.ui.configbool(experimental, configscratchpush, False):
403 403 return
404 404 return orig(pushop)
405 405
406 406
407 407 def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
408 408 patterns = wireprototypes.decodelist(patterns)
409 409 d = pycompat.iteritems(repo.listkeys(encoding.tolocal(namespace), patterns))
410 410 return pushkey.encodekeys(d)
411 411
412 412
413 413 def localrepolistkeys(orig, self, namespace, patterns=None):
414 414 if namespace == b'bookmarks' and patterns:
415 415 index = self.bundlestore.index
416 416 results = {}
417 417 bookmarks = orig(self, namespace)
418 418 for pattern in patterns:
419 419 results.update(index.getbookmarks(pattern))
420 420 if pattern.endswith(b'*'):
421 421 pattern = b're:^' + pattern[:-1] + b'.*'
422 422 kind, pat, matcher = stringutil.stringmatcher(pattern)
423 423 for bookmark, node in pycompat.iteritems(bookmarks):
424 424 if matcher(bookmark):
425 425 results[bookmark] = node
426 426 return results
427 427 else:
428 428 return orig(self, namespace)
429 429
430 430
431 431 @wireprotov1peer.batchable
432 432 def listkeyspatterns(self, namespace, patterns):
433 433 if not self.capable(b'pushkey'):
434 434 yield {}, None
435 435 f = wireprotov1peer.future()
436 436 self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
437 437 yield {
438 438 b'namespace': encoding.fromlocal(namespace),
439 439 b'patterns': wireprototypes.encodelist(patterns),
440 440 }, f
441 441 d = f.value
442 442 self.ui.debug(
443 443 b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
444 444 )
445 445 yield pushkey.decodekeys(d)
446 446
447 447
448 448 def _readbundlerevs(bundlerepo):
449 449 return list(bundlerepo.revs(b'bundle()'))
450 450
451 451
452 452 def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
453 453 """Tells remotefilelog to include all changed files to the changegroup
454 454
455 455 By default remotefilelog doesn't include file content in the changegroup,
456 456 but we need to include it if we are fetching from the bundlestore.
457 457 """
458 458 changedfiles = set()
459 459 cl = bundlerepo.changelog
460 460 for r in bundlerevs:
461 461 # [3] means changed files
462 462 changedfiles.update(cl.read(r)[3])
463 463 if not changedfiles:
464 464 return bundlecaps
465 465
466 466 changedfiles = b'\0'.join(changedfiles)
467 467 newcaps = []
468 468 appended = False
469 469 for cap in bundlecaps or []:
470 470 if cap.startswith(b'excludepattern='):
471 471 newcaps.append(b'\0'.join((cap, changedfiles)))
472 472 appended = True
473 473 else:
474 474 newcaps.append(cap)
475 475 if not appended:
476 476 # excludepattern cap not found. Just append it
477 477 newcaps.append(b'excludepattern=' + changedfiles)
478 478
479 479 return newcaps
480 480
481 481
482 482 def _rebundle(bundlerepo, bundleroots, unknownhead):
483 483 """
484 484 A bundle may include more revisions than the user requested. For example,
485 485 if the user asks for a revision, the bundle may also contain its descendants.
486 486 This function filters out all revisions that the user did not request.
487 487 """
488 488 parts = []
489 489
490 490 version = b'02'
491 491 outgoing = discovery.outgoing(
492 492 bundlerepo, commonheads=bundleroots, ancestorsof=[unknownhead]
493 493 )
494 494 cgstream = changegroup.makestream(bundlerepo, outgoing, version, b'pull')
495 495 cgstream = util.chunkbuffer(cgstream).read()
496 496 cgpart = bundle2.bundlepart(b'changegroup', data=cgstream)
497 497 cgpart.addparam(b'version', version)
498 498 parts.append(cgpart)
499 499
500 500 return parts
501 501
502 502
503 503 def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
504 504 cl = bundlerepo.changelog
505 505 bundleroots = []
506 506 for rev in bundlerevs:
507 507 node = cl.node(rev)
508 508 parents = cl.parents(node)
509 509 for parent in parents:
510 510 # include all revs that exist in the main repo
511 511 # to make sure that the bundle can be applied client-side
512 512 if parent in oldrepo:
513 513 bundleroots.append(parent)
514 514 return bundleroots
515 515
516 516
517 517 def _needsrebundling(head, bundlerepo):
518 518 bundleheads = list(bundlerepo.revs(b'heads(bundle())'))
519 519 return not (
520 520 len(bundleheads) == 1 and bundlerepo[bundleheads[0]].node() == head
521 521 )
522 522
523 523
524 524 def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
525 525 """generates bundle that will be send to the user
526 526
527 527 returns a tuple with the raw bundle string and the bundle type
528 528 """
529 529 parts = []
530 530 if not _needsrebundling(head, bundlerepo):
531 531 with util.posixfile(bundlefile, b"rb") as f:
532 532 unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
533 533 if isinstance(unbundler, changegroup.cg1unpacker):
534 534 part = bundle2.bundlepart(
535 535 b'changegroup', data=unbundler._stream.read()
536 536 )
537 537 part.addparam(b'version', b'01')
538 538 parts.append(part)
539 539 elif isinstance(unbundler, bundle2.unbundle20):
540 540 haschangegroup = False
541 541 for part in unbundler.iterparts():
542 542 if part.type == b'changegroup':
543 543 haschangegroup = True
544 544 newpart = bundle2.bundlepart(part.type, data=part.read())
545 545 for key, value in pycompat.iteritems(part.params):
546 546 newpart.addparam(key, value)
547 547 parts.append(newpart)
548 548
549 549 if not haschangegroup:
550 550 raise error.Abort(
551 551 b'unexpected bundle without changegroup part, '
552 552 + b'head: %s' % hex(head),
553 553 hint=b'report to administrator',
554 554 )
555 555 else:
556 556 raise error.Abort(b'unknown bundle type')
557 557 else:
558 558 parts = _rebundle(bundlerepo, bundleroots, head)
559 559
560 560 return parts
561 561
562 562
563 563 def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
564 564 heads = heads or []
565 565 # newheads are parents of roots of scratch bundles that were requested
566 566 newphases = {}
567 567 scratchbundles = []
568 568 newheads = []
569 569 scratchheads = []
570 570 nodestobundle = {}
571 571 allbundlestocleanup = []
572 572 try:
573 573 for head in heads:
574 574 if not repo.changelog.index.has_node(head):
575 575 if head not in nodestobundle:
576 576 newbundlefile = common.downloadbundle(repo, head)
577 577 bundlepath = b"bundle:%s+%s" % (repo.root, newbundlefile)
578 578 bundlerepo = hg.repository(repo.ui, bundlepath)
579 579
580 580 allbundlestocleanup.append((bundlerepo, newbundlefile))
581 581 bundlerevs = set(_readbundlerevs(bundlerepo))
582 582 bundlecaps = _includefilelogstobundle(
583 583 bundlecaps, bundlerepo, bundlerevs, repo.ui
584 584 )
585 585 cl = bundlerepo.changelog
586 586 bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
587 587 for rev in bundlerevs:
588 588 node = cl.node(rev)
589 589 newphases[hex(node)] = str(phases.draft)
590 590 nodestobundle[node] = (
591 591 bundlerepo,
592 592 bundleroots,
593 593 newbundlefile,
594 594 )
595 595
596 596 scratchbundles.append(
597 597 _generateoutputparts(head, *nodestobundle[head])
598 598 )
599 599 newheads.extend(bundleroots)
600 600 scratchheads.append(head)
601 601 finally:
602 602 for bundlerepo, bundlefile in allbundlestocleanup:
603 603 bundlerepo.close()
604 604 try:
605 605 os.unlink(bundlefile)
606 606 except (IOError, OSError):
606 606 # if we can't clean up the file then just ignore the error,
608 608 # no need to fail
609 609 pass
610 610
611 611 pullfrombundlestore = bool(scratchbundles)
612 612 wrappedchangegrouppart = False
613 613 wrappedlistkeys = False
614 614 oldchangegrouppart = exchange.getbundle2partsmapping[b'changegroup']
615 615 try:
616 616
617 617 def _changegrouppart(bundler, *args, **kwargs):
618 618 # Order is important here. First add the non-scratch part
619 619 # and only then add the parts with scratch bundles, because
620 620 # the non-scratch part contains the parents of the roots of the scratch bundles.
621 621 result = oldchangegrouppart(bundler, *args, **kwargs)
622 622 for bundle in scratchbundles:
623 623 for part in bundle:
624 624 bundler.addpart(part)
625 625 return result
626 626
627 627 exchange.getbundle2partsmapping[b'changegroup'] = _changegrouppart
628 628 wrappedchangegrouppart = True
629 629
630 630 def _listkeys(orig, self, namespace):
631 631 origvalues = orig(self, namespace)
632 632 if namespace == b'phases' and pullfrombundlestore:
633 633 if origvalues.get(b'publishing') == b'True':
634 634 # Make repo non-publishing to preserve draft phase
635 635 del origvalues[b'publishing']
636 636 origvalues.update(newphases)
637 637 return origvalues
638 638
639 639 extensions.wrapfunction(
640 640 localrepo.localrepository, b'listkeys', _listkeys
641 641 )
642 642 wrappedlistkeys = True
643 643 heads = list((set(newheads) | set(heads)) - set(scratchheads))
644 644 result = orig(
645 645 repo, source, heads=heads, bundlecaps=bundlecaps, **kwargs
646 646 )
647 647 finally:
648 648 if wrappedchangegrouppart:
649 649 exchange.getbundle2partsmapping[b'changegroup'] = oldchangegrouppart
650 650 if wrappedlistkeys:
651 651 extensions.unwrapfunction(
652 652 localrepo.localrepository, b'listkeys', _listkeys
653 653 )
654 654 return result
655 655
656 656
657 657 def _lookupwrap(orig):
658 658 def _lookup(repo, proto, key):
659 659 localkey = encoding.tolocal(key)
660 660
661 661 if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
662 662 scratchnode = repo.bundlestore.index.getnode(localkey)
663 663 if scratchnode:
664 664 return b"%d %s\n" % (1, scratchnode)
665 665 else:
666 666 return b"%d %s\n" % (
667 667 0,
668 668 b'scratch branch %s not found' % localkey,
669 669 )
670 670 else:
671 671 try:
672 672 r = hex(repo.lookup(localkey))
673 673 return b"%d %s\n" % (1, r)
674 674 except Exception as inst:
675 675 if repo.bundlestore.index.getbundle(localkey):
676 676 return b"%d %s\n" % (1, localkey)
677 677 else:
678 678 r = stringutil.forcebytestr(inst)
679 679 return b"%d %s\n" % (0, r)
680 680
681 681 return _lookup
682 682
683 683
684 684 def _pull(orig, ui, repo, source=b"default", **opts):
685 685 opts = pycompat.byteskwargs(opts)
686 686 # Copied from the `pull` command
687 source, branches = urlutil.parseurl(
688 ui.expandpath(source), opts.get(b'branch')
687 source, branches = urlutil.get_unique_pull_path(
688 b"infinite-push's pull",
689 repo,
690 ui,
691 source,
692 default_branches=opts.get(b'branch'),
689 693 )
690 694
691 695 scratchbookmarks = {}
692 696 unfi = repo.unfiltered()
693 697 unknownnodes = []
694 698 for rev in opts.get(b'rev', []):
695 699 if rev not in unfi:
696 700 unknownnodes.append(rev)
697 701 if opts.get(b'bookmark'):
698 702 bookmarks = []
699 703 revs = opts.get(b'rev') or []
700 704 for bookmark in opts.get(b'bookmark'):
701 705 if _scratchbranchmatcher(bookmark):
702 706 # rev is not known yet
703 707 # it will be fetched with listkeyspatterns next
704 708 scratchbookmarks[bookmark] = b'REVTOFETCH'
705 709 else:
706 710 bookmarks.append(bookmark)
707 711
708 712 if scratchbookmarks:
709 713 other = hg.peer(repo, opts, source)
710 714 try:
711 715 fetchedbookmarks = other.listkeyspatterns(
712 716 b'bookmarks', patterns=scratchbookmarks
713 717 )
714 718 for bookmark in scratchbookmarks:
715 719 if bookmark not in fetchedbookmarks:
716 720 raise error.Abort(
717 721 b'remote bookmark %s not found!' % bookmark
718 722 )
719 723 scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
720 724 revs.append(fetchedbookmarks[bookmark])
721 725 finally:
722 726 other.close()
723 727 opts[b'bookmark'] = bookmarks
724 728 opts[b'rev'] = revs
725 729
726 730 if scratchbookmarks or unknownnodes:
727 731 # Set anyincoming to True
728 732 extensions.wrapfunction(
729 733 discovery, b'findcommonincoming', _findcommonincoming
730 734 )
731 735 try:
732 736 # Remote scratch bookmarks will be deleted because remotenames doesn't
733 737 # know about them. Let's save them before the pull and restore them after
734 738 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
735 739 result = orig(ui, repo, source, **pycompat.strkwargs(opts))
736 740 # TODO(stash): race condition is possible
737 741 # if scratch bookmarks were updated right after orig.
738 742 # But that's unlikely and shouldn't be harmful.
739 743 if common.isremotebooksenabled(ui):
740 744 remotescratchbookmarks.update(scratchbookmarks)
741 745 _saveremotebookmarks(repo, remotescratchbookmarks, source)
742 746 else:
743 747 _savelocalbookmarks(repo, scratchbookmarks)
744 748 return result
745 749 finally:
746 750 if scratchbookmarks:
747 751 extensions.unwrapfunction(discovery, b'findcommonincoming')
748 752
749 753
750 754 def _readscratchremotebookmarks(ui, repo, other):
751 755 if common.isremotebooksenabled(ui):
752 756 remotenamesext = extensions.find(b'remotenames')
753 757 remotepath = remotenamesext.activepath(repo.ui, other)
754 758 result = {}
755 759 # Let's refresh remotenames to make sure it is up to date
756 760 # It seems that `repo.names['remotebookmarks']` may return stale bookmarks
757 761 # which results in deleting scratch bookmarks. Our best guess at how to
758 762 # fix it is to use `clearnames()`
759 763 repo._remotenames.clearnames()
760 764 for remotebookmark in repo.names[b'remotebookmarks'].listnames(repo):
761 765 path, bookname = remotenamesext.splitremotename(remotebookmark)
762 766 if path == remotepath and _scratchbranchmatcher(bookname):
763 767 nodes = repo.names[b'remotebookmarks'].nodes(
764 768 repo, remotebookmark
765 769 )
766 770 if nodes:
767 771 result[bookname] = hex(nodes[0])
768 772 return result
769 773 else:
770 774 return {}
771 775
772 776
773 777 def _saveremotebookmarks(repo, newbookmarks, remote):
774 778 remotenamesext = extensions.find(b'remotenames')
775 779 remotepath = remotenamesext.activepath(repo.ui, remote)
776 780 branches = collections.defaultdict(list)
777 781 bookmarks = {}
778 782 remotenames = remotenamesext.readremotenames(repo)
779 783 for hexnode, nametype, remote, rname in remotenames:
780 784 if remote != remotepath:
781 785 continue
782 786 if nametype == b'bookmarks':
783 787 if rname in newbookmarks:
784 788 # This is possible if we have a normal bookmark that matches the
785 789 # scratch branch pattern. In this case, just use the current
786 790 # bookmark node
787 791 del newbookmarks[rname]
788 792 bookmarks[rname] = hexnode
789 793 elif nametype == b'branches':
790 794 # saveremotenames expects 20-byte binary nodes for branches
791 795 branches[rname].append(bin(hexnode))
792 796
793 797 for bookmark, hexnode in pycompat.iteritems(newbookmarks):
794 798 bookmarks[bookmark] = hexnode
795 799 remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)
796 800
797 801
798 802 def _savelocalbookmarks(repo, bookmarks):
799 803 if not bookmarks:
800 804 return
801 805 with repo.wlock(), repo.lock(), repo.transaction(b'bookmark') as tr:
802 806 changes = []
803 807 for scratchbook, node in pycompat.iteritems(bookmarks):
804 808 changectx = repo[node]
805 809 changes.append((scratchbook, changectx.node()))
806 810 repo._bookmarks.applychanges(repo, tr, changes)
807 811
808 812
809 813 def _findcommonincoming(orig, *args, **kwargs):
810 814 common, inc, remoteheads = orig(*args, **kwargs)
811 815 return common, True, remoteheads
812 816
813 817
814 818 def _push(orig, ui, repo, *dests, **opts):
815 819 opts = pycompat.byteskwargs(opts)
816 820 bookmark = opts.get(b'bookmark')
817 821 # we only support pushing one infinitepush bookmark at a time
818 822 if len(bookmark) == 1:
819 823 bookmark = bookmark[0]
820 824 else:
821 825 bookmark = b''
822 826
823 827 oldphasemove = None
824 828 overrides = {(experimental, configbookmark): bookmark}
825 829
826 830 with ui.configoverride(overrides, b'infinitepush'):
827 831 scratchpush = opts.get(b'bundle_store')
828 832 if _scratchbranchmatcher(bookmark):
829 833 scratchpush = True
830 834 # bundle2 can be sent back after push (for example, bundle2
831 835 # containing `pushkey` part to update bookmarks)
832 836 ui.setconfig(experimental, b'bundle2.pushback', True)
833 837
834 838 if scratchpush:
835 839 # this is an infinitepush; we don't want the bookmark to be applied,
836 840 # rather it should be stored in the bundlestore
837 841 opts[b'bookmark'] = []
838 842 ui.setconfig(experimental, configscratchpush, True)
839 843 oldphasemove = extensions.wrapfunction(
840 844 exchange, b'_localphasemove', _phasemove
841 845 )
842 846
843 847 paths = list(urlutil.get_push_paths(repo, ui, dests))
844 848 if len(paths) > 1:
845 849 msg = _(b'cannot push to multiple paths with infinitepush')
846 850 raise error.Abort(msg)
847 851
848 852 path = paths[0]
849 853 destpath = path.pushloc or path.loc
850 854 # Remote scratch bookmarks will be deleted because remotenames doesn't
851 855 # know about them. Let's save them before the push and restore them after
852 856 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
853 857 result = orig(ui, repo, *dests, **pycompat.strkwargs(opts))
854 858 if common.isremotebooksenabled(ui):
855 859 if bookmark and scratchpush:
856 860 other = hg.peer(repo, opts, destpath)
857 861 try:
858 862 fetchedbookmarks = other.listkeyspatterns(
859 863 b'bookmarks', patterns=[bookmark]
860 864 )
861 865 remotescratchbookmarks.update(fetchedbookmarks)
862 866 finally:
863 867 other.close()
864 868 _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
865 869 if oldphasemove:
866 870 exchange._localphasemove = oldphasemove
867 871 return result
868 872
869 873
870 874 def _deleteinfinitepushbookmarks(ui, repo, path, names):
871 875 """Prune remote names by removing the bookmarks we don't want anymore,
872 876 then writing the result back to disk
873 877 """
874 878 remotenamesext = extensions.find(b'remotenames')
875 879
876 880 # remotename format is:
877 881 # (node, nametype ("branches" or "bookmarks"), remote, name)
878 882 nametype_idx = 1
879 883 remote_idx = 2
880 884 name_idx = 3
881 885 remotenames = [
882 886 remotename
883 887 for remotename in remotenamesext.readremotenames(repo)
884 888 if remotename[remote_idx] == path
885 889 ]
886 890 remote_bm_names = [
887 891 remotename[name_idx]
888 892 for remotename in remotenames
889 893 if remotename[nametype_idx] == b"bookmarks"
890 894 ]
891 895
892 896 for name in names:
893 897 if name not in remote_bm_names:
894 898 raise error.Abort(
895 899 _(
896 900 b"infinitepush bookmark '{}' does not exist "
897 901 b"in path '{}'"
898 902 ).format(name, path)
899 903 )
900 904
901 905 bookmarks = {}
902 906 branches = collections.defaultdict(list)
903 907 for node, nametype, remote, name in remotenames:
904 908 if nametype == b"bookmarks" and name not in names:
905 909 bookmarks[name] = node
906 910 elif nametype == b"branches":
907 911 # saveremotenames wants binary nodes for branches
908 912 branches[name].append(bin(node))
909 913
910 914 remotenamesext.saveremotenames(repo, path, branches, bookmarks)
911 915
912 916
913 917 def _phasemove(orig, pushop, nodes, phase=phases.public):
914 918 """prevent commits from being marked public
915 919
916 920 Since these are going to a scratch branch, they aren't really being
917 921 published."""
918 922
919 923 if phase != phases.public:
920 924 orig(pushop, nodes, phase)
921 925
922 926
923 927 @exchange.b2partsgenerator(scratchbranchparttype)
924 928 def partgen(pushop, bundler):
925 929 bookmark = pushop.ui.config(experimental, configbookmark)
926 930 scratchpush = pushop.ui.configbool(experimental, configscratchpush)
927 931 if b'changesets' in pushop.stepsdone or not scratchpush:
928 932 return
929 933
930 934 if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
931 935 return
932 936
933 937 pushop.stepsdone.add(b'changesets')
934 938 if not pushop.outgoing.missing:
935 939 pushop.ui.status(_(b'no changes found\n'))
936 940 pushop.cgresult = 0
937 941 return
938 942
939 943 # This parameter tells the server that the following bundle is an
940 944 # infinitepush. This lets it switch the part processing to our infinitepush
941 945 # code path.
942 946 bundler.addparam(b"infinitepush", b"True")
943 947
944 948 scratchparts = bundleparts.getscratchbranchparts(
945 949 pushop.repo, pushop.remote, pushop.outgoing, pushop.ui, bookmark
946 950 )
947 951
948 952 for scratchpart in scratchparts:
949 953 bundler.addpart(scratchpart)
950 954
951 955 def handlereply(op):
952 956 # server either succeeds or aborts; no code to read
953 957 pushop.cgresult = 1
954 958
955 959 return handlereply
956 960
957 961
958 962 bundle2.capabilities[bundleparts.scratchbranchparttype] = ()
959 963
960 964
961 965 def _getrevs(bundle, oldnode, force, bookmark):
962 966 b'extracts and validates the revs to be imported'
963 967 revs = [bundle[r] for r in bundle.revs(b'sort(bundle())')]
964 968
965 969 # new bookmark
966 970 if oldnode is None:
967 971 return revs
968 972
969 973 # Fast forward update
970 974 if oldnode in bundle and list(bundle.set(b'bundle() & %s::', oldnode)):
971 975 return revs
972 976
973 977 return revs
974 978
975 979
976 980 @contextlib.contextmanager
977 981 def logservicecall(logger, service, **kwargs):
978 982 start = time.time()
979 983 logger(service, eventtype=b'start', **kwargs)
980 984 try:
981 985 yield
982 986 logger(
983 987 service,
984 988 eventtype=b'success',
985 989 elapsedms=(time.time() - start) * 1000,
986 990 **kwargs
987 991 )
988 992 except Exception as e:
989 993 logger(
990 994 service,
991 995 eventtype=b'failure',
992 996 elapsedms=(time.time() - start) * 1000,
993 997 errormsg=stringutil.forcebytestr(e),
994 998 **kwargs
995 999 )
996 1000 raise
997 1001
998 1002
999 1003 def _getorcreateinfinitepushlogger(op):
1000 1004 logger = op.records[b'infinitepushlogger']
1001 1005 if not logger:
1002 1006 ui = op.repo.ui
1003 1007 try:
1004 1008 username = procutil.getuser()
1005 1009 except Exception:
1006 1010 username = b'unknown'
1007 1011 # Generate a random request id to be able to find all logged entries
1008 1012 # for the same request. Since requestid is pseudo-random it may
1009 1013 # not be unique, but we assume that (hostname, username, requestid)
1010 1014 # is unique.
1011 1015 random.seed()
1012 1016 requestid = random.randint(0, 2000000000)
1013 1017 hostname = socket.gethostname()
1014 1018 logger = functools.partial(
1015 1019 ui.log,
1016 1020 b'infinitepush',
1017 1021 user=username,
1018 1022 requestid=requestid,
1019 1023 hostname=hostname,
1020 1024 reponame=ui.config(b'infinitepush', b'reponame'),
1021 1025 )
1022 1026 op.records.add(b'infinitepushlogger', logger)
1023 1027 else:
1024 1028 logger = logger[0]
1025 1029 return logger
1026 1030
1027 1031
1028 1032 def storetobundlestore(orig, repo, op, unbundler):
1029 1033 """stores the incoming bundle coming from push command to the bundlestore
1030 1034 instead of applying on the revlogs"""
1031 1035
1032 1036 repo.ui.status(_(b"storing changesets on the bundlestore\n"))
1033 1037 bundler = bundle2.bundle20(repo.ui)
1034 1038
1035 1039 # processing each part and storing it in bundler
1036 1040 with bundle2.partiterator(repo, op, unbundler) as parts:
1037 1041 for part in parts:
1038 1042 bundlepart = None
1039 1043 if part.type == b'replycaps':
1040 1044 # This configures the current operation to allow reply parts.
1041 1045 bundle2._processpart(op, part)
1042 1046 else:
1043 1047 bundlepart = bundle2.bundlepart(part.type, data=part.read())
1044 1048 for key, value in pycompat.iteritems(part.params):
1045 1049 bundlepart.addparam(key, value)
1046 1050
1047 1051 # Certain parts require a response
1048 1052 if part.type in (b'pushkey', b'changegroup'):
1049 1053 if op.reply is not None:
1050 1054 rpart = op.reply.newpart(b'reply:%s' % part.type)
1051 1055 rpart.addparam(
1052 1056 b'in-reply-to', b'%d' % part.id, mandatory=False
1053 1057 )
1054 1058 rpart.addparam(b'return', b'1', mandatory=False)
1055 1059
1056 1060 op.records.add(
1057 1061 part.type,
1058 1062 {
1059 1063 b'return': 1,
1060 1064 },
1061 1065 )
1062 1066 if bundlepart:
1063 1067 bundler.addpart(bundlepart)
1064 1068
1065 1069 # storing the bundle in the bundlestore
1066 1070 buf = util.chunkbuffer(bundler.getchunks())
1067 1071 fd, bundlefile = pycompat.mkstemp()
1068 1072 try:
1069 1073 try:
1070 1074 fp = os.fdopen(fd, 'wb')
1071 1075 fp.write(buf.read())
1072 1076 finally:
1073 1077 fp.close()
1074 1078 storebundle(op, {}, bundlefile)
1075 1079 finally:
1076 1080 try:
1077 1081 os.unlink(bundlefile)
1078 1082 except Exception:
1079 1083 # we would rather see the original exception
1080 1084 pass
1081 1085
1082 1086
1083 1087 def processparts(orig, repo, op, unbundler):
1084 1088
1085 1089 # make sure we don't wrap processparts in case of `hg unbundle`
1086 1090 if op.source == b'unbundle':
1087 1091 return orig(repo, op, unbundler)
1088 1092
1089 1093 # this server routes each push to bundle store
1090 1094 if repo.ui.configbool(b'infinitepush', b'pushtobundlestore'):
1091 1095 return storetobundlestore(orig, repo, op, unbundler)
1092 1096
1093 1097 if unbundler.params.get(b'infinitepush') != b'True':
1094 1098 return orig(repo, op, unbundler)
1095 1099
1096 1100 handleallparts = repo.ui.configbool(b'infinitepush', b'storeallparts')
1097 1101
1098 1102 bundler = bundle2.bundle20(repo.ui)
1099 1103 cgparams = None
1100 1104 with bundle2.partiterator(repo, op, unbundler) as parts:
1101 1105 for part in parts:
1102 1106 bundlepart = None
1103 1107 if part.type == b'replycaps':
1104 1108 # This configures the current operation to allow reply parts.
1105 1109 bundle2._processpart(op, part)
1106 1110 elif part.type == bundleparts.scratchbranchparttype:
1107 1111 # Scratch branch parts need to be converted to normal
1108 1112 # changegroup parts, and the extra parameters stored for later
1109 1113 # when we upload to the store. Eventually those parameters will
1110 1114 # be put on the actual bundle instead of this part, then we can
1111 1115 # send a vanilla changegroup instead of the scratchbranch part.
1112 1116 cgversion = part.params.get(b'cgversion', b'01')
1113 1117 bundlepart = bundle2.bundlepart(
1114 1118 b'changegroup', data=part.read()
1115 1119 )
1116 1120 bundlepart.addparam(b'version', cgversion)
1117 1121 cgparams = part.params
1118 1122
1119 1123 # If we're not dumping all parts into the new bundle, we need to
1120 1124 # alert the future pushkey and phase-heads handler to skip
1121 1125 # the part.
1122 1126 if not handleallparts:
1123 1127 op.records.add(
1124 1128 scratchbranchparttype + b'_skippushkey', True
1125 1129 )
1126 1130 op.records.add(
1127 1131 scratchbranchparttype + b'_skipphaseheads', True
1128 1132 )
1129 1133 else:
1130 1134 if handleallparts:
1131 1135 # Ideally we would not process any parts, and instead just
1132 1136 # forward them to the bundle for storage, but since this
1133 1137 # differs from previous behavior, we need to put it behind a
1134 1138 # config flag for incremental rollout.
1135 1139 bundlepart = bundle2.bundlepart(part.type, data=part.read())
1136 1140 for key, value in pycompat.iteritems(part.params):
1137 1141 bundlepart.addparam(key, value)
1138 1142
1139 1143 # Certain parts require a response
1140 1144 if part.type == b'pushkey':
1141 1145 if op.reply is not None:
1142 1146 rpart = op.reply.newpart(b'reply:pushkey')
1143 1147 rpart.addparam(
1144 1148 b'in-reply-to', str(part.id), mandatory=False
1145 1149 )
1146 1150 rpart.addparam(b'return', b'1', mandatory=False)
1147 1151 else:
1148 1152 bundle2._processpart(op, part)
1149 1153
1150 1154 if handleallparts:
1151 1155 op.records.add(
1152 1156 part.type,
1153 1157 {
1154 1158 b'return': 1,
1155 1159 },
1156 1160 )
1157 1161 if bundlepart:
1158 1162 bundler.addpart(bundlepart)
1159 1163
1160 1164 # If commits were sent, store them
1161 1165 if cgparams:
1162 1166 buf = util.chunkbuffer(bundler.getchunks())
1163 1167 fd, bundlefile = pycompat.mkstemp()
1164 1168 try:
1165 1169 try:
1166 1170 fp = os.fdopen(fd, 'wb')
1167 1171 fp.write(buf.read())
1168 1172 finally:
1169 1173 fp.close()
1170 1174 storebundle(op, cgparams, bundlefile)
1171 1175 finally:
1172 1176 try:
1173 1177 os.unlink(bundlefile)
1174 1178 except Exception:
1175 1179 # we would rather see the original exception
1176 1180 pass
1177 1181
1178 1182
1179 1183 def storebundle(op, params, bundlefile):
1180 1184 log = _getorcreateinfinitepushlogger(op)
1181 1185 parthandlerstart = time.time()
1182 1186 log(scratchbranchparttype, eventtype=b'start')
1183 1187 index = op.repo.bundlestore.index
1184 1188 store = op.repo.bundlestore.store
1185 1189 op.records.add(scratchbranchparttype + b'_skippushkey', True)
1186 1190
1187 1191 bundle = None
1188 1192 try: # guards bundle
1189 1193 bundlepath = b"bundle:%s+%s" % (op.repo.root, bundlefile)
1190 1194 bundle = hg.repository(op.repo.ui, bundlepath)
1191 1195
1192 1196 bookmark = params.get(b'bookmark')
1193 1197 bookprevnode = params.get(b'bookprevnode', b'')
1194 1198 force = params.get(b'force')
1195 1199
1196 1200 if bookmark:
1197 1201 oldnode = index.getnode(bookmark)
1198 1202 else:
1199 1203 oldnode = None
1200 1204 bundleheads = bundle.revs(b'heads(bundle())')
1201 1205 if bookmark and len(bundleheads) > 1:
1202 1206 raise error.Abort(
1203 1207 _(b'cannot push more than one head to a scratch branch')
1204 1208 )
1205 1209
1206 1210 revs = _getrevs(bundle, oldnode, force, bookmark)
1207 1211
1208 1212 # Notify the user of what is being pushed
1209 1213 plural = b's' if len(revs) > 1 else b''
1210 1214 op.repo.ui.warn(_(b"pushing %d commit%s:\n") % (len(revs), plural))
1211 1215 maxoutput = 10
1212 1216 for i in range(0, min(len(revs), maxoutput)):
1213 1217 firstline = bundle[revs[i]].description().split(b'\n')[0][:50]
1214 1218 op.repo.ui.warn(b" %s %s\n" % (revs[i], firstline))
1215 1219
1216 1220 if len(revs) > maxoutput + 1:
1217 1221 op.repo.ui.warn(b" ...\n")
1218 1222 firstline = bundle[revs[-1]].description().split(b'\n')[0][:50]
1219 1223 op.repo.ui.warn(b" %s %s\n" % (revs[-1], firstline))
1220 1224
1221 1225 nodesctx = [bundle[rev] for rev in revs]
1222 1226 inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
1223 1227 if bundleheads:
1224 1228 newheadscount = sum(not inindex(rev) for rev in bundleheads)
1225 1229 else:
1226 1230 newheadscount = 0
1227 1231 # If there's a bookmark specified, there should be only one head,
1228 1232 # so we choose the last node, which will be that head.
1229 1233 # If a bug or malicious client allows there to be a bookmark
1230 1234 # with multiple heads, we will place the bookmark on the last head.
1231 1235 bookmarknode = nodesctx[-1].hex() if nodesctx else None
1232 1236 key = None
1233 1237 if newheadscount:
1234 1238 with open(bundlefile, b'rb') as f:
1235 1239 bundledata = f.read()
1236 1240 with logservicecall(
1237 1241 log, b'bundlestore', bundlesize=len(bundledata)
1238 1242 ):
1239 1243 bundlesizelimit = 100 * 1024 * 1024 # 100 MB
1240 1244 if len(bundledata) > bundlesizelimit:
1241 1245 error_msg = (
1242 1246 b'bundle is too big: %d bytes. '
1243 1247 + b'max allowed size is 100 MB'
1244 1248 )
1245 1249 raise error.Abort(error_msg % (len(bundledata),))
1246 1250 key = store.write(bundledata)
1247 1251
1248 1252 with logservicecall(log, b'index', newheadscount=newheadscount), index:
1249 1253 if key:
1250 1254 index.addbundle(key, nodesctx)
1251 1255 if bookmark:
1252 1256 index.addbookmark(bookmark, bookmarknode)
1253 1257 _maybeaddpushbackpart(
1254 1258 op, bookmark, bookmarknode, bookprevnode, params
1255 1259 )
1256 1260 log(
1257 1261 scratchbranchparttype,
1258 1262 eventtype=b'success',
1259 1263 elapsedms=(time.time() - parthandlerstart) * 1000,
1260 1264 )
1261 1265
1262 1266 except Exception as e:
1263 1267 log(
1264 1268 scratchbranchparttype,
1265 1269 eventtype=b'failure',
1266 1270 elapsedms=(time.time() - parthandlerstart) * 1000,
1267 1271 errormsg=stringutil.forcebytestr(e),
1268 1272 )
1269 1273 raise
1270 1274 finally:
1271 1275 if bundle:
1272 1276 bundle.close()
1273 1277
1274 1278
1275 1279 @bundle2.parthandler(
1276 1280 scratchbranchparttype,
1277 1281 (
1278 1282 b'bookmark',
1279 1283 b'bookprevnode',
1280 1284 b'force',
1281 1285 b'pushbackbookmarks',
1282 1286 b'cgversion',
1283 1287 ),
1284 1288 )
1285 1289 def bundle2scratchbranch(op, part):
1286 1290 '''unbundle a bundle2 part containing a changegroup to store'''
1287 1291
1288 1292 bundler = bundle2.bundle20(op.repo.ui)
1289 1293 cgversion = part.params.get(b'cgversion', b'01')
1290 1294 cgpart = bundle2.bundlepart(b'changegroup', data=part.read())
1291 1295 cgpart.addparam(b'version', cgversion)
1292 1296 bundler.addpart(cgpart)
1293 1297 buf = util.chunkbuffer(bundler.getchunks())
1294 1298
1295 1299 fd, bundlefile = pycompat.mkstemp()
1296 1300 try:
1297 1301 try:
1298 1302 fp = os.fdopen(fd, 'wb')
1299 1303 fp.write(buf.read())
1300 1304 finally:
1301 1305 fp.close()
1302 1306 storebundle(op, part.params, bundlefile)
1303 1307 finally:
1304 1308 try:
1305 1309 os.unlink(bundlefile)
1306 1310 except OSError as e:
1307 1311 if e.errno != errno.ENOENT:
1308 1312 raise
1309 1313
1310 1314 return 1
1311 1315
1312 1316
1313 1317 def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
1314 1318 if params.get(b'pushbackbookmarks'):
1315 1319 if op.reply and b'pushback' in op.reply.capabilities:
1316 1320 params = {
1317 1321 b'namespace': b'bookmarks',
1318 1322 b'key': bookmark,
1319 1323 b'new': newnode,
1320 1324 b'old': oldnode,
1321 1325 }
1322 1326 op.reply.newpart(
1323 1327 b'pushkey', mandatoryparams=pycompat.iteritems(params)
1324 1328 )
1325 1329
1326 1330
1327 1331 def bundle2pushkey(orig, op, part):
1328 1332 """Wrapper of bundle2.handlepushkey()
1329 1333
1330 1334 The only goal is to skip calling the original function if the flag is set.
1331 1335 It's set if an infinitepush push is happening.
1332 1336 """
1333 1337 if op.records[scratchbranchparttype + b'_skippushkey']:
1334 1338 if op.reply is not None:
1335 1339 rpart = op.reply.newpart(b'reply:pushkey')
1336 1340 rpart.addparam(b'in-reply-to', str(part.id), mandatory=False)
1337 1341 rpart.addparam(b'return', b'1', mandatory=False)
1338 1342 return 1
1339 1343
1340 1344 return orig(op, part)
1341 1345
1342 1346
1343 1347 def bundle2handlephases(orig, op, part):
1344 1348 """Wrapper of bundle2.handlephases()
1345 1349
1346 1350 The only goal is to skip calling the original function if the flag is set.
1347 1351 It's set if an infinitepush push is happening.
1348 1352 """
1349 1353
1350 1354 if op.records[scratchbranchparttype + b'_skipphaseheads']:
1351 1355 return
1352 1356
1353 1357 return orig(op, part)
1354 1358
1355 1359
1356 1360 def _asyncsavemetadata(root, nodes):
1357 1361 """starts a separate process that fills metadata for the nodes
1358 1362
1359 1363 This function creates a separate process and doesn't wait for its
1360 1364 completion. This was done to avoid slowing down pushes
1361 1365 """
1362 1366
1363 1367 maxnodes = 50
1364 1368 if len(nodes) > maxnodes:
1365 1369 return
1366 1370 nodesargs = []
1367 1371 for node in nodes:
1368 1372 nodesargs.append(b'--node')
1369 1373 nodesargs.append(node)
1370 1374 with open(os.devnull, b'w+b') as devnull:
1371 1375 cmdline = [
1372 1376 util.hgexecutable(),
1373 1377 b'debugfillinfinitepushmetadata',
1374 1378 b'-R',
1375 1379 root,
1376 1380 ] + nodesargs
1377 1381 # The process will run in the background. We don't care about the return code
1378 1382 subprocess.Popen(
1379 1383 pycompat.rapply(procutil.tonativestr, cmdline),
1380 1384 close_fds=True,
1381 1385 shell=False,
1382 1386 stdin=devnull,
1383 1387 stdout=devnull,
1384 1388 stderr=devnull,
1385 1389 )
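For context, here is a minimal configuration assembled from the options documented in the module docstring above. The branch pattern and paths are illustrative assumptions, not values mandated by the extension:

    # server-side hgrc (illustrative sketch)
    [infinitepush]
    server = True
    branchpattern = re:scratch/.+    # assumed pattern; any stringmatcher pattern works
    indextype = disk
    indexpath = /var/hg/index        # hypothetical path
    storetype = disk

    # client-side hgrc (illustrative sketch)
    [infinitepush]
    server = False
    branchpattern = re:scratch/.+

With a setup like this, pushing a bookmark that matches `branchpattern` (e.g. `hg push -B scratch/myfeature`) is routed by the wrapped `_push` to the bundle store rather than the main repository, and `hg pull -B scratch/myfeature` retrieves it again via the wrapped `_pull` shown in this commit.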