infinitepush: mark extension as likely to be deleted...
Augie Fackler, r43381:e5d53562 (default)
@@ -1,1339 +1,1344 b''
1 1 # Infinite push
2 2 #
3 3 # Copyright 2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """ store some pushes in a remote blob store on the server (EXPERIMENTAL)
8 8
9 IMPORTANT: if you use this extension, please contact
10 mercurial-devel@mercurial-scm.org ASAP. This extension is believed to
11 be unused and, barring learning of users of this functionality, we will
12 delete this code at the end of 2020.
13
9 14 [infinitepush]
10 15 # Server-side and client-side option. Pattern of the infinitepush bookmark
11 16 branchpattern = PATTERN
12 17
13 18 # Server or client
14 19 server = False
15 20
16 21 # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
17 22 indextype = disk
18 23
19 24 # Server-side option. Used only if indextype=sql.
20 25 # Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
21 26 sqlhost = IP:PORT:DB_NAME:USER:PASSWORD
22 27
23 28 # Server-side option. Used only if indextype=disk.
24 29 # Filesystem path to the index store
25 30 indexpath = PATH
26 31
27 32 # Server-side option. Possible values: 'disk' or 'external'
28 33 # Fails if not set
29 34 storetype = disk
30 35
31 36 # Server-side option.
32 37 # Path to the binary that will save bundle to the bundlestore
33 38 # Formatted cmd line will be passed to it (see `put_args`)
34 39 put_binary = put
35 40
36 41 # Server-side option. Used only if storetype=external.
37 42 # Format cmd-line string for put binary. Placeholder: {filename}
38 43 put_args = {filename}
39 44
40 45 # Server-side option.
41 46 # Path to the binary that get bundle from the bundlestore.
42 47 # Formatted cmd line will be passed to it (see `get_args`)
43 48 get_binary = get
44 49
45 50 # Server-side option. Used only if storetype=external.
46 51 # Format cmd-line string for get binary. Placeholders: {filename} {handle}
47 52 get_args = {filename} {handle}
48 53
49 54 # Server-side option
50 55 logfile = FILE
51 56
52 57 # Server-side option
53 58 loglevel = DEBUG
54 59
55 60 # Server-side option. Used only if indextype=sql.
56 61 # Sets mysql wait_timeout option.
57 62 waittimeout = 300
58 63
59 64 # Server-side option. Used only if indextype=sql.
60 65 # Sets mysql innodb_lock_wait_timeout option.
61 66 locktimeout = 120
62 67
63 68 # Server-side option. Used only if indextype=sql.
64 69 # Name of the repository
65 70 reponame = ''
66 71
67 72 # Client-side option. Used by --list-remote option. List of remote scratch
68 73 # patterns to list if no patterns are specified.
69 74 defaultremotepatterns = ['*']
70 75
71 76 # Instructs infinitepush to forward all received bundle2 parts to the
72 77 # bundle for storage. Defaults to False.
73 78 storeallparts = True
74 79
75 80 # routes each incoming push to the bundlestore. defaults to False
76 81 pushtobundlestore = True
77 82
78 83 [remotenames]
79 84 # Client-side option
80 85 # This option should be set only if remotenames extension is enabled.
81 86 # Whether remote bookmarks are tracked by remotenames extension.
82 87 bookmarks = True
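
Example (illustrative only; the paths, the pattern, and the split between
server-side and client-side hgrc files below are assumptions, not a tested
setup):

# server-side .hg/hgrc
[infinitepush]
server = True
indextype = disk
indexpath = /var/hg/infinitepush/index
storetype = disk
branchpattern = re:scratch/.+

# client-side .hg/hgrc
[infinitepush]
branchpattern = re:scratch/.+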
83 88 """
84 89
85 90 from __future__ import absolute_import
86 91
87 92 import collections
88 93 import contextlib
89 94 import errno
90 95 import functools
91 96 import logging
92 97 import os
93 98 import random
94 99 import re
95 100 import socket
96 101 import subprocess
97 102 import time
98 103
99 104 from mercurial.node import (
100 105 bin,
101 106 hex,
102 107 )
103 108
104 109 from mercurial.i18n import _
105 110
106 111 from mercurial.pycompat import (
107 112 getattr,
108 113 open,
109 114 )
110 115
111 116 from mercurial.utils import (
112 117 procutil,
113 118 stringutil,
114 119 )
115 120
116 121 from mercurial import (
117 122 bundle2,
118 123 changegroup,
119 124 commands,
120 125 discovery,
121 126 encoding,
122 127 error,
123 128 exchange,
124 129 extensions,
125 130 hg,
126 131 localrepo,
127 132 phases,
128 133 pushkey,
129 134 pycompat,
130 135 registrar,
131 136 util,
132 137 wireprototypes,
133 138 wireprotov1peer,
134 139 wireprotov1server,
135 140 )
136 141
137 142 from . import (
138 143 bundleparts,
139 144 common,
140 145 )
141 146
142 147 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
143 148 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
144 149 # be specifying the version(s) of Mercurial they are tested with, or
145 150 # leave the attribute unspecified.
146 151 testedwith = b'ships-with-hg-core'
147 152
148 153 configtable = {}
149 154 configitem = registrar.configitem(configtable)
150 155
151 156 configitem(
152 157 b'infinitepush', b'server', default=False,
153 158 )
154 159 configitem(
155 160 b'infinitepush', b'storetype', default=b'',
156 161 )
157 162 configitem(
158 163 b'infinitepush', b'indextype', default=b'',
159 164 )
160 165 configitem(
161 166 b'infinitepush', b'indexpath', default=b'',
162 167 )
163 168 configitem(
164 169 b'infinitepush', b'storeallparts', default=False,
165 170 )
166 171 configitem(
167 172 b'infinitepush', b'reponame', default=b'',
168 173 )
169 174 configitem(
170 175 b'scratchbranch', b'storepath', default=b'',
171 176 )
172 177 configitem(
173 178 b'infinitepush', b'branchpattern', default=b'',
174 179 )
175 180 configitem(
176 181 b'infinitepush', b'pushtobundlestore', default=False,
177 182 )
178 183 configitem(
179 184 b'experimental', b'server-bundlestore-bookmark', default=b'',
180 185 )
181 186 configitem(
182 187 b'experimental', b'infinitepush-scratchpush', default=False,
183 188 )
184 189
185 190 experimental = b'experimental'
186 191 configbookmark = b'server-bundlestore-bookmark'
187 192 configscratchpush = b'infinitepush-scratchpush'
188 193
189 194 scratchbranchparttype = bundleparts.scratchbranchparttype
190 195 revsetpredicate = registrar.revsetpredicate()
191 196 templatekeyword = registrar.templatekeyword()
192 197 _scratchbranchmatcher = lambda x: False
193 198 _maybehash = re.compile(r'^[a-f0-9]+$').search
194 199
195 200
196 201 def _buildexternalbundlestore(ui):
197 202 put_args = ui.configlist(b'infinitepush', b'put_args', [])
198 203 put_binary = ui.config(b'infinitepush', b'put_binary')
199 204 if not put_binary:
200 205 raise error.Abort(b'put binary is not specified')
201 206 get_args = ui.configlist(b'infinitepush', b'get_args', [])
202 207 get_binary = ui.config(b'infinitepush', b'get_binary')
203 208 if not get_binary:
204 209 raise error.Abort(b'get binary is not specified')
205 210 from . import store
206 211
207 212 return store.externalbundlestore(put_binary, put_args, get_binary, get_args)
208 213
209 214
210 215 def _buildsqlindex(ui):
211 216 sqlhost = ui.config(b'infinitepush', b'sqlhost')
212 217 if not sqlhost:
213 218 raise error.Abort(_(b'please set infinitepush.sqlhost'))
214 219 host, port, db, user, password = sqlhost.split(b':')
215 220 reponame = ui.config(b'infinitepush', b'reponame')
216 221 if not reponame:
217 222 raise error.Abort(_(b'please set infinitepush.reponame'))
218 223
219 224 logfile = ui.config(b'infinitepush', b'logfile', b'')
220 225 waittimeout = ui.configint(b'infinitepush', b'waittimeout', 300)
221 226 locktimeout = ui.configint(b'infinitepush', b'locktimeout', 120)
222 227 from . import sqlindexapi
223 228
224 229 return sqlindexapi.sqlindexapi(
225 230 reponame,
226 231 host,
227 232 port,
228 233 db,
229 234 user,
230 235 password,
231 236 logfile,
232 237 _getloglevel(ui),
233 238 waittimeout=waittimeout,
234 239 locktimeout=locktimeout,
235 240 )
236 241
237 242
238 243 def _getloglevel(ui):
239 244 loglevel = ui.config(b'infinitepush', b'loglevel', b'DEBUG')
240 245 numeric_loglevel = getattr(logging, loglevel.upper(), None)
241 246 if not isinstance(numeric_loglevel, int):
242 247 raise error.Abort(_(b'invalid log level %s') % loglevel)
243 248 return numeric_loglevel
244 249
245 250
246 251 def _tryhoist(ui, remotebookmark):
247 252 '''returns a bookmark with the hoisted part removed
248 253
249 254 The remotenames extension has a 'hoist' config that allows using remote
250 255 bookmarks without specifying the remote path. For example, 'hg update master'
251 256 works as well as 'hg update remote/master'. We want to allow the same in
252 257 infinitepush.
253 258 '''
254 259
255 260 if common.isremotebooksenabled(ui):
256 261 hoist = ui.config(b'remotenames', b'hoistedpeer') + b'/'
257 262 if remotebookmark.startswith(hoist):
258 263 return remotebookmark[len(hoist) :]
259 264 return remotebookmark
260 265
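# Illustrative behavior of _tryhoist above (a sketch; assumes remotenames
# is enabled and remotenames.hoistedpeer is configured as b'remote'):
#
#   _tryhoist(ui, b'remote/master')  ->  b'master'
#   _tryhoist(ui, b'other/master')   ->  b'other/master'  (unchanged)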
261 266
262 267 class bundlestore(object):
263 268 def __init__(self, repo):
264 269 self._repo = repo
265 270 storetype = self._repo.ui.config(b'infinitepush', b'storetype')
266 271 if storetype == b'disk':
267 272 from . import store
268 273
269 274 self.store = store.filebundlestore(self._repo.ui, self._repo)
270 275 elif storetype == b'external':
271 276 self.store = _buildexternalbundlestore(self._repo.ui)
272 277 else:
273 278 raise error.Abort(
274 279 _(b'unknown infinitepush store type specified %s') % storetype
275 280 )
276 281
277 282 indextype = self._repo.ui.config(b'infinitepush', b'indextype')
278 283 if indextype == b'disk':
279 284 from . import fileindexapi
280 285
281 286 self.index = fileindexapi.fileindexapi(self._repo)
282 287 elif indextype == b'sql':
283 288 self.index = _buildsqlindex(self._repo.ui)
284 289 else:
285 290 raise error.Abort(
286 291 _(b'unknown infinitepush index type specified %s') % indextype
287 292 )
288 293
289 294
290 295 def _isserver(ui):
291 296 return ui.configbool(b'infinitepush', b'server')
292 297
293 298
294 299 def reposetup(ui, repo):
295 300 if _isserver(ui) and repo.local():
296 301 repo.bundlestore = bundlestore(repo)
297 302
298 303
299 304 def extsetup(ui):
300 305 commonsetup(ui)
301 306 if _isserver(ui):
302 307 serverextsetup(ui)
303 308 else:
304 309 clientextsetup(ui)
305 310
306 311
307 312 def commonsetup(ui):
308 313 wireprotov1server.commands[b'listkeyspatterns'] = (
309 314 wireprotolistkeyspatterns,
310 315 b'namespace patterns',
311 316 )
312 317 scratchbranchpat = ui.config(b'infinitepush', b'branchpattern')
313 318 if scratchbranchpat:
314 319 global _scratchbranchmatcher
315 320 kind, pat, _scratchbranchmatcher = stringutil.stringmatcher(
316 321 scratchbranchpat
317 322 )
318 323
319 324
320 325 def serverextsetup(ui):
321 326 origpushkeyhandler = bundle2.parthandlermapping[b'pushkey']
322 327
323 328 def newpushkeyhandler(*args, **kwargs):
324 329 bundle2pushkey(origpushkeyhandler, *args, **kwargs)
325 330
326 331 newpushkeyhandler.params = origpushkeyhandler.params
327 332 bundle2.parthandlermapping[b'pushkey'] = newpushkeyhandler
328 333
329 334 orighandlephasehandler = bundle2.parthandlermapping[b'phase-heads']
330 335 newphaseheadshandler = lambda *args, **kwargs: bundle2handlephases(
331 336 orighandlephasehandler, *args, **kwargs
332 337 )
333 338 newphaseheadshandler.params = orighandlephasehandler.params
334 339 bundle2.parthandlermapping[b'phase-heads'] = newphaseheadshandler
335 340
336 341 extensions.wrapfunction(
337 342 localrepo.localrepository, b'listkeys', localrepolistkeys
338 343 )
339 344 wireprotov1server.commands[b'lookup'] = (
340 345 _lookupwrap(wireprotov1server.commands[b'lookup'][0]),
341 346 b'key',
342 347 )
343 348 extensions.wrapfunction(exchange, b'getbundlechunks', getbundlechunks)
344 349
345 350 extensions.wrapfunction(bundle2, b'processparts', processparts)
346 351
347 352
348 353 def clientextsetup(ui):
349 354 entry = extensions.wrapcommand(commands.table, b'push', _push)
350 355
351 356 entry[1].append(
352 357 (
353 358 b'',
354 359 b'bundle-store',
355 360 None,
356 361 _(b'force push to go to bundle store (EXPERIMENTAL)'),
357 362 )
358 363 )
359 364
360 365 extensions.wrapcommand(commands.table, b'pull', _pull)
361 366
362 367 extensions.wrapfunction(discovery, b'checkheads', _checkheads)
363 368
364 369 wireprotov1peer.wirepeer.listkeyspatterns = listkeyspatterns
365 370
366 371 partorder = exchange.b2partsgenorder
367 372 index = partorder.index(b'changeset')
368 373 partorder.insert(
369 374 index, partorder.pop(partorder.index(scratchbranchparttype))
370 375 )
371 376
372 377
373 378 def _checkheads(orig, pushop):
374 379 if pushop.ui.configbool(experimental, configscratchpush, False):
375 380 return
376 381 return orig(pushop)
377 382
378 383
379 384 def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
380 385 patterns = wireprototypes.decodelist(patterns)
381 386 d = pycompat.iteritems(repo.listkeys(encoding.tolocal(namespace), patterns))
382 387 return pushkey.encodekeys(d)
383 388
384 389
385 390 def localrepolistkeys(orig, self, namespace, patterns=None):
386 391 if namespace == b'bookmarks' and patterns:
387 392 index = self.bundlestore.index
388 393 results = {}
389 394 bookmarks = orig(self, namespace)
390 395 for pattern in patterns:
391 396 results.update(index.getbookmarks(pattern))
392 397 if pattern.endswith(b'*'):
393 398 pattern = b're:^' + pattern[:-1] + b'.*'
394 399 kind, pat, matcher = stringutil.stringmatcher(pattern)
395 400 for bookmark, node in pycompat.iteritems(bookmarks):
396 401 if matcher(bookmark):
397 402 results[bookmark] = node
398 403 return results
399 404 else:
400 405 return orig(self, namespace)
401 406
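# Pattern-matching sketch for localrepolistkeys above: a pattern ending in
# b'*' is rewritten into an anchored regex before ordinary bookmarks are
# matched, e.g. (hypothetical pattern):
#
#   b'scratch/johndoe/*'  ->  b're:^scratch/johndoe/.*'
#
# Scratch bookmarks from the bundlestore index are collected first via
# index.getbookmarks(pattern); regular bookmarks matching the same pattern
# are then merged into the same result dict.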
402 407
403 408 @wireprotov1peer.batchable
404 409 def listkeyspatterns(self, namespace, patterns):
405 410 if not self.capable(b'pushkey'):
406 411 yield {}, None
407 412 f = wireprotov1peer.future()
408 413 self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
409 414 yield {
410 415 b'namespace': encoding.fromlocal(namespace),
411 416 b'patterns': wireprototypes.encodelist(patterns),
412 417 }, f
413 418 d = f.value
414 419 self.ui.debug(
415 420 b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
416 421 )
417 422 yield pushkey.decodekeys(d)
418 423
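# Note on the @wireprotov1peer.batchable idiom above: the generator first
# yields (encoded arguments, future) so the call can be batched over the
# wire, then resumes once f.value has been filled in and yields the decoded
# result. The early `yield {}, None` is the no-op path for peers that lack
# the pushkey capability.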
419 424
420 425 def _readbundlerevs(bundlerepo):
421 426 return list(bundlerepo.revs(b'bundle()'))
422 427
423 428
424 429 def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
425 430 '''Tells remotefilelog to include all changed files in the changegroup
426 431
427 432 By default remotefilelog doesn't include file content in the changegroup,
428 433 but we need to include it if we are fetching from the bundlestore.
429 434 '''
430 435 changedfiles = set()
431 436 cl = bundlerepo.changelog
432 437 for r in bundlerevs:
433 438 # [3] means changed files
434 439 changedfiles.update(cl.read(r)[3])
435 440 if not changedfiles:
436 441 return bundlecaps
437 442
438 443 changedfiles = b'\0'.join(changedfiles)
439 444 newcaps = []
440 445 appended = False
441 446 for cap in bundlecaps or []:
442 447 if cap.startswith(b'excludepattern='):
443 448 newcaps.append(b'\0'.join((cap, changedfiles)))
444 449 appended = True
445 450 else:
446 451 newcaps.append(cap)
447 452 if not appended:
448 453 # No excludepattern cap found; just append one
449 454 newcaps.append(b'excludepattern=' + changedfiles)
450 455
451 456 return newcaps
452 457
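# Sketch of the bundlecaps rewrite above (file names are illustrative):
# with changed files {b'foo.c', b'bar.c'}, an existing cap
#   b'excludepattern=old'
# becomes
#   b'excludepattern=old\x00foo.c\x00bar.c'
# and when no excludepattern cap is present,
#   b'excludepattern=foo.c\x00bar.c'
# is appended instead (ordering of the joined set is not guaranteed).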
453 458
454 459 def _rebundle(bundlerepo, bundleroots, unknownhead):
455 460 '''
456 461 A bundle may include more revisions than the user requested; for example,
457 462 the user may ask for one revision while the bundle also contains its
458 463 descendants. This function filters out all revisions the user did not request.
459 464 '''
460 465 parts = []
461 466
462 467 version = b'02'
463 468 outgoing = discovery.outgoing(
464 469 bundlerepo, commonheads=bundleroots, missingheads=[unknownhead]
465 470 )
466 471 cgstream = changegroup.makestream(bundlerepo, outgoing, version, b'pull')
467 472 cgstream = util.chunkbuffer(cgstream).read()
468 473 cgpart = bundle2.bundlepart(b'changegroup', data=cgstream)
469 474 cgpart.addparam(b'version', version)
470 475 parts.append(cgpart)
471 476
472 477 return parts
473 478
474 479
475 480 def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
476 481 cl = bundlerepo.changelog
477 482 bundleroots = []
478 483 for rev in bundlerevs:
479 484 node = cl.node(rev)
480 485 parents = cl.parents(node)
481 486 for parent in parents:
482 487 # include all revs that exist in the main repo
483 488 # to make sure that the bundle can apply client-side
484 489 if parent in oldrepo:
485 490 bundleroots.append(parent)
486 491 return bundleroots
487 492
488 493
489 494 def _needsrebundling(head, bundlerepo):
490 495 bundleheads = list(bundlerepo.revs(b'heads(bundle())'))
491 496 return not (
492 497 len(bundleheads) == 1 and bundlerepo[bundleheads[0]].node() == head
493 498 )
494 499
495 500
496 501 def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
497 502 '''generates the bundle that will be sent to the user
498 503
499 504 returns a tuple with the raw bundle string and the bundle type
500 505 '''
501 506 parts = []
502 507 if not _needsrebundling(head, bundlerepo):
503 508 with util.posixfile(bundlefile, b"rb") as f:
504 509 unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
505 510 if isinstance(unbundler, changegroup.cg1unpacker):
506 511 part = bundle2.bundlepart(
507 512 b'changegroup', data=unbundler._stream.read()
508 513 )
509 514 part.addparam(b'version', b'01')
510 515 parts.append(part)
511 516 elif isinstance(unbundler, bundle2.unbundle20):
512 517 haschangegroup = False
513 518 for part in unbundler.iterparts():
514 519 if part.type == b'changegroup':
515 520 haschangegroup = True
516 521 newpart = bundle2.bundlepart(part.type, data=part.read())
517 522 for key, value in pycompat.iteritems(part.params):
518 523 newpart.addparam(key, value)
519 524 parts.append(newpart)
520 525
521 526 if not haschangegroup:
522 527 raise error.Abort(
523 528 b'unexpected bundle without changegroup part, '
524 529 + b'head: %s' % hex(head),
525 530 hint=b'report to administrator',
526 531 )
527 532 else:
528 533 raise error.Abort(b'unknown bundle type')
529 534 else:
530 535 parts = _rebundle(bundlerepo, bundleroots, head)
531 536
532 537 return parts
533 538
534 539
535 540 def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
536 541 heads = heads or []
537 542 # newheads are parents of roots of scratch bundles that were requested
538 543 newphases = {}
539 544 scratchbundles = []
540 545 newheads = []
541 546 scratchheads = []
542 547 nodestobundle = {}
543 548 allbundlestocleanup = []
544 549 try:
545 550 for head in heads:
546 551 if head not in repo.changelog.nodemap:
547 552 if head not in nodestobundle:
548 553 newbundlefile = common.downloadbundle(repo, head)
549 554 bundlepath = b"bundle:%s+%s" % (repo.root, newbundlefile)
550 555 bundlerepo = hg.repository(repo.ui, bundlepath)
551 556
552 557 allbundlestocleanup.append((bundlerepo, newbundlefile))
553 558 bundlerevs = set(_readbundlerevs(bundlerepo))
554 559 bundlecaps = _includefilelogstobundle(
555 560 bundlecaps, bundlerepo, bundlerevs, repo.ui
556 561 )
557 562 cl = bundlerepo.changelog
558 563 bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
559 564 for rev in bundlerevs:
560 565 node = cl.node(rev)
561 566 newphases[hex(node)] = str(phases.draft)
562 567 nodestobundle[node] = (
563 568 bundlerepo,
564 569 bundleroots,
565 570 newbundlefile,
566 571 )
567 572
568 573 scratchbundles.append(
569 574 _generateoutputparts(head, *nodestobundle[head])
570 575 )
571 576 newheads.extend(bundleroots)
572 577 scratchheads.append(head)
573 578 finally:
574 579 for bundlerepo, bundlefile in allbundlestocleanup:
575 580 bundlerepo.close()
576 581 try:
577 582 os.unlink(bundlefile)
578 583 except (IOError, OSError):
579 584 # if we can't cleanup the file then just ignore the error,
580 585 # no need to fail
581 586 pass
582 587
583 588 pullfrombundlestore = bool(scratchbundles)
584 589 wrappedchangegrouppart = False
585 590 wrappedlistkeys = False
586 591 oldchangegrouppart = exchange.getbundle2partsmapping[b'changegroup']
587 592 try:
588 593
589 594 def _changegrouppart(bundler, *args, **kwargs):
590 595 # Order is important here. First add the non-scratch part
591 596 # and only then add the parts with scratch bundles, because
592 597 # the non-scratch part contains the parents of the roots of scratch bundles.
593 598 result = oldchangegrouppart(bundler, *args, **kwargs)
594 599 for bundle in scratchbundles:
595 600 for part in bundle:
596 601 bundler.addpart(part)
597 602 return result
598 603
599 604 exchange.getbundle2partsmapping[b'changegroup'] = _changegrouppart
600 605 wrappedchangegrouppart = True
601 606
602 607 def _listkeys(orig, self, namespace):
603 608 origvalues = orig(self, namespace)
604 609 if namespace == b'phases' and pullfrombundlestore:
605 610 if origvalues.get(b'publishing') == b'True':
606 611 # Make repo non-publishing to preserve draft phase
607 612 del origvalues[b'publishing']
608 613 origvalues.update(newphases)
609 614 return origvalues
610 615
611 616 extensions.wrapfunction(
612 617 localrepo.localrepository, b'listkeys', _listkeys
613 618 )
614 619 wrappedlistkeys = True
615 620 heads = list((set(newheads) | set(heads)) - set(scratchheads))
616 621 result = orig(
617 622 repo, source, heads=heads, bundlecaps=bundlecaps, **kwargs
618 623 )
619 624 finally:
620 625 if wrappedchangegrouppart:
621 626 exchange.getbundle2partsmapping[b'changegroup'] = oldchangegrouppart
622 627 if wrappedlistkeys:
623 628 extensions.unwrapfunction(
624 629 localrepo.localrepository, b'listkeys', _listkeys
625 630 )
626 631 return result
627 632
628 633
629 634 def _lookupwrap(orig):
630 635 def _lookup(repo, proto, key):
631 636 localkey = encoding.tolocal(key)
632 637
633 638 if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
634 639 scratchnode = repo.bundlestore.index.getnode(localkey)
635 640 if scratchnode:
636 641 return b"%d %s\n" % (1, scratchnode)
637 642 else:
638 643 return b"%d %s\n" % (
639 644 0,
640 645 b'scratch branch %s not found' % localkey,
641 646 )
642 647 else:
643 648 try:
644 649 r = hex(repo.lookup(localkey))
645 650 return b"%d %s\n" % (1, r)
646 651 except Exception as inst:
647 652 if repo.bundlestore.index.getbundle(localkey):
648 653 return b"%d %s\n" % (1, localkey)
649 654 else:
650 655 r = stringutil.forcebytestr(inst)
651 656 return b"%d %s\n" % (0, r)
652 657
653 658 return _lookup
654 659
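# The wrapped lookup keeps the wire protocol's b"<success> <payload>\n"
# response shape, e.g. (hypothetical values):
#
#   b"1 a3c52b9...\n"                      # hit: node (or scratch node)
#   b"0 scratch branch foo not found\n"    # miss on a scratch name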
655 660
656 661 def _pull(orig, ui, repo, source=b"default", **opts):
657 662 opts = pycompat.byteskwargs(opts)
658 663 # Copy paste from `pull` command
659 664 source, branches = hg.parseurl(ui.expandpath(source), opts.get(b'branch'))
660 665
661 666 scratchbookmarks = {}
662 667 unfi = repo.unfiltered()
663 668 unknownnodes = []
664 669 for rev in opts.get(b'rev', []):
665 670 if rev not in unfi:
666 671 unknownnodes.append(rev)
667 672 if opts.get(b'bookmark'):
668 673 bookmarks = []
669 674 revs = opts.get(b'rev') or []
670 675 for bookmark in opts.get(b'bookmark'):
671 676 if _scratchbranchmatcher(bookmark):
672 677 # rev is not known yet
673 678 # it will be fetched with listkeyspatterns next
674 679 scratchbookmarks[bookmark] = b'REVTOFETCH'
675 680 else:
676 681 bookmarks.append(bookmark)
677 682
678 683 if scratchbookmarks:
679 684 other = hg.peer(repo, opts, source)
680 685 fetchedbookmarks = other.listkeyspatterns(
681 686 b'bookmarks', patterns=scratchbookmarks
682 687 )
683 688 for bookmark in scratchbookmarks:
684 689 if bookmark not in fetchedbookmarks:
685 690 raise error.Abort(
686 691 b'remote bookmark %s not found!' % bookmark
687 692 )
688 693 scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
689 694 revs.append(fetchedbookmarks[bookmark])
690 695 opts[b'bookmark'] = bookmarks
691 696 opts[b'rev'] = revs
692 697
693 698 if scratchbookmarks or unknownnodes:
694 699 # Set anyincoming to True
695 700 extensions.wrapfunction(
696 701 discovery, b'findcommonincoming', _findcommonincoming
697 702 )
698 703 try:
699 704 # Remote scratch bookmarks will be deleted because remotenames doesn't
700 705 # know about them. Let's save them before the pull and restore them after
701 706 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
702 707 result = orig(ui, repo, source, **pycompat.strkwargs(opts))
703 708 # TODO(stash): race condition is possible
704 709 # if scratch bookmarks were updated right after orig.
705 710 # But that's unlikely and shouldn't be harmful.
706 711 if common.isremotebooksenabled(ui):
707 712 remotescratchbookmarks.update(scratchbookmarks)
708 713 _saveremotebookmarks(repo, remotescratchbookmarks, source)
709 714 else:
710 715 _savelocalbookmarks(repo, scratchbookmarks)
711 716 return result
712 717 finally:
713 718 if scratchbookmarks:
714 719 extensions.unwrapfunction(discovery, b'findcommonincoming')
715 720
716 721
717 722 def _readscratchremotebookmarks(ui, repo, other):
718 723 if common.isremotebooksenabled(ui):
719 724 remotenamesext = extensions.find(b'remotenames')
720 725 remotepath = remotenamesext.activepath(repo.ui, other)
721 726 result = {}
722 727 # Let's refresh remotenames to make sure they are up to date.
723 728 # It seems that `repo.names['remotebookmarks']` may return stale bookmarks,
724 729 # which results in deleting scratch bookmarks. Our best guess at how to
725 730 # fix it is to use `clearnames()`
726 731 repo._remotenames.clearnames()
727 732 for remotebookmark in repo.names[b'remotebookmarks'].listnames(repo):
728 733 path, bookname = remotenamesext.splitremotename(remotebookmark)
729 734 if path == remotepath and _scratchbranchmatcher(bookname):
730 735 nodes = repo.names[b'remotebookmarks'].nodes(
731 736 repo, remotebookmark
732 737 )
733 738 if nodes:
734 739 result[bookname] = hex(nodes[0])
735 740 return result
736 741 else:
737 742 return {}
738 743
739 744
740 745 def _saveremotebookmarks(repo, newbookmarks, remote):
741 746 remotenamesext = extensions.find(b'remotenames')
742 747 remotepath = remotenamesext.activepath(repo.ui, remote)
743 748 branches = collections.defaultdict(list)
744 749 bookmarks = {}
745 750 remotenames = remotenamesext.readremotenames(repo)
746 751 for hexnode, nametype, remote, rname in remotenames:
747 752 if remote != remotepath:
748 753 continue
749 754 if nametype == b'bookmarks':
750 755 if rname in newbookmarks:
751 756 # This is possible if we have a normal bookmark that matches the
752 757 # scratch branch pattern. In this case just use the current
753 758 # bookmark node
754 759 del newbookmarks[rname]
755 760 bookmarks[rname] = hexnode
756 761 elif nametype == b'branches':
757 762 # saveremotenames expects 20 byte binary nodes for branches
758 763 branches[rname].append(bin(hexnode))
759 764
760 765 for bookmark, hexnode in pycompat.iteritems(newbookmarks):
761 766 bookmarks[bookmark] = hexnode
762 767 remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)
763 768
764 769
765 770 def _savelocalbookmarks(repo, bookmarks):
766 771 if not bookmarks:
767 772 return
768 773 with repo.wlock(), repo.lock(), repo.transaction(b'bookmark') as tr:
769 774 changes = []
770 775 for scratchbook, node in pycompat.iteritems(bookmarks):
771 776 changectx = repo[node]
772 777 changes.append((scratchbook, changectx.node()))
773 778 repo._bookmarks.applychanges(repo, tr, changes)
774 779
775 780
776 781 def _findcommonincoming(orig, *args, **kwargs):
777 782 common, inc, remoteheads = orig(*args, **kwargs)
778 783 return common, True, remoteheads
779 784
780 785
781 786 def _push(orig, ui, repo, dest=None, *args, **opts):
782 787 opts = pycompat.byteskwargs(opts)
783 788 bookmark = opts.get(b'bookmark')
784 789 # we only support pushing one infinitepush bookmark at once
785 790 if len(bookmark) == 1:
786 791 bookmark = bookmark[0]
787 792 else:
788 793 bookmark = b''
789 794
790 795 oldphasemove = None
791 796 overrides = {(experimental, configbookmark): bookmark}
792 797
793 798 with ui.configoverride(overrides, b'infinitepush'):
794 799 scratchpush = opts.get(b'bundle_store')
795 800 if _scratchbranchmatcher(bookmark):
796 801 scratchpush = True
797 802 # bundle2 can be sent back after push (for example, bundle2
798 803 # containing `pushkey` part to update bookmarks)
799 804 ui.setconfig(experimental, b'bundle2.pushback', True)
800 805
801 806 if scratchpush:
802 807 # this is an infinitepush; we don't want the bookmark to be applied,
803 808 # rather it should be stored in the bundlestore
804 809 opts[b'bookmark'] = []
805 810 ui.setconfig(experimental, configscratchpush, True)
806 811 oldphasemove = extensions.wrapfunction(
807 812 exchange, b'_localphasemove', _phasemove
808 813 )
809 814 # Copy-paste from `push` command
810 815 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
811 816 if not path:
812 817 raise error.Abort(
813 818 _(b'default repository not configured!'),
814 819 hint=_(b"see 'hg help config.paths'"),
815 820 )
816 821 destpath = path.pushloc or path.loc
817 822 # Remote scratch bookmarks will be deleted because remotenames doesn't
818 823 # know about them. Let's save them before the push and restore them after
819 824 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
820 825 result = orig(ui, repo, dest, *args, **pycompat.strkwargs(opts))
821 826 if common.isremotebooksenabled(ui):
822 827 if bookmark and scratchpush:
823 828 other = hg.peer(repo, opts, destpath)
824 829 fetchedbookmarks = other.listkeyspatterns(
825 830 b'bookmarks', patterns=[bookmark]
826 831 )
827 832 remotescratchbookmarks.update(fetchedbookmarks)
828 833 _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
829 834 if oldphasemove:
830 835 exchange._localphasemove = oldphasemove
831 836 return result
832 837
833 838
834 839 def _deleteinfinitepushbookmarks(ui, repo, path, names):
835 840 """Prune remote names by removing the bookmarks we don't want anymore,
836 841 then writing the result back to disk
837 842 """
838 843 remotenamesext = extensions.find(b'remotenames')
839 844
840 845 # remotename format is:
841 846 # (node, nametype ("branches" or "bookmarks"), remote, name)
842 847 nametype_idx = 1
843 848 remote_idx = 2
844 849 name_idx = 3
845 850 remotenames = [
846 851 remotename
847 852 for remotename in remotenamesext.readremotenames(repo)
848 853 if remotename[remote_idx] == path
849 854 ]
850 855 remote_bm_names = [
851 856 remotename[name_idx]
852 857 for remotename in remotenames
853 858 if remotename[nametype_idx] == b"bookmarks"
854 859 ]
855 860
856 861 for name in names:
857 862 if name not in remote_bm_names:
858 863 raise error.Abort(
859 864 _(
860 865 b"infinitepush bookmark '{}' does not exist "
861 866 b"in path '{}'"
862 867 ).format(name, path)
863 868 )
864 869
865 870 bookmarks = {}
866 871 branches = collections.defaultdict(list)
867 872 for node, nametype, remote, name in remotenames:
868 873 if nametype == b"bookmarks" and name not in names:
869 874 bookmarks[name] = node
870 875 elif nametype == b"branches":
871 876 # saveremotenames wants binary nodes for branches
872 877 branches[name].append(bin(node))
873 878
874 879 remotenamesext.saveremotenames(repo, path, branches, bookmarks)
875 880
876 881
877 882 def _phasemove(orig, pushop, nodes, phase=phases.public):
878 883 """prevent commits from being marked public
879 884
880 885 Since these are going to a scratch branch, they aren't really being
881 886 published."""
882 887
883 888 if phase != phases.public:
884 889 orig(pushop, nodes, phase)
885 890
886 891
887 892 @exchange.b2partsgenerator(scratchbranchparttype)
888 893 def partgen(pushop, bundler):
889 894 bookmark = pushop.ui.config(experimental, configbookmark)
890 895 scratchpush = pushop.ui.configbool(experimental, configscratchpush)
891 896 if b'changesets' in pushop.stepsdone or not scratchpush:
892 897 return
893 898
894 899 if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
895 900 return
896 901
897 902 pushop.stepsdone.add(b'changesets')
898 903 if not pushop.outgoing.missing:
899 904 pushop.ui.status(_(b'no changes found\n'))
900 905 pushop.cgresult = 0
901 906 return
902 907
903 908 # This parameter tells the server that the following bundle is an
904 909 # infinitepush. This lets it switch the part processing to our infinitepush
905 910 # code path.
906 911 bundler.addparam(b"infinitepush", b"True")
907 912
908 913 scratchparts = bundleparts.getscratchbranchparts(
909 914 pushop.repo, pushop.remote, pushop.outgoing, pushop.ui, bookmark
910 915 )
911 916
912 917 for scratchpart in scratchparts:
913 918 bundler.addpart(scratchpart)
914 919
915 920 def handlereply(op):
916 921 # server either succeeds or aborts; no code to read
917 922 pushop.cgresult = 1
918 923
919 924 return handlereply
920 925
921 926
922 927 bundle2.capabilities[bundleparts.scratchbranchparttype] = ()
923 928
924 929
925 930 def _getrevs(bundle, oldnode, force, bookmark):
926 931 b'extracts and validates the revs to be imported'
927 932 revs = [bundle[r] for r in bundle.revs(b'sort(bundle())')]
928 933
929 934 # new bookmark
930 935 if oldnode is None:
931 936 return revs
932 937
933 938 # Fast forward update
934 939 if oldnode in bundle and list(bundle.set(b'bundle() & %s::', oldnode)):
935 940 return revs
936 941
937 942 return revs
938 943
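# Note: as written, every branch of _getrevs returns `revs` unchanged; the
# new-bookmark and fast-forward checks distinguish the cases, but `force`
# is unused and non-fast-forward pushes are not rejected here.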
939 944
940 945 @contextlib.contextmanager
941 946 def logservicecall(logger, service, **kwargs):
942 947 start = time.time()
943 948 logger(service, eventtype=b'start', **kwargs)
944 949 try:
945 950 yield
946 951 logger(
947 952 service,
948 953 eventtype=b'success',
949 954 elapsedms=(time.time() - start) * 1000,
950 955 **kwargs
951 956 )
952 957 except Exception as e:
953 958 logger(
954 959 service,
955 960 eventtype=b'failure',
956 961 elapsedms=(time.time() - start) * 1000,
957 962 errormsg=str(e),
958 963 **kwargs
959 964 )
960 965 raise
961 966
962 967
963 968 def _getorcreateinfinitepushlogger(op):
964 969 logger = op.records[b'infinitepushlogger']
965 970 if not logger:
966 971 ui = op.repo.ui
967 972 try:
968 973 username = procutil.getuser()
969 974 except Exception:
970 975 username = b'unknown'
971 976 # Generate random request id to be able to find all logged entries
972 977 # for the same request. Since requestid is pseudo-generated it may
973 978 # not be unique, but we assume that (hostname, username, requestid)
974 979 # is unique.
975 980 random.seed()
976 981 requestid = random.randint(0, 2000000000)
977 982 hostname = socket.gethostname()
978 983 logger = functools.partial(
979 984 ui.log,
980 985 b'infinitepush',
981 986 user=username,
982 987 requestid=requestid,
983 988 hostname=hostname,
984 989 reponame=ui.config(b'infinitepush', b'reponame'),
985 990 )
986 991 op.records.add(b'infinitepushlogger', logger)
987 992 else:
988 993 logger = logger[0]
989 994 return logger
990 995
991 996
992 997 def storetobundlestore(orig, repo, op, unbundler):
993 998 """stores the incoming bundle coming from the push command in the bundlestore
994 999 instead of applying it to the revlogs"""
995 1000
996 1001 repo.ui.status(_(b"storing changesets on the bundlestore\n"))
997 1002 bundler = bundle2.bundle20(repo.ui)
998 1003
999 1004 # processing each part and storing it in bundler
1000 1005 with bundle2.partiterator(repo, op, unbundler) as parts:
1001 1006 for part in parts:
1002 1007 bundlepart = None
1003 1008 if part.type == b'replycaps':
1004 1009 # This configures the current operation to allow reply parts.
1005 1010 bundle2._processpart(op, part)
1006 1011 else:
1007 1012 bundlepart = bundle2.bundlepart(part.type, data=part.read())
1008 1013 for key, value in pycompat.iteritems(part.params):
1009 1014 bundlepart.addparam(key, value)
1010 1015
1011 1016 # Certain parts require a response
1012 1017 if part.type in (b'pushkey', b'changegroup'):
1013 1018 if op.reply is not None:
1014 1019 rpart = op.reply.newpart(b'reply:%s' % part.type)
1015 1020 rpart.addparam(
1016 1021 b'in-reply-to', b'%d' % part.id, mandatory=False
1017 1022 )
1018 1023 rpart.addparam(b'return', b'1', mandatory=False)
1019 1024
1020 1025 op.records.add(part.type, {b'return': 1,})
1021 1026 if bundlepart:
1022 1027 bundler.addpart(bundlepart)
1023 1028
1024 1029 # storing the bundle in the bundlestore
1025 1030 buf = util.chunkbuffer(bundler.getchunks())
1026 1031 fd, bundlefile = pycompat.mkstemp()
1027 1032 try:
1028 1033 try:
1029 1034 fp = os.fdopen(fd, r'wb')
1030 1035 fp.write(buf.read())
1031 1036 finally:
1032 1037 fp.close()
1033 1038 storebundle(op, {}, bundlefile)
1034 1039 finally:
1035 1040 try:
1036 1041 os.unlink(bundlefile)
1037 1042 except Exception:
1038 1043 # we would rather see the original exception
1039 1044 pass
1040 1045
1041 1046
1042 1047 def processparts(orig, repo, op, unbundler):
1043 1048
1044 1049 # make sure we don't wrap processparts in case of `hg unbundle`
1045 1050 if op.source == b'unbundle':
1046 1051 return orig(repo, op, unbundler)
1047 1052
1048 1053 # this server routes each push to bundle store
1049 1054 if repo.ui.configbool(b'infinitepush', b'pushtobundlestore'):
1050 1055 return storetobundlestore(orig, repo, op, unbundler)
1051 1056
1052 1057 if unbundler.params.get(b'infinitepush') != b'True':
1053 1058 return orig(repo, op, unbundler)
1054 1059
1055 1060 handleallparts = repo.ui.configbool(b'infinitepush', b'storeallparts')
1056 1061
1057 1062 bundler = bundle2.bundle20(repo.ui)
1058 1063 cgparams = None
1059 1064 with bundle2.partiterator(repo, op, unbundler) as parts:
1060 1065 for part in parts:
1061 1066 bundlepart = None
1062 1067 if part.type == b'replycaps':
1063 1068 # This configures the current operation to allow reply parts.
1064 1069 bundle2._processpart(op, part)
1065 1070 elif part.type == bundleparts.scratchbranchparttype:
1066 1071 # Scratch branch parts need to be converted to normal
1067 1072 # changegroup parts, and the extra parameters stored for later
1068 1073 # when we upload to the store. Eventually those parameters will
1069 1074 # be put on the actual bundle instead of this part, then we can
1070 1075 # send a vanilla changegroup instead of the scratchbranch part.
1071 1076 cgversion = part.params.get(b'cgversion', b'01')
1072 1077 bundlepart = bundle2.bundlepart(
1073 1078 b'changegroup', data=part.read()
1074 1079 )
1075 1080 bundlepart.addparam(b'version', cgversion)
1076 1081 cgparams = part.params
1077 1082
1078 1083 # If we're not dumping all parts into the new bundle, we need to
1079 1084 # alert the future pushkey and phase-heads handler to skip
1080 1085 # the part.
1081 1086 if not handleallparts:
1082 1087 op.records.add(
1083 1088 scratchbranchparttype + b'_skippushkey', True
1084 1089 )
1085 1090 op.records.add(
1086 1091 scratchbranchparttype + b'_skipphaseheads', True
1087 1092 )
1088 1093 else:
1089 1094 if handleallparts:
1090 1095 # Ideally we would not process any parts, and instead just
1091 1096 # forward them to the bundle for storage, but since this
1092 1097 # differs from previous behavior, we need to put it behind a
1093 1098 # config flag for incremental rollout.
1094 1099 bundlepart = bundle2.bundlepart(part.type, data=part.read())
1095 1100 for key, value in pycompat.iteritems(part.params):
1096 1101 bundlepart.addparam(key, value)
1097 1102
1098 1103 # Certain parts require a response
1099 1104 if part.type == b'pushkey':
1100 1105 if op.reply is not None:
1101 1106 rpart = op.reply.newpart(b'reply:pushkey')
1102 1107 rpart.addparam(
1103 1108 b'in-reply-to', str(part.id), mandatory=False
1104 1109 )
1105 1110 rpart.addparam(b'return', b'1', mandatory=False)
1106 1111 else:
1107 1112 bundle2._processpart(op, part)
1108 1113
1109 1114 if handleallparts:
1110 1115 op.records.add(part.type, {b'return': 1,})
1111 1116 if bundlepart:
1112 1117 bundler.addpart(bundlepart)
1113 1118
1114 1119 # If commits were sent, store them
1115 1120 if cgparams:
1116 1121 buf = util.chunkbuffer(bundler.getchunks())
1117 1122 fd, bundlefile = pycompat.mkstemp()
1118 1123 try:
1119 1124 try:
1120 1125 fp = os.fdopen(fd, r'wb')
1121 1126 fp.write(buf.read())
1122 1127 finally:
1123 1128 fp.close()
1124 1129 storebundle(op, cgparams, bundlefile)
1125 1130 finally:
1126 1131 try:
1127 1132 os.unlink(bundlefile)
1128 1133 except Exception:
1129 1134 # we would rather see the original exception
1130 1135 pass
1131 1136
1132 1137
1133 1138 def storebundle(op, params, bundlefile):
1134 1139 log = _getorcreateinfinitepushlogger(op)
1135 1140 parthandlerstart = time.time()
1136 1141 log(scratchbranchparttype, eventtype=b'start')
1137 1142 index = op.repo.bundlestore.index
1138 1143 store = op.repo.bundlestore.store
1139 1144 op.records.add(scratchbranchparttype + b'_skippushkey', True)
1140 1145
1141 1146 bundle = None
1142 1147 try: # guards bundle
1143 1148 bundlepath = b"bundle:%s+%s" % (op.repo.root, bundlefile)
1144 1149 bundle = hg.repository(op.repo.ui, bundlepath)
1145 1150
1146 1151 bookmark = params.get(b'bookmark')
1147 1152 bookprevnode = params.get(b'bookprevnode', b'')
1148 1153 force = params.get(b'force')
1149 1154
1150 1155 if bookmark:
1151 1156 oldnode = index.getnode(bookmark)
1152 1157 else:
1153 1158 oldnode = None
1154 1159 bundleheads = bundle.revs(b'heads(bundle())')
1155 1160 if bookmark and len(bundleheads) > 1:
1156 1161 raise error.Abort(
1157 1162 _(b'cannot push more than one head to a scratch branch')
1158 1163 )
1159 1164
1160 1165 revs = _getrevs(bundle, oldnode, force, bookmark)
1161 1166
1162 1167 # Notify the user of what is being pushed
1163 1168 plural = b's' if len(revs) > 1 else b''
1164 1169 op.repo.ui.warn(_(b"pushing %d commit%s:\n") % (len(revs), plural))
1165 1170 maxoutput = 10
1166 1171 for i in range(0, min(len(revs), maxoutput)):
1167 1172 firstline = bundle[revs[i]].description().split(b'\n')[0][:50]
1168 1173 op.repo.ui.warn(b" %s %s\n" % (revs[i], firstline))
1169 1174
1170 1175 if len(revs) > maxoutput + 1:
1171 1176 op.repo.ui.warn(b" ...\n")
1172 1177 firstline = bundle[revs[-1]].description().split(b'\n')[0][:50]
1173 1178 op.repo.ui.warn(b" %s %s\n" % (revs[-1], firstline))
1174 1179
1175 1180 nodesctx = [bundle[rev] for rev in revs]
1176 1181 inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
1177 1182 if bundleheads:
1178 1183 newheadscount = sum(not inindex(rev) for rev in bundleheads)
1179 1184 else:
1180 1185 newheadscount = 0
1181 1186 # If there's a bookmark specified, there should be only one head,
1182 1187 # so we choose the last node, which will be that head.
1183 1188 # If a bug or malicious client allows there to be a bookmark
1184 1189 # with multiple heads, we will place the bookmark on the last head.
1185 1190 bookmarknode = nodesctx[-1].hex() if nodesctx else None
1186 1191 key = None
1187 1192 if newheadscount:
1188 1193 with open(bundlefile, b'rb') as f:
1189 1194 bundledata = f.read()
1190 1195 with logservicecall(
1191 1196 log, b'bundlestore', bundlesize=len(bundledata)
1192 1197 ):
1193 1198 bundlesizelimit = 100 * 1024 * 1024 # 100 MB
1194 1199 if len(bundledata) > bundlesizelimit:
1195 1200 error_msg = (
1196 1201 b'bundle is too big: %d bytes. '
1197 1202 + b'max allowed size is 100 MB'
1198 1203 )
1199 1204 raise error.Abort(error_msg % (len(bundledata),))
1200 1205 key = store.write(bundledata)
1201 1206
1202 1207 with logservicecall(log, b'index', newheadscount=newheadscount), index:
1203 1208 if key:
1204 1209 index.addbundle(key, nodesctx)
1205 1210 if bookmark:
1206 1211 index.addbookmark(bookmark, bookmarknode)
1207 1212 _maybeaddpushbackpart(
1208 1213 op, bookmark, bookmarknode, bookprevnode, params
1209 1214 )
1210 1215 log(
1211 1216 scratchbranchparttype,
1212 1217 eventtype=b'success',
1213 1218 elapsedms=(time.time() - parthandlerstart) * 1000,
1214 1219 )
1215 1220
1216 1221 except Exception as e:
1217 1222 log(
1218 1223 scratchbranchparttype,
1219 1224 eventtype=b'failure',
1220 1225 elapsedms=(time.time() - parthandlerstart) * 1000,
1221 1226 errormsg=str(e),
1222 1227 )
1223 1228 raise
1224 1229 finally:
1225 1230 if bundle:
1226 1231 bundle.close()
1227 1232
1228 1233
1229 1234 @bundle2.parthandler(
1230 1235 scratchbranchparttype,
1231 1236 (
1232 1237 b'bookmark',
1233 1238 b'bookprevnode',
1234 1239 b'force',
1235 1240 b'pushbackbookmarks',
1236 1241 b'cgversion',
1237 1242 ),
1238 1243 )
1239 1244 def bundle2scratchbranch(op, part):
1240 1245 '''unbundle a bundle2 part containing a changegroup to store'''
1241 1246
1242 1247 bundler = bundle2.bundle20(op.repo.ui)
1243 1248 cgversion = part.params.get(b'cgversion', b'01')
1244 1249 cgpart = bundle2.bundlepart(b'changegroup', data=part.read())
1245 1250 cgpart.addparam(b'version', cgversion)
1246 1251 bundler.addpart(cgpart)
1247 1252 buf = util.chunkbuffer(bundler.getchunks())
1248 1253
1249 1254 fd, bundlefile = pycompat.mkstemp()
1250 1255 try:
1251 1256 try:
1252 1257 fp = os.fdopen(fd, r'wb')
1253 1258 fp.write(buf.read())
1254 1259 finally:
1255 1260 fp.close()
1256 1261 storebundle(op, part.params, bundlefile)
1257 1262 finally:
1258 1263 try:
1259 1264 os.unlink(bundlefile)
1260 1265 except OSError as e:
1261 1266 if e.errno != errno.ENOENT:
1262 1267 raise
1263 1268
1264 1269 return 1
1265 1270
1266 1271
1267 1272 def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
1268 1273 if params.get(b'pushbackbookmarks'):
1269 1274 if op.reply and b'pushback' in op.reply.capabilities:
1270 1275 params = {
1271 1276 b'namespace': b'bookmarks',
1272 1277 b'key': bookmark,
1273 1278 b'new': newnode,
1274 1279 b'old': oldnode,
1275 1280 }
1276 1281 op.reply.newpart(
1277 1282 b'pushkey', mandatoryparams=pycompat.iteritems(params)
1278 1283 )
1279 1284
1280 1285
1281 1286 def bundle2pushkey(orig, op, part):
1282 1287 '''Wrapper of bundle2.handlepushkey()
1283 1288
1284 1289 The only goal is to skip calling the original function if the flag is set.
1285 1290 It is set when an infinitepush push is happening.
1286 1291 '''
1287 1292 if op.records[scratchbranchparttype + b'_skippushkey']:
1288 1293 if op.reply is not None:
1289 1294 rpart = op.reply.newpart(b'reply:pushkey')
1290 1295 rpart.addparam(b'in-reply-to', str(part.id), mandatory=False)
1291 1296 rpart.addparam(b'return', b'1', mandatory=False)
1292 1297 return 1
1293 1298
1294 1299 return orig(op, part)
1295 1300
1296 1301
1297 1302 def bundle2handlephases(orig, op, part):
1298 1303 '''Wrapper of bundle2.handlephases()
1299 1304
1300 1305 The only goal is to skip calling the original function if the flag is set.
1301 1306 It is set when an infinitepush push is happening.
1302 1307 '''
1303 1308
1304 1309 if op.records[scratchbranchparttype + b'_skipphaseheads']:
1305 1310 return
1306 1311
1307 1312 return orig(op, part)
1308 1313
1309 1314
1310 1315 def _asyncsavemetadata(root, nodes):
1311 1316 '''starts a separate process that fills metadata for the nodes
1312 1317
1313 1318 This function creates a separate process and doesn't wait for its
1314 1319 completion. This was done to avoid slowing down pushes.
1315 1320 '''
1316 1321
1317 1322 maxnodes = 50
1318 1323 if len(nodes) > maxnodes:
1319 1324 return
1320 1325 nodesargs = []
1321 1326 for node in nodes:
1322 1327 nodesargs.append(b'--node')
1323 1328 nodesargs.append(node)
1324 1329 with open(os.devnull, b'w+b') as devnull:
1325 1330 cmdline = [
1326 1331 util.hgexecutable(),
1327 1332 b'debugfillinfinitepushmetadata',
1328 1333 b'-R',
1329 1334 root,
1330 1335 ] + nodesargs
1331 1336 # Process will run in background. We don't care about the return code
1332 1337 subprocess.Popen(
1333 1338 pycompat.rapply(procutil.tonativestr, cmdline),
1334 1339 close_fds=True,
1335 1340 shell=False,
1336 1341 stdin=devnull,
1337 1342 stdout=devnull,
1338 1343 stderr=devnull,
1339 1344 )
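# For reference, the background command assembled above is equivalent to
# (node values are illustrative):
#
#   hg debugfillinfinitepushmetadata -R /path/to/repo \
#       --node <hex1> --node <hex2>
#
# with stdin/stdout/stderr redirected to os.devnull; the return code is
# intentionally ignored.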