##// END OF EJS Templates
py3: fix exception message encoding in infinitepush...
Emmanuel Leblond -
r43680:2ded39ef stable
parent child Browse files
Show More
@@ -1,1344 +1,1344 b''
1 1 # Infinite push
2 2 #
3 3 # Copyright 2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """ store some pushes in a remote blob store on the server (EXPERIMENTAL)
8 8
9 9 IMPORTANT: if you use this extension, please contact
10 10 mercurial-devel@mercurial-scm.org ASAP. This extension is believed to
11 11 be unused and barring learning of users of this functionality, we will
12 12 delete this code at the end of 2020.
13 13
14 14 [infinitepush]
15 15 # Server-side and client-side option. Pattern of the infinitepush bookmark
16 16 branchpattern = PATTERN
17 17
18 18 # Server or client
19 19 server = False
20 20
21 21 # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
22 22 indextype = disk
23 23
24 24 # Server-side option. Used only if indextype=sql.
25 25 # Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
26 26 sqlhost = IP:PORT:DB_NAME:USER:PASSWORD
27 27
28 28 # Server-side option. Used only if indextype=disk.
29 29 # Filesystem path to the index store
30 30 indexpath = PATH
31 31
32 32 # Server-side option. Possible values: 'disk' or 'external'
33 33 # Fails if not set
34 34 storetype = disk
35 35
36 36 # Server-side option.
37 37 # Path to the binary that will save bundle to the bundlestore
38 38 # Formatted cmd line will be passed to it (see `put_args`)
39 39 put_binary = put
40 40
41 41 # Server-side option. Used only if storetype=external.
42 42 # Format cmd-line string for put binary. Placeholder: {filename}
43 43 put_args = {filename}
44 44
45 45 # Server-side option.
46 46 # Path to the binary that get bundle from the bundlestore.
47 47 # Formatted cmd line will be passed to it (see `get_args`)
48 48 get_binary = get
49 49
50 50 # Server-side option. Used only if storetype=external.
51 51 # Format cmd-line string for get binary. Placeholders: {filename} {handle}
52 52 get_args = {filename} {handle}
53 53
54 54 # Server-side option
55 55 logfile = FILE
56 56
57 57 # Server-side option
58 58 loglevel = DEBUG
59 59
60 60 # Server-side option. Used only if indextype=sql.
61 61 # Sets mysql wait_timeout option.
62 62 waittimeout = 300
63 63
64 64 # Server-side option. Used only if indextype=sql.
65 65 # Sets mysql innodb_lock_wait_timeout option.
66 66 locktimeout = 120
67 67
68 68 # Server-side option. Used only if indextype=sql.
69 69 # Name of the repository
70 70 reponame = ''
71 71
72 72 # Client-side option. Used by --list-remote option. List of remote scratch
73 73 # patterns to list if no patterns are specified.
74 74 defaultremotepatterns = ['*']
75 75
76 76 # Instructs infinitepush to forward all received bundle2 parts to the
77 77 # bundle for storage. Defaults to False.
78 78 storeallparts = True
79 79
80 80 # routes each incoming push to the bundlestore. defaults to False
81 81 pushtobundlestore = True
82 82
83 83 [remotenames]
84 84 # Client-side option
85 85 # This option should be set only if remotenames extension is enabled.
86 86 # Whether remote bookmarks are tracked by remotenames extension.
87 87 bookmarks = True
88 88 """
89 89
90 90 from __future__ import absolute_import
91 91
92 92 import collections
93 93 import contextlib
94 94 import errno
95 95 import functools
96 96 import logging
97 97 import os
98 98 import random
99 99 import re
100 100 import socket
101 101 import subprocess
102 102 import time
103 103
104 104 from mercurial.node import (
105 105 bin,
106 106 hex,
107 107 )
108 108
109 109 from mercurial.i18n import _
110 110
111 111 from mercurial.pycompat import (
112 112 getattr,
113 113 open,
114 114 )
115 115
116 116 from mercurial.utils import (
117 117 procutil,
118 118 stringutil,
119 119 )
120 120
121 121 from mercurial import (
122 122 bundle2,
123 123 changegroup,
124 124 commands,
125 125 discovery,
126 126 encoding,
127 127 error,
128 128 exchange,
129 129 extensions,
130 130 hg,
131 131 localrepo,
132 132 phases,
133 133 pushkey,
134 134 pycompat,
135 135 registrar,
136 136 util,
137 137 wireprototypes,
138 138 wireprotov1peer,
139 139 wireprotov1server,
140 140 )
141 141
142 142 from . import (
143 143 bundleparts,
144 144 common,
145 145 )
146 146
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

# Registration of every config option this extension reads; see the
# module docstring for the meaning of each option.
configtable = {}
configitem = registrar.configitem(configtable)

configitem(
    b'infinitepush', b'server', default=False,
)
configitem(
    b'infinitepush', b'storetype', default=b'',
)
configitem(
    b'infinitepush', b'indextype', default=b'',
)
configitem(
    b'infinitepush', b'indexpath', default=b'',
)
configitem(
    b'infinitepush', b'storeallparts', default=False,
)
configitem(
    b'infinitepush', b'reponame', default=b'',
)
configitem(
    b'scratchbranch', b'storepath', default=b'',
)
configitem(
    b'infinitepush', b'branchpattern', default=b'',
)
configitem(
    b'infinitepush', b'pushtobundlestore', default=False,
)
configitem(
    b'experimental', b'server-bundlestore-bookmark', default=b'',
)
configitem(
    b'experimental', b'infinitepush-scratchpush', default=False,
)

# Shorthands for frequently used config section/option names.
experimental = b'experimental'
configbookmark = b'server-bundlestore-bookmark'
configscratchpush = b'infinitepush-scratchpush'

scratchbranchparttype = bundleparts.scratchbranchparttype
revsetpredicate = registrar.revsetpredicate()
templatekeyword = registrar.templatekeyword()
# Matches nothing until commonsetup() replaces it with the matcher
# compiled from infinitepush.branchpattern.
_scratchbranchmatcher = lambda x: False
_maybehash = re.compile(r'^[a-f0-9]+$').search
199 199
200 200
def _buildexternalbundlestore(ui):
    """Build a bundle store backed by external put/get binaries.

    Reads ``infinitepush.put_binary``/``put_args`` and
    ``infinitepush.get_binary``/``get_args`` from the config; aborts
    when either binary is not configured.
    """
    put_args = ui.configlist(b'infinitepush', b'put_args', [])
    put_binary = ui.config(b'infinitepush', b'put_binary')
    if not put_binary:
        # Wrap in _() for translation, consistent with the other Abort
        # messages in this file.
        raise error.Abort(_(b'put binary is not specified'))
    get_args = ui.configlist(b'infinitepush', b'get_args', [])
    get_binary = ui.config(b'infinitepush', b'get_binary')
    if not get_binary:
        raise error.Abort(_(b'get binary is not specified'))
    # Imported lazily: only servers with storetype=external need it.
    from . import store

    return store.externalbundlestore(put_binary, put_args, get_binary, get_args)
213 213
214 214
def _buildsqlindex(ui):
    """Instantiate the SQL-backed index from server-side configuration.

    Aborts when ``infinitepush.sqlhost`` or ``infinitepush.reponame``
    is missing.
    """
    connstring = ui.config(b'infinitepush', b'sqlhost')
    if not connstring:
        raise error.Abort(_(b'please set infinitepush.sqlhost'))
    # Connection string format: IP:PORT:DB_NAME:USER:PASSWORD
    host, port, dbname, user, password = connstring.split(b':')
    reponame = ui.config(b'infinitepush', b'reponame')
    if not reponame:
        raise error.Abort(_(b'please set infinitepush.reponame'))

    logfile = ui.config(b'infinitepush', b'logfile', b'')
    waittimeout = ui.configint(b'infinitepush', b'waittimeout', 300)
    locktimeout = ui.configint(b'infinitepush', b'locktimeout', 120)

    # Imported lazily: only servers with indextype=sql need it.
    from . import sqlindexapi

    return sqlindexapi.sqlindexapi(
        reponame,
        host,
        port,
        dbname,
        user,
        password,
        logfile,
        _getloglevel(ui),
        waittimeout=waittimeout,
        locktimeout=locktimeout,
    )
241 241
242 242
243 243 def _getloglevel(ui):
244 244 loglevel = ui.config(b'infinitepush', b'loglevel', b'DEBUG')
245 245 numeric_loglevel = getattr(logging, loglevel.upper(), None)
246 246 if not isinstance(numeric_loglevel, int):
247 247 raise error.Abort(_(b'invalid log level %s') % loglevel)
248 248 return numeric_loglevel
249 249
250 250
def _tryhoist(ui, remotebookmark):
    """Strip the remotenames 'hoist' prefix from a remote bookmark name.

    The remotenames extension has a 'hoist' config that allows remote
    bookmarks to be used without the remote path (e.g. 'hg update
    master' instead of 'hg update remote/master'); mirror that for
    infinitepush.  Returns the name unchanged when remotenames is
    disabled or the prefix does not match.
    """
    if not common.isremotebooksenabled(ui):
        return remotebookmark
    prefix = ui.config(b'remotenames', b'hoistedpeer') + b'/'
    if not remotebookmark.startswith(prefix):
        return remotebookmark
    return remotebookmark[len(prefix) :]
265 265
266 266
class bundlestore(object):
    """Server-side access to the bundle storage and its lookup index.

    The storage backend ('disk' or 'external') and the index backend
    ('disk' or 'sql') are selected via ``infinitepush.storetype`` and
    ``infinitepush.indextype``; any other value aborts.
    """

    def __init__(self, repo):
        self._repo = repo
        storetype = self._repo.ui.config(b'infinitepush', b'storetype')
        if storetype == b'disk':
            # Imported lazily: only needed on the server side.
            from . import store

            self.store = store.filebundlestore(self._repo.ui, self._repo)
        elif storetype == b'external':
            self.store = _buildexternalbundlestore(self._repo.ui)
        else:
            raise error.Abort(
                _(b'unknown infinitepush store type specified %s') % storetype
            )

        indextype = self._repo.ui.config(b'infinitepush', b'indextype')
        if indextype == b'disk':
            # Imported lazily: only needed on the server side.
            from . import fileindexapi

            self.index = fileindexapi.fileindexapi(self._repo)
        elif indextype == b'sql':
            self.index = _buildsqlindex(self._repo.ui)
        else:
            raise error.Abort(
                _(b'unknown infinitepush index type specified %s') % indextype
            )
293 293
294 294
295 295 def _isserver(ui):
296 296 return ui.configbool(b'infinitepush', b'server')
297 297
298 298
def reposetup(ui, repo):
    """Attach a bundlestore to local repositories when in server mode."""
    if not (_isserver(ui) and repo.local()):
        return
    repo.bundlestore = bundlestore(repo)
302 302
303 303
def extsetup(ui):
    """Extension entry point: shared setup, then server- or client-side."""
    commonsetup(ui)
    if not _isserver(ui):
        clientextsetup(ui)
    else:
        serverextsetup(ui)
310 310
311 311
def commonsetup(ui):
    # Setup shared by client and server: register the wire protocol
    # command used to look up scratch bookmarks by pattern, and compile
    # the configured scratch-branch name matcher.
    wireprotov1server.commands[b'listkeyspatterns'] = (
        wireprotolistkeyspatterns,
        b'namespace patterns',
    )
    scratchbranchpat = ui.config(b'infinitepush', b'branchpattern')
    if scratchbranchpat:
        global _scratchbranchmatcher
        # stringmatcher returns (kind, pattern, matcher); only the
        # matcher callable is kept, replacing the default
        # match-nothing lambda.
        kind, pat, _scratchbranchmatcher = stringutil.stringmatcher(
            scratchbranchpat
        )
323 323
324 324
def serverextsetup(ui):
    # Server-side setup: intercept pushkey and phase-heads bundle2
    # parts, and wrap the listkeys/lookup/getbundlechunks entry points
    # so scratch bookmarks and bundles are served from the bundlestore.
    origpushkeyhandler = bundle2.parthandlermapping[b'pushkey']

    def newpushkeyhandler(*args, **kwargs):
        bundle2pushkey(origpushkeyhandler, *args, **kwargs)

    # bundle2 dispatch reads .params off the handler; copy it over so
    # the wrapper advertises the same part parameters.
    newpushkeyhandler.params = origpushkeyhandler.params
    bundle2.parthandlermapping[b'pushkey'] = newpushkeyhandler

    orighandlephasehandler = bundle2.parthandlermapping[b'phase-heads']
    newphaseheadshandler = lambda *args, **kwargs: bundle2handlephases(
        orighandlephasehandler, *args, **kwargs
    )
    newphaseheadshandler.params = orighandlephasehandler.params
    bundle2.parthandlermapping[b'phase-heads'] = newphaseheadshandler

    extensions.wrapfunction(
        localrepo.localrepository, b'listkeys', localrepolistkeys
    )
    # Let the 'lookup' wire command resolve scratch bookmarks too.
    wireprotov1server.commands[b'lookup'] = (
        _lookupwrap(wireprotov1server.commands[b'lookup'][0]),
        b'key',
    )
    extensions.wrapfunction(exchange, b'getbundlechunks', getbundlechunks)

    extensions.wrapfunction(bundle2, b'processparts', processparts)
351 351
352 352
def clientextsetup(ui):
    # Client-side setup: add the --bundle-store flag, wrap push/pull,
    # relax the new-heads check, and reorder bundle2 part generation.
    entry = extensions.wrapcommand(commands.table, b'push', _push)

    entry[1].append(
        (
            b'',
            b'bundle-store',
            None,
            _(b'force push to go to bundle store (EXPERIMENTAL)'),
        )
    )

    extensions.wrapcommand(commands.table, b'pull', _pull)

    extensions.wrapfunction(discovery, b'checkheads', _checkheads)

    wireprotov1peer.wirepeer.listkeyspatterns = listkeyspatterns

    # Move the scratchbranch part generator ahead of 'changeset' so the
    # scratch part is generated first during push.
    partorder = exchange.b2partsgenorder
    index = partorder.index(b'changeset')
    partorder.insert(
        index, partorder.pop(partorder.index(scratchbranchparttype))
    )
376 376
377 377
def _checkheads(orig, pushop):
    """Skip discovery's new-head check for scratch pushes."""
    if not pushop.ui.configbool(experimental, configscratchpush, False):
        return orig(pushop)
    # Scratch pushes go to the bundlestore; head checking does not apply.
    return None
382 382
383 383
def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
    """Server half of the 'listkeyspatterns' wire protocol command."""
    decoded = wireprototypes.decodelist(patterns)
    keys = repo.listkeys(encoding.tolocal(namespace), decoded)
    return pushkey.encodekeys(pycompat.iteritems(keys))
388 388
389 389
def localrepolistkeys(orig, self, namespace, patterns=None):
    """Wrapper for localrepository.listkeys adding pattern filtering.

    For the bookmarks namespace with explicit patterns, merge matching
    scratch bookmarks from the infinitepush index with the regular
    bookmarks that match each pattern.
    """
    if namespace != b'bookmarks' or not patterns:
        return orig(self, namespace)

    index = self.bundlestore.index
    results = {}
    bookmarks = orig(self, namespace)
    for pattern in patterns:
        results.update(index.getbookmarks(pattern))
        if pattern.endswith(b'*'):
            # Turn a trailing-glob pattern into an anchored regex for
            # the stringmatcher below.
            pattern = b're:^' + pattern[:-1] + b'.*'
        kind, pat, matcher = stringutil.stringmatcher(pattern)
        for bookmark, node in pycompat.iteritems(bookmarks):
            if matcher(bookmark):
                results[bookmark] = node
    return results
406 406
407 407
@wireprotov1peer.batchable
def listkeyspatterns(self, namespace, patterns):
    # Client half of the 'listkeyspatterns' command, written in the
    # @batchable generator style: first yield the (args, future) pair,
    # then yield the decoded result once the future has been filled in
    # with the server reply.
    if not self.capable(b'pushkey'):
        yield {}, None
    f = wireprotov1peer.future()
    self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
    yield {
        b'namespace': encoding.fromlocal(namespace),
        b'patterns': wireprototypes.encodelist(patterns),
    }, f
    d = f.value
    self.ui.debug(
        b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
    )
    yield pushkey.decodekeys(d)
423 423
424 424
425 425 def _readbundlerevs(bundlerepo):
426 426 return list(bundlerepo.revs(b'bundle()'))
427 427
428 428
429 429 def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
430 430 '''Tells remotefilelog to include all changed files to the changegroup
431 431
432 432 By default remotefilelog doesn't include file content to the changegroup.
433 433 But we need to include it if we are fetching from bundlestore.
434 434 '''
435 435 changedfiles = set()
436 436 cl = bundlerepo.changelog
437 437 for r in bundlerevs:
438 438 # [3] means changed files
439 439 changedfiles.update(cl.read(r)[3])
440 440 if not changedfiles:
441 441 return bundlecaps
442 442
443 443 changedfiles = b'\0'.join(changedfiles)
444 444 newcaps = []
445 445 appended = False
446 446 for cap in bundlecaps or []:
447 447 if cap.startswith(b'excludepattern='):
448 448 newcaps.append(b'\0'.join((cap, changedfiles)))
449 449 appended = True
450 450 else:
451 451 newcaps.append(cap)
452 452 if not appended:
453 453 # Not found excludepattern cap. Just append it
454 454 newcaps.append(b'excludepattern=' + changedfiles)
455 455
456 456 return newcaps
457 457
458 458
def _rebundle(bundlerepo, bundleroots, unknownhead):
    """Re-pack the bundle so it serves only the requested head.

    A stored bundle may contain more revisions than the user asked for
    (for example descendants of the requested revision); build a
    changegroup part limited to what is outgoing towards
    ``unknownhead`` and return it as a single-element part list.
    """
    version = b'02'
    outgoing = discovery.outgoing(
        bundlerepo, commonheads=bundleroots, missingheads=[unknownhead]
    )
    cgstream = changegroup.makestream(bundlerepo, outgoing, version, b'pull')
    cgdata = util.chunkbuffer(cgstream).read()
    cgpart = bundle2.bundlepart(b'changegroup', data=cgdata)
    cgpart.addparam(b'version', version)
    return [cgpart]
478 478
479 479
480 480 def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
481 481 cl = bundlerepo.changelog
482 482 bundleroots = []
483 483 for rev in bundlerevs:
484 484 node = cl.node(rev)
485 485 parents = cl.parents(node)
486 486 for parent in parents:
487 487 # include all revs that exist in the main repo
488 488 # to make sure that bundle may apply client-side
489 489 if parent in oldrepo:
490 490 bundleroots.append(parent)
491 491 return bundleroots
492 492
493 493
494 494 def _needsrebundling(head, bundlerepo):
495 495 bundleheads = list(bundlerepo.revs(b'heads(bundle())'))
496 496 return not (
497 497 len(bundleheads) == 1 and bundlerepo[bundleheads[0]].node() == head
498 498 )
499 499
500 500
def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
    '''generates bundle that will be send to the user

    returns tuple with raw bundle string and bundle type
    '''
    parts = []
    if not _needsrebundling(head, bundlerepo):
        # Fast path: the stored bundle already contains exactly the
        # requested head; forward its parts unchanged.
        with util.posixfile(bundlefile, b"rb") as f:
            unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
            if isinstance(unbundler, changegroup.cg1unpacker):
                # Legacy changegroup: wrap the raw stream in a bundle2
                # part with version '01'.
                part = bundle2.bundlepart(
                    b'changegroup', data=unbundler._stream.read()
                )
                part.addparam(b'version', b'01')
                parts.append(part)
            elif isinstance(unbundler, bundle2.unbundle20):
                haschangegroup = False
                # Copy every stored part (and its params) into the
                # outgoing bundle.
                for part in unbundler.iterparts():
                    if part.type == b'changegroup':
                        haschangegroup = True
                    newpart = bundle2.bundlepart(part.type, data=part.read())
                    for key, value in pycompat.iteritems(part.params):
                        newpart.addparam(key, value)
                    parts.append(newpart)

                if not haschangegroup:
                    raise error.Abort(
                        b'unexpected bundle without changegroup part, '
                        + b'head: %s' % hex(head),
                        hint=b'report to administrator',
                    )
            else:
                raise error.Abort(b'unknown bundle type')
    else:
        # The stored bundle contains more than the requested head;
        # rebuild a minimal changegroup for it.
        parts = _rebundle(bundlerepo, bundleroots, head)

    return parts
538 538
539 539
def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
    """Wrapper for exchange.getbundlechunks serving scratch bundles.

    Heads unknown to the repo are fetched from the bundlestore, turned
    into extra bundle2 parts, and the phases reply is patched so the
    scratch changesets stay draft.
    """
    heads = heads or []
    # newheads are parents of roots of scratch bundles that were requested
    newphases = {}
    scratchbundles = []
    newheads = []
    scratchheads = []
    nodestobundle = {}
    allbundlestocleanup = []
    try:
        for head in heads:
            if head not in repo.changelog.nodemap:
                if head not in nodestobundle:
                    # Unknown head: download its bundle and open it as
                    # an overlay repository.
                    newbundlefile = common.downloadbundle(repo, head)
                    bundlepath = b"bundle:%s+%s" % (repo.root, newbundlefile)
                    bundlerepo = hg.repository(repo.ui, bundlepath)

                    allbundlestocleanup.append((bundlerepo, newbundlefile))
                    bundlerevs = set(_readbundlerevs(bundlerepo))
                    bundlecaps = _includefilelogstobundle(
                        bundlecaps, bundlerepo, bundlerevs, repo.ui
                    )
                    cl = bundlerepo.changelog
                    bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
                    for rev in bundlerevs:
                        node = cl.node(rev)
                        newphases[hex(node)] = str(phases.draft)
                        nodestobundle[node] = (
                            bundlerepo,
                            bundleroots,
                            newbundlefile,
                        )

                scratchbundles.append(
                    _generateoutputparts(head, *nodestobundle[head])
                )
                newheads.extend(bundleroots)
                scratchheads.append(head)
    finally:
        # Close and delete every downloaded bundle, even on error.
        for bundlerepo, bundlefile in allbundlestocleanup:
            bundlerepo.close()
            try:
                os.unlink(bundlefile)
            except (IOError, OSError):
                # if we can't cleanup the file then just ignore the error,
                # no need to fail
                pass

    pullfrombundlestore = bool(scratchbundles)
    wrappedchangegrouppart = False
    wrappedlistkeys = False
    oldchangegrouppart = exchange.getbundle2partsmapping[b'changegroup']
    try:

        def _changegrouppart(bundler, *args, **kwargs):
            # Order is important here. First add non-scratch part
            # and only then add parts with scratch bundles because
            # non-scratch part contains parents of roots of scratch bundles.
            result = oldchangegrouppart(bundler, *args, **kwargs)
            for bundle in scratchbundles:
                for part in bundle:
                    bundler.addpart(part)
            return result

        exchange.getbundle2partsmapping[b'changegroup'] = _changegrouppart
        wrappedchangegrouppart = True

        def _listkeys(orig, self, namespace):
            origvalues = orig(self, namespace)
            if namespace == b'phases' and pullfrombundlestore:
                if origvalues.get(b'publishing') == b'True':
                    # Make repo non-publishing to preserve draft phase
                    del origvalues[b'publishing']
                origvalues.update(newphases)
            return origvalues

        extensions.wrapfunction(
            localrepo.localrepository, b'listkeys', _listkeys
        )
        wrappedlistkeys = True
        # Replace requested scratch heads by their (known) bundle roots
        # for the regular bundle generation below.
        heads = list((set(newheads) | set(heads)) - set(scratchheads))
        result = orig(
            repo, source, heads=heads, bundlecaps=bundlecaps, **kwargs
        )
    finally:
        # Undo the temporary wrapping regardless of the outcome.
        if wrappedchangegrouppart:
            exchange.getbundle2partsmapping[b'changegroup'] = oldchangegrouppart
        if wrappedlistkeys:
            extensions.unwrapfunction(
                localrepo.localrepository, b'listkeys', _listkeys
            )
    return result
632 632
633 633
def _lookupwrap(orig):
    """Wrap the 'lookup' wire command to resolve scratch bookmarks/bundles."""

    def _lookup(repo, proto, key):
        localkey = encoding.tolocal(key)

        # localkey is bytes on both Python 2 and 3; the previous
        # isinstance(localkey, str) check silently disabled scratch
        # lookups on Python 3 (bytes is not str there).
        if isinstance(localkey, bytes) and _scratchbranchmatcher(localkey):
            scratchnode = repo.bundlestore.index.getnode(localkey)
            if scratchnode:
                return b"%d %s\n" % (1, scratchnode)
            else:
                return b"%d %s\n" % (
                    0,
                    b'scratch branch %s not found' % localkey,
                )
        else:
            try:
                r = hex(repo.lookup(localkey))
                return b"%d %s\n" % (1, r)
            except Exception as inst:
                # Regular lookup failed; fall back to the bundlestore
                # before reporting the failure to the client.
                if repo.bundlestore.index.getbundle(localkey):
                    return b"%d %s\n" % (1, localkey)
                else:
                    r = stringutil.forcebytestr(inst)
                    return b"%d %s\n" % (0, r)

    return _lookup
659 659
660 660
def _pull(orig, ui, repo, source=b"default", **opts):
    """Wrapped 'pull': resolve scratch bookmarks/revisions before pulling."""
    opts = pycompat.byteskwargs(opts)
    # Copy paste from `pull` command
    source, branches = hg.parseurl(ui.expandpath(source), opts.get(b'branch'))

    scratchbookmarks = {}
    unfi = repo.unfiltered()
    unknownnodes = []
    for rev in opts.get(b'rev', []):
        if rev not in unfi:
            unknownnodes.append(rev)
    if opts.get(b'bookmark'):
        bookmarks = []
        revs = opts.get(b'rev') or []
        for bookmark in opts.get(b'bookmark'):
            if _scratchbranchmatcher(bookmark):
                # rev is not known yet
                # it will be fetched with listkeyspatterns next
                scratchbookmarks[bookmark] = b'REVTOFETCH'
            else:
                bookmarks.append(bookmark)

        if scratchbookmarks:
            # Ask the server for the nodes behind each scratch bookmark
            # and pull those nodes explicitly.
            other = hg.peer(repo, opts, source)
            fetchedbookmarks = other.listkeyspatterns(
                b'bookmarks', patterns=scratchbookmarks
            )
            for bookmark in scratchbookmarks:
                if bookmark not in fetchedbookmarks:
                    raise error.Abort(
                        b'remote bookmark %s not found!' % bookmark
                    )
                scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
                revs.append(fetchedbookmarks[bookmark])
        opts[b'bookmark'] = bookmarks
        opts[b'rev'] = revs

    if scratchbookmarks or unknownnodes:
        # Set anyincoming to True
        extensions.wrapfunction(
            discovery, b'findcommonincoming', _findcommonincoming
        )
    try:
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save it before pull and restore after
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
        result = orig(ui, repo, source, **pycompat.strkwargs(opts))
        # TODO(stash): race condition is possible
        # if scratch bookmarks was updated right after orig.
        # But that's unlikely and shouldn't be harmful.
        if common.isremotebooksenabled(ui):
            remotescratchbookmarks.update(scratchbookmarks)
            _saveremotebookmarks(repo, remotescratchbookmarks, source)
        else:
            _savelocalbookmarks(repo, scratchbookmarks)
        return result
    finally:
        if scratchbookmarks:
            extensions.unwrapfunction(discovery, b'findcommonincoming')
720 720
721 721
def _readscratchremotebookmarks(ui, repo, other):
    """Return {name: hexnode} for remote scratch bookmarks of ``other``.

    Returns an empty dict when the remotenames extension is disabled.
    """
    if common.isremotebooksenabled(ui):
        remotenamesext = extensions.find(b'remotenames')
        remotepath = remotenamesext.activepath(repo.ui, other)
        result = {}
        # Let's refresh remotenames to make sure we have it up to date
        # Seems that `repo.names['remotebookmarks']` may return stale bookmarks
        # and it results in deleting scratch bookmarks. Our best guess how to
        # fix it is to use `clearnames()`
        repo._remotenames.clearnames()
        for remotebookmark in repo.names[b'remotebookmarks'].listnames(repo):
            path, bookname = remotenamesext.splitremotename(remotebookmark)
            # Keep only bookmarks of this remote matching the scratch
            # branch pattern.
            if path == remotepath and _scratchbranchmatcher(bookname):
                nodes = repo.names[b'remotebookmarks'].nodes(
                    repo, remotebookmark
                )
                if nodes:
                    result[bookname] = hex(nodes[0])
        return result
    else:
        return {}
743 743
744 744
def _saveremotebookmarks(repo, newbookmarks, remote):
    """Persist remote names for ``remote``, merging in ``newbookmarks``.

    Existing remote bookmarks for the target path win over entries in
    ``newbookmarks`` (a normal bookmark may match the scratch branch
    pattern); branches are passed through unchanged.
    """
    remotenamesext = extensions.find(b'remotenames')
    remotepath = remotenamesext.activepath(repo.ui, remote)
    branches = collections.defaultdict(list)
    bookmarks = {}
    remotenames = remotenamesext.readremotenames(repo)
    # NB: loop variable renamed to avoid shadowing the 'remote' parameter.
    for hexnode, nametype, remotename, rname in remotenames:
        if remotename != remotepath:
            continue
        if nametype == b'bookmarks':
            if rname in newbookmarks:
                # It's possible if we have a normal bookmark that matches
                # scratch branch pattern. In this case just use the current
                # bookmark node
                del newbookmarks[rname]
            bookmarks[rname] = hexnode
        elif nametype == b'branches':
            # saveremotenames expects 20 byte binary nodes for branches
            branches[rname].append(bin(hexnode))

    for bookmark, hexnode in pycompat.iteritems(newbookmarks):
        bookmarks[bookmark] = hexnode
    remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)
768 768
769 769
def _savelocalbookmarks(repo, bookmarks):
    """Record scratch bookmarks as local bookmarks in one transaction."""
    if not bookmarks:
        return
    with repo.wlock(), repo.lock(), repo.transaction(b'bookmark') as tr:
        changes = [
            (name, repo[node].node())
            for name, node in pycompat.iteritems(bookmarks)
        ]
        repo._bookmarks.applychanges(repo, tr, changes)
779 779
780 780
781 781 def _findcommonincoming(orig, *args, **kwargs):
782 782 common, inc, remoteheads = orig(*args, **kwargs)
783 783 return common, True, remoteheads
784 784
785 785
def _push(orig, ui, repo, dest=None, *args, **opts):
    """Wrapped 'push': route scratch-bookmark pushes to the bundlestore."""
    opts = pycompat.byteskwargs(opts)
    bookmark = opts.get(b'bookmark')
    # we only support pushing one infinitepush bookmark at once
    if len(bookmark) == 1:
        bookmark = bookmark[0]
    else:
        bookmark = b''

    oldphasemove = None
    overrides = {(experimental, configbookmark): bookmark}

    with ui.configoverride(overrides, b'infinitepush'):
        scratchpush = opts.get(b'bundle_store')
        if _scratchbranchmatcher(bookmark):
            scratchpush = True
            # bundle2 can be sent back after push (for example, bundle2
            # containing `pushkey` part to update bookmarks)
            ui.setconfig(experimental, b'bundle2.pushback', True)

        if scratchpush:
            # this is an infinitepush, we don't want the bookmark to be applied
            # rather that should be stored in the bundlestore
            opts[b'bookmark'] = []
            ui.setconfig(experimental, configscratchpush, True)
            oldphasemove = extensions.wrapfunction(
                exchange, b'_localphasemove', _phasemove
            )
        # Copy-paste from `push` command
        path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
        if not path:
            raise error.Abort(
                _(b'default repository not configured!'),
                hint=_(b"see 'hg help config.paths'"),
            )
        destpath = path.pushloc or path.loc
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save it before push and restore after
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
        result = orig(ui, repo, dest, *args, **pycompat.strkwargs(opts))
        if common.isremotebooksenabled(ui):
            if bookmark and scratchpush:
                # Re-fetch the node the pushed scratch bookmark now
                # points at, so the saved remote names stay accurate.
                other = hg.peer(repo, opts, destpath)
                fetchedbookmarks = other.listkeyspatterns(
                    b'bookmarks', patterns=[bookmark]
                )
                remotescratchbookmarks.update(fetchedbookmarks)
            _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
    if oldphasemove:
        exchange._localphasemove = oldphasemove
    return result
837 837
838 838
def _deleteinfinitepushbookmarks(ui, repo, path, names):
    """Prune remote names by removing the bookmarks we don't want anymore,
    then writing the result back to disk
    """
    remotenamesext = extensions.find(b'remotenames')

    # remotename format is:
    # (node, nametype ("branches" or "bookmarks"), remote, name)
    nametype_idx = 1
    remote_idx = 2
    name_idx = 3
    remotenames = [
        remotename
        for remotename in remotenamesext.readremotenames(repo)
        if remotename[remote_idx] == path
    ]
    remote_bm_names = [
        remotename[name_idx]
        for remotename in remotenames
        if remotename[nametype_idx] == b"bookmarks"
    ]

    for name in names:
        if name not in remote_bm_names:
            # bytes objects have no .format() on Python 3, so build the
            # message with % formatting instead.
            raise error.Abort(
                _(
                    b"infinitepush bookmark '%s' does not exist "
                    b"in path '%s'"
                )
                % (name, path)
            )

    bookmarks = {}
    branches = collections.defaultdict(list)
    for node, nametype, remote, name in remotenames:
        if nametype == b"bookmarks" and name not in names:
            bookmarks[name] = node
        elif nametype == b"branches":
            # saveremotenames wants binary nodes for branches
            branches[name].append(bin(node))

    remotenamesext.saveremotenames(repo, path, branches, bookmarks)
880 880
881 881
def _phasemove(orig, pushop, nodes, phase=phases.public):
    """prevent commits from being marked public

    Since these are going to a scratch branch, they aren't really being
    published."""
    if phase == phases.public:
        # Drop the move entirely: scratch changesets stay draft.
        return
    orig(pushop, nodes, phase)
890 890
891 891
@exchange.b2partsgenerator(scratchbranchparttype)
def partgen(pushop, bundler):
    # Generate the scratchbranch bundle2 parts for a scratch push.
    # No-op for regular pushes or when the server lacks the capability.
    bookmark = pushop.ui.config(experimental, configbookmark)
    scratchpush = pushop.ui.configbool(experimental, configscratchpush)
    if b'changesets' in pushop.stepsdone or not scratchpush:
        return

    if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
        return

    # Claim the 'changesets' step so the regular changegroup generator
    # does not run for this push.
    pushop.stepsdone.add(b'changesets')
    if not pushop.outgoing.missing:
        pushop.ui.status(_(b'no changes found\n'))
        pushop.cgresult = 0
        return

    # This parameter tells the server that the following bundle is an
    # infinitepush. This let's it switch the part processing to our infinitepush
    # code path.
    bundler.addparam(b"infinitepush", b"True")

    scratchparts = bundleparts.getscratchbranchparts(
        pushop.repo, pushop.remote, pushop.outgoing, pushop.ui, bookmark
    )

    for scratchpart in scratchparts:
        bundler.addpart(scratchpart)

    def handlereply(op):
        # server either succeeds or aborts; no code to read
        pushop.cgresult = 1

    return handlereply
925 925
926 926
# Advertise the scratch-branch bundle2 capability (it takes no sub-options).
bundle2.capabilities[bundleparts.scratchbranchparttype] = ()
928 928
929 929
def _getrevs(bundle, oldnode, force, bookmark):
    b'extracts and validates the revs to be imported'
    # All revisions contained in the incoming bundle, topologically sorted.
    revs = [bundle[r] for r in bundle.revs(b'sort(bundle())')]

    # new bookmark
    if oldnode is None:
        return revs

    # Fast forward update
    if oldnode in bundle and list(bundle.set(b'bundle() & %s::', oldnode)):
        return revs

    # NOTE(review): the non-fast-forward case falls through to the same
    # return, so the fast-forward check above and the `force`/`bookmark`
    # parameters are effectively unused here -- presumably a deliberate
    # relaxation of an earlier non-fast-forward guard; confirm before
    # relying on non-fast-forward pushes being rejected.
    return revs
943 943
944 944
@contextlib.contextmanager
def logservicecall(logger, service, **kwargs):
    """Log start/success/failure events around a service call.

    Emits a 'start' event on entry, then a 'success' event when the
    managed block completes, or a 'failure' event (carrying the stringified
    exception) if it raises; either way the elapsed wall-clock time in
    milliseconds is attached.  Exceptions are re-raised after logging.
    """
    begin = time.time()

    def _elapsedms():
        # milliseconds since the context was entered
        return (time.time() - begin) * 1000

    logger(service, eventtype=b'start', **kwargs)
    try:
        yield
        logger(
            service,
            eventtype=b'success',
            elapsedms=_elapsedms(),
            **kwargs
        )
    except Exception as exc:
        logger(
            service,
            eventtype=b'failure',
            elapsedms=_elapsedms(),
            errormsg=stringutil.forcebytestr(exc),
            **kwargs
        )
        raise
966 966
967 967
def _getorcreateinfinitepushlogger(op):
    """Return the per-operation infinitepush logger, creating it on demand.

    The logger is a ``functools.partial`` around ``ui.log`` that carries
    identifying metadata (user, hostname, repo name and a pseudo-random
    request id).  It is cached in ``op.records`` so every call for the
    same bundle operation reuses the same logger.
    """
    cached = op.records[b'infinitepushlogger']
    if cached:
        return cached[0]

    ui = op.repo.ui
    try:
        username = procutil.getuser()
    except Exception:
        username = b'unknown'
    # Generate random request id to be able to find all logged entries
    # for the same request. Since requestid is pseudo-generated it may
    # not be unique, but we assume that (hostname, username, requestid)
    # is unique.
    random.seed()
    requestid = random.randint(0, 2000000000)
    hostname = socket.gethostname()
    logger = functools.partial(
        ui.log,
        b'infinitepush',
        user=username,
        requestid=requestid,
        hostname=hostname,
        reponame=ui.config(b'infinitepush', b'reponame'),
    )
    op.records.add(b'infinitepushlogger', logger)
    return logger
995 995
996 996
def storetobundlestore(orig, repo, op, unbundler):
    """stores the incoming bundle coming from push command to the bundlestore
    instead of applying on the revlogs"""

    repo.ui.status(_(b"storing changesets on the bundlestore\n"))
    bundler = bundle2.bundle20(repo.ui)

    # processing each part and storing it in bundler
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            bundlepart = None
            if part.type == b'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            else:
                # copy the part (payload and parameters) verbatim into the
                # bundle destined for the store
                bundlepart = bundle2.bundlepart(part.type, data=part.read())
                for key, value in pycompat.iteritems(part.params):
                    bundlepart.addparam(key, value)

                # Certain parts require a response
                if part.type in (b'pushkey', b'changegroup'):
                    if op.reply is not None:
                        rpart = op.reply.newpart(b'reply:%s' % part.type)
                        rpart.addparam(
                            b'in-reply-to', b'%d' % part.id, mandatory=False
                        )
                        rpart.addparam(b'return', b'1', mandatory=False)

                op.records.add(part.type, {b'return': 1,})
            if bundlepart:
                bundler.addpart(bundlepart)

    # storing the bundle in the bundlestore
    buf = util.chunkbuffer(bundler.getchunks())
    fd, bundlefile = pycompat.mkstemp()
    try:
        try:
            # NOTE(review): if os.fdopen() itself raised, `fp` would be
            # unbound in the finally clause below -- confirm acceptable.
            fp = os.fdopen(fd, r'wb')
            fp.write(buf.read())
        finally:
            fp.close()
        storebundle(op, {}, bundlefile)
    finally:
        try:
            os.unlink(bundlefile)
        except Exception:
            # we would rather see the original exception
            pass
1045 1045
1046 1046
def processparts(orig, repo, op, unbundler):
    """Intercept bundle2 part processing for infinitepush pushes.

    Dispatch order:
      * `hg unbundle` is never intercepted;
      * if [infinitepush] pushtobundlestore is set, the whole push is
        stored via storetobundlestore();
      * otherwise only bundles tagged with the 'infinitepush' bundle2
        parameter are handled here; everything else goes to ``orig``.

    Scratch-branch parts are converted into plain changegroup parts and
    written to the bundle store; their parameters are kept (``cgparams``)
    for indexing by storebundle().
    """
    # make sure we don't wrap processparts in case of `hg unbundle`
    if op.source == b'unbundle':
        return orig(repo, op, unbundler)

    # this server routes each push to bundle store
    if repo.ui.configbool(b'infinitepush', b'pushtobundlestore'):
        return storetobundlestore(orig, repo, op, unbundler)

    if unbundler.params.get(b'infinitepush') != b'True':
        return orig(repo, op, unbundler)

    handleallparts = repo.ui.configbool(b'infinitepush', b'storeallparts')

    bundler = bundle2.bundle20(repo.ui)
    cgparams = None
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            bundlepart = None
            if part.type == b'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            elif part.type == bundleparts.scratchbranchparttype:
                # Scratch branch parts need to be converted to normal
                # changegroup parts, and the extra parameters stored for later
                # when we upload to the store. Eventually those parameters will
                # be put on the actual bundle instead of this part, then we can
                # send a vanilla changegroup instead of the scratchbranch part.
                cgversion = part.params.get(b'cgversion', b'01')
                bundlepart = bundle2.bundlepart(
                    b'changegroup', data=part.read()
                )
                bundlepart.addparam(b'version', cgversion)
                cgparams = part.params

                # If we're not dumping all parts into the new bundle, we need to
                # alert the future pushkey and phase-heads handler to skip
                # the part.
                if not handleallparts:
                    op.records.add(
                        scratchbranchparttype + b'_skippushkey', True
                    )
                    op.records.add(
                        scratchbranchparttype + b'_skipphaseheads', True
                    )
            else:
                if handleallparts:
                    # Ideally we would not process any parts, and instead just
                    # forward them to the bundle for storage, but since this
                    # differs from previous behavior, we need to put it behind a
                    # config flag for incremental rollout.
                    bundlepart = bundle2.bundlepart(part.type, data=part.read())
                    for key, value in pycompat.iteritems(part.params):
                        bundlepart.addparam(key, value)

                    # Certain parts require a response
                    if part.type == b'pushkey':
                        if op.reply is not None:
                            rpart = op.reply.newpart(b'reply:pushkey')
                            # part ids must be rendered as bytes on py3;
                            # str() yields a unicode string here (fixed to
                            # match the b'%d' usage in storetobundlestore)
                            rpart.addparam(
                                b'in-reply-to', b'%d' % part.id, mandatory=False
                            )
                            rpart.addparam(b'return', b'1', mandatory=False)
                else:
                    bundle2._processpart(op, part)

            if handleallparts:
                op.records.add(part.type, {b'return': 1,})
            if bundlepart:
                bundler.addpart(bundlepart)

    # If commits were sent, store them
    if cgparams:
        buf = util.chunkbuffer(bundler.getchunks())
        fd, bundlefile = pycompat.mkstemp()
        try:
            try:
                fp = os.fdopen(fd, r'wb')
                fp.write(buf.read())
            finally:
                fp.close()
            storebundle(op, cgparams, bundlefile)
        finally:
            try:
                os.unlink(bundlefile)
            except Exception:
                # we would rather see the original exception
                pass
1136 1136
1137 1137
def storebundle(op, params, bundlefile):
    """Store *bundlefile* in the bundle store and index its heads/bookmark.

    *params* are the scratch-branch part parameters (bookmark, force, ...).
    Logs start/success/failure events; re-raises any error after logging.
    """
    log = _getorcreateinfinitepushlogger(op)
    parthandlerstart = time.time()
    log(scratchbranchparttype, eventtype=b'start')
    index = op.repo.bundlestore.index
    store = op.repo.bundlestore.store
    # the pushkey part of this push must not be applied to the real repo
    op.records.add(scratchbranchparttype + b'_skippushkey', True)

    bundle = None
    try:  # guards bundle
        bundlepath = b"bundle:%s+%s" % (op.repo.root, bundlefile)
        bundle = hg.repository(op.repo.ui, bundlepath)

        bookmark = params.get(b'bookmark')
        bookprevnode = params.get(b'bookprevnode', b'')
        force = params.get(b'force')

        if bookmark:
            oldnode = index.getnode(bookmark)
        else:
            oldnode = None
        bundleheads = bundle.revs(b'heads(bundle())')
        if bookmark and len(bundleheads) > 1:
            raise error.Abort(
                _(b'cannot push more than one head to a scratch branch')
            )

        revs = _getrevs(bundle, oldnode, force, bookmark)

        # Notify the user of what is being pushed
        plural = b's' if len(revs) > 1 else b''
        op.repo.ui.warn(_(b"pushing %d commit%s:\n") % (len(revs), plural))
        maxoutput = 10
        for i in range(0, min(len(revs), maxoutput)):
            firstline = bundle[revs[i]].description().split(b'\n')[0][:50]
            op.repo.ui.warn(b" %s %s\n" % (revs[i], firstline))

        if len(revs) > maxoutput + 1:
            op.repo.ui.warn(b" ...\n")
            firstline = bundle[revs[-1]].description().split(b'\n')[0][:50]
            op.repo.ui.warn(b" %s %s\n" % (revs[-1], firstline))

        nodesctx = [bundle[rev] for rev in revs]
        inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
        if bundleheads:
            newheadscount = sum(not inindex(rev) for rev in bundleheads)
        else:
            newheadscount = 0
        # If there's a bookmark specified, there should be only one head,
        # so we choose the last node, which will be that head.
        # If a bug or malicious client allows there to be a bookmark
        # with multiple heads, we will place the bookmark on the last head.
        bookmarknode = nodesctx[-1].hex() if nodesctx else None
        key = None
        if newheadscount:
            # only upload the payload when it introduces heads the store
            # does not already know about
            with open(bundlefile, b'rb') as f:
                bundledata = f.read()
                with logservicecall(
                    log, b'bundlestore', bundlesize=len(bundledata)
                ):
                    bundlesizelimit = 100 * 1024 * 1024  # 100 MB
                    if len(bundledata) > bundlesizelimit:
                        error_msg = (
                            b'bundle is too big: %d bytes. '
                            + b'max allowed size is 100 MB'
                        )
                        raise error.Abort(error_msg % (len(bundledata),))
                    key = store.write(bundledata)

        with logservicecall(log, b'index', newheadscount=newheadscount), index:
            if key:
                index.addbundle(key, nodesctx)
            if bookmark:
                index.addbookmark(bookmark, bookmarknode)
                _maybeaddpushbackpart(
                    op, bookmark, bookmarknode, bookprevnode, params
                )
        log(
            scratchbranchparttype,
            eventtype=b'success',
            elapsedms=(time.time() - parthandlerstart) * 1000,
        )

    except Exception as e:
        log(
            scratchbranchparttype,
            eventtype=b'failure',
            elapsedms=(time.time() - parthandlerstart) * 1000,
            errormsg=stringutil.forcebytestr(e),
        )
        raise
    finally:
        if bundle:
            bundle.close()
1232 1232
1233 1233
@bundle2.parthandler(
    scratchbranchparttype,
    (
        b'bookmark',
        b'bookprevnode',
        b'force',
        b'pushbackbookmarks',
        b'cgversion',
    ),
)
def bundle2scratchbranch(op, part):
    '''unbundle a bundle2 part containing a changegroup to store'''

    bundler = bundle2.bundle20(op.repo.ui)
    cgversion = part.params.get(b'cgversion', b'01')
    # re-wrap the scratch part's payload as a plain changegroup part so it
    # can be written out as a regular bundle
    cgpart = bundle2.bundlepart(b'changegroup', data=part.read())
    cgpart.addparam(b'version', cgversion)
    bundler.addpart(cgpart)
    buf = util.chunkbuffer(bundler.getchunks())

    fd, bundlefile = pycompat.mkstemp()
    try:
        try:
            fp = os.fdopen(fd, r'wb')
            fp.write(buf.read())
        finally:
            fp.close()
        storebundle(op, part.params, bundlefile)
    finally:
        try:
            os.unlink(bundlefile)
        except OSError as e:
            # the temp file being already gone is fine; anything else is not
            if e.errno != errno.ENOENT:
                raise

    return 1
1270 1270
1271 1271
def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
    """Attach a pushkey 'pushback' part that moves the client's bookmark.

    Only done when the client requested bookmark pushback (the
    'pushbackbookmarks' param) and advertised the bundle2 'pushback'
    reply capability; otherwise this is a no-op.
    """
    if not params.get(b'pushbackbookmarks'):
        return
    if op.reply is None or b'pushback' not in op.reply.capabilities:
        return
    pushkeyargs = {
        b'namespace': b'bookmarks',
        b'key': bookmark,
        b'new': newnode,
        b'old': oldnode,
    }
    op.reply.newpart(
        b'pushkey', mandatoryparams=pycompat.iteritems(pushkeyargs)
    )
1284 1284
1285 1285
def bundle2pushkey(orig, op, part):
    '''Wrapper of bundle2.handlepushkey()

    The only goal is to skip calling the original function if flag is set.
    It's set if infinitepush push is happening.  When skipping, a
    successful reply:pushkey part is still emitted so the client sees
    the pushkey as accepted.
    '''
    if op.records[scratchbranchparttype + b'_skippushkey']:
        if op.reply is not None:
            rpart = op.reply.newpart(b'reply:pushkey')
            # part ids must be rendered as bytes on py3; str() would yield
            # a unicode string here (matches the b'%d' usage in
            # storetobundlestore)
            rpart.addparam(b'in-reply-to', b'%d' % part.id, mandatory=False)
            rpart.addparam(b'return', b'1', mandatory=False)
        return 1

    return orig(op, part)
1300 1300
1301 1301
def bundle2handlephases(orig, op, part):
    '''Wrapper of bundle2.handlephases()

    The only goal is to skip calling the original function if flag is set.
    It's set if infinitepush push is happening.
    '''
    skipping = op.records[scratchbranchparttype + b'_skipphaseheads']
    if skipping:
        return
    return orig(op, part)
1313 1313
1314 1314
def _asyncsavemetadata(root, nodes):
    '''starts a separate process that fills metadata for the nodes

    Spawns `hg debugfillinfinitepushmetadata` detached from the current
    process and returns immediately, so pushes are not slowed down.
    Silently does nothing when more than 50 nodes are given.
    '''
    maxnodes = 50
    if len(nodes) > maxnodes:
        return

    nodesargs = []
    for node in nodes:
        nodesargs.extend([b'--node', node])

    cmdline = [
        util.hgexecutable(),
        b'debugfillinfinitepushmetadata',
        b'-R',
        root,
    ] + nodesargs

    with open(os.devnull, b'w+b') as devnull:
        # Process will run in background. We don't care about the return code
        subprocess.Popen(
            pycompat.rapply(procutil.tonativestr, cmdline),
            close_fds=True,
            shell=False,
            stdin=devnull,
            stdout=devnull,
            stderr=devnull,
        )
General Comments 0
You need to be logged in to leave comments. Login now