infinitepush: drop logic related to treemanifest extension...
Pulkit Goyal
r37214:7fa00a6f default
@@ -1,1217 +1,1199 b''
1 1 # Infinite push
2 2 #
3 3 # Copyright 2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """ store some pushes in a remote blob store on the server (EXPERIMENTAL)
8 8
9 9 [infinitepush]
10 10 # Server-side and client-side option. Pattern of the infinitepush bookmark
11 11 branchpattern = PATTERN
12 12
13 13 # Server or client
14 14 server = False
15 15
16 16 # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
17 17 indextype = disk
18 18
19 19 # Server-side option. Used only if indextype=sql.
20 20 # Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
21 21 sqlhost = IP:PORT:DB_NAME:USER:PASSWORD
22 22
23 23 # Server-side option. Used only if indextype=disk.
24 24 # Filesystem path to the index store
25 25 indexpath = PATH
26 26
27 27 # Server-side option. Possible values: 'disk' or 'external'
28 28 # Fails if not set
29 29 storetype = disk
30 30
31 31 # Server-side option.
32 32 # Path to the binary that will save a bundle to the bundlestore
33 33 # Formatted cmd line will be passed to it (see `put_args`)
34 34 put_binary = put
35 35
36 36 # Server-side option. Used only if storetype=external.
37 37 # Format cmd-line string for put binary. Placeholder: {filename}
38 38 put_args = {filename}
39 39
40 40 # Server-side option.
41 41 # Path to the binary that gets a bundle from the bundlestore.
42 42 # Formatted cmd line will be passed to it (see `get_args`)
43 43 get_binary = get
44 44
45 45 # Server-side option. Used only if storetype=external.
46 46 # Format cmd-line string for get binary. Placeholders: {filename} {handle}
47 47 get_args = {filename} {handle}
48 48
49 49 # Server-side option
50 50 logfile = FILE
51 51
52 52 # Server-side option
53 53 loglevel = DEBUG
54 54
55 55 # Server-side option. Used only if indextype=sql.
56 56 # Sets mysql wait_timeout option.
57 57 waittimeout = 300
58 58
59 59 # Server-side option. Used only if indextype=sql.
60 60 # Sets mysql innodb_lock_wait_timeout option.
61 61 locktimeout = 120
62 62
63 63 # Server-side option. Used only if indextype=sql.
64 64 # Name of the repository
65 65 reponame = ''
66 66
67 67 # Client-side option. Used by --list-remote option. List of remote scratch
68 68 # patterns to list if no patterns are specified.
69 69 defaultremotepatterns = ['*']
70 70
71 71 # Server-side option. If a pushed bookmark matches
72 72 # `fillmetadatabranchpattern`, then a background
73 73 # `hg debugfillinfinitepushmetadata` process will save metadata
74 74 # in the infinitepush index for nodes that are ancestors of the bookmark.
75 75 fillmetadatabranchpattern = ''
76 76
77 77 # Instructs infinitepush to forward all received bundle2 parts to the
78 78 # bundle for storage. Defaults to False.
79 79 storeallparts = True
80 80
81 81 [remotenames]
82 82 # Client-side option
83 83 # This option should be set only if remotenames extension is enabled.
84 84 # Whether remote bookmarks are tracked by remotenames extension.
85 85 bookmarks = True
86 86 """
87 87
88 88 from __future__ import absolute_import
89 89
90 90 import collections
91 91 import contextlib
92 92 import errno
93 93 import functools
94 94 import logging
95 95 import os
96 96 import random
97 97 import re
98 98 import socket
99 99 import subprocess
100 100 import sys
101 101 import tempfile
102 102 import time
103 103
104 104 from mercurial.node import (
105 105 bin,
106 106 hex,
107 107 )
108 108
109 109 from mercurial.i18n import _
110 110
111 111 from mercurial import (
112 112 bundle2,
113 113 changegroup,
114 114 commands,
115 115 discovery,
116 116 encoding,
117 117 error,
118 118 exchange,
119 119 extensions,
120 120 hg,
121 121 localrepo,
122 122 peer,
123 123 phases,
124 124 pushkey,
125 125 registrar,
126 126 util,
127 127 wireproto,
128 128 )
129 129
130 130 from . import (
131 131 bundleparts,
132 132 common,
133 133 infinitepushcommands,
134 134 )
135 135
136 136 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
137 137 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
138 138 # be specifying the version(s) of Mercurial they are tested with, or
139 139 # leave the attribute unspecified.
140 140 testedwith = 'ships-with-hg-core'
141 141
142 142 configtable = {}
143 143 configitem = registrar.configitem(configtable)
144 144
145 145 configitem('infinitepush', 'server',
146 146 default=False,
147 147 )
148 148 configitem('infinitepush', 'storetype',
149 149 default='',
150 150 )
151 151 configitem('infinitepush', 'indextype',
152 152 default='',
153 153 )
154 154 configitem('infinitepush', 'indexpath',
155 155 default='',
156 156 )
157 157 configitem('infinitepush', 'fillmetadatabranchpattern',
158 158 default='',
159 159 )
160 160 configitem('infinitepush', 'storeallparts',
161 161 default=False,
162 162 )
163 163 configitem('infinitepush', 'reponame',
164 164 default='',
165 165 )
166 166 configitem('scratchbranch', 'storepath',
167 167 default='',
168 168 )
169 169 configitem('infinitepush', 'branchpattern',
170 170 default='',
171 171 )
172 172 configitem('infinitepush', 'metadatafilelimit',
173 173 default=100,
174 174 )
175 175 configitem('experimental', 'server-bundlestore-bookmark',
176 176 default='',
177 177 )
178 178 configitem('experimental', 'server-bundlestore-create',
179 179 default='',
180 180 )
181 181 configitem('experimental', 'infinitepush-scratchpush',
182 182 default=False,
183 183 )
184 184 configitem('experimental', 'non-forward-move',
185 185 default=False,
186 186 )
187 187
188 188 experimental = 'experimental'
189 189 configbookmark = 'server-bundlestore-bookmark'
190 190 configcreate = 'server-bundlestore-create'
191 191 configscratchpush = 'infinitepush-scratchpush'
192 192 confignonforwardmove = 'non-forward-move'
193 193
194 194 scratchbranchparttype = bundleparts.scratchbranchparttype
195 195 cmdtable = infinitepushcommands.cmdtable
196 196 revsetpredicate = registrar.revsetpredicate()
197 197 templatekeyword = registrar.templatekeyword()
198 198 _scratchbranchmatcher = lambda x: False
199 199 _maybehash = re.compile(r'^[a-f0-9]+$').search
200 200
201 201 def _buildexternalbundlestore(ui):
202 202 put_args = ui.configlist('infinitepush', 'put_args', [])
203 203 put_binary = ui.config('infinitepush', 'put_binary')
204 204 if not put_binary:
205 205 raise error.Abort('put binary is not specified')
206 206 get_args = ui.configlist('infinitepush', 'get_args', [])
207 207 get_binary = ui.config('infinitepush', 'get_binary')
208 208 if not get_binary:
209 209 raise error.Abort('get binary is not specified')
210 210 from . import store
211 211 return store.externalbundlestore(put_binary, put_args, get_binary, get_args)
212 212
213 213 def _buildsqlindex(ui):
214 214 sqlhost = ui.config('infinitepush', 'sqlhost')
215 215 if not sqlhost:
216 216 raise error.Abort(_('please set infinitepush.sqlhost'))
217 217 host, port, db, user, password = sqlhost.split(':')
218 218 reponame = ui.config('infinitepush', 'reponame')
219 219 if not reponame:
220 220 raise error.Abort(_('please set infinitepush.reponame'))
221 221
222 222 logfile = ui.config('infinitepush', 'logfile', '')
223 223 waittimeout = ui.configint('infinitepush', 'waittimeout', 300)
224 224 locktimeout = ui.configint('infinitepush', 'locktimeout', 120)
225 225 from . import sqlindexapi
226 226 return sqlindexapi.sqlindexapi(
227 227 reponame, host, port, db, user, password,
228 228 logfile, _getloglevel(ui), waittimeout=waittimeout,
229 229 locktimeout=locktimeout)
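
For reference, the `sqlhost` value documented in the module help packs five colon-separated fields; a quick sketch of the parse performed above, with made-up values:

    # made-up connection string; the port stays a string until the
    # index implementation converts it
    host, port, db, user, password = \
        'db.example.com:3306:hgbundles:hguser:secret'.split(':')
    assert (host, port, db) == ('db.example.com', '3306', 'hgbundles')
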
230 230
231 231 def _getloglevel(ui):
232 232 loglevel = ui.config('infinitepush', 'loglevel', 'DEBUG')
233 233 numeric_loglevel = getattr(logging, loglevel.upper(), None)
234 234 if not isinstance(numeric_loglevel, int):
235 235 raise error.Abort(_('invalid log level %s') % loglevel)
236 236 return numeric_loglevel
237 237
238 238 def _tryhoist(ui, remotebookmark):
239 239 '''returns a bookmark with the hoisted part removed
240 240
241 241 The remotenames extension has a 'hoist' config that allows using remote
242 242 bookmarks without specifying the remote path. For example, 'hg update master'
243 243 works as well as 'hg update remote/master'. We want to allow the same in
244 244 infinitepush.
245 245 '''
246 246
247 247 if common.isremotebooksenabled(ui):
248 248 hoist = ui.config('remotenames', 'hoist') + '/'
249 249 if remotebookmark.startswith(hoist):
250 250 return remotebookmark[len(hoist):]
251 251 return remotebookmark
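
To make the hoisting rule concrete, here is a standalone sketch of the transformation, assuming remotenames is enabled and `hoist` is set to 'default' (the helper below is illustrative, not part of the extension):

    # illustrative re-implementation of the hoisting rule
    def tryhoist(remotebookmark, hoist='default'):
        prefix = hoist + '/'
        if remotebookmark.startswith(prefix):
            return remotebookmark[len(prefix):]
        return remotebookmark

    assert tryhoist('default/master') == 'master'  # hoisted part removed
    assert tryhoist('master') == 'master'          # returned unchanged
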
252 252
253 253 class bundlestore(object):
254 254 def __init__(self, repo):
255 255 self._repo = repo
256 256 storetype = self._repo.ui.config('infinitepush', 'storetype', '')
257 257 if storetype == 'disk':
258 258 from . import store
259 259 self.store = store.filebundlestore(self._repo.ui, self._repo)
260 260 elif storetype == 'external':
261 261 self.store = _buildexternalbundlestore(self._repo.ui)
262 262 else:
263 263 raise error.Abort(
264 264 _('unknown infinitepush store type specified %s') % storetype)
265 265
266 266 indextype = self._repo.ui.config('infinitepush', 'indextype', '')
267 267 if indextype == 'disk':
268 268 from . import fileindexapi
269 269 self.index = fileindexapi.fileindexapi(self._repo)
270 270 elif indextype == 'sql':
271 271 self.index = _buildsqlindex(self._repo.ui)
272 272 else:
273 273 raise error.Abort(
274 274 _('unknown infinitepush index type specified %s') % indextype)
275 275
276 276 def _isserver(ui):
277 277 return ui.configbool('infinitepush', 'server')
278 278
279 279 def reposetup(ui, repo):
280 280 if _isserver(ui) and repo.local():
281 281 repo.bundlestore = bundlestore(repo)
282 282
283 283 def extsetup(ui):
284 284 commonsetup(ui)
285 285 if _isserver(ui):
286 286 serverextsetup(ui)
287 287 else:
288 288 clientextsetup(ui)
289 289
290 290 def commonsetup(ui):
291 291 wireproto.commands['listkeyspatterns'] = (
292 292 wireprotolistkeyspatterns, 'namespace patterns')
293 293 scratchbranchpat = ui.config('infinitepush', 'branchpattern')
294 294 if scratchbranchpat:
295 295 global _scratchbranchmatcher
296 296 kind, pat, _scratchbranchmatcher = util.stringmatcher(scratchbranchpat)
297 297
298 298 def serverextsetup(ui):
299 299 origpushkeyhandler = bundle2.parthandlermapping['pushkey']
300 300
301 301 def newpushkeyhandler(*args, **kwargs):
302 302 bundle2pushkey(origpushkeyhandler, *args, **kwargs)
303 303 newpushkeyhandler.params = origpushkeyhandler.params
304 304 bundle2.parthandlermapping['pushkey'] = newpushkeyhandler
305 305
306 306 orighandlephasehandler = bundle2.parthandlermapping['phase-heads']
307 307 newphaseheadshandler = lambda *args, **kwargs: \
308 308 bundle2handlephases(orighandlephasehandler, *args, **kwargs)
309 309 newphaseheadshandler.params = orighandlephasehandler.params
310 310 bundle2.parthandlermapping['phase-heads'] = newphaseheadshandler
311 311
312 312 extensions.wrapfunction(localrepo.localrepository, 'listkeys',
313 313 localrepolistkeys)
314 314 wireproto.commands['lookup'] = (
315 315 _lookupwrap(wireproto.commands['lookup'][0]), 'key')
316 316 extensions.wrapfunction(exchange, 'getbundlechunks', getbundlechunks)
317 317
318 318 extensions.wrapfunction(bundle2, 'processparts', processparts)
319 319
320 320 def clientextsetup(ui):
321 321 entry = extensions.wrapcommand(commands.table, 'push', _push)
322 322 # Don't add the 'to' arg if it already exists
323 323 if not any(a for a in entry[1] if a[1] == 'to'):
324 324 entry[1].append(('', 'to', '', _('push revs to this bookmark')))
325 325
326 326 if not any(a for a in entry[1] if a[1] == 'non-forward-move'):
327 327 entry[1].append(('', 'non-forward-move', None,
328 328 _('allows moving a remote bookmark to an '
329 329 'arbitrary place')))
330 330
331 331 if not any(a for a in entry[1] if a[1] == 'create'):
332 332 entry[1].append(
333 333 ('', 'create', None, _('create a new remote bookmark')))
334 334
335 335 entry[1].append(
336 336 ('', 'bundle-store', None,
337 337 _('force push to go to bundle store (EXPERIMENTAL)')))
338 338
339 339 extensions.wrapcommand(commands.table, 'pull', _pull)
340 340 extensions.wrapcommand(commands.table, 'update', _update)
341 341
342 342 extensions.wrapfunction(discovery, 'checkheads', _checkheads)
343 343
344 344 wireproto.wirepeer.listkeyspatterns = listkeyspatterns
345 345
346 346 partorder = exchange.b2partsgenorder
347 347 index = partorder.index('changeset')
348 348 partorder.insert(
349 349 index, partorder.pop(partorder.index(scratchbranchparttype)))
350 350
351 351 def _checkheads(orig, pushop):
352 352 if pushop.ui.configbool(experimental, configscratchpush, False):
353 353 return
354 354 return orig(pushop)
355 355
356 356 def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
357 357 patterns = wireproto.decodelist(patterns)
358 358 d = repo.listkeys(encoding.tolocal(namespace), patterns).iteritems()
359 359 return pushkey.encodekeys(d)
360 360
361 361 def localrepolistkeys(orig, self, namespace, patterns=None):
362 362 if namespace == 'bookmarks' and patterns:
363 363 index = self.bundlestore.index
364 364 results = {}
365 365 bookmarks = orig(self, namespace)
366 366 for pattern in patterns:
367 367 results.update(index.getbookmarks(pattern))
368 368 if pattern.endswith('*'):
369 369 pattern = 're:^' + pattern[:-1] + '.*'
370 370 kind, pat, matcher = util.stringmatcher(pattern)
371 371 for bookmark, node in bookmarks.iteritems():
372 372 if matcher(bookmark):
373 373 results[bookmark] = node
374 374 return results
375 375 else:
376 376 return orig(self, namespace)
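
The notable detail above is the pattern rewrite: a trailing '*' is turned into an anchored 're:' pattern before being handed to util.stringmatcher. A simplified standalone sketch of just the two cases used here (util.stringmatcher itself supports more prefixes):

    import re

    def compilematcher(pattern):
        # a trailing '*' becomes an anchored regex, as in localrepolistkeys
        if pattern.endswith('*'):
            pattern = 're:^' + pattern[:-1] + '.*'
        if pattern.startswith('re:'):
            return re.compile(pattern[3:]).search
        return lambda s: s == pattern  # plain strings match literally

    matcher = compilematcher('infinitepush/*')
    assert matcher('infinitepush/feature-x')
    assert not matcher('other/bookmark')
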
377 377
378 378 @peer.batchable
379 379 def listkeyspatterns(self, namespace, patterns):
380 380 if not self.capable('pushkey'):
381 381 yield {}, None
382 382 f = peer.future()
383 383 self.ui.debug('preparing listkeys for "%s" with pattern "%s"\n' %
384 384 (namespace, patterns))
385 385 yield {
386 386 'namespace': encoding.fromlocal(namespace),
387 387 'patterns': wireproto.encodelist(patterns)
388 388 }, f
389 389 d = f.value
390 390 self.ui.debug('received listkey for "%s": %i bytes\n'
391 391 % (namespace, len(d)))
392 392 yield pushkey.decodekeys(d)
393 393
394 394 def _readbundlerevs(bundlerepo):
395 395 return list(bundlerepo.revs('bundle()'))
396 396
397 397 def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
398 398 '''Tells remotefilelog to include all changed files in the changegroup
399 399
400 400 By default remotefilelog doesn't include file content in the changegroup,
401 401 but we need to include it when fetching from the bundlestore.
402 402 '''
403 403 changedfiles = set()
404 404 cl = bundlerepo.changelog
405 405 for r in bundlerevs:
406 406 # [3] means changed files
407 407 changedfiles.update(cl.read(r)[3])
408 408 if not changedfiles:
409 409 return bundlecaps
410 410
411 411 changedfiles = '\0'.join(changedfiles)
412 412 newcaps = []
413 413 appended = False
414 414 for cap in (bundlecaps or []):
415 415 if cap.startswith('excludepattern='):
416 416 newcaps.append('\0'.join((cap, changedfiles)))
417 417 appended = True
418 418 else:
419 419 newcaps.append(cap)
420 420 if not appended:
421 421 # excludepattern cap not found; just append it
422 422 newcaps.append('excludepattern=' + changedfiles)
423 423
424 424 return newcaps
425 425
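
A standalone illustration of the capability rewrite performed above: the changed file names are NUL-joined and folded into an existing 'excludepattern=' cap (the cap values here are made-up inputs, not real bundlecaps):

    changedfiles = '\0'.join(['foo.py', 'bar/baz.txt'])
    bundlecaps = ['HG20', 'excludepattern=build/.*']
    newcaps = []
    for cap in bundlecaps:
        if cap.startswith('excludepattern='):
            newcaps.append('\0'.join((cap, changedfiles)))
        else:
            newcaps.append(cap)
    assert newcaps[1] == 'excludepattern=build/.*\x00foo.py\x00bar/baz.txt'
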
426 426 def _rebundle(bundlerepo, bundleroots, unknownhead):
427 427 '''
428 428 A bundle may include more revisions than the user requested. For example,
429 429 the user may ask for one revision while the bundle also contains its
430 430 descendants. This function filters out all revisions the user did not request.
431 431 '''
432 432 parts = []
433 433
434 434 version = '02'
435 435 outgoing = discovery.outgoing(bundlerepo, commonheads=bundleroots,
436 436 missingheads=[unknownhead])
437 437 cgstream = changegroup.makestream(bundlerepo, outgoing, version, 'pull')
438 438 cgstream = util.chunkbuffer(cgstream).read()
439 439 cgpart = bundle2.bundlepart('changegroup', data=cgstream)
440 440 cgpart.addparam('version', version)
441 441 parts.append(cgpart)
442 442
443 try:
444 treemod = extensions.find('treemanifest')
445 except KeyError:
446 pass
447 else:
448 if treemod._cansendtrees(bundlerepo, outgoing.missing):
449 treepart = treemod.createtreepackpart(bundlerepo, outgoing,
450 treemod.TREEGROUP_PARTTYPE2)
451 parts.append(treepart)
452
453 443 return parts
454 444
455 445 def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
456 446 cl = bundlerepo.changelog
457 447 bundleroots = []
458 448 for rev in bundlerevs:
459 449 node = cl.node(rev)
460 450 parents = cl.parents(node)
461 451 for parent in parents:
462 452 # include all revs that exist in the main repo
463 453 # to make sure that the bundle can be applied client-side
464 454 if parent in oldrepo:
465 455 bundleroots.append(parent)
466 456 return bundleroots
467 457
468 458 def _needsrebundling(head, bundlerepo):
469 459 bundleheads = list(bundlerepo.revs('heads(bundle())'))
470 460 return not (len(bundleheads) == 1 and
471 461 bundlerepo[bundleheads[0]].node() == head)
472 462
473 463 def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
474 464 '''generates the bundle that will be sent to the user
475 465
476 466 returns a list of bundle2 parts
477 467 '''
478 468 parts = []
479 469 if not _needsrebundling(head, bundlerepo):
480 470 with util.posixfile(bundlefile, "rb") as f:
481 471 unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
482 472 if isinstance(unbundler, changegroup.cg1unpacker):
483 473 part = bundle2.bundlepart('changegroup',
484 474 data=unbundler._stream.read())
485 475 part.addparam('version', '01')
486 476 parts.append(part)
487 477 elif isinstance(unbundler, bundle2.unbundle20):
488 478 haschangegroup = False
489 479 for part in unbundler.iterparts():
490 480 if part.type == 'changegroup':
491 481 haschangegroup = True
492 482 newpart = bundle2.bundlepart(part.type, data=part.read())
493 483 for key, value in part.params.iteritems():
494 484 newpart.addparam(key, value)
495 485 parts.append(newpart)
496 486
497 487 if not haschangegroup:
498 488 raise error.Abort(
499 489 'unexpected bundle without changegroup part, ' +
500 490 'head: %s' % hex(head),
501 491 hint='report to administrator')
502 492 else:
503 493 raise error.Abort('unknown bundle type')
504 494 else:
505 495 parts = _rebundle(bundlerepo, bundleroots, head)
506 496
507 497 return parts
508 498
509 499 def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
510 500 heads = heads or []
511 501 # newheads are parents of roots of scratch bundles that were requested
512 502 newphases = {}
513 503 scratchbundles = []
514 504 newheads = []
515 505 scratchheads = []
516 506 nodestobundle = {}
517 507 allbundlestocleanup = []
518 508 try:
519 509 for head in heads:
520 510 if head not in repo.changelog.nodemap:
521 511 if head not in nodestobundle:
522 512 newbundlefile = common.downloadbundle(repo, head)
523 513 bundlepath = "bundle:%s+%s" % (repo.root, newbundlefile)
524 514 bundlerepo = hg.repository(repo.ui, bundlepath)
525 515
526 516 allbundlestocleanup.append((bundlerepo, newbundlefile))
527 517 bundlerevs = set(_readbundlerevs(bundlerepo))
528 518 bundlecaps = _includefilelogstobundle(
529 519 bundlecaps, bundlerepo, bundlerevs, repo.ui)
530 520 cl = bundlerepo.changelog
531 521 bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
532 522 for rev in bundlerevs:
533 523 node = cl.node(rev)
534 524 newphases[hex(node)] = str(phases.draft)
535 525 nodestobundle[node] = (bundlerepo, bundleroots,
536 526 newbundlefile)
537 527
538 528 scratchbundles.append(
539 529 _generateoutputparts(head, *nodestobundle[head]))
540 530 newheads.extend(bundleroots)
541 531 scratchheads.append(head)
542 532 finally:
543 533 for bundlerepo, bundlefile in allbundlestocleanup:
544 534 bundlerepo.close()
545 535 try:
546 536 os.unlink(bundlefile)
547 537 except (IOError, OSError):
548 538 # if we can't cleanup the file then just ignore the error,
549 539 # no need to fail
550 540 pass
551 541
552 542 pullfrombundlestore = bool(scratchbundles)
553 543 wrappedchangegrouppart = False
554 544 wrappedlistkeys = False
555 545 oldchangegrouppart = exchange.getbundle2partsmapping['changegroup']
556 546 try:
557 547 def _changegrouppart(bundler, *args, **kwargs):
558 548 # Order is important here. First add non-scratch part
559 549 # and only then add parts with scratch bundles because
560 550 # non-scratch part contains parents of roots of scratch bundles.
561 551 result = oldchangegrouppart(bundler, *args, **kwargs)
562 552 for bundle in scratchbundles:
563 553 for part in bundle:
564 554 bundler.addpart(part)
565 555 return result
566 556
567 557 exchange.getbundle2partsmapping['changegroup'] = _changegrouppart
568 558 wrappedchangegrouppart = True
569 559
570 560 def _listkeys(orig, self, namespace):
571 561 origvalues = orig(self, namespace)
572 562 if namespace == 'phases' and pullfrombundlestore:
573 563 if origvalues.get('publishing') == 'True':
574 564 # Make repo non-publishing to preserve draft phase
575 565 del origvalues['publishing']
576 566 origvalues.update(newphases)
577 567 return origvalues
578 568
579 569 extensions.wrapfunction(localrepo.localrepository, 'listkeys',
580 570 _listkeys)
581 571 wrappedlistkeys = True
582 572 heads = list((set(newheads) | set(heads)) - set(scratchheads))
583 573 result = orig(repo, source, heads=heads,
584 574 bundlecaps=bundlecaps, **kwargs)
585 575 finally:
586 576 if wrappedchangegrouppart:
587 577 exchange.getbundle2partsmapping['changegroup'] = oldchangegrouppart
588 578 if wrappedlistkeys:
589 579 extensions.unwrapfunction(localrepo.localrepository, 'listkeys',
590 580 _listkeys)
591 581 return result
592 582
593 583 def _lookupwrap(orig):
594 584 def _lookup(repo, proto, key):
595 585 localkey = encoding.tolocal(key)
596 586
597 587 if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
598 588 scratchnode = repo.bundlestore.index.getnode(localkey)
599 589 if scratchnode:
600 590 return "%s %s\n" % (1, scratchnode)
601 591 else:
602 592 return "%s %s\n" % (0, 'scratch branch %s not found' % localkey)
603 593 else:
604 594 try:
605 595 r = hex(repo.lookup(localkey))
606 596 return "%s %s\n" % (1, r)
607 597 except Exception as inst:
608 598 if repo.bundlestore.index.getbundle(localkey):
609 599 return "%s %s\n" % (1, localkey)
610 600 else:
611 601 r = str(inst)
612 602 return "%s %s\n" % (0, r)
613 603 return _lookup
614 604
615 605 def _update(orig, ui, repo, node=None, rev=None, **opts):
616 606 if rev and node:
617 607 raise error.Abort(_("please specify just one revision"))
618 608
619 609 if not opts.get('date') and (rev or node) not in repo:
620 610 mayberemote = rev or node
621 611 mayberemote = _tryhoist(ui, mayberemote)
622 612 dopull = False
623 613 kwargs = {}
624 614 if _scratchbranchmatcher(mayberemote):
625 615 dopull = True
626 616 kwargs['bookmark'] = [mayberemote]
627 617 elif len(mayberemote) == 40 and _maybehash(mayberemote):
628 618 dopull = True
629 619 kwargs['rev'] = [mayberemote]
630 620
631 621 if dopull:
632 622 ui.warn(
633 623 _("'%s' does not exist locally - looking for it " +
634 624 "remotely...\n") % mayberemote)
635 625 # Try pulling node from remote repo
636 626 try:
637 627 cmdname = '^pull'
638 628 pullcmd = commands.table[cmdname][0]
639 629 pullopts = dict(opt[1:3] for opt in commands.table[cmdname][1])
640 630 pullopts.update(kwargs)
641 631 pullcmd(ui, repo, **pullopts)
642 632 except Exception:
643 633 ui.warn(_('pull failed: %s\n') % sys.exc_info()[1])
644 634 else:
645 635 ui.warn(_("'%s' found remotely\n") % mayberemote)
646 636 return orig(ui, repo, node, rev, **opts)
647 637
648 638 def _pull(orig, ui, repo, source="default", **opts):
649 639 # Copy paste from `pull` command
650 640 source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
651 641
652 642 scratchbookmarks = {}
653 643 unfi = repo.unfiltered()
654 644 unknownnodes = []
655 645 for rev in opts.get('rev', []):
656 646 if rev not in unfi:
657 647 unknownnodes.append(rev)
658 648 if opts.get('bookmark'):
659 649 bookmarks = []
660 650 revs = opts.get('rev') or []
661 651 for bookmark in opts.get('bookmark'):
662 652 if _scratchbranchmatcher(bookmark):
663 653 # rev is not known yet
664 654 # it will be fetched with listkeyspatterns next
665 655 scratchbookmarks[bookmark] = 'REVTOFETCH'
666 656 else:
667 657 bookmarks.append(bookmark)
668 658
669 659 if scratchbookmarks:
670 660 other = hg.peer(repo, opts, source)
671 661 fetchedbookmarks = other.listkeyspatterns(
672 662 'bookmarks', patterns=scratchbookmarks)
673 663 for bookmark in scratchbookmarks:
674 664 if bookmark not in fetchedbookmarks:
675 665 raise error.Abort('remote bookmark %s not found!' %
676 666 bookmark)
677 667 scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
678 668 revs.append(fetchedbookmarks[bookmark])
679 669 opts['bookmark'] = bookmarks
680 670 opts['rev'] = revs
681 671
682 672 if scratchbookmarks or unknownnodes:
683 673 # Set anyincoming to True
684 674 extensions.wrapfunction(discovery, 'findcommonincoming',
685 675 _findcommonincoming)
686 676 try:
687 677 # Remote scratch bookmarks will be deleted because remotenames doesn't
688 678 # know about them. Let's save them before the pull and restore them after.
689 679 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
690 680 result = orig(ui, repo, source, **opts)
691 681 # TODO(stash): race condition is possible
692 682 # if scratch bookmarks were updated right after orig.
693 683 # But that's unlikely and shouldn't be harmful.
694 684 if common.isremotebooksenabled(ui):
695 685 remotescratchbookmarks.update(scratchbookmarks)
696 686 _saveremotebookmarks(repo, remotescratchbookmarks, source)
697 687 else:
698 688 _savelocalbookmarks(repo, scratchbookmarks)
699 689 return result
700 690 finally:
701 691 if scratchbookmarks:
702 692 extensions.unwrapfunction(discovery, 'findcommonincoming')
703 693
704 694 def _readscratchremotebookmarks(ui, repo, other):
705 695 if common.isremotebooksenabled(ui):
706 696 remotenamesext = extensions.find('remotenames')
707 697 remotepath = remotenamesext.activepath(repo.ui, other)
708 698 result = {}
709 699 # Let's refresh remotenames to make sure we have it up to date
710 700 # Seems that `repo.names['remotebookmarks']` may return stale bookmarks
711 701 # and it results in deleting scratch bookmarks. Our best guess for how to
712 702 # fix it is to use `clearnames()`.
713 703 repo._remotenames.clearnames()
714 704 for remotebookmark in repo.names['remotebookmarks'].listnames(repo):
715 705 path, bookname = remotenamesext.splitremotename(remotebookmark)
716 706 if path == remotepath and _scratchbranchmatcher(bookname):
717 707 nodes = repo.names['remotebookmarks'].nodes(repo,
718 708 remotebookmark)
719 709 if nodes:
720 710 result[bookname] = hex(nodes[0])
721 711 return result
722 712 else:
723 713 return {}
724 714
725 715 def _saveremotebookmarks(repo, newbookmarks, remote):
726 716 remotenamesext = extensions.find('remotenames')
727 717 remotepath = remotenamesext.activepath(repo.ui, remote)
728 718 branches = collections.defaultdict(list)
729 719 bookmarks = {}
730 720 remotenames = remotenamesext.readremotenames(repo)
731 721 for hexnode, nametype, remote, rname in remotenames:
732 722 if remote != remotepath:
733 723 continue
734 724 if nametype == 'bookmarks':
735 725 if rname in newbookmarks:
736 726 # It's possible that we have a normal bookmark that matches the
737 727 # scratch branch pattern. In this case just use the current
738 728 # bookmark node.
739 729 del newbookmarks[rname]
740 730 bookmarks[rname] = hexnode
741 731 elif nametype == 'branches':
742 732 # saveremotenames expects 20 byte binary nodes for branches
743 733 branches[rname].append(bin(hexnode))
744 734
745 735 for bookmark, hexnode in newbookmarks.iteritems():
746 736 bookmarks[bookmark] = hexnode
747 737 remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)
748 738
749 739 def _savelocalbookmarks(repo, bookmarks):
750 740 if not bookmarks:
751 741 return
752 742 with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr:
753 743 changes = []
754 744 for scratchbook, node in bookmarks.iteritems():
755 745 changectx = repo[node]
756 746 changes.append((scratchbook, changectx.node()))
757 747 repo._bookmarks.applychanges(repo, tr, changes)
758 748
759 749 def _findcommonincoming(orig, *args, **kwargs):
760 750 common, inc, remoteheads = orig(*args, **kwargs)
761 751 return common, True, remoteheads
762 752
763 753 def _push(orig, ui, repo, dest=None, *args, **opts):
764 754 bookmark = opts.get('to') or ''
765 755 create = opts.get('create') or False
766 756
767 757 oldphasemove = None
768 758 overrides = {(experimental, configbookmark): bookmark,
769 759 (experimental, configcreate): create}
770 760
771 761 with ui.configoverride(overrides, 'infinitepush'):
772 762 scratchpush = opts.get('bundle_store')
773 763 if _scratchbranchmatcher(bookmark):
774 764 scratchpush = True
775 765 # bundle2 can be sent back after push (for example, bundle2
776 766 # containing `pushkey` part to update bookmarks)
777 767 ui.setconfig(experimental, 'bundle2.pushback', True)
778 768
779 769 ui.setconfig(experimental, confignonforwardmove,
780 770 opts.get('non_forward_move'), '--non-forward-move')
781 771 if scratchpush:
782 772 ui.setconfig(experimental, configscratchpush, True)
783 773 oldphasemove = extensions.wrapfunction(exchange,
784 774 '_localphasemove',
785 775 _phasemove)
786 776 # Copy-paste from `push` command
787 777 path = ui.paths.getpath(dest, default=('default-push', 'default'))
788 778 if not path:
789 779 raise error.Abort(_('default repository not configured!'),
790 780 hint=_("see 'hg help config.paths'"))
791 781 destpath = path.pushloc or path.loc
792 782 # Remote scratch bookmarks will be deleted because remotenames doesn't
793 783 # know about them. Let's save them before the push and restore them after.
794 784 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
795 785 result = orig(ui, repo, dest, *args, **opts)
796 786 if common.isremotebooksenabled(ui):
797 787 if bookmark and scratchpush:
798 788 other = hg.peer(repo, opts, destpath)
799 789 fetchedbookmarks = other.listkeyspatterns('bookmarks',
800 790 patterns=[bookmark])
801 791 remotescratchbookmarks.update(fetchedbookmarks)
802 792 _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
803 793 if oldphasemove:
804 794 exchange._localphasemove = oldphasemove
805 795 return result
806 796
807 797 def _deleteinfinitepushbookmarks(ui, repo, path, names):
808 798 """Prune remote names by removing the bookmarks we don't want anymore,
809 799 then writing the result back to disk
810 800 """
811 801 remotenamesext = extensions.find('remotenames')
812 802
813 803 # remotename format is:
814 804 # (node, nametype ("branches" or "bookmarks"), remote, name)
815 805 nametype_idx = 1
816 806 remote_idx = 2
817 807 name_idx = 3
818 808 remotenames = [remotename for remotename in \
819 809 remotenamesext.readremotenames(repo) \
820 810 if remotename[remote_idx] == path]
821 811 remote_bm_names = [remotename[name_idx] for remotename in \
822 812 remotenames if remotename[nametype_idx] == "bookmarks"]
823 813
824 814 for name in names:
825 815 if name not in remote_bm_names:
826 816 raise error.Abort(_("infinitepush bookmark '{}' does not exist "
827 817 "in path '{}'").format(name, path))
828 818
829 819 bookmarks = {}
830 820 branches = collections.defaultdict(list)
831 821 for node, nametype, remote, name in remotenames:
832 822 if nametype == "bookmarks" and name not in names:
833 823 bookmarks[name] = node
834 824 elif nametype == "branches":
835 825 # saveremotenames wants binary nodes for branches
836 826 branches[name].append(bin(node))
837 827
838 828 remotenamesext.saveremotenames(repo, path, branches, bookmarks)
839 829
840 830 def _phasemove(orig, pushop, nodes, phase=phases.public):
841 831 """prevent commits from being marked public
842 832
843 833 Since these are going to a scratch branch, they aren't really being
844 834 published."""
845 835
846 836 if phase != phases.public:
847 837 orig(pushop, nodes, phase)
848 838
849 839 @exchange.b2partsgenerator(scratchbranchparttype)
850 840 def partgen(pushop, bundler):
851 841 bookmark = pushop.ui.config(experimental, configbookmark)
852 842 create = pushop.ui.configbool(experimental, configcreate)
853 843 scratchpush = pushop.ui.configbool(experimental, configscratchpush)
854 844 if 'changesets' in pushop.stepsdone or not scratchpush:
855 845 return
856 846
857 847 if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
858 848 return
859 849
860 850 pushop.stepsdone.add('changesets')
861 pushop.stepsdone.add('treepack')
862 851 if not pushop.outgoing.missing:
863 852 pushop.ui.status(_('no changes found\n'))
864 853 pushop.cgresult = 0
865 854 return
866 855
867 856 # This parameter tells the server that the following bundle is an
868 857 # infinitepush. This lets it switch the part processing to our infinitepush
869 858 # code path.
870 859 bundler.addparam("infinitepush", "True")
871 860
872 861 nonforwardmove = pushop.force or pushop.ui.configbool(experimental,
873 862 confignonforwardmove)
874 863 scratchparts = bundleparts.getscratchbranchparts(pushop.repo,
875 864 pushop.remote,
876 865 pushop.outgoing,
877 866 nonforwardmove,
878 867 pushop.ui,
879 868 bookmark,
880 869 create)
881 870
882 871 for scratchpart in scratchparts:
883 872 bundler.addpart(scratchpart)
884 873
885 874 def handlereply(op):
886 875 # server either succeeds or aborts; no code to read
887 876 pushop.cgresult = 1
888 877
889 878 return handlereply
890 879
891 880 bundle2.capabilities[bundleparts.scratchbranchparttype] = ()
892 881
893 882 def _getrevs(bundle, oldnode, force, bookmark):
894 883 'extracts and validates the revs to be imported'
895 884 revs = [bundle[r] for r in bundle.revs('sort(bundle())')]
896 885
897 886 # new bookmark
898 887 if oldnode is None:
899 888 return revs
900 889
901 890 # Fast forward update
902 891 if oldnode in bundle and list(bundle.set('bundle() & %s::', oldnode)):
903 892 return revs
904 893
905 894 # Forced non-fast forward update
906 895 if force:
907 896 return revs
908 897 else:
909 898 raise error.Abort(_('non-forward push'),
910 899 hint=_('use --non-forward-move to override'))
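
The fast-forward test above is a revset check: the update is a fast-forward if the old bookmark node is in the bundle and some bundle revision descends from it. A toy model of that rule over a plain dict DAG (purely illustrative; the real code uses changelog revsets):

    # node -> list of parents; 'a' <- 'b' <- 'c'
    dag = {'a': [], 'b': ['a'], 'c': ['b']}

    def isancestor(dag, old, head):
        # True if old == head or old is an ancestor of head
        seen, stack = set(), [head]
        while stack:
            n = stack.pop()
            if n == old:
                return True
            if n not in seen:
                seen.add(n)
                stack.extend(dag.get(n, []))
        return False

    assert isancestor(dag, 'a', 'c')      # fast-forward: allowed
    assert not isancestor(dag, 'c', 'a')  # backwards move: rejected
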
911 900
912 901 @contextlib.contextmanager
913 902 def logservicecall(logger, service, **kwargs):
914 903 start = time.time()
915 904 logger(service, eventtype='start', **kwargs)
916 905 try:
917 906 yield
918 907 logger(service, eventtype='success',
919 908 elapsedms=(time.time() - start) * 1000, **kwargs)
920 909 except Exception as e:
921 910 logger(service, eventtype='failure',
922 911 elapsedms=(time.time() - start) * 1000, errormsg=str(e),
923 912 **kwargs)
924 913 raise
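
A usage sketch for this context manager, with a list-collecting stand-in for the ui-backed logger created below:

    events = []
    def toylogger(service, **kwargs):
        events.append((service, kwargs.get('eventtype')))

    with logservicecall(toylogger, 'bundlestore', bundlesize=1024):
        pass  # the guarded work goes here
    # 'start' is logged on entry, 'success' (with elapsedms) on exit;
    # if the body raises, a 'failure' event carrying errormsg is logged
    # and the exception is re-raised
    assert [e[1] for e in events] == ['start', 'success']
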
925 914
926 915 def _getorcreateinfinitepushlogger(op):
927 916 logger = op.records['infinitepushlogger']
928 917 if not logger:
929 918 ui = op.repo.ui
930 919 try:
931 920 username = util.getuser()
932 921 except Exception:
933 922 username = 'unknown'
934 923 # Generate random request id to be able to find all logged entries
935 924 # for the same request. Since requestid is pseudo-generated it may
936 925 # not be unique, but we assume that (hostname, username, requestid)
937 926 # is unique.
938 927 random.seed()
939 928 requestid = random.randint(0, 2000000000)
940 929 hostname = socket.gethostname()
941 930 logger = functools.partial(ui.log, 'infinitepush', user=username,
942 931 requestid=requestid, hostname=hostname,
943 932 reponame=ui.config('infinitepush',
944 933 'reponame'))
945 934 op.records.add('infinitepushlogger', logger)
946 935 else:
947 936 logger = logger[0]
948 937 return logger
949 938
950 939 def processparts(orig, repo, op, unbundler):
951 940 if unbundler.params.get('infinitepush') != 'True':
952 941 return orig(repo, op, unbundler)
953 942
954 943 handleallparts = repo.ui.configbool('infinitepush', 'storeallparts')
955 944
956 partforwardingwhitelist = []
957 try:
958 treemfmod = extensions.find('treemanifest')
959 partforwardingwhitelist.append(treemfmod.TREEGROUP_PARTTYPE2)
960 except KeyError:
961 pass
962
963 945 bundler = bundle2.bundle20(repo.ui)
964 946 cgparams = None
965 947 with bundle2.partiterator(repo, op, unbundler) as parts:
966 948 for part in parts:
967 949 bundlepart = None
968 950 if part.type == 'replycaps':
969 951 # This configures the current operation to allow reply parts.
970 952 bundle2._processpart(op, part)
971 953 elif part.type == bundleparts.scratchbranchparttype:
972 954 # Scratch branch parts need to be converted to normal
973 955 # changegroup parts, and the extra parameters stored for later
974 956 # when we upload to the store. Eventually those parameters will
975 957 # be put on the actual bundle instead of this part, then we can
976 958 # send a vanilla changegroup instead of the scratchbranch part.
977 959 cgversion = part.params.get('cgversion', '01')
978 960 bundlepart = bundle2.bundlepart('changegroup', data=part.read())
979 961 bundlepart.addparam('version', cgversion)
980 962 cgparams = part.params
981 963
982 964 # If we're not dumping all parts into the new bundle, we need to
983 965 # alert the future pushkey and phase-heads handler to skip
984 966 # the part.
985 967 if not handleallparts:
986 968 op.records.add(scratchbranchparttype + '_skippushkey', True)
987 969 op.records.add(scratchbranchparttype + '_skipphaseheads',
988 970 True)
989 971 else:
990 if handleallparts or part.type in partforwardingwhitelist:
972 if handleallparts:
991 973 # Ideally we would not process any parts, and instead just
992 974 # forward them to the bundle for storage, but since this
993 975 # differs from previous behavior, we need to put it behind a
994 976 # config flag for incremental rollout.
995 977 bundlepart = bundle2.bundlepart(part.type, data=part.read())
996 978 for key, value in part.params.iteritems():
997 979 bundlepart.addparam(key, value)
998 980
999 981 # Certain parts require a response
1000 982 if part.type == 'pushkey':
1001 983 if op.reply is not None:
1002 984 rpart = op.reply.newpart('reply:pushkey')
1003 985 rpart.addparam('in-reply-to', str(part.id),
1004 986 mandatory=False)
1005 987 rpart.addparam('return', '1', mandatory=False)
1006 988 else:
1007 989 bundle2._processpart(op, part)
1008 990
1009 991 if handleallparts:
1010 992 op.records.add(part.type, {
1011 993 'return': 1,
1012 994 })
1013 995 if bundlepart:
1014 996 bundler.addpart(bundlepart)
1015 997
1016 998 # If commits were sent, store them
1017 999 if cgparams:
1018 1000 buf = util.chunkbuffer(bundler.getchunks())
1019 1001 fd, bundlefile = tempfile.mkstemp()
1020 1002 try:
1021 1003 try:
1022 1004 fp = os.fdopen(fd, 'wb')
1023 1005 fp.write(buf.read())
1024 1006 finally:
1025 1007 fp.close()
1026 1008 storebundle(op, cgparams, bundlefile)
1027 1009 finally:
1028 1010 try:
1029 1011 os.unlink(bundlefile)
1030 1012 except Exception:
1031 1013 # we would rather see the original exception
1032 1014 pass
1033 1015
1034 1016 def storebundle(op, params, bundlefile):
1035 1017 log = _getorcreateinfinitepushlogger(op)
1036 1018 parthandlerstart = time.time()
1037 1019 log(scratchbranchparttype, eventtype='start')
1038 1020 index = op.repo.bundlestore.index
1039 1021 store = op.repo.bundlestore.store
1040 1022 op.records.add(scratchbranchparttype + '_skippushkey', True)
1041 1023
1042 1024 bundle = None
1043 1025 try: # guards bundle
1044 1026 bundlepath = "bundle:%s+%s" % (op.repo.root, bundlefile)
1045 1027 bundle = hg.repository(op.repo.ui, bundlepath)
1046 1028
1047 1029 bookmark = params.get('bookmark')
1048 1030 bookprevnode = params.get('bookprevnode', '')
1049 1031 create = params.get('create')
1050 1032 force = params.get('force')
1051 1033
1052 1034 if bookmark:
1053 1035 oldnode = index.getnode(bookmark)
1054 1036
1055 1037 if not oldnode and not create:
1056 1038 raise error.Abort("unknown bookmark %s" % bookmark,
1057 1039 hint="use --create if you want to create one")
1058 1040 else:
1059 1041 oldnode = None
1060 1042 bundleheads = bundle.revs('heads(bundle())')
1061 1043 if bookmark and len(bundleheads) > 1:
1062 1044 raise error.Abort(
1063 1045 _('cannot push more than one head to a scratch branch'))
1064 1046
1065 1047 revs = _getrevs(bundle, oldnode, force, bookmark)
1066 1048
1067 1049 # Notify the user of what is being pushed
1068 1050 plural = 's' if len(revs) > 1 else ''
1069 1051 op.repo.ui.warn(_("pushing %s commit%s:\n") % (len(revs), plural))
1070 1052 maxoutput = 10
1071 1053 for i in range(0, min(len(revs), maxoutput)):
1072 1054 firstline = bundle[revs[i]].description().split('\n')[0][:50]
1073 1055 op.repo.ui.warn((" %s %s\n") % (revs[i], firstline))
1074 1056
1075 1057 if len(revs) > maxoutput + 1:
1076 1058 op.repo.ui.warn((" ...\n"))
1077 1059 firstline = bundle[revs[-1]].description().split('\n')[0][:50]
1078 1060 op.repo.ui.warn((" %s %s\n") % (revs[-1], firstline))
1079 1061
1080 1062 nodesctx = [bundle[rev] for rev in revs]
1081 1063 inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
1082 1064 if bundleheads:
1083 1065 newheadscount = sum(not inindex(rev) for rev in bundleheads)
1084 1066 else:
1085 1067 newheadscount = 0
1086 1068 # If there's a bookmark specified, there should be only one head,
1087 1069 # so we choose the last node, which will be that head.
1088 1070 # If a bug or malicious client allows there to be a bookmark
1089 1071 # with multiple heads, we will place the bookmark on the last head.
1090 1072 bookmarknode = nodesctx[-1].hex() if nodesctx else None
1091 1073 key = None
1092 1074 if newheadscount:
1093 1075 with open(bundlefile, 'r') as f:
1094 1076 bundledata = f.read()
1095 1077 with logservicecall(log, 'bundlestore',
1096 1078 bundlesize=len(bundledata)):
1097 1079 bundlesizelimit = 100 * 1024 * 1024 # 100 MB
1098 1080 if len(bundledata) > bundlesizelimit:
1099 1081 error_msg = ('bundle is too big: %d bytes. ' +
1100 1082 'max allowed size is 100 MB')
1101 1083 raise error.Abort(error_msg % (len(bundledata),))
1102 1084 key = store.write(bundledata)
1103 1085
1104 1086 with logservicecall(log, 'index', newheadscount=newheadscount), index:
1105 1087 if key:
1106 1088 index.addbundle(key, nodesctx)
1107 1089 if bookmark:
1108 1090 index.addbookmark(bookmark, bookmarknode)
1109 1091 _maybeaddpushbackpart(op, bookmark, bookmarknode,
1110 1092 bookprevnode, params)
1111 1093 log(scratchbranchparttype, eventtype='success',
1112 1094 elapsedms=(time.time() - parthandlerstart) * 1000)
1113 1095
1114 1096 fillmetadatabranchpattern = op.repo.ui.config(
1115 1097 'infinitepush', 'fillmetadatabranchpattern', '')
1116 1098 if bookmark and fillmetadatabranchpattern:
1117 1099 __, __, matcher = util.stringmatcher(fillmetadatabranchpattern)
1118 1100 if matcher(bookmark):
1119 1101 _asyncsavemetadata(op.repo.root,
1120 1102 [ctx.hex() for ctx in nodesctx])
1121 1103 except Exception as e:
1122 1104 log(scratchbranchparttype, eventtype='failure',
1123 1105 elapsedms=(time.time() - parthandlerstart) * 1000,
1124 1106 errormsg=str(e))
1125 1107 raise
1126 1108 finally:
1127 1109 if bundle:
1128 1110 bundle.close()
1129 1111
1130 1112 @bundle2.parthandler(scratchbranchparttype,
1131 1113 ('bookmark', 'bookprevnode', 'create', 'force',
1132 1114 'pushbackbookmarks', 'cgversion'))
1133 1115 def bundle2scratchbranch(op, part):
1134 1116 '''unbundle a bundle2 part containing a changegroup to store'''
1135 1117
1136 1118 bundler = bundle2.bundle20(op.repo.ui)
1137 1119 cgversion = part.params.get('cgversion', '01')
1138 1120 cgpart = bundle2.bundlepart('changegroup', data=part.read())
1139 1121 cgpart.addparam('version', cgversion)
1140 1122 bundler.addpart(cgpart)
1141 1123 buf = util.chunkbuffer(bundler.getchunks())
1142 1124
1143 1125 fd, bundlefile = tempfile.mkstemp()
1144 1126 try:
1145 1127 try:
1146 1128 fp = os.fdopen(fd, 'wb')
1147 1129 fp.write(buf.read())
1148 1130 finally:
1149 1131 fp.close()
1150 1132 storebundle(op, part.params, bundlefile)
1151 1133 finally:
1152 1134 try:
1153 1135 os.unlink(bundlefile)
1154 1136 except OSError as e:
1155 1137 if e.errno != errno.ENOENT:
1156 1138 raise
1157 1139
1158 1140 return 1
1159 1141
1160 1142 def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
1161 1143 if params.get('pushbackbookmarks'):
1162 1144 if op.reply and 'pushback' in op.reply.capabilities:
1163 1145 params = {
1164 1146 'namespace': 'bookmarks',
1165 1147 'key': bookmark,
1166 1148 'new': newnode,
1167 1149 'old': oldnode,
1168 1150 }
1169 1151 op.reply.newpart('pushkey', mandatoryparams=params.iteritems())
1170 1152
1171 1153 def bundle2pushkey(orig, op, part):
1172 1154 '''Wrapper of bundle2.handlepushkey()
1173 1155
1174 1156 The only goal is to skip calling the original function if a flag is set.
1175 1157 The flag is set when an infinitepush push is happening.
1176 1158 '''
1177 1159 if op.records[scratchbranchparttype + '_skippushkey']:
1178 1160 if op.reply is not None:
1179 1161 rpart = op.reply.newpart('reply:pushkey')
1180 1162 rpart.addparam('in-reply-to', str(part.id), mandatory=False)
1181 1163 rpart.addparam('return', '1', mandatory=False)
1182 1164 return 1
1183 1165
1184 1166 return orig(op, part)
1185 1167
1186 1168 def bundle2handlephases(orig, op, part):
1187 1169 '''Wrapper of bundle2.handlephases()
1188 1170
1189 1171 The only goal is to skip calling the original function if a flag is set.
1190 1172 The flag is set when an infinitepush push is happening.
1191 1173 '''
1192 1174
1193 1175 if op.records[scratchbranchparttype + '_skipphaseheads']:
1194 1176 return
1195 1177
1196 1178 return orig(op, part)
1197 1179
1198 1180 def _asyncsavemetadata(root, nodes):
1199 1181 '''starts a separate process that fills metadata for the nodes
1200 1182
1201 1183 This function creates a separate process and doesn't wait for its
1202 1184 completion. This was done to avoid slowing down pushes
1203 1185 '''
1204 1186
1205 1187 maxnodes = 50
1206 1188 if len(nodes) > maxnodes:
1207 1189 return
1208 1190 nodesargs = []
1209 1191 for node in nodes:
1210 1192 nodesargs.append('--node')
1211 1193 nodesargs.append(node)
1212 1194 with open(os.devnull, 'w+b') as devnull:
1213 1195 cmdline = [util.hgexecutable(), 'debugfillinfinitepushmetadata',
1214 1196 '-R', root] + nodesargs
1215 1197 # Process will run in background. We don't care about the return code
1216 1198 subprocess.Popen(cmdline, close_fds=True, shell=False,
1217 1199 stdin=devnull, stdout=devnull, stderr=devnull)
@@ -1,132 +1,119 b''
1 1 # Copyright 2017 Facebook, Inc.
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 from __future__ import absolute_import
7 7
8 8 from mercurial.i18n import _
9 9
10 10 from mercurial import (
11 11 bundle2,
12 12 changegroup,
13 13 error,
14 14 extensions,
15 15 revsetlang,
16 16 util,
17 17 )
18 18
19 19 from . import common
20 20
21 21 isremotebooksenabled = common.isremotebooksenabled
22 22
23 23 scratchbranchparttype = 'b2x:infinitepush'
24 24
25 25 def getscratchbranchparts(repo, peer, outgoing, confignonforwardmove,
26 26 ui, bookmark, create):
27 27 if not outgoing.missing:
28 28 raise error.Abort(_('no commits to push'))
29 29
30 30 if scratchbranchparttype not in bundle2.bundle2caps(peer):
31 31 raise error.Abort(_('no server support for %r') % scratchbranchparttype)
32 32
33 33 _validaterevset(repo, revsetlang.formatspec('%ln', outgoing.missing),
34 34 bookmark)
35 35
36 36 supportedversions = changegroup.supportedoutgoingversions(repo)
37 37 # Explicitly avoid using '01' changegroup version in infinitepush to
38 38 # support general delta
39 39 supportedversions.discard('01')
40 40 cgversion = min(supportedversions)
41 41 _handlelfs(repo, outgoing.missing)
42 42 cg = changegroup.makestream(repo, outgoing, cgversion, 'push')
43 43
44 44 params = {}
45 45 params['cgversion'] = cgversion
46 46 if bookmark:
47 47 params['bookmark'] = bookmark
48 48 # 'bookprevnode' is necessary for the pushkey reply part
49 49 params['bookprevnode'] = ''
50 50 if bookmark in repo:
51 51 params['bookprevnode'] = repo[bookmark].hex()
52 52 if create:
53 53 params['create'] = '1'
54 54 if confignonforwardmove:
55 55 params['force'] = '1'
56 56
57 57 # Do not send pushback bundle2 part with bookmarks if remotenames extension
58 58 # is enabled. It will be handled manually in `_push()`
59 59 if not isremotebooksenabled(ui):
60 60 params['pushbackbookmarks'] = '1'
61 61
62 62 parts = []
63 63
64 64 # .upper() marks this as a mandatory part: server will abort if there's no
65 65 # handler
66 66 parts.append(bundle2.bundlepart(
67 67 scratchbranchparttype.upper(),
68 68 advisoryparams=params.iteritems(),
69 69 data=cg))
70 70
71 try:
72 treemod = extensions.find('treemanifest')
73 mfnodes = []
74 for node in outgoing.missing:
75 mfnodes.append(('', repo[node].manifestnode()))
76
77 # Only include the tree parts if they all exist
78 if not repo.manifestlog.datastore.getmissing(mfnodes):
79 parts.append(treemod.createtreepackpart(
80 repo, outgoing, treemod.TREEGROUP_PARTTYPE2))
81 except KeyError:
82 pass
83
84 71 return parts
85 72
86 73 def _validaterevset(repo, revset, bookmark):
87 74 """Abort if the revs to be pushed aren't valid for a scratch branch."""
88 75 if not repo.revs(revset):
89 76 raise error.Abort(_('nothing to push'))
90 77 if bookmark:
91 78 # Allow bundle with many heads only if no bookmark is specified
92 79 heads = repo.revs('heads(%r)', revset)
93 80 if len(heads) > 1:
94 81 raise error.Abort(
95 82 _('cannot push more than one head to a scratch branch'))
96 83
97 84 def _handlelfs(repo, missing):
98 85 '''Special case if lfs is enabled
99 86
100 87 If lfs is enabled then we need to call the prepush hook
101 88 to make sure large files are uploaded to lfs.
102 89 '''
103 90 try:
104 91 lfsmod = extensions.find('lfs')
105 92 lfsmod.wrapper.uploadblobsfromrevs(repo, missing)
106 93 except KeyError:
107 94 # Ignore if lfs extension is not enabled
108 95 return
109 96
110 97 class copiedpart(object):
111 98 """a copy of unbundlepart content that can be consumed later"""
112 99
113 100 def __init__(self, part):
114 101 # copy "public properties"
115 102 self.type = part.type
116 103 self.id = part.id
117 104 self.mandatory = part.mandatory
118 105 self.mandatoryparams = part.mandatoryparams
119 106 self.advisoryparams = part.advisoryparams
120 107 self.params = part.params
121 108 self.mandatorykeys = part.mandatorykeys
122 109 # copy the buffer
123 110 self._io = util.stringio(part.read())
124 111
125 112 def consume(self):
126 113 return
127 114
128 115 def read(self, size=None):
129 116 if size is None:
130 117 return self._io.read()
131 118 else:
132 119 return self._io.read(size)
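
A small usage sketch for copiedpart, with a minimal stand-in object in place of a real bundle2 unbundlepart:

    class fakepart(object):
        type = 'changegroup'
        id = 0
        mandatory = True
        mandatoryparams = ()
        advisoryparams = ()
        params = {}
        mandatorykeys = ()
        def read(self, size=None):
            return b'payload'

    copy = copiedpart(fakepart())
    assert copy.read() == b'payload'  # payload was buffered at copy time
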