infinitepush: drop the `--to` flag to push and use `-B` instead...
Pulkit Goyal
r37216:c5687ce3 default
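With this change, pushing to a scratch bookmark uses the standard `-B`
bookmark flag rather than the extension-specific `--to` flag, as the updated
tests below show:

    $ hg push -r . -B scratch/mybranch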
@@ -1,1182 +1,1188 b''
1 1 # Infinite push
2 2 #
3 3 # Copyright 2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """ store some pushes in a remote blob store on the server (EXPERIMENTAL)
8 8
9 9 [infinitepush]
10 10 # Server-side and client-side option. Pattern of the infinitepush bookmark
11 11 branchpattern = PATTERN
12 12
13 13 # Server or client
14 14 server = False
15 15
16 16 # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
17 17 indextype = disk
18 18
19 19 # Server-side option. Used only if indextype=sql.
20 20 # Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
21 21 sqlhost = IP:PORT:DB_NAME:USER:PASSWORD
22 22
23 23 # Server-side option. Used only if indextype=disk.
24 24 # Filesystem path to the index store
25 25 indexpath = PATH
26 26
27 27 # Server-side option. Possible values: 'disk' or 'external'
28 28 # Fails if not set
29 29 storetype = disk
30 30
31 31 # Server-side option.
33 33 # Path to the binary that will save the bundle to the bundlestore
33 33 # Formatted cmd line will be passed to it (see `put_args`)
34 34 put_binary = put
35 35
36 36 # Server-side option. Used only if storetype=external.
37 37 # Format cmd-line string for put binary. Placeholder: {filename}
38 38 put_args = {filename}
39 39
40 40 # Server-side option.
41 41 # Path to the binary that gets the bundle from the bundlestore.
42 42 # Formatted cmd line will be passed to it (see `get_args`)
43 43 get_binary = get
44 44
45 45 # Server-side option. Used only if storetype=external.
46 46 # Format cmd-line string for get binary. Placeholders: {filename} {handle}
47 47 get_args = {filename} {handle}
48 48
49 49 # Server-side option
50 50 logfile = FILE
51 51
52 52 # Server-side option
53 53 loglevel = DEBUG
54 54
55 55 # Server-side option. Used only if indextype=sql.
56 56 # Sets mysql wait_timeout option.
57 57 waittimeout = 300
58 58
59 59 # Server-side option. Used only if indextype=sql.
60 60 # Sets mysql innodb_lock_wait_timeout option.
61 61 locktimeout = 120
62 62
63 63 # Server-side option. Used only if indextype=sql.
64 64 # Name of the repository
65 65 reponame = ''
66 66
67 67 # Client-side option. Used by --list-remote option. List of remote scratch
68 68 # patterns to list if no patterns are specified.
69 69 defaultremotepatterns = ['*']
70 70
71 71 # Server-side option. If a bookmark that was pushed matches
72 72 # `fillmetadatabranchpattern` then a background
73 73 # `hg debugfillinfinitepushmetadata` process will save metadata
74 74 # in the infinitepush index for nodes that are ancestors of the bookmark.
75 75 fillmetadatabranchpattern = ''
76 76
77 77 # Instructs infinitepush to forward all received bundle2 parts to the
78 78 # bundle for storage. Defaults to False.
79 79 storeallparts = True
80 80
81 81 [remotenames]
82 82 # Client-side option
83 83 # This option should be set only if remotenames extension is enabled.
84 84 # Whether remote bookmarks are tracked by remotenames extension.
85 85 bookmarks = True
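# For illustration only: a minimal client-side configuration combining the
# options above might look roughly like this (the scratch branch pattern is
# an example, not a shipped default):
#
# [extensions]
# infinitepush =
#
# [infinitepush]
# server = False
# branchpattern = re:scratch/.+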
86 86 """
87 87
88 88 from __future__ import absolute_import
89 89
90 90 import collections
91 91 import contextlib
92 92 import errno
93 93 import functools
94 94 import logging
95 95 import os
96 96 import random
97 97 import re
98 98 import socket
99 99 import subprocess
100 100 import sys
101 101 import tempfile
102 102 import time
103 103
104 104 from mercurial.node import (
105 105 bin,
106 106 hex,
107 107 )
108 108
109 109 from mercurial.i18n import _
110 110
111 111 from mercurial import (
112 112 bundle2,
113 113 changegroup,
114 114 commands,
115 115 discovery,
116 116 encoding,
117 117 error,
118 118 exchange,
119 119 extensions,
120 120 hg,
121 121 localrepo,
122 122 peer,
123 123 phases,
124 124 pushkey,
125 125 registrar,
126 126 util,
127 127 wireproto,
128 128 )
129 129
130 130 from . import (
131 131 bundleparts,
132 132 common,
133 133 infinitepushcommands,
134 134 )
135 135
136 136 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
137 137 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
138 138 # be specifying the version(s) of Mercurial they are tested with, or
139 139 # leave the attribute unspecified.
140 140 testedwith = 'ships-with-hg-core'
141 141
142 142 configtable = {}
143 143 configitem = registrar.configitem(configtable)
144 144
145 145 configitem('infinitepush', 'server',
146 146 default=False,
147 147 )
148 148 configitem('infinitepush', 'storetype',
149 149 default='',
150 150 )
151 151 configitem('infinitepush', 'indextype',
152 152 default='',
153 153 )
154 154 configitem('infinitepush', 'indexpath',
155 155 default='',
156 156 )
157 157 configitem('infinitepush', 'fillmetadatabranchpattern',
158 158 default='',
159 159 )
160 160 configitem('infinitepush', 'storeallparts',
161 161 default=False,
162 162 )
163 163 configitem('infinitepush', 'reponame',
164 164 default='',
165 165 )
166 166 configitem('scratchbranch', 'storepath',
167 167 default='',
168 168 )
169 169 configitem('infinitepush', 'branchpattern',
170 170 default='',
171 171 )
172 172 configitem('infinitepush', 'metadatafilelimit',
173 173 default=100,
174 174 )
175 175 configitem('experimental', 'server-bundlestore-bookmark',
176 176 default='',
177 177 )
178 178 configitem('experimental', 'infinitepush-scratchpush',
179 179 default=False,
180 180 )
181 181 configitem('experimental', 'non-forward-move',
182 182 default=False,
183 183 )
184 184
185 185 experimental = 'experimental'
186 186 configbookmark = 'server-bundlestore-bookmark'
187 187 configscratchpush = 'infinitepush-scratchpush'
188 188 confignonforwardmove = 'non-forward-move'
189 189
190 190 scratchbranchparttype = bundleparts.scratchbranchparttype
191 191 cmdtable = infinitepushcommands.cmdtable
192 192 revsetpredicate = registrar.revsetpredicate()
193 193 templatekeyword = registrar.templatekeyword()
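# Matches nothing by default; commonsetup() replaces this with a matcher
# built from the infinitepush.branchpattern config when one is set.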
194 194 _scratchbranchmatcher = lambda x: False
195 195 _maybehash = re.compile(r'^[a-f0-9]+$').search
196 196
197 197 def _buildexternalbundlestore(ui):
198 198 put_args = ui.configlist('infinitepush', 'put_args', [])
199 199 put_binary = ui.config('infinitepush', 'put_binary')
200 200 if not put_binary:
201 201 raise error.Abort('put binary is not specified')
202 202 get_args = ui.configlist('infinitepush', 'get_args', [])
203 203 get_binary = ui.config('infinitepush', 'get_binary')
204 204 if not get_binary:
205 205 raise error.Abort('get binary is not specified')
206 206 from . import store
207 207 return store.externalbundlestore(put_binary, put_args, get_binary, get_args)
208 208
209 209 def _buildsqlindex(ui):
210 210 sqlhost = ui.config('infinitepush', 'sqlhost')
211 211 if not sqlhost:
212 212 raise error.Abort(_('please set infinitepush.sqlhost'))
213 213 host, port, db, user, password = sqlhost.split(':')
214 214 reponame = ui.config('infinitepush', 'reponame')
215 215 if not reponame:
216 216 raise error.Abort(_('please set infinitepush.reponame'))
217 217
218 218 logfile = ui.config('infinitepush', 'logfile', '')
219 219 waittimeout = ui.configint('infinitepush', 'waittimeout', 300)
220 220 locktimeout = ui.configint('infinitepush', 'locktimeout', 120)
221 221 from . import sqlindexapi
222 222 return sqlindexapi.sqlindexapi(
223 223 reponame, host, port, db, user, password,
224 224 logfile, _getloglevel(ui), waittimeout=waittimeout,
225 225 locktimeout=locktimeout)
226 226
227 227 def _getloglevel(ui):
228 228 loglevel = ui.config('infinitepush', 'loglevel', 'DEBUG')
229 229 numeric_loglevel = getattr(logging, loglevel.upper(), None)
230 230 if not isinstance(numeric_loglevel, int):
231 231 raise error.Abort(_('invalid log level %s') % loglevel)
232 232 return numeric_loglevel
233 233
234 234 def _tryhoist(ui, remotebookmark):
235 235 '''returns a bookmark with the hoisted part removed
236 236
237 237 Remotenames extension has a 'hoist' config that allows using remote
238 238 bookmarks without specifying the remote path. For example, 'hg update master'
239 239 works as well as 'hg update remote/master'. We want to allow the same in
240 240 infinitepush.
241 241 '''
242 242
243 243 if common.isremotebooksenabled(ui):
244 244 hoist = ui.config('remotenames', 'hoist') + '/'
245 245 if remotebookmark.startswith(hoist):
246 246 return remotebookmark[len(hoist):]
247 247 return remotebookmark
248 248
249 249 class bundlestore(object):
250 250 def __init__(self, repo):
251 251 self._repo = repo
252 252 storetype = self._repo.ui.config('infinitepush', 'storetype', '')
253 253 if storetype == 'disk':
254 254 from . import store
255 255 self.store = store.filebundlestore(self._repo.ui, self._repo)
256 256 elif storetype == 'external':
257 257 self.store = _buildexternalbundlestore(self._repo.ui)
258 258 else:
259 259 raise error.Abort(
260 260 _('unknown infinitepush store type specified %s') % storetype)
261 261
262 262 indextype = self._repo.ui.config('infinitepush', 'indextype', '')
263 263 if indextype == 'disk':
264 264 from . import fileindexapi
265 265 self.index = fileindexapi.fileindexapi(self._repo)
266 266 elif indextype == 'sql':
267 267 self.index = _buildsqlindex(self._repo.ui)
268 268 else:
269 269 raise error.Abort(
270 270 _('unknown infinitepush index type specified %s') % indextype)
271 271
272 272 def _isserver(ui):
273 273 return ui.configbool('infinitepush', 'server')
274 274
275 275 def reposetup(ui, repo):
276 276 if _isserver(ui) and repo.local():
277 277 repo.bundlestore = bundlestore(repo)
278 278
279 279 def extsetup(ui):
280 280 commonsetup(ui)
281 281 if _isserver(ui):
282 282 serverextsetup(ui)
283 283 else:
284 284 clientextsetup(ui)
285 285
286 286 def commonsetup(ui):
287 287 wireproto.commands['listkeyspatterns'] = (
288 288 wireprotolistkeyspatterns, 'namespace patterns')
289 289 scratchbranchpat = ui.config('infinitepush', 'branchpattern')
290 290 if scratchbranchpat:
291 291 global _scratchbranchmatcher
292 292 kind, pat, _scratchbranchmatcher = util.stringmatcher(scratchbranchpat)
293 293
294 294 def serverextsetup(ui):
295 295 origpushkeyhandler = bundle2.parthandlermapping['pushkey']
296 296
297 297 def newpushkeyhandler(*args, **kwargs):
298 298 bundle2pushkey(origpushkeyhandler, *args, **kwargs)
299 299 newpushkeyhandler.params = origpushkeyhandler.params
300 300 bundle2.parthandlermapping['pushkey'] = newpushkeyhandler
301 301
302 302 orighandlephasehandler = bundle2.parthandlermapping['phase-heads']
303 303 newphaseheadshandler = lambda *args, **kwargs: \
304 304 bundle2handlephases(orighandlephasehandler, *args, **kwargs)
305 305 newphaseheadshandler.params = orighandlephasehandler.params
306 306 bundle2.parthandlermapping['phase-heads'] = newphaseheadshandler
307 307
308 308 extensions.wrapfunction(localrepo.localrepository, 'listkeys',
309 309 localrepolistkeys)
310 310 wireproto.commands['lookup'] = (
311 311 _lookupwrap(wireproto.commands['lookup'][0]), 'key')
312 312 extensions.wrapfunction(exchange, 'getbundlechunks', getbundlechunks)
313 313
314 314 extensions.wrapfunction(bundle2, 'processparts', processparts)
315 315
316 316 def clientextsetup(ui):
317 317 entry = extensions.wrapcommand(commands.table, 'push', _push)
318 # Don't add the 'to' arg if it already exists
319 if not any(a for a in entry[1] if a[1] == 'to'):
320 entry[1].append(('', 'to', '', _('push revs to this bookmark')))
321 318
322 319 if not any(a for a in entry[1] if a[1] == 'non-forward-move'):
323 320 entry[1].append(('', 'non-forward-move', None,
324 321 _('allows moving a remote bookmark to an '
325 322 'arbitrary place')))
326 323
327 324 entry[1].append(
328 325 ('', 'bundle-store', None,
329 326 _('force push to go to bundle store (EXPERIMENTAL)')))
330 327
331 328 extensions.wrapcommand(commands.table, 'pull', _pull)
332 329 extensions.wrapcommand(commands.table, 'update', _update)
333 330
334 331 extensions.wrapfunction(discovery, 'checkheads', _checkheads)
335 332
336 333 wireproto.wirepeer.listkeyspatterns = listkeyspatterns
337 334
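    # Reorder the bundle2 part generators: move the scratch branch part ahead
    # of the regular changeset part so that, on scratch pushes, partgen()
    # below runs first and claims the 'changesets' step.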
338 335 partorder = exchange.b2partsgenorder
339 336 index = partorder.index('changeset')
340 337 partorder.insert(
341 338 index, partorder.pop(partorder.index(scratchbranchparttype)))
342 339
343 340 def _checkheads(orig, pushop):
344 341 if pushop.ui.configbool(experimental, configscratchpush, False):
345 342 return
346 343 return orig(pushop)
347 344
348 345 def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
349 346 patterns = wireproto.decodelist(patterns)
350 347 d = repo.listkeys(encoding.tolocal(namespace), patterns).iteritems()
351 348 return pushkey.encodekeys(d)
352 349
353 350 def localrepolistkeys(orig, self, namespace, patterns=None):
354 351 if namespace == 'bookmarks' and patterns:
355 352 index = self.bundlestore.index
356 353 results = {}
357 354 bookmarks = orig(self, namespace)
358 355 for pattern in patterns:
359 356 results.update(index.getbookmarks(pattern))
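            # Rewrite a trailing '*' glob into an explicit regex so the same
            # pattern can also be matched against ordinary bookmarks below.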
360 357 if pattern.endswith('*'):
361 358 pattern = 're:^' + pattern[:-1] + '.*'
362 359 kind, pat, matcher = util.stringmatcher(pattern)
363 360 for bookmark, node in bookmarks.iteritems():
364 361 if matcher(bookmark):
365 362 results[bookmark] = node
366 363 return results
367 364 else:
368 365 return orig(self, namespace)
369 366
370 367 @peer.batchable
371 368 def listkeyspatterns(self, namespace, patterns):
372 369 if not self.capable('pushkey'):
373 370 yield {}, None
374 371 f = peer.future()
375 372 self.ui.debug('preparing listkeys for "%s" with pattern "%s"\n' %
376 373 (namespace, patterns))
377 374 yield {
378 375 'namespace': encoding.fromlocal(namespace),
379 376 'patterns': wireproto.encodelist(patterns)
380 377 }, f
381 378 d = f.value
382 379 self.ui.debug('received listkey for "%s": %i bytes\n'
383 380 % (namespace, len(d)))
384 381 yield pushkey.decodekeys(d)
385 382
386 383 def _readbundlerevs(bundlerepo):
387 384 return list(bundlerepo.revs('bundle()'))
388 385
389 386 def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
390 387 '''Tells remotefilelog to include all changed files in the changegroup
391 388
392 389 By default remotefilelog doesn't include file content in the changegroup,
393 390 but we need to include it when fetching from the bundlestore.
394 391 '''
395 392 changedfiles = set()
396 393 cl = bundlerepo.changelog
397 394 for r in bundlerevs:
398 395 # [3] means changed files
399 396 changedfiles.update(cl.read(r)[3])
400 397 if not changedfiles:
401 398 return bundlecaps
402 399
403 400 changedfiles = '\0'.join(changedfiles)
404 401 newcaps = []
405 402 appended = False
406 403 for cap in (bundlecaps or []):
407 404 if cap.startswith('excludepattern='):
408 405 newcaps.append('\0'.join((cap, changedfiles)))
409 406 appended = True
410 407 else:
411 408 newcaps.append(cap)
412 409 if not appended:
413 410 # No excludepattern cap found. Just append it
414 411 newcaps.append('excludepattern=' + changedfiles)
415 412
416 413 return newcaps
417 414
418 415 def _rebundle(bundlerepo, bundleroots, unknownhead):
419 416 '''
420 417 A bundle may include more revisions than the user requested. For example,
421 418 if the user asks for one revision, the bundle may also contain its descendants.
422 419 This function filters out all revisions that the user did not request.
423 420 '''
424 421 parts = []
425 422
426 423 version = '02'
427 424 outgoing = discovery.outgoing(bundlerepo, commonheads=bundleroots,
428 425 missingheads=[unknownhead])
429 426 cgstream = changegroup.makestream(bundlerepo, outgoing, version, 'pull')
430 427 cgstream = util.chunkbuffer(cgstream).read()
431 428 cgpart = bundle2.bundlepart('changegroup', data=cgstream)
432 429 cgpart.addparam('version', version)
433 430 parts.append(cgpart)
434 431
435 432 return parts
436 433
437 434 def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
438 435 cl = bundlerepo.changelog
439 436 bundleroots = []
440 437 for rev in bundlerevs:
441 438 node = cl.node(rev)
442 439 parents = cl.parents(node)
443 440 for parent in parents:
444 441 # include all revs that exist in the main repo
445 442 # to make sure that the bundle can be applied client-side
446 443 if parent in oldrepo:
447 444 bundleroots.append(parent)
448 445 return bundleroots
449 446
450 447 def _needsrebundling(head, bundlerepo):
451 448 bundleheads = list(bundlerepo.revs('heads(bundle())'))
452 449 return not (len(bundleheads) == 1 and
453 450 bundlerepo[bundleheads[0]].node() == head)
454 451
455 452 def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
456 453 '''generates the bundle that will be sent to the user
457 454
458 455 returns a list of bundle2 parts to send
459 456 '''
460 457 parts = []
461 458 if not _needsrebundling(head, bundlerepo):
462 459 with util.posixfile(bundlefile, "rb") as f:
463 460 unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
464 461 if isinstance(unbundler, changegroup.cg1unpacker):
465 462 part = bundle2.bundlepart('changegroup',
466 463 data=unbundler._stream.read())
467 464 part.addparam('version', '01')
468 465 parts.append(part)
469 466 elif isinstance(unbundler, bundle2.unbundle20):
470 467 haschangegroup = False
471 468 for part in unbundler.iterparts():
472 469 if part.type == 'changegroup':
473 470 haschangegroup = True
474 471 newpart = bundle2.bundlepart(part.type, data=part.read())
475 472 for key, value in part.params.iteritems():
476 473 newpart.addparam(key, value)
477 474 parts.append(newpart)
478 475
479 476 if not haschangegroup:
480 477 raise error.Abort(
481 478 'unexpected bundle without changegroup part, ' +
482 479 'head: %s' % hex(head),
483 480 hint='report to administrator')
484 481 else:
485 482 raise error.Abort('unknown bundle type')
486 483 else:
487 484 parts = _rebundle(bundlerepo, bundleroots, head)
488 485
489 486 return parts
490 487
491 488 def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
492 489 heads = heads or []
493 490 # newheads are parents of roots of scratch bundles that were requested
494 491 newphases = {}
495 492 scratchbundles = []
496 493 newheads = []
497 494 scratchheads = []
498 495 nodestobundle = {}
499 496 allbundlestocleanup = []
500 497 try:
501 498 for head in heads:
502 499 if head not in repo.changelog.nodemap:
503 500 if head not in nodestobundle:
504 501 newbundlefile = common.downloadbundle(repo, head)
505 502 bundlepath = "bundle:%s+%s" % (repo.root, newbundlefile)
506 503 bundlerepo = hg.repository(repo.ui, bundlepath)
507 504
508 505 allbundlestocleanup.append((bundlerepo, newbundlefile))
509 506 bundlerevs = set(_readbundlerevs(bundlerepo))
510 507 bundlecaps = _includefilelogstobundle(
511 508 bundlecaps, bundlerepo, bundlerevs, repo.ui)
512 509 cl = bundlerepo.changelog
513 510 bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
514 511 for rev in bundlerevs:
515 512 node = cl.node(rev)
516 513 newphases[hex(node)] = str(phases.draft)
517 514 nodestobundle[node] = (bundlerepo, bundleroots,
518 515 newbundlefile)
519 516
520 517 scratchbundles.append(
521 518 _generateoutputparts(head, *nodestobundle[head]))
522 519 newheads.extend(bundleroots)
523 520 scratchheads.append(head)
524 521 finally:
525 522 for bundlerepo, bundlefile in allbundlestocleanup:
526 523 bundlerepo.close()
527 524 try:
528 525 os.unlink(bundlefile)
529 526 except (IOError, OSError):
530 527 # if we can't clean up the file then just ignore the error,
531 528 # no need to fail
532 529 pass
533 530
534 531 pullfrombundlestore = bool(scratchbundles)
535 532 wrappedchangegrouppart = False
536 533 wrappedlistkeys = False
537 534 oldchangegrouppart = exchange.getbundle2partsmapping['changegroup']
538 535 try:
539 536 def _changegrouppart(bundler, *args, **kwargs):
541 538 # Order is important here. First add the non-scratch part
542 539 # and only then add the parts with scratch bundles, because the
543 540 # non-scratch part contains the parents of the roots of the scratch bundles.
543 540 result = oldchangegrouppart(bundler, *args, **kwargs)
544 541 for bundle in scratchbundles:
545 542 for part in bundle:
546 543 bundler.addpart(part)
547 544 return result
548 545
549 546 exchange.getbundle2partsmapping['changegroup'] = _changegrouppart
550 547 wrappedchangegrouppart = True
551 548
552 549 def _listkeys(orig, self, namespace):
553 550 origvalues = orig(self, namespace)
554 551 if namespace == 'phases' and pullfrombundlestore:
555 552 if origvalues.get('publishing') == 'True':
556 553 # Make repo non-publishing to preserve draft phase
557 554 del origvalues['publishing']
558 555 origvalues.update(newphases)
559 556 return origvalues
560 557
561 558 extensions.wrapfunction(localrepo.localrepository, 'listkeys',
562 559 _listkeys)
563 560 wrappedlistkeys = True
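        # Request the non-scratch heads plus the parents of the scratch
        # bundles from the original implementation; the scratch heads
        # themselves are served from the bundlestore parts added above.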
564 561 heads = list((set(newheads) | set(heads)) - set(scratchheads))
565 562 result = orig(repo, source, heads=heads,
566 563 bundlecaps=bundlecaps, **kwargs)
567 564 finally:
568 565 if wrappedchangegrouppart:
569 566 exchange.getbundle2partsmapping['changegroup'] = oldchangegrouppart
570 567 if wrappedlistkeys:
571 568 extensions.unwrapfunction(localrepo.localrepository, 'listkeys',
572 569 _listkeys)
573 570 return result
574 571
575 572 def _lookupwrap(orig):
576 573 def _lookup(repo, proto, key):
577 574 localkey = encoding.tolocal(key)
578 575
579 576 if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
580 577 scratchnode = repo.bundlestore.index.getnode(localkey)
581 578 if scratchnode:
582 579 return "%s %s\n" % (1, scratchnode)
583 580 else:
584 581 return "%s %s\n" % (0, 'scratch branch %s not found' % localkey)
585 582 else:
586 583 try:
587 584 r = hex(repo.lookup(localkey))
588 585 return "%s %s\n" % (1, r)
589 586 except Exception as inst:
590 587 if repo.bundlestore.index.getbundle(localkey):
591 588 return "%s %s\n" % (1, localkey)
592 589 else:
593 590 r = str(inst)
594 591 return "%s %s\n" % (0, r)
595 592 return _lookup
596 593
597 594 def _update(orig, ui, repo, node=None, rev=None, **opts):
598 595 if rev and node:
599 596 raise error.Abort(_("please specify just one revision"))
600 597
601 598 if not opts.get('date') and (rev or node) not in repo:
602 599 mayberemote = rev or node
603 600 mayberemote = _tryhoist(ui, mayberemote)
604 601 dopull = False
605 602 kwargs = {}
606 603 if _scratchbranchmatcher(mayberemote):
607 604 dopull = True
608 605 kwargs['bookmark'] = [mayberemote]
609 606 elif len(mayberemote) == 40 and _maybehash(mayberemote):
610 607 dopull = True
611 608 kwargs['rev'] = [mayberemote]
612 609
613 610 if dopull:
614 611 ui.warn(
615 612 _("'%s' does not exist locally - looking for it " +
616 613 "remotely...\n") % mayberemote)
617 614 # Try pulling node from remote repo
618 615 try:
619 616 cmdname = '^pull'
620 617 pullcmd = commands.table[cmdname][0]
621 618 pullopts = dict(opt[1:3] for opt in commands.table[cmdname][1])
622 619 pullopts.update(kwargs)
623 620 pullcmd(ui, repo, **pullopts)
624 621 except Exception:
625 622 ui.warn(_('pull failed: %s\n') % sys.exc_info()[1])
626 623 else:
627 624 ui.warn(_("'%s' found remotely\n") % mayberemote)
628 625 return orig(ui, repo, node, rev, **opts)
629 626
630 627 def _pull(orig, ui, repo, source="default", **opts):
631 628 # Copy paste from `pull` command
632 629 source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
633 630
634 631 scratchbookmarks = {}
635 632 unfi = repo.unfiltered()
636 633 unknownnodes = []
637 634 for rev in opts.get('rev', []):
638 635 if rev not in unfi:
639 636 unknownnodes.append(rev)
640 637 if opts.get('bookmark'):
641 638 bookmarks = []
642 639 revs = opts.get('rev') or []
643 640 for bookmark in opts.get('bookmark'):
644 641 if _scratchbranchmatcher(bookmark):
645 642 # rev is not known yet
646 643 # it will be fetched with listkeyspatterns next
647 644 scratchbookmarks[bookmark] = 'REVTOFETCH'
648 645 else:
649 646 bookmarks.append(bookmark)
650 647
651 648 if scratchbookmarks:
652 649 other = hg.peer(repo, opts, source)
653 650 fetchedbookmarks = other.listkeyspatterns(
654 651 'bookmarks', patterns=scratchbookmarks)
655 652 for bookmark in scratchbookmarks:
656 653 if bookmark not in fetchedbookmarks:
657 654 raise error.Abort('remote bookmark %s not found!' %
658 655 bookmark)
659 656 scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
660 657 revs.append(fetchedbookmarks[bookmark])
661 658 opts['bookmark'] = bookmarks
662 659 opts['rev'] = revs
663 660
664 661 if scratchbookmarks or unknownnodes:
665 662 # Set anyincoming to True
666 663 extensions.wrapfunction(discovery, 'findcommonincoming',
667 664 _findcommonincoming)
668 665 try:
669 666 # Remote scratch bookmarks will be deleted because remotenames doesn't
670 667 # know about them. Let's save them before the pull and restore them after
671 668 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
672 669 result = orig(ui, repo, source, **opts)
673 670 # TODO(stash): race condition is possible
674 671 # if scratch bookmarks were updated right after orig.
675 672 # But that's unlikely and shouldn't be harmful.
676 673 if common.isremotebooksenabled(ui):
677 674 remotescratchbookmarks.update(scratchbookmarks)
678 675 _saveremotebookmarks(repo, remotescratchbookmarks, source)
679 676 else:
680 677 _savelocalbookmarks(repo, scratchbookmarks)
681 678 return result
682 679 finally:
683 680 if scratchbookmarks:
684 681 extensions.unwrapfunction(discovery, 'findcommonincoming')
685 682
686 683 def _readscratchremotebookmarks(ui, repo, other):
687 684 if common.isremotebooksenabled(ui):
688 685 remotenamesext = extensions.find('remotenames')
689 686 remotepath = remotenamesext.activepath(repo.ui, other)
690 687 result = {}
691 688 # Let's refresh remotenames to make sure they are up to date.
692 689 # Seems that `repo.names['remotebookmarks']` may return stale bookmarks
693 690 # and that results in deleting scratch bookmarks. Our best guess at how to
694 691 # fix it is to use `clearnames()`
695 692 repo._remotenames.clearnames()
696 693 for remotebookmark in repo.names['remotebookmarks'].listnames(repo):
697 694 path, bookname = remotenamesext.splitremotename(remotebookmark)
698 695 if path == remotepath and _scratchbranchmatcher(bookname):
699 696 nodes = repo.names['remotebookmarks'].nodes(repo,
700 697 remotebookmark)
701 698 if nodes:
702 699 result[bookname] = hex(nodes[0])
703 700 return result
704 701 else:
705 702 return {}
706 703
707 704 def _saveremotebookmarks(repo, newbookmarks, remote):
708 705 remotenamesext = extensions.find('remotenames')
709 706 remotepath = remotenamesext.activepath(repo.ui, remote)
710 707 branches = collections.defaultdict(list)
711 708 bookmarks = {}
712 709 remotenames = remotenamesext.readremotenames(repo)
713 710 for hexnode, nametype, remote, rname in remotenames:
714 711 if remote != remotepath:
715 712 continue
716 713 if nametype == 'bookmarks':
717 714 if rname in newbookmarks:
718 715 # It's possible that we have a normal bookmark that matches the
719 716 # scratch branch pattern. In this case just use the current
720 717 # bookmark node
721 718 del newbookmarks[rname]
722 719 bookmarks[rname] = hexnode
723 720 elif nametype == 'branches':
724 721 # saveremotenames expects 20 byte binary nodes for branches
725 722 branches[rname].append(bin(hexnode))
726 723
727 724 for bookmark, hexnode in newbookmarks.iteritems():
728 725 bookmarks[bookmark] = hexnode
729 726 remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)
730 727
731 728 def _savelocalbookmarks(repo, bookmarks):
732 729 if not bookmarks:
733 730 return
734 731 with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr:
735 732 changes = []
736 733 for scratchbook, node in bookmarks.iteritems():
737 734 changectx = repo[node]
738 735 changes.append((scratchbook, changectx.node()))
739 736 repo._bookmarks.applychanges(repo, tr, changes)
740 737
741 738 def _findcommonincoming(orig, *args, **kwargs):
742 739 common, inc, remoteheads = orig(*args, **kwargs)
743 740 return common, True, remoteheads
744 741
745 742 def _push(orig, ui, repo, dest=None, *args, **opts):
746 bookmark = opts.get('to') or ''
743
744 bookmark = opts.get('bookmark')
745 # we only support pushing one infinitepush bookmark at once
746 if len(bookmark) == 1:
747 bookmark = bookmark[0]
748 else:
749 bookmark = ''
747 750
748 751 oldphasemove = None
749 752 overrides = {(experimental, configbookmark): bookmark}
750 753
751 754 with ui.configoverride(overrides, 'infinitepush'):
752 755 scratchpush = opts.get('bundle_store')
753 756 if _scratchbranchmatcher(bookmark):
754 757 scratchpush = True
755 758 # bundle2 can be sent back after push (for example, bundle2
756 759 # containing `pushkey` part to update bookmarks)
757 760 ui.setconfig(experimental, 'bundle2.pushback', True)
758 761
759 762 ui.setconfig(experimental, confignonforwardmove,
760 763 opts.get('non_forward_move'), '--non-forward-move')
761 764 if scratchpush:
765 # this is an infinitepush; we don't want the bookmark to be applied,
766 # rather it should be stored in the bundlestore
767 opts['bookmark'] = []
762 768 ui.setconfig(experimental, configscratchpush, True)
763 769 oldphasemove = extensions.wrapfunction(exchange,
764 770 '_localphasemove',
765 771 _phasemove)
766 772 # Copy-paste from `push` command
767 773 path = ui.paths.getpath(dest, default=('default-push', 'default'))
768 774 if not path:
769 775 raise error.Abort(_('default repository not configured!'),
770 776 hint=_("see 'hg help config.paths'"))
771 777 destpath = path.pushloc or path.loc
772 778 # Remote scratch bookmarks will be deleted because remotenames doesn't
773 779 # know about them. Let's save them before the push and restore them after
774 780 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
775 781 result = orig(ui, repo, dest, *args, **opts)
776 782 if common.isremotebooksenabled(ui):
777 783 if bookmark and scratchpush:
778 784 other = hg.peer(repo, opts, destpath)
779 785 fetchedbookmarks = other.listkeyspatterns('bookmarks',
780 786 patterns=[bookmark])
781 787 remotescratchbookmarks.update(fetchedbookmarks)
782 788 _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
783 789 if oldphasemove:
784 790 exchange._localphasemove = oldphasemove
785 791 return result
786 792
787 793 def _deleteinfinitepushbookmarks(ui, repo, path, names):
788 794 """Prune remote names by removing the bookmarks we don't want anymore,
789 795 then writing the result back to disk
790 796 """
791 797 remotenamesext = extensions.find('remotenames')
792 798
793 799 # remotename format is:
794 800 # (node, nametype ("branches" or "bookmarks"), remote, name)
795 801 nametype_idx = 1
796 802 remote_idx = 2
797 803 name_idx = 3
798 804 remotenames = [remotename for remotename in \
799 805 remotenamesext.readremotenames(repo) \
800 806 if remotename[remote_idx] == path]
801 807 remote_bm_names = [remotename[name_idx] for remotename in \
802 808 remotenames if remotename[nametype_idx] == "bookmarks"]
803 809
804 810 for name in names:
805 811 if name not in remote_bm_names:
806 812 raise error.Abort(_("infinitepush bookmark '{}' does not exist "
807 813 "in path '{}'").format(name, path))
808 814
809 815 bookmarks = {}
810 816 branches = collections.defaultdict(list)
811 817 for node, nametype, remote, name in remotenames:
812 818 if nametype == "bookmarks" and name not in names:
813 819 bookmarks[name] = node
814 820 elif nametype == "branches":
815 821 # saveremotenames wants binary nodes for branches
816 822 branches[name].append(bin(node))
817 823
818 824 remotenamesext.saveremotenames(repo, path, branches, bookmarks)
819 825
820 826 def _phasemove(orig, pushop, nodes, phase=phases.public):
821 827 """prevent commits from being marked public
822 828
823 829 Since these are going to a scratch branch, they aren't really being
824 830 published."""
825 831
826 832 if phase != phases.public:
827 833 orig(pushop, nodes, phase)
828 834
829 835 @exchange.b2partsgenerator(scratchbranchparttype)
830 836 def partgen(pushop, bundler):
831 837 bookmark = pushop.ui.config(experimental, configbookmark)
832 838 scratchpush = pushop.ui.configbool(experimental, configscratchpush)
833 839 if 'changesets' in pushop.stepsdone or not scratchpush:
834 840 return
835 841
836 842 if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
837 843 return
838 844
839 845 pushop.stepsdone.add('changesets')
840 846 if not pushop.outgoing.missing:
841 847 pushop.ui.status(_('no changes found\n'))
842 848 pushop.cgresult = 0
843 849 return
844 850
845 851 # This parameter tells the server that the following bundle is an
846 852 # infinitepush. This lets it switch the part processing to our infinitepush
847 853 # code path.
848 854 bundler.addparam("infinitepush", "True")
849 855
850 856 nonforwardmove = pushop.force or pushop.ui.configbool(experimental,
851 857 confignonforwardmove)
852 858 scratchparts = bundleparts.getscratchbranchparts(pushop.repo,
853 859 pushop.remote,
854 860 pushop.outgoing,
855 861 nonforwardmove,
856 862 pushop.ui,
857 863 bookmark)
858 864
859 865 for scratchpart in scratchparts:
860 866 bundler.addpart(scratchpart)
861 867
862 868 def handlereply(op):
863 869 # server either succeeds or aborts; no code to read
864 870 pushop.cgresult = 1
865 871
866 872 return handlereply
867 873
868 874 bundle2.capabilities[bundleparts.scratchbranchparttype] = ()
869 875
870 876 def _getrevs(bundle, oldnode, force, bookmark):
871 877 'extracts and validates the revs to be imported'
872 878 revs = [bundle[r] for r in bundle.revs('sort(bundle())')]
873 879
874 880 # new bookmark
875 881 if oldnode is None:
876 882 return revs
877 883
878 884 # Fast forward update
879 885 if oldnode in bundle and list(bundle.set('bundle() & %s::', oldnode)):
880 886 return revs
881 887
882 888 # Forced non-fast forward update
883 889 if force:
884 890 return revs
885 891 else:
886 892 raise error.Abort(_('non-forward push'),
887 893 hint=_('use --non-forward-move to override'))
888 894
889 895 @contextlib.contextmanager
890 896 def logservicecall(logger, service, **kwargs):
891 897 start = time.time()
892 898 logger(service, eventtype='start', **kwargs)
893 899 try:
894 900 yield
895 901 logger(service, eventtype='success',
896 902 elapsedms=(time.time() - start) * 1000, **kwargs)
897 903 except Exception as e:
898 904 logger(service, eventtype='failure',
899 905 elapsedms=(time.time() - start) * 1000, errormsg=str(e),
900 906 **kwargs)
901 907 raise
902 908
903 909 def _getorcreateinfinitepushlogger(op):
904 910 logger = op.records['infinitepushlogger']
905 911 if not logger:
906 912 ui = op.repo.ui
907 913 try:
908 914 username = util.getuser()
909 915 except Exception:
910 916 username = 'unknown'
911 917 # Generate random request id to be able to find all logged entries
912 918 # for the same request. Since requestid is pseudo-generated it may
913 919 # not be unique, but we assume that (hostname, username, requestid)
914 920 # is unique.
915 921 random.seed()
916 922 requestid = random.randint(0, 2000000000)
917 923 hostname = socket.gethostname()
918 924 logger = functools.partial(ui.log, 'infinitepush', user=username,
919 925 requestid=requestid, hostname=hostname,
920 926 reponame=ui.config('infinitepush',
921 927 'reponame'))
922 928 op.records.add('infinitepushlogger', logger)
923 929 else:
924 930 logger = logger[0]
925 931 return logger
926 932
927 933 def processparts(orig, repo, op, unbundler):
928 934 if unbundler.params.get('infinitepush') != 'True':
929 935 return orig(repo, op, unbundler)
930 936
931 937 handleallparts = repo.ui.configbool('infinitepush', 'storeallparts')
932 938
933 939 bundler = bundle2.bundle20(repo.ui)
934 940 cgparams = None
935 941 with bundle2.partiterator(repo, op, unbundler) as parts:
936 942 for part in parts:
937 943 bundlepart = None
938 944 if part.type == 'replycaps':
939 945 # This configures the current operation to allow reply parts.
940 946 bundle2._processpart(op, part)
941 947 elif part.type == bundleparts.scratchbranchparttype:
942 948 # Scratch branch parts need to be converted to normal
943 949 # changegroup parts, and the extra parameters stored for later
944 950 # when we upload to the store. Eventually those parameters will
945 951 # be put on the actual bundle instead of this part, then we can
946 952 # send a vanilla changegroup instead of the scratchbranch part.
947 953 cgversion = part.params.get('cgversion', '01')
948 954 bundlepart = bundle2.bundlepart('changegroup', data=part.read())
949 955 bundlepart.addparam('version', cgversion)
950 956 cgparams = part.params
951 957
952 958 # If we're not dumping all parts into the new bundle, we need to
953 959 # alert the future pushkey and phase-heads handler to skip
954 960 # the part.
955 961 if not handleallparts:
956 962 op.records.add(scratchbranchparttype + '_skippushkey', True)
957 963 op.records.add(scratchbranchparttype + '_skipphaseheads',
958 964 True)
959 965 else:
960 966 if handleallparts:
961 967 # Ideally we would not process any parts, and instead just
962 968 # forward them to the bundle for storage, but since this
963 969 # differs from previous behavior, we need to put it behind a
964 970 # config flag for incremental rollout.
965 971 bundlepart = bundle2.bundlepart(part.type, data=part.read())
966 972 for key, value in part.params.iteritems():
967 973 bundlepart.addparam(key, value)
968 974
969 975 # Certain parts require a response
970 976 if part.type == 'pushkey':
971 977 if op.reply is not None:
972 978 rpart = op.reply.newpart('reply:pushkey')
973 979 rpart.addparam('in-reply-to', str(part.id),
974 980 mandatory=False)
975 981 rpart.addparam('return', '1', mandatory=False)
976 982 else:
977 983 bundle2._processpart(op, part)
978 984
979 985 if handleallparts:
980 986 op.records.add(part.type, {
981 987 'return': 1,
982 988 })
983 989 if bundlepart:
984 990 bundler.addpart(bundlepart)
985 991
986 992 # If commits were sent, store them
987 993 if cgparams:
988 994 buf = util.chunkbuffer(bundler.getchunks())
989 995 fd, bundlefile = tempfile.mkstemp()
990 996 try:
991 997 try:
992 998 fp = os.fdopen(fd, 'wb')
993 999 fp.write(buf.read())
994 1000 finally:
995 1001 fp.close()
996 1002 storebundle(op, cgparams, bundlefile)
997 1003 finally:
998 1004 try:
999 1005 os.unlink(bundlefile)
1000 1006 except Exception:
1001 1007 # we would rather see the original exception
1002 1008 pass
1003 1009
1004 1010 def storebundle(op, params, bundlefile):
1005 1011 log = _getorcreateinfinitepushlogger(op)
1006 1012 parthandlerstart = time.time()
1007 1013 log(scratchbranchparttype, eventtype='start')
1008 1014 index = op.repo.bundlestore.index
1009 1015 store = op.repo.bundlestore.store
1010 1016 op.records.add(scratchbranchparttype + '_skippushkey', True)
1011 1017
1012 1018 bundle = None
1013 1019 try: # guards bundle
1014 1020 bundlepath = "bundle:%s+%s" % (op.repo.root, bundlefile)
1015 1021 bundle = hg.repository(op.repo.ui, bundlepath)
1016 1022
1017 1023 bookmark = params.get('bookmark')
1018 1024 bookprevnode = params.get('bookprevnode', '')
1019 1025 force = params.get('force')
1020 1026
1021 1027 if bookmark:
1022 1028 oldnode = index.getnode(bookmark)
1023 1029 else:
1024 1030 oldnode = None
1025 1031 bundleheads = bundle.revs('heads(bundle())')
1026 1032 if bookmark and len(bundleheads) > 1:
1027 1033 raise error.Abort(
1028 1034 _('cannot push more than one head to a scratch branch'))
1029 1035
1030 1036 revs = _getrevs(bundle, oldnode, force, bookmark)
1031 1037
1032 1038 # Notify the user of what is being pushed
1033 1039 plural = 's' if len(revs) > 1 else ''
1034 1040 op.repo.ui.warn(_("pushing %s commit%s:\n") % (len(revs), plural))
1035 1041 maxoutput = 10
1036 1042 for i in range(0, min(len(revs), maxoutput)):
1037 1043 firstline = bundle[revs[i]].description().split('\n')[0][:50]
1038 1044 op.repo.ui.warn((" %s %s\n") % (revs[i], firstline))
1039 1045
1040 1046 if len(revs) > maxoutput + 1:
1041 1047 op.repo.ui.warn((" ...\n"))
1042 1048 firstline = bundle[revs[-1]].description().split('\n')[0][:50]
1043 1049 op.repo.ui.warn((" %s %s\n") % (revs[-1], firstline))
1044 1050
1045 1051 nodesctx = [bundle[rev] for rev in revs]
1046 1052 inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
1047 1053 if bundleheads:
1048 1054 newheadscount = sum(not inindex(rev) for rev in bundleheads)
1049 1055 else:
1050 1056 newheadscount = 0
1051 1057 # If there's a bookmark specified, there should be only one head,
1052 1058 # so we choose the last node, which will be that head.
1053 1059 # If a bug or malicious client allows there to be a bookmark
1054 1060 # with multiple heads, we will place the bookmark on the last head.
1055 1061 bookmarknode = nodesctx[-1].hex() if nodesctx else None
1056 1062 key = None
1057 1063 if newheadscount:
1058 1064 with open(bundlefile, 'r') as f:
1059 1065 bundledata = f.read()
1060 1066 with logservicecall(log, 'bundlestore',
1061 1067 bundlesize=len(bundledata)):
1062 1068 bundlesizelimit = 100 * 1024 * 1024 # 100 MB
1063 1069 if len(bundledata) > bundlesizelimit:
1064 1070 error_msg = ('bundle is too big: %d bytes. ' +
1065 1071 'max allowed size is 100 MB')
1066 1072 raise error.Abort(error_msg % (len(bundledata),))
1067 1073 key = store.write(bundledata)
1068 1074
1069 1075 with logservicecall(log, 'index', newheadscount=newheadscount), index:
1070 1076 if key:
1071 1077 index.addbundle(key, nodesctx)
1072 1078 if bookmark:
1073 1079 index.addbookmark(bookmark, bookmarknode)
1074 1080 _maybeaddpushbackpart(op, bookmark, bookmarknode,
1075 1081 bookprevnode, params)
1076 1082 log(scratchbranchparttype, eventtype='success',
1077 1083 elapsedms=(time.time() - parthandlerstart) * 1000)
1078 1084
1079 1085 fillmetadatabranchpattern = op.repo.ui.config(
1080 1086 'infinitepush', 'fillmetadatabranchpattern', '')
1081 1087 if bookmark and fillmetadatabranchpattern:
1082 1088 __, __, matcher = util.stringmatcher(fillmetadatabranchpattern)
1083 1089 if matcher(bookmark):
1084 1090 _asyncsavemetadata(op.repo.root,
1085 1091 [ctx.hex() for ctx in nodesctx])
1086 1092 except Exception as e:
1087 1093 log(scratchbranchparttype, eventtype='failure',
1088 1094 elapsedms=(time.time() - parthandlerstart) * 1000,
1089 1095 errormsg=str(e))
1090 1096 raise
1091 1097 finally:
1092 1098 if bundle:
1093 1099 bundle.close()
1094 1100
1095 1101 @bundle2.parthandler(scratchbranchparttype,
1096 1102 ('bookmark', 'bookprevnode', 'force',
1097 1103 'pushbackbookmarks', 'cgversion'))
1098 1104 def bundle2scratchbranch(op, part):
1099 1105 '''unbundle a bundle2 part containing a changegroup to store'''
1100 1106
1101 1107 bundler = bundle2.bundle20(op.repo.ui)
1102 1108 cgversion = part.params.get('cgversion', '01')
1103 1109 cgpart = bundle2.bundlepart('changegroup', data=part.read())
1104 1110 cgpart.addparam('version', cgversion)
1105 1111 bundler.addpart(cgpart)
1106 1112 buf = util.chunkbuffer(bundler.getchunks())
1107 1113
1108 1114 fd, bundlefile = tempfile.mkstemp()
1109 1115 try:
1110 1116 try:
1111 1117 fp = os.fdopen(fd, 'wb')
1112 1118 fp.write(buf.read())
1113 1119 finally:
1114 1120 fp.close()
1115 1121 storebundle(op, part.params, bundlefile)
1116 1122 finally:
1117 1123 try:
1118 1124 os.unlink(bundlefile)
1119 1125 except OSError as e:
1120 1126 if e.errno != errno.ENOENT:
1121 1127 raise
1122 1128
1123 1129 return 1
1124 1130
1125 1131 def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
1126 1132 if params.get('pushbackbookmarks'):
1127 1133 if op.reply and 'pushback' in op.reply.capabilities:
1128 1134 params = {
1129 1135 'namespace': 'bookmarks',
1130 1136 'key': bookmark,
1131 1137 'new': newnode,
1132 1138 'old': oldnode,
1133 1139 }
1134 1140 op.reply.newpart('pushkey', mandatoryparams=params.iteritems())
1135 1141
1136 1142 def bundle2pushkey(orig, op, part):
1137 1143 '''Wrapper of bundle2.handlepushkey()
1138 1144
1139 1145 The only goal is to skip calling the original function if the flag is set.
1140 1146 It's set when an infinitepush push is happening.
1141 1147 '''
1142 1148 if op.records[scratchbranchparttype + '_skippushkey']:
1143 1149 if op.reply is not None:
1144 1150 rpart = op.reply.newpart('reply:pushkey')
1145 1151 rpart.addparam('in-reply-to', str(part.id), mandatory=False)
1146 1152 rpart.addparam('return', '1', mandatory=False)
1147 1153 return 1
1148 1154
1149 1155 return orig(op, part)
1150 1156
1151 1157 def bundle2handlephases(orig, op, part):
1152 1158 '''Wrapper of bundle2.handlephases()
1153 1159
1154 1160 The only goal is to skip calling the original function if the flag is set.
1155 1161 It's set when an infinitepush push is happening.
1156 1162 '''
1157 1163
1158 1164 if op.records[scratchbranchparttype + '_skipphaseheads']:
1159 1165 return
1160 1166
1161 1167 return orig(op, part)
1162 1168
1163 1169 def _asyncsavemetadata(root, nodes):
1164 1170 '''starts a separate process that fills metadata for the nodes
1165 1171
1166 1172 This function creates a separate process and doesn't wait for its
1167 1173 completion. This was done to avoid slowing down pushes.
1168 1174 '''
1169 1175
1170 1176 maxnodes = 50
1171 1177 if len(nodes) > maxnodes:
1172 1178 return
1173 1179 nodesargs = []
1174 1180 for node in nodes:
1175 1181 nodesargs.append('--node')
1176 1182 nodesargs.append(node)
1177 1183 with open(os.devnull, 'w+b') as devnull:
1178 1184 cmdline = [util.hgexecutable(), 'debugfillinfinitepushmetadata',
1179 1185 '-R', root] + nodesargs
1180 1186 # Process will run in background. We don't care about the return code
1181 1187 subprocess.Popen(cmdline, close_fds=True, shell=False,
1182 1188 stdin=devnull, stdout=devnull, stderr=devnull)
@@ -1,371 +1,371 b''
1 1
2 2 Create an ondisk bundlestore in .hg/scratchbranches
3 3 $ . "$TESTDIR/library-infinitepush.sh"
4 4 $ cp $HGRCPATH $TESTTMP/defaulthgrc
5 5 $ setupcommon
6 6 $ mkcommit() {
7 7 > echo "$1" > "$1"
8 8 > hg add "$1"
9 9 > hg ci -m "$1"
10 10 > }
11 11 $ hg init repo
12 12 $ cd repo
13 13
14 14 Check that we can send a scratch commit to the server and that it does not show
15 15 up in the history there but is stored on disk
16 16 $ setupserver
17 17 $ cd ..
18 18 $ hg clone ssh://user@dummy/repo client -q
19 19 $ cd client
20 20 $ mkcommit initialcommit
21 21 $ hg push -r .
22 22 pushing to ssh://user@dummy/repo
23 23 searching for changes
24 24 remote: adding changesets
25 25 remote: adding manifests
26 26 remote: adding file changes
27 27 remote: added 1 changesets with 1 changes to 1 files
28 28 $ mkcommit scratchcommit
29 $ hg push -r . --to scratch/mybranch
29 $ hg push -r . -B scratch/mybranch
30 30 pushing to ssh://user@dummy/repo
31 31 searching for changes
32 32 remote: pushing 1 commit:
33 33 remote: 20759b6926ce scratchcommit
34 34 $ hg log -G
35 35 @ changeset: 1:20759b6926ce
36 36 | bookmark: scratch/mybranch
37 37 | tag: tip
38 38 | user: test
39 39 | date: Thu Jan 01 00:00:00 1970 +0000
40 40 | summary: scratchcommit
41 41 |
42 42 o changeset: 0:67145f466344
43 43 user: test
44 44 date: Thu Jan 01 00:00:00 1970 +0000
45 45 summary: initialcommit
46 46
47 47 $ hg log -G -R ../repo
48 48 o changeset: 0:67145f466344
49 49 tag: tip
50 50 user: test
51 51 date: Thu Jan 01 00:00:00 1970 +0000
52 52 summary: initialcommit
53 53
54 54 $ find ../repo/.hg/scratchbranches | sort
55 55 ../repo/.hg/scratchbranches
56 56 ../repo/.hg/scratchbranches/filebundlestore
57 57 ../repo/.hg/scratchbranches/filebundlestore/b9
58 58 ../repo/.hg/scratchbranches/filebundlestore/b9/e1
59 59 ../repo/.hg/scratchbranches/filebundlestore/b9/e1/b9e1ee5f93fb6d7c42496fc176c09839639dd9cc
60 60 ../repo/.hg/scratchbranches/index
61 61 ../repo/.hg/scratchbranches/index/bookmarkmap
62 62 ../repo/.hg/scratchbranches/index/bookmarkmap/scratch
63 63 ../repo/.hg/scratchbranches/index/bookmarkmap/scratch/mybranch
64 64 ../repo/.hg/scratchbranches/index/nodemap
65 65 ../repo/.hg/scratchbranches/index/nodemap/20759b6926ce827d5a8c73eb1fa9726d6f7defb2
66 66
67 67 From another client we can get the scratchbranch if we ask for it explicitly
68 68
69 69 $ cd ..
70 70 $ hg clone ssh://user@dummy/repo client2 -q
71 71 $ cd client2
72 72 $ hg pull -B scratch/mybranch --traceback
73 73 pulling from ssh://user@dummy/repo
74 74 searching for changes
75 75 adding changesets
76 76 adding manifests
77 77 adding file changes
78 78 added 1 changesets with 1 changes to 1 files
79 79 new changesets 20759b6926ce
80 80 (run 'hg update' to get a working copy)
81 81 $ hg log -G
82 82 o changeset: 1:20759b6926ce
83 83 | bookmark: scratch/mybranch
84 84 | tag: tip
85 85 | user: test
86 86 | date: Thu Jan 01 00:00:00 1970 +0000
87 87 | summary: scratchcommit
88 88 |
89 89 @ changeset: 0:67145f466344
90 90 user: test
91 91 date: Thu Jan 01 00:00:00 1970 +0000
92 92 summary: initialcommit
93 93
94 94 $ cd ..
95 95
96 96 Push to non-scratch bookmark
97 97
98 98 $ cd client
99 99 $ hg up 0
100 100 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
101 101 $ mkcommit newcommit
102 102 created new head
103 103 $ hg push -r .
104 104 pushing to ssh://user@dummy/repo
105 105 searching for changes
106 106 remote: adding changesets
107 107 remote: adding manifests
108 108 remote: adding file changes
109 109 remote: added 1 changesets with 1 changes to 1 files
110 110 $ hg log -G -T '{desc} {phase} {bookmarks}'
111 111 @ newcommit public
112 112 |
113 113 | o scratchcommit draft scratch/mybranch
114 114 |/
115 115 o initialcommit public
116 116
117 117
118 118 Push to scratch branch
119 119 $ cd ../client2
120 120 $ hg up -q scratch/mybranch
121 121 $ mkcommit 'new scratch commit'
122 $ hg push -r . --to scratch/mybranch
122 $ hg push -r . -B scratch/mybranch
123 123 pushing to ssh://user@dummy/repo
124 124 searching for changes
125 125 remote: pushing 2 commits:
126 126 remote: 20759b6926ce scratchcommit
127 127 remote: 1de1d7d92f89 new scratch commit
128 128 $ hg log -G -T '{desc} {phase} {bookmarks}'
129 129 @ new scratch commit draft scratch/mybranch
130 130 |
131 131 o scratchcommit draft
132 132 |
133 133 o initialcommit public
134 134
135 135 $ scratchnodes
136 136 1de1d7d92f8965260391d0513fe8a8d5973d3042 bed63daed3beba97fff2e819a148cf415c217a85
137 137 20759b6926ce827d5a8c73eb1fa9726d6f7defb2 bed63daed3beba97fff2e819a148cf415c217a85
138 138
139 139 $ scratchbookmarks
140 140 scratch/mybranch 1de1d7d92f8965260391d0513fe8a8d5973d3042
141 141
142 142 Push scratch bookmark with no new revs
143 $ hg push -r . --to scratch/anotherbranch
143 $ hg push -r . -B scratch/anotherbranch
144 144 pushing to ssh://user@dummy/repo
145 145 searching for changes
146 146 remote: pushing 2 commits:
147 147 remote: 20759b6926ce scratchcommit
148 148 remote: 1de1d7d92f89 new scratch commit
149 149 $ hg log -G -T '{desc} {phase} {bookmarks}'
150 150 @ new scratch commit draft scratch/anotherbranch scratch/mybranch
151 151 |
152 152 o scratchcommit draft
153 153 |
154 154 o initialcommit public
155 155
156 156 $ scratchbookmarks
157 157 scratch/anotherbranch 1de1d7d92f8965260391d0513fe8a8d5973d3042
158 158 scratch/mybranch 1de1d7d92f8965260391d0513fe8a8d5973d3042
159 159
160 160 Pull scratch and non-scratch bookmark at the same time
161 161
162 162 $ hg -R ../repo book newbook
163 163 $ cd ../client
164 164 $ hg pull -B newbook -B scratch/mybranch --traceback
165 165 pulling from ssh://user@dummy/repo
166 166 searching for changes
167 167 adding changesets
168 168 adding manifests
169 169 adding file changes
170 170 added 1 changesets with 1 changes to 2 files
171 171 adding remote bookmark newbook
172 172 new changesets 1de1d7d92f89
173 173 (run 'hg update' to get a working copy)
174 174 $ hg log -G -T '{desc} {phase} {bookmarks}'
175 175 o new scratch commit draft scratch/mybranch
176 176 |
177 177 | @ newcommit public
178 178 | |
179 179 o | scratchcommit draft
180 180 |/
181 181 o initialcommit public
182 182
183 183
184 184 Push scratch revision without bookmark with --bundle-store
185 185
186 186 $ hg up -q tip
187 187 $ mkcommit scratchcommitnobook
188 188 $ hg log -G -T '{desc} {phase} {bookmarks}'
189 189 @ scratchcommitnobook draft
190 190 |
191 191 o new scratch commit draft scratch/mybranch
192 192 |
193 193 | o newcommit public
194 194 | |
195 195 o | scratchcommit draft
196 196 |/
197 197 o initialcommit public
198 198
199 199 $ hg push -r . --bundle-store
200 200 pushing to ssh://user@dummy/repo
201 201 searching for changes
202 202 remote: pushing 3 commits:
203 203 remote: 20759b6926ce scratchcommit
204 204 remote: 1de1d7d92f89 new scratch commit
205 205 remote: 2b5d271c7e0d scratchcommitnobook
206 206 $ hg -R ../repo log -G -T '{desc} {phase}'
207 207 o newcommit public
208 208 |
209 209 o initialcommit public
210 210
211 211
212 212 $ scratchnodes
213 213 1de1d7d92f8965260391d0513fe8a8d5973d3042 66fa08ff107451320512817bed42b7f467a1bec3
214 214 20759b6926ce827d5a8c73eb1fa9726d6f7defb2 66fa08ff107451320512817bed42b7f467a1bec3
215 215 2b5d271c7e0d25d811359a314d413ebcc75c9524 66fa08ff107451320512817bed42b7f467a1bec3
216 216
217 217 Test with pushrebase
218 218 $ mkcommit scratchcommitwithpushrebase
219 $ hg push -r . --to scratch/mybranch
219 $ hg push -r . -B scratch/mybranch
220 220 pushing to ssh://user@dummy/repo
221 221 searching for changes
222 222 remote: pushing 4 commits:
223 223 remote: 20759b6926ce scratchcommit
224 224 remote: 1de1d7d92f89 new scratch commit
225 225 remote: 2b5d271c7e0d scratchcommitnobook
226 226 remote: d8c4f54ab678 scratchcommitwithpushrebase
227 227 $ hg -R ../repo log -G -T '{desc} {phase}'
228 228 o newcommit public
229 229 |
230 230 o initialcommit public
231 231
232 232 $ scratchnodes
233 233 1de1d7d92f8965260391d0513fe8a8d5973d3042 e3cb2ac50f9e1e6a5ead3217fc21236c84af4397
234 234 20759b6926ce827d5a8c73eb1fa9726d6f7defb2 e3cb2ac50f9e1e6a5ead3217fc21236c84af4397
235 235 2b5d271c7e0d25d811359a314d413ebcc75c9524 e3cb2ac50f9e1e6a5ead3217fc21236c84af4397
236 236 d8c4f54ab678fd67cb90bb3f272a2dc6513a59a7 e3cb2ac50f9e1e6a5ead3217fc21236c84af4397
237 237
238 238 Change the order of pushrebase and infinitepush
239 239 $ mkcommit scratchcommitwithpushrebase2
240 $ hg push -r . --to scratch/mybranch
240 $ hg push -r . -B scratch/mybranch
241 241 pushing to ssh://user@dummy/repo
242 242 searching for changes
243 243 remote: pushing 5 commits:
244 244 remote: 20759b6926ce scratchcommit
245 245 remote: 1de1d7d92f89 new scratch commit
246 246 remote: 2b5d271c7e0d scratchcommitnobook
247 247 remote: d8c4f54ab678 scratchcommitwithpushrebase
248 248 remote: 6c10d49fe927 scratchcommitwithpushrebase2
249 249 $ hg -R ../repo log -G -T '{desc} {phase}'
250 250 o newcommit public
251 251 |
252 252 o initialcommit public
253 253
254 254 $ scratchnodes
255 255 1de1d7d92f8965260391d0513fe8a8d5973d3042 cd0586065eaf8b483698518f5fc32531e36fd8e0
256 256 20759b6926ce827d5a8c73eb1fa9726d6f7defb2 cd0586065eaf8b483698518f5fc32531e36fd8e0
257 257 2b5d271c7e0d25d811359a314d413ebcc75c9524 cd0586065eaf8b483698518f5fc32531e36fd8e0
258 258 6c10d49fe92751666c40263f96721b918170d3da cd0586065eaf8b483698518f5fc32531e36fd8e0
259 259 d8c4f54ab678fd67cb90bb3f272a2dc6513a59a7 cd0586065eaf8b483698518f5fc32531e36fd8e0
260 260
261 261 Non-fastforward scratch bookmark push
262 262
263 263 $ hg log -GT "{rev}:{node} {desc}\n"
264 264 @ 6:6c10d49fe92751666c40263f96721b918170d3da scratchcommitwithpushrebase2
265 265 |
266 266 o 5:d8c4f54ab678fd67cb90bb3f272a2dc6513a59a7 scratchcommitwithpushrebase
267 267 |
268 268 o 4:2b5d271c7e0d25d811359a314d413ebcc75c9524 scratchcommitnobook
269 269 |
270 270 o 3:1de1d7d92f8965260391d0513fe8a8d5973d3042 new scratch commit
271 271 |
272 272 | o 2:91894e11e8255bf41aa5434b7b98e8b2aa2786eb newcommit
273 273 | |
274 274 o | 1:20759b6926ce827d5a8c73eb1fa9726d6f7defb2 scratchcommit
275 275 |/
276 276 o 0:67145f4663446a9580364f70034fea6e21293b6f initialcommit
277 277
278 278 $ hg up 6c10d49fe927
279 279 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
280 280 $ echo 1 > amend
281 281 $ hg add amend
282 282 $ hg ci --amend -m 'scratch amended commit'
283 283 saved backup bundle to $TESTTMP/client/.hg/strip-backup/6c10d49fe927-c99ffec5-amend.hg (glob)
284 284 $ hg log -G -T '{desc} {phase} {bookmarks}'
285 285 @ scratch amended commit draft scratch/mybranch
286 286 |
287 287 o scratchcommitwithpushrebase draft
288 288 |
289 289 o scratchcommitnobook draft
290 290 |
291 291 o new scratch commit draft
292 292 |
293 293 | o newcommit public
294 294 | |
295 295 o | scratchcommit draft
296 296 |/
297 297 o initialcommit public
298 298
299 299
300 300 $ scratchbookmarks
301 301 scratch/anotherbranch 1de1d7d92f8965260391d0513fe8a8d5973d3042
302 302 scratch/mybranch 6c10d49fe92751666c40263f96721b918170d3da
303 $ hg push -r . --to scratch/mybranch
303 $ hg push -r . -B scratch/mybranch
304 304 pushing to ssh://user@dummy/repo
305 305 searching for changes
306 306 remote: non-forward push
307 307 remote: (use --non-forward-move to override)
308 308 abort: push failed on remote
309 309 [255]
310 310
311 $ hg push -r . --to scratch/mybranch --non-forward-move
311 $ hg push -r . -B scratch/mybranch --non-forward-move
312 312 pushing to ssh://user@dummy/repo
313 313 searching for changes
314 314 remote: pushing 5 commits:
315 315 remote: 20759b6926ce scratchcommit
316 316 remote: 1de1d7d92f89 new scratch commit
317 317 remote: 2b5d271c7e0d scratchcommitnobook
318 318 remote: d8c4f54ab678 scratchcommitwithpushrebase
319 319 remote: 8872775dd97a scratch amended commit
320 320 $ scratchbookmarks
321 321 scratch/anotherbranch 1de1d7d92f8965260391d0513fe8a8d5973d3042
322 322 scratch/mybranch 8872775dd97a750e1533dc1fbbca665644b32547
323 323 $ hg log -G -T '{desc} {phase} {bookmarks}'
324 324 @ scratch amended commit draft scratch/mybranch
325 325 |
326 326 o scratchcommitwithpushrebase draft
327 327 |
328 328 o scratchcommitnobook draft
329 329 |
330 330 o new scratch commit draft
331 331 |
332 332 | o newcommit public
333 333 | |
334 334 o | scratchcommit draft
335 335 |/
336 336 o initialcommit public
337 337
 338 338 Check that the push path is not ignored. Add a new path to the hgrc
339 339 $ cat >> .hg/hgrc << EOF
340 340 > [paths]
341 341 > peer=ssh://user@dummy/client2
342 342 > EOF
343 343
 344 344 Check out the last non-scratch commit
345 345 $ hg up 91894e11e8255
346 346 1 files updated, 0 files merged, 6 files removed, 0 files unresolved
347 347 $ mkcommit peercommit
 348 348 Use --force because this push creates a new head
349 349 $ hg push peer -r . -f
350 350 pushing to ssh://user@dummy/client2
351 351 searching for changes
352 352 remote: adding changesets
353 353 remote: adding manifests
354 354 remote: adding file changes
355 355 remote: added 2 changesets with 2 changes to 2 files (+1 heads)
356 356 $ hg -R ../repo log -G -T '{desc} {phase} {bookmarks}'
357 357 o newcommit public
358 358 |
359 359 o initialcommit public
360 360
361 361 $ hg -R ../client2 log -G -T '{desc} {phase} {bookmarks}'
362 362 o peercommit public
363 363 |
364 364 o newcommit public
365 365 |
366 366 | @ new scratch commit draft scratch/anotherbranch scratch/mybranch
367 367 | |
368 368 | o scratchcommit draft
369 369 |/
370 370 o initialcommit public
371 371
@@ -1,318 +1,318 b''
 1 1 Testing the infinitepush extension and the config options provided by it
2 2
3 3 Setup
4 4
5 5 $ . "$TESTDIR/library-infinitepush.sh"
6 6 $ cp $HGRCPATH $TESTTMP/defaulthgrc
7 7 $ setupcommon
8 8 $ hg init repo
9 9 $ cd repo
10 10 $ setupserver
11 11 $ echo initialcommit > initialcommit
12 12 $ hg ci -Aqm "initialcommit"
13 13 $ hg phase --public .
14 14
15 15 $ cd ..
16 16 $ hg clone ssh://user@dummy/repo client -q
17 17
18 18 Create two heads. Push first head alone, then two heads together. Make sure that
19 19 multihead push works.
20 20 $ cd client
21 21 $ echo multihead1 > multihead1
22 22 $ hg add multihead1
23 23 $ hg ci -m "multihead1"
24 24 $ hg up null
25 25 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
26 26 $ echo multihead2 > multihead2
27 27 $ hg ci -Am "multihead2"
28 28 adding multihead2
29 29 created new head
30 30 $ hg push -r . --bundle-store
31 31 pushing to ssh://user@dummy/repo
32 32 searching for changes
33 33 remote: pushing 1 commit:
34 34 remote: ee4802bf6864 multihead2
35 35 $ hg push -r '1:2' --bundle-store
36 36 pushing to ssh://user@dummy/repo
37 37 searching for changes
38 38 remote: pushing 2 commits:
39 39 remote: bc22f9a30a82 multihead1
40 40 remote: ee4802bf6864 multihead2
41 41 $ scratchnodes
42 42 bc22f9a30a821118244deacbd732e394ed0b686c ab1bc557aa090a9e4145512c734b6e8a828393a5
43 43 ee4802bf6864326a6b3dcfff5a03abc2a0a69b8f ab1bc557aa090a9e4145512c734b6e8a828393a5
44 44
45 45 Create two new scratch bookmarks
46 46 $ hg up 0
47 47 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
48 48 $ echo scratchfirstpart > scratchfirstpart
49 49 $ hg ci -Am "scratchfirstpart"
50 50 adding scratchfirstpart
51 51 created new head
52 $ hg push -r . --to scratch/firstpart
52 $ hg push -r . -B scratch/firstpart
53 53 pushing to ssh://user@dummy/repo
54 54 searching for changes
55 55 remote: pushing 1 commit:
56 56 remote: 176993b87e39 scratchfirstpart
57 57 $ hg up 0
58 58 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
59 59 $ echo scratchsecondpart > scratchsecondpart
60 60 $ hg ci -Am "scratchsecondpart"
61 61 adding scratchsecondpart
62 62 created new head
63 $ hg push -r . --to scratch/secondpart
63 $ hg push -r . -B scratch/secondpart
64 64 pushing to ssh://user@dummy/repo
65 65 searching for changes
66 66 remote: pushing 1 commit:
67 67 remote: 8db3891c220e scratchsecondpart
68 68
69 69 Pull two bookmarks from the second client
70 70 $ cd ..
71 71 $ hg clone ssh://user@dummy/repo client2 -q
72 72 $ cd client2
73 73 $ hg pull -B scratch/firstpart -B scratch/secondpart
74 74 pulling from ssh://user@dummy/repo
75 75 searching for changes
76 76 adding changesets
77 77 adding manifests
78 78 adding file changes
79 79 added 1 changesets with 1 changes to 1 files
80 80 adding changesets
81 81 adding manifests
82 82 adding file changes
83 83 added 1 changesets with 1 changes to 1 files (+1 heads)
84 84 new changesets * (glob)
85 85 (run 'hg heads' to see heads, 'hg merge' to merge)
86 86 $ hg log -r scratch/secondpart -T '{node}'
87 87 8db3891c220e216f6da214e8254bd4371f55efca (no-eol)
88 88 $ hg log -r scratch/firstpart -T '{node}'
89 89 176993b87e39bd88d66a2cccadabe33f0b346339 (no-eol)
90 90 Make two commits to the scratch branch
91 91
92 92 $ echo testpullbycommithash1 > testpullbycommithash1
93 93 $ hg ci -Am "testpullbycommithash1"
94 94 adding testpullbycommithash1
95 95 created new head
96 96 $ hg log -r '.' -T '{node}\n' > ../testpullbycommithash1
97 97 $ echo testpullbycommithash2 > testpullbycommithash2
98 98 $ hg ci -Aqm "testpullbycommithash2"
99 $ hg push -r . --to scratch/mybranch -q
99 $ hg push -r . -B scratch/mybranch -q
100 100
101 101 Create third client and pull by commit hash.
 102 102 Make sure testpullbycommithash2 has not been fetched
103 103 $ cd ..
104 104 $ hg clone ssh://user@dummy/repo client3 -q
105 105 $ cd client3
106 106 $ hg pull -r `cat ../testpullbycommithash1`
107 107 pulling from ssh://user@dummy/repo
108 108 searching for changes
109 109 adding changesets
110 110 adding manifests
111 111 adding file changes
112 112 added 1 changesets with 1 changes to 1 files
113 113 new changesets 33910bfe6ffe
114 114 (run 'hg update' to get a working copy)
115 115 $ hg log -G -T '{desc} {phase} {bookmarks}'
116 116 o testpullbycommithash1 draft
117 117 |
118 118 @ initialcommit public
119 119
120 120 Make public commit in the repo and pull it.
121 121 Make sure phase on the client is public.
122 122 $ cd ../repo
123 123 $ echo publiccommit > publiccommit
124 124 $ hg ci -Aqm "publiccommit"
125 125 $ hg phase --public .
126 126 $ cd ../client3
127 127 $ hg pull
128 128 pulling from ssh://user@dummy/repo
129 129 searching for changes
130 130 adding changesets
131 131 adding manifests
132 132 adding file changes
133 133 added 1 changesets with 1 changes to 1 files (+1 heads)
134 134 new changesets a79b6597f322
135 135 (run 'hg heads' to see heads, 'hg merge' to merge)
136 136 $ hg log -G -T '{desc} {phase} {bookmarks} {node|short}'
137 137 o publiccommit public a79b6597f322
138 138 |
139 139 | o testpullbycommithash1 draft 33910bfe6ffe
140 140 |/
141 141 @ initialcommit public 67145f466344
142 142
143 143 $ hg up a79b6597f322
144 144 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
145 145 $ echo scratchontopofpublic > scratchontopofpublic
146 146 $ hg ci -Aqm "scratchontopofpublic"
147 $ hg push -r . --to scratch/scratchontopofpublic
147 $ hg push -r . -B scratch/scratchontopofpublic
148 148 pushing to ssh://user@dummy/repo
149 149 searching for changes
150 150 remote: pushing 1 commit:
151 151 remote: c70aee6da07d scratchontopofpublic
152 152 $ cd ../client2
153 153 $ hg pull -B scratch/scratchontopofpublic
154 154 pulling from ssh://user@dummy/repo
155 155 searching for changes
156 156 adding changesets
157 157 adding manifests
158 158 adding file changes
159 159 added 1 changesets with 1 changes to 1 files (+1 heads)
160 160 adding changesets
161 161 adding manifests
162 162 adding file changes
163 163 added 1 changesets with 1 changes to 1 files
164 164 new changesets a79b6597f322:c70aee6da07d
165 165 (run 'hg heads .' to see heads, 'hg merge' to merge)
166 166 $ hg log -r scratch/scratchontopofpublic -T '{phase}'
167 167 draft (no-eol)
168 168 Strip scratchontopofpublic commit and do hg update
169 169 $ hg log -r tip -T '{node}\n'
170 170 c70aee6da07d7cdb9897375473690df3a8563339
171 171 $ echo "[extensions]" >> .hg/hgrc
172 172 $ echo "strip=" >> .hg/hgrc
173 173 $ hg strip -q tip
174 174 $ hg up c70aee6da07d7cdb9897375473690df3a8563339
175 175 'c70aee6da07d7cdb9897375473690df3a8563339' does not exist locally - looking for it remotely...
176 176 pulling from ssh://user@dummy/repo
177 177 searching for changes
178 178 adding changesets
179 179 adding manifests
180 180 adding file changes
181 181 added 1 changesets with 1 changes to 1 files
182 182 new changesets c70aee6da07d
183 183 (run 'hg update' to get a working copy)
184 184 'c70aee6da07d7cdb9897375473690df3a8563339' found remotely
185 185 2 files updated, 0 files merged, 2 files removed, 0 files unresolved
186 186
187 187 Trying to pull from bad path
188 188 $ hg strip -q tip
189 189 $ hg --config paths.default=badpath up c70aee6da07d7cdb9897375473690df3a8563339
190 190 'c70aee6da07d7cdb9897375473690df3a8563339' does not exist locally - looking for it remotely...
191 191 pulling from $TESTTMP/client2/badpath (glob)
192 192 pull failed: repository $TESTTMP/client2/badpath not found
193 193 abort: unknown revision 'c70aee6da07d7cdb9897375473690df3a8563339'!
194 194 [255]
195 195
196 196 Strip commit and pull it using hg update with bookmark name
197 197 $ hg strip -q d8fde0ddfc96
198 198 $ hg book -d scratch/mybranch
199 199 $ hg up scratch/mybranch
200 200 'scratch/mybranch' does not exist locally - looking for it remotely...
201 201 pulling from ssh://user@dummy/repo
202 202 searching for changes
203 203 adding changesets
204 204 adding manifests
205 205 adding file changes
206 206 added 1 changesets with 1 changes to 2 files
207 207 new changesets d8fde0ddfc96
208 208 (run 'hg update' to get a working copy)
209 209 'scratch/mybranch' found remotely
210 210 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
211 211 (activating bookmark scratch/mybranch)
212 212 $ hg log -r scratch/mybranch -T '{node}'
213 213 d8fde0ddfc962183977f92d2bc52d303b8840f9d (no-eol)
214 214
215 215 Test debugfillinfinitepushmetadata
216 216 $ cd ../repo
217 217 $ hg debugfillinfinitepushmetadata
218 218 abort: nodes are not specified
219 219 [255]
220 220 $ hg debugfillinfinitepushmetadata --node randomnode
221 221 abort: node randomnode is not found
222 222 [255]
223 223 $ hg debugfillinfinitepushmetadata --node d8fde0ddfc962183977f92d2bc52d303b8840f9d
224 224 $ cat .hg/scratchbranches/index/nodemetadatamap/d8fde0ddfc962183977f92d2bc52d303b8840f9d
225 225 {"changed_files": {"testpullbycommithash2": {"adds": 1, "isbinary": false, "removes": 0, "status": "added"}}} (no-eol)
226 226
227 227 $ cd ../client
228 228 $ hg up d8fde0ddfc962183977f92d2bc52d303b8840f9d
229 229 'd8fde0ddfc962183977f92d2bc52d303b8840f9d' does not exist locally - looking for it remotely...
230 230 pulling from ssh://user@dummy/repo
231 231 searching for changes
232 232 adding changesets
233 233 adding manifests
234 234 adding file changes
235 235 added 2 changesets with 2 changes to 2 files (+1 heads)
236 236 new changesets 33910bfe6ffe:d8fde0ddfc96
237 237 (run 'hg heads .' to see heads, 'hg merge' to merge)
238 238 'd8fde0ddfc962183977f92d2bc52d303b8840f9d' found remotely
239 239 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
240 240 $ echo file > file
241 241 $ hg add file
242 242 $ hg rm testpullbycommithash2
243 243 $ hg ci -m 'add and rm files'
244 244 $ hg log -r . -T '{node}\n'
245 245 3edfe7e9089ab9f728eb8e0d0c62a5d18cf19239
246 246 $ hg cp file cpfile
247 247 $ hg mv file mvfile
248 248 $ hg ci -m 'cpfile and mvfile'
249 249 $ hg log -r . -T '{node}\n'
250 250 c7ac39f638c6b39bcdacf868fa21b6195670f8ae
251 251 $ hg push -r . --bundle-store
252 252 pushing to ssh://user@dummy/repo
253 253 searching for changes
254 254 remote: pushing 4 commits:
255 255 remote: 33910bfe6ffe testpullbycommithash1
256 256 remote: d8fde0ddfc96 testpullbycommithash2
257 257 remote: 3edfe7e9089a add and rm files
258 258 remote: c7ac39f638c6 cpfile and mvfile
259 259 $ cd ../repo
260 260 $ hg debugfillinfinitepushmetadata --node 3edfe7e9089ab9f728eb8e0d0c62a5d18cf19239 --node c7ac39f638c6b39bcdacf868fa21b6195670f8ae
261 261 $ cat .hg/scratchbranches/index/nodemetadatamap/3edfe7e9089ab9f728eb8e0d0c62a5d18cf19239
262 262 {"changed_files": {"file": {"adds": 1, "isbinary": false, "removes": 0, "status": "added"}, "testpullbycommithash2": {"adds": 0, "isbinary": false, "removes": 1, "status": "removed"}}} (no-eol)
263 263 $ cat .hg/scratchbranches/index/nodemetadatamap/c7ac39f638c6b39bcdacf868fa21b6195670f8ae
264 264 {"changed_files": {"cpfile": {"adds": 1, "copies": "file", "isbinary": false, "removes": 0, "status": "added"}, "file": {"adds": 0, "isbinary": false, "removes": 1, "status": "removed"}, "mvfile": {"adds": 1, "copies": "file", "isbinary": false, "removes": 0, "status": "added"}}} (no-eol)
265 265
266 266 Test infinitepush.metadatafilelimit number
267 267 $ cd ../client
268 268 $ echo file > file
269 269 $ hg add file
270 270 $ echo file1 > file1
271 271 $ hg add file1
272 272 $ echo file2 > file2
273 273 $ hg add file2
274 274 $ hg ci -m 'add many files'
275 275 $ hg log -r . -T '{node}'
276 276 09904fb20c53ff351bd3b1d47681f569a4dab7e5 (no-eol)
277 277 $ hg push -r . --bundle-store
278 278 pushing to ssh://user@dummy/repo
279 279 searching for changes
280 280 remote: pushing 5 commits:
281 281 remote: 33910bfe6ffe testpullbycommithash1
282 282 remote: d8fde0ddfc96 testpullbycommithash2
283 283 remote: 3edfe7e9089a add and rm files
284 284 remote: c7ac39f638c6 cpfile and mvfile
285 285 remote: 09904fb20c53 add many files
286 286
287 287 $ cd ../repo
288 288 $ hg debugfillinfinitepushmetadata --node 09904fb20c53ff351bd3b1d47681f569a4dab7e5 --config infinitepush.metadatafilelimit=2
289 289 $ cat .hg/scratchbranches/index/nodemetadatamap/09904fb20c53ff351bd3b1d47681f569a4dab7e5
290 290 {"changed_files": {"file": {"adds": 1, "isbinary": false, "removes": 0, "status": "added"}, "file1": {"adds": 1, "isbinary": false, "removes": 0, "status": "added"}}, "changed_files_truncated": true} (no-eol)
291 291
292 292 Test infinitepush.fillmetadatabranchpattern
293 293 $ cd ../repo
294 294 $ cat >> .hg/hgrc << EOF
295 295 > [infinitepush]
296 296 > fillmetadatabranchpattern=re:scratch/fillmetadata/.*
297 297 > EOF
298 298 $ cd ../client
299 299 $ echo tofillmetadata > tofillmetadata
300 300 $ hg ci -Aqm "tofillmetadata"
301 301 $ hg log -r . -T '{node}\n'
302 302 d2b0410d4da084bc534b1d90df0de9eb21583496
303 $ hg push -r . --to scratch/fillmetadata/fill
303 $ hg push -r . -B scratch/fillmetadata/fill
304 304 pushing to ssh://user@dummy/repo
305 305 searching for changes
306 306 remote: pushing 6 commits:
307 307 remote: 33910bfe6ffe testpullbycommithash1
308 308 remote: d8fde0ddfc96 testpullbycommithash2
309 309 remote: 3edfe7e9089a add and rm files
310 310 remote: c7ac39f638c6 cpfile and mvfile
311 311 remote: 09904fb20c53 add many files
312 312 remote: d2b0410d4da0 tofillmetadata
313 313
314 314 Make sure background process finished
315 315 $ sleep 3
316 316 $ cd ../repo
317 317 $ cat .hg/scratchbranches/index/nodemetadatamap/d2b0410d4da084bc534b1d90df0de9eb21583496
318 318 {"changed_files": {"tofillmetadata": {"adds": 1, "isbinary": false, "removes": 0, "status": "added"}}} (no-eol)