infinitepush: delete infinitepushcommands.py and related tests...
Pulkit Goyal
r37218:aa015dd9 default
@@ -1,1172 +1,1167 b''
1 1 # Infinite push
2 2 #
3 3 # Copyright 2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """ store some pushes in a remote blob store on the server (EXPERIMENTAL)
8 8
9 9 [infinitepush]
10 10 # Server-side and client-side option. Pattern of the infinitepush bookmark
11 11 branchpattern = PATTERN
12 12
13 13 # Server or client
14 14 server = False
15 15
16 16 # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
17 17 indextype = disk
18 18
19 19 # Server-side option. Used only if indextype=sql.
20 20 # Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
21 21 sqlhost = IP:PORT:DB_NAME:USER:PASSWORD
22 22
23 23 # Server-side option. Used only if indextype=disk.
24 24 # Filesystem path to the index store
25 25 indexpath = PATH
26 26
27 27 # Server-side option. Possible values: 'disk' or 'external'
28 28 # Fails if not set
29 29 storetype = disk
30 30
31 31 # Server-side option.
32 32 # Path to the binary that will save bundle to the bundlestore
33 33 # Formatted cmd line will be passed to it (see `put_args`)
34 34 put_binary = put
35 35
36 36 # Server-side option. Used only if storetype=external.
37 37 # Format cmd-line string for put binary. Placeholder: {filename}
38 38 put_args = {filename}
39 39
40 40 # Server-side option.
41 41 # Path to the binary that get bundle from the bundlestore.
42 42 # Formatted cmd line will be passed to it (see `get_args`)
43 43 get_binary = get
44 44
45 45 # Server-side option. Used only if storetype=external.
46 46 # Format cmd-line string for get binary. Placeholders: {filename} {handle}
47 47 get_args = {filename} {handle}
48 48
49 49 # Server-side option
50 50 logfile = FILE
51 51
52 52 # Server-side option
53 53 loglevel = DEBUG
54 54
55 55 # Server-side option. Used only if indextype=sql.
56 56 # Sets mysql wait_timeout option.
57 57 waittimeout = 300
58 58
59 59 # Server-side option. Used only if indextype=sql.
60 60 # Sets mysql innodb_lock_wait_timeout option.
61 61 locktimeout = 120
62 62
63 63 # Server-side option. Used only if indextype=sql.
64 64 # Name of the repository
65 65 reponame = ''
66 66
67 67 # Client-side option. Used by --list-remote option. List of remote scratch
68 68 # patterns to list if no patterns are specified.
69 69 defaultremotepatterns = ['*']
70 70
71 71 # Instructs infinitepush to forward all received bundle2 parts to the
72 72 # bundle for storage. Defaults to False.
73 73 storeallparts = True
74 74
75 75 [remotenames]
76 76 # Client-side option
77 77 # This option should be set only if remotenames extension is enabled.
78 78 # Whether remote bookmarks are tracked by remotenames extension.
79 79 bookmarks = True
80 80 """
81 81
82 82 from __future__ import absolute_import
83 83
84 84 import collections
85 85 import contextlib
86 86 import errno
87 87 import functools
88 88 import logging
89 89 import os
90 90 import random
91 91 import re
92 92 import socket
93 93 import subprocess
94 94 import sys
95 95 import tempfile
96 96 import time
97 97
98 98 from mercurial.node import (
99 99 bin,
100 100 hex,
101 101 )
102 102
103 103 from mercurial.i18n import _
104 104
105 105 from mercurial import (
106 106 bundle2,
107 107 changegroup,
108 108 commands,
109 109 discovery,
110 110 encoding,
111 111 error,
112 112 exchange,
113 113 extensions,
114 114 hg,
115 115 localrepo,
116 116 peer,
117 117 phases,
118 118 pushkey,
119 119 registrar,
120 120 util,
121 121 wireproto,
122 122 )
123 123
124 124 from . import (
125 125 bundleparts,
126 126 common,
127 infinitepushcommands,
128 127 )
129 128
130 129 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
131 130 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
132 131 # be specifying the version(s) of Mercurial they are tested with, or
133 132 # leave the attribute unspecified.
134 133 testedwith = 'ships-with-hg-core'
135 134
136 135 configtable = {}
137 136 configitem = registrar.configitem(configtable)
138 137
139 138 configitem('infinitepush', 'server',
140 139 default=False,
141 140 )
142 141 configitem('infinitepush', 'storetype',
143 142 default='',
144 143 )
145 144 configitem('infinitepush', 'indextype',
146 145 default='',
147 146 )
148 147 configitem('infinitepush', 'indexpath',
149 148 default='',
150 149 )
151 150 configitem('infinitepush', 'storeallparts',
152 151 default=False,
153 152 )
154 153 configitem('infinitepush', 'reponame',
155 154 default='',
156 155 )
157 156 configitem('scratchbranch', 'storepath',
158 157 default='',
159 158 )
160 159 configitem('infinitepush', 'branchpattern',
161 160 default='',
162 161 )
163 configitem('infinitepush', 'metadatafilelimit',
164 default=100,
165 )
166 162 configitem('experimental', 'server-bundlestore-bookmark',
167 163 default='',
168 164 )
169 165 configitem('experimental', 'infinitepush-scratchpush',
170 166 default=False,
171 167 )
172 168 configitem('experimental', 'non-forward-move',
173 169 default=False,
174 170 )
175 171
176 172 experimental = 'experimental'
177 173 configbookmark = 'server-bundlestore-bookmark'
178 174 configscratchpush = 'infinitepush-scratchpush'
179 175 confignonforwardmove = 'non-forward-move'
180 176
181 177 scratchbranchparttype = bundleparts.scratchbranchparttype
182 cmdtable = infinitepushcommands.cmdtable
183 178 revsetpredicate = registrar.revsetpredicate()
184 179 templatekeyword = registrar.templatekeyword()
185 180 _scratchbranchmatcher = lambda x: False
186 181 _maybehash = re.compile(r'^[a-f0-9]+$').search
187 182
188 183 def _buildexternalbundlestore(ui):
189 184 put_args = ui.configlist('infinitepush', 'put_args', [])
190 185 put_binary = ui.config('infinitepush', 'put_binary')
191 186 if not put_binary:
192 187 raise error.Abort('put binary is not specified')
193 188 get_args = ui.configlist('infinitepush', 'get_args', [])
194 189 get_binary = ui.config('infinitepush', 'get_binary')
195 190 if not get_binary:
196 191 raise error.Abort('get binary is not specified')
197 192 from . import store
198 193 return store.externalbundlestore(put_binary, put_args, get_binary, get_args)
199 194
200 195 def _buildsqlindex(ui):
201 196 sqlhost = ui.config('infinitepush', 'sqlhost')
202 197 if not sqlhost:
203 198 raise error.Abort(_('please set infinitepush.sqlhost'))
204 199 host, port, db, user, password = sqlhost.split(':')
205 200 reponame = ui.config('infinitepush', 'reponame')
206 201 if not reponame:
207 202 raise error.Abort(_('please set infinitepush.reponame'))
208 203
209 204 logfile = ui.config('infinitepush', 'logfile', '')
210 205 waittimeout = ui.configint('infinitepush', 'waittimeout', 300)
211 206 locktimeout = ui.configint('infinitepush', 'locktimeout', 120)
212 207 from . import sqlindexapi
213 208 return sqlindexapi.sqlindexapi(
214 209 reponame, host, port, db, user, password,
215 210 logfile, _getloglevel(ui), waittimeout=waittimeout,
216 211 locktimeout=locktimeout)
217 212
218 213 def _getloglevel(ui):
219 214 loglevel = ui.config('infinitepush', 'loglevel', 'DEBUG')
220 215 numeric_loglevel = getattr(logging, loglevel.upper(), None)
221 216 if not isinstance(numeric_loglevel, int):
222 217 raise error.Abort(_('invalid log level %s') % loglevel)
223 218 return numeric_loglevel
224 219
225 220 def _tryhoist(ui, remotebookmark):
226 221 '''returns a bookmark with the hoisted part removed
227 222
228 223 The remotenames extension has a 'hoist' config that allows remote
229 224 bookmarks to be used without specifying the remote path. For example,
230 225 'hg update master' works as well as 'hg update remote/master'. We want
231 226 to allow the same in infinitepush.
232 227 '''
233 228
234 229 if common.isremotebooksenabled(ui):
235 230 hoist = ui.config('remotenames', 'hoist') + '/'
236 231 if remotebookmark.startswith(hoist):
237 232 return remotebookmark[len(hoist):]
238 233 return remotebookmark
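For intuition, here is a self-contained sketch of the hoisting behavior described in the docstring; the hoist value 'remote' is an illustrative assumption, not taken from this change:

    # standalone sketch mirroring _tryhoist (hoist value is illustrative)
    def hoist_demo(remotebookmark, hoist='remote'):
        prefix = hoist + '/'
        if remotebookmark.startswith(prefix):
            return remotebookmark[len(prefix):]  # 'remote/master' -> 'master'
        return remotebookmark                    # other names pass through

    assert hoist_demo('remote/master') == 'master'
    assert hoist_demo('elsewhere/master') == 'elsewhere/master'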
239 234
240 235 class bundlestore(object):
241 236 def __init__(self, repo):
242 237 self._repo = repo
243 238 storetype = self._repo.ui.config('infinitepush', 'storetype', '')
244 239 if storetype == 'disk':
245 240 from . import store
246 241 self.store = store.filebundlestore(self._repo.ui, self._repo)
247 242 elif storetype == 'external':
248 243 self.store = _buildexternalbundlestore(self._repo.ui)
249 244 else:
250 245 raise error.Abort(
251 246 _('unknown infinitepush store type specified %s') % storetype)
252 247
253 248 indextype = self._repo.ui.config('infinitepush', 'indextype', '')
254 249 if indextype == 'disk':
255 250 from . import fileindexapi
256 251 self.index = fileindexapi.fileindexapi(self._repo)
257 252 elif indextype == 'sql':
258 253 self.index = _buildsqlindex(self._repo.ui)
259 254 else:
260 255 raise error.Abort(
261 256 _('unknown infinitepush index type specified %s') % indextype)
262 257
263 258 def _isserver(ui):
264 259 return ui.configbool('infinitepush', 'server')
265 260
266 261 def reposetup(ui, repo):
267 262 if _isserver(ui) and repo.local():
268 263 repo.bundlestore = bundlestore(repo)
269 264
270 265 def extsetup(ui):
271 266 commonsetup(ui)
272 267 if _isserver(ui):
273 268 serverextsetup(ui)
274 269 else:
275 270 clientextsetup(ui)
276 271
277 272 def commonsetup(ui):
278 273 wireproto.commands['listkeyspatterns'] = (
279 274 wireprotolistkeyspatterns, 'namespace patterns')
280 275 scratchbranchpat = ui.config('infinitepush', 'branchpattern')
281 276 if scratchbranchpat:
282 277 global _scratchbranchmatcher
283 278 kind, pat, _scratchbranchmatcher = util.stringmatcher(scratchbranchpat)
284 279
285 280 def serverextsetup(ui):
286 281 origpushkeyhandler = bundle2.parthandlermapping['pushkey']
287 282
288 283 def newpushkeyhandler(*args, **kwargs):
289 284 bundle2pushkey(origpushkeyhandler, *args, **kwargs)
290 285 newpushkeyhandler.params = origpushkeyhandler.params
291 286 bundle2.parthandlermapping['pushkey'] = newpushkeyhandler
292 287
293 288 orighandlephasehandler = bundle2.parthandlermapping['phase-heads']
294 289 newphaseheadshandler = lambda *args, **kwargs: \
295 290 bundle2handlephases(orighandlephasehandler, *args, **kwargs)
296 291 newphaseheadshandler.params = orighandlephasehandler.params
297 292 bundle2.parthandlermapping['phase-heads'] = newphaseheadshandler
298 293
299 294 extensions.wrapfunction(localrepo.localrepository, 'listkeys',
300 295 localrepolistkeys)
301 296 wireproto.commands['lookup'] = (
302 297 _lookupwrap(wireproto.commands['lookup'][0]), 'key')
303 298 extensions.wrapfunction(exchange, 'getbundlechunks', getbundlechunks)
304 299
305 300 extensions.wrapfunction(bundle2, 'processparts', processparts)
306 301
307 302 def clientextsetup(ui):
308 303 entry = extensions.wrapcommand(commands.table, 'push', _push)
309 304
310 305 if not any(a for a in entry[1] if a[1] == 'non-forward-move'):
311 306 entry[1].append(('', 'non-forward-move', None,
312 307 _('allows moving a remote bookmark to an '
313 308 'arbitrary place')))
314 309
315 310 entry[1].append(
316 311 ('', 'bundle-store', None,
317 312 _('force push to go to bundle store (EXPERIMENTAL)')))
318 313
319 314 extensions.wrapcommand(commands.table, 'pull', _pull)
320 315 extensions.wrapcommand(commands.table, 'update', _update)
321 316
322 317 extensions.wrapfunction(discovery, 'checkheads', _checkheads)
323 318
324 319 wireproto.wirepeer.listkeyspatterns = listkeyspatterns
325 320
326 321 partorder = exchange.b2partsgenorder
327 322 index = partorder.index('changeset')
328 323 partorder.insert(
329 324 index, partorder.pop(partorder.index(scratchbranchparttype)))
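The last two lines above move the scratch-branch part generator ahead of the 'changeset' generator, so the scratch part is emitted first. A self-contained sketch of that list surgery (the part names are illustrative stand-ins):

    scratch = 'b2x:infinitepush'   # stand-in for scratchbranchparttype
    partorder = ['check:heads', 'changeset', 'phase', scratch]
    index = partorder.index('changeset')
    partorder.insert(index, partorder.pop(partorder.index(scratch)))
    assert partorder == ['check:heads', scratch, 'changeset', 'phase']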
330 325
331 326 def _checkheads(orig, pushop):
332 327 if pushop.ui.configbool(experimental, configscratchpush, False):
333 328 return
334 329 return orig(pushop)
335 330
336 331 def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
337 332 patterns = wireproto.decodelist(patterns)
338 333 d = repo.listkeys(encoding.tolocal(namespace), patterns).iteritems()
339 334 return pushkey.encodekeys(d)
340 335
341 336 def localrepolistkeys(orig, self, namespace, patterns=None):
342 337 if namespace == 'bookmarks' and patterns:
343 338 index = self.bundlestore.index
344 339 results = {}
345 340 bookmarks = orig(self, namespace)
346 341 for pattern in patterns:
347 342 results.update(index.getbookmarks(pattern))
348 343 if pattern.endswith('*'):
349 344 pattern = 're:^' + pattern[:-1] + '.*'
350 345 kind, pat, matcher = util.stringmatcher(pattern)
351 346 for bookmark, node in bookmarks.iteritems():
352 347 if matcher(bookmark):
353 348 results[bookmark] = node
354 349 return results
355 350 else:
356 351 return orig(self, namespace)
357 352
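The wildcard handling in localrepolistkeys above rewrites a trailing '*' into an anchored regular expression before matching local bookmarks; a minimal sketch with an illustrative pattern:

    pattern = 'scratch/joe/*'   # illustrative input pattern
    if pattern.endswith('*'):
        pattern = 're:^' + pattern[:-1] + '.*'
    assert pattern == 're:^scratch/joe/.*'
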
358 353 @peer.batchable
359 354 def listkeyspatterns(self, namespace, patterns):
360 355 if not self.capable('pushkey'):
361 356 yield {}, None
362 357 f = peer.future()
363 358 self.ui.debug('preparing listkeys for "%s" with pattern "%s"\n' %
364 359 (namespace, patterns))
365 360 yield {
366 361 'namespace': encoding.fromlocal(namespace),
367 362 'patterns': wireproto.encodelist(patterns)
368 363 }, f
369 364 d = f.value
370 365 self.ui.debug('received listkey for "%s": %i bytes\n'
371 366 % (namespace, len(d)))
372 367 yield pushkey.decodekeys(d)
373 368
374 369 def _readbundlerevs(bundlerepo):
375 370 return list(bundlerepo.revs('bundle()'))
376 371
377 372 def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
378 373 '''Tells remotefilelog to include all changed files in the changegroup
379 374
380 375 By default remotefilelog doesn't include file content in the changegroup,
381 376 but we need to include it if we are fetching from the bundlestore.
382 377 '''
383 378 changedfiles = set()
384 379 cl = bundlerepo.changelog
385 380 for r in bundlerevs:
386 381 # [3] means changed files
387 382 changedfiles.update(cl.read(r)[3])
388 383 if not changedfiles:
389 384 return bundlecaps
390 385
391 386 changedfiles = '\0'.join(changedfiles)
392 387 newcaps = []
393 388 appended = False
394 389 for cap in (bundlecaps or []):
395 390 if cap.startswith('excludepattern='):
396 391 newcaps.append('\0'.join((cap, changedfiles)))
397 392 appended = True
398 393 else:
399 394 newcaps.append(cap)
400 395 if not appended:
401 396 # excludepattern cap not found, so just append it
402 397 newcaps.append('excludepattern=' + changedfiles)
403 398
404 399 return newcaps
405 400
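The capability strings built above are NUL-separated; a hedged sketch with illustrative file names shows the resulting encoding:

    changedfiles = '\0'.join(['foo.py', 'docs/bar.txt'])
    cap = 'excludepattern=path:unwanted'   # illustrative pre-existing cap
    newcap = '\0'.join((cap, changedfiles))
    assert newcap == 'excludepattern=path:unwanted\x00foo.py\x00docs/bar.txt'
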
406 401 def _rebundle(bundlerepo, bundleroots, unknownhead):
407 402 '''
408 403 A bundle may include more revisions than the user requested; for example,
409 404 the user may ask for one revision while the bundle also contains its
410 405 descendants. This function filters out all revisions the user did not request.
411 406 '''
412 407 parts = []
413 408
414 409 version = '02'
415 410 outgoing = discovery.outgoing(bundlerepo, commonheads=bundleroots,
416 411 missingheads=[unknownhead])
417 412 cgstream = changegroup.makestream(bundlerepo, outgoing, version, 'pull')
418 413 cgstream = util.chunkbuffer(cgstream).read()
419 414 cgpart = bundle2.bundlepart('changegroup', data=cgstream)
420 415 cgpart.addparam('version', version)
421 416 parts.append(cgpart)
422 417
423 418 return parts
424 419
425 420 def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
426 421 cl = bundlerepo.changelog
427 422 bundleroots = []
428 423 for rev in bundlerevs:
429 424 node = cl.node(rev)
430 425 parents = cl.parents(node)
431 426 for parent in parents:
432 427 # include all revs that exist in the main repo
433 428 # to make sure that the bundle can be applied client-side
434 429 if parent in oldrepo:
435 430 bundleroots.append(parent)
436 431 return bundleroots
437 432
438 433 def _needsrebundling(head, bundlerepo):
439 434 bundleheads = list(bundlerepo.revs('heads(bundle())'))
440 435 return not (len(bundleheads) == 1 and
441 436 bundlerepo[bundleheads[0]].node() == head)
442 437
443 438 def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
444 439 '''generates the bundle parts that will be sent to the user
445 440
446 441 returns a list of bundle2 parts
447 442 '''
448 443 parts = []
449 444 if not _needsrebundling(head, bundlerepo):
450 445 with util.posixfile(bundlefile, "rb") as f:
451 446 unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
452 447 if isinstance(unbundler, changegroup.cg1unpacker):
453 448 part = bundle2.bundlepart('changegroup',
454 449 data=unbundler._stream.read())
455 450 part.addparam('version', '01')
456 451 parts.append(part)
457 452 elif isinstance(unbundler, bundle2.unbundle20):
458 453 haschangegroup = False
459 454 for part in unbundler.iterparts():
460 455 if part.type == 'changegroup':
461 456 haschangegroup = True
462 457 newpart = bundle2.bundlepart(part.type, data=part.read())
463 458 for key, value in part.params.iteritems():
464 459 newpart.addparam(key, value)
465 460 parts.append(newpart)
466 461
467 462 if not haschangegroup:
468 463 raise error.Abort(
469 464 'unexpected bundle without changegroup part, ' +
470 465 'head: %s' % hex(head),
471 466 hint='report to administrator')
472 467 else:
473 468 raise error.Abort('unknown bundle type')
474 469 else:
475 470 parts = _rebundle(bundlerepo, bundleroots, head)
476 471
477 472 return parts
478 473
479 474 def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
480 475 heads = heads or []
481 476 # newheads are parents of roots of scratch bundles that were requested
482 477 newphases = {}
483 478 scratchbundles = []
484 479 newheads = []
485 480 scratchheads = []
486 481 nodestobundle = {}
487 482 allbundlestocleanup = []
488 483 try:
489 484 for head in heads:
490 485 if head not in repo.changelog.nodemap:
491 486 if head not in nodestobundle:
492 487 newbundlefile = common.downloadbundle(repo, head)
493 488 bundlepath = "bundle:%s+%s" % (repo.root, newbundlefile)
494 489 bundlerepo = hg.repository(repo.ui, bundlepath)
495 490
496 491 allbundlestocleanup.append((bundlerepo, newbundlefile))
497 492 bundlerevs = set(_readbundlerevs(bundlerepo))
498 493 bundlecaps = _includefilelogstobundle(
499 494 bundlecaps, bundlerepo, bundlerevs, repo.ui)
500 495 cl = bundlerepo.changelog
501 496 bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
502 497 for rev in bundlerevs:
503 498 node = cl.node(rev)
504 499 newphases[hex(node)] = str(phases.draft)
505 500 nodestobundle[node] = (bundlerepo, bundleroots,
506 501 newbundlefile)
507 502
508 503 scratchbundles.append(
509 504 _generateoutputparts(head, *nodestobundle[head]))
510 505 newheads.extend(bundleroots)
511 506 scratchheads.append(head)
512 507 finally:
513 508 for bundlerepo, bundlefile in allbundlestocleanup:
514 509 bundlerepo.close()
515 510 try:
516 511 os.unlink(bundlefile)
517 512 except (IOError, OSError):
518 513 # if we can't clean up the file then just ignore the error,
519 514 # no need to fail
520 515 pass
521 516
522 517 pullfrombundlestore = bool(scratchbundles)
523 518 wrappedchangegrouppart = False
524 519 wrappedlistkeys = False
525 520 oldchangegrouppart = exchange.getbundle2partsmapping['changegroup']
526 521 try:
527 522 def _changegrouppart(bundler, *args, **kwargs):
528 523 # Order is important here. First add non-scratch part
529 524 # and only then add parts with scratch bundles because
530 525 # non-scratch part contains parents of roots of scratch bundles.
531 526 result = oldchangegrouppart(bundler, *args, **kwargs)
532 527 for bundle in scratchbundles:
533 528 for part in bundle:
534 529 bundler.addpart(part)
535 530 return result
536 531
537 532 exchange.getbundle2partsmapping['changegroup'] = _changegrouppart
538 533 wrappedchangegrouppart = True
539 534
540 535 def _listkeys(orig, self, namespace):
541 536 origvalues = orig(self, namespace)
542 537 if namespace == 'phases' and pullfrombundlestore:
543 538 if origvalues.get('publishing') == 'True':
544 539 # Make repo non-publishing to preserve draft phase
545 540 del origvalues['publishing']
546 541 origvalues.update(newphases)
547 542 return origvalues
548 543
549 544 extensions.wrapfunction(localrepo.localrepository, 'listkeys',
550 545 _listkeys)
551 546 wrappedlistkeys = True
552 547 heads = list((set(newheads) | set(heads)) - set(scratchheads))
553 548 result = orig(repo, source, heads=heads,
554 549 bundlecaps=bundlecaps, **kwargs)
555 550 finally:
556 551 if wrappedchangegrouppart:
557 552 exchange.getbundle2partsmapping['changegroup'] = oldchangegrouppart
558 553 if wrappedlistkeys:
559 554 extensions.unwrapfunction(localrepo.localrepository, 'listkeys',
560 555 _listkeys)
561 556 return result
562 557
563 558 def _lookupwrap(orig):
564 559 def _lookup(repo, proto, key):
565 560 localkey = encoding.tolocal(key)
566 561
567 562 if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
568 563 scratchnode = repo.bundlestore.index.getnode(localkey)
569 564 if scratchnode:
570 565 return "%s %s\n" % (1, scratchnode)
571 566 else:
572 567 return "%s %s\n" % (0, 'scratch branch %s not found' % localkey)
573 568 else:
574 569 try:
575 570 r = hex(repo.lookup(localkey))
576 571 return "%s %s\n" % (1, r)
577 572 except Exception as inst:
578 573 if repo.bundlestore.index.getbundle(localkey):
579 574 return "%s %s\n" % (1, localkey)
580 575 else:
581 576 r = str(inst)
582 577 return "%s %s\n" % (0, r)
583 578 return _lookup
584 579
585 580 def _update(orig, ui, repo, node=None, rev=None, **opts):
586 581 if rev and node:
587 582 raise error.Abort(_("please specify just one revision"))
588 583
589 584 if not opts.get('date') and (rev or node) not in repo:
590 585 mayberemote = rev or node
591 586 mayberemote = _tryhoist(ui, mayberemote)
592 587 dopull = False
593 588 kwargs = {}
594 589 if _scratchbranchmatcher(mayberemote):
595 590 dopull = True
596 591 kwargs['bookmark'] = [mayberemote]
597 592 elif len(mayberemote) == 40 and _maybehash(mayberemote):
598 593 dopull = True
599 594 kwargs['rev'] = [mayberemote]
600 595
601 596 if dopull:
602 597 ui.warn(
603 598 _("'%s' does not exist locally - looking for it " +
604 599 "remotely...\n") % mayberemote)
605 600 # Try pulling node from remote repo
606 601 try:
607 602 cmdname = '^pull'
608 603 pullcmd = commands.table[cmdname][0]
609 604 pullopts = dict(opt[1:3] for opt in commands.table[cmdname][1])
610 605 pullopts.update(kwargs)
611 606 pullcmd(ui, repo, **pullopts)
612 607 except Exception:
613 608 ui.warn(_('pull failed: %s\n') % sys.exc_info()[1])
614 609 else:
615 610 ui.warn(_("'%s' found remotely\n") % mayberemote)
616 611 return orig(ui, repo, node, rev, **opts)
617 612
618 613 def _pull(orig, ui, repo, source="default", **opts):
619 614 # Copy-paste from `pull` command
620 615 source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
621 616
622 617 scratchbookmarks = {}
623 618 unfi = repo.unfiltered()
624 619 unknownnodes = []
625 620 for rev in opts.get('rev', []):
626 621 if rev not in unfi:
627 622 unknownnodes.append(rev)
628 623 if opts.get('bookmark'):
629 624 bookmarks = []
630 625 revs = opts.get('rev') or []
631 626 for bookmark in opts.get('bookmark'):
632 627 if _scratchbranchmatcher(bookmark):
633 628 # rev is not known yet
634 629 # it will be fetched with listkeyspatterns next
635 630 scratchbookmarks[bookmark] = 'REVTOFETCH'
636 631 else:
637 632 bookmarks.append(bookmark)
638 633
639 634 if scratchbookmarks:
640 635 other = hg.peer(repo, opts, source)
641 636 fetchedbookmarks = other.listkeyspatterns(
642 637 'bookmarks', patterns=scratchbookmarks)
643 638 for bookmark in scratchbookmarks:
644 639 if bookmark not in fetchedbookmarks:
645 640 raise error.Abort('remote bookmark %s not found!' %
646 641 bookmark)
647 642 scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
648 643 revs.append(fetchedbookmarks[bookmark])
649 644 opts['bookmark'] = bookmarks
650 645 opts['rev'] = revs
651 646
652 647 if scratchbookmarks or unknownnodes:
653 648 # Set anyincoming to True
654 649 extensions.wrapfunction(discovery, 'findcommonincoming',
655 650 _findcommonincoming)
656 651 try:
657 652 # Remote scratch bookmarks will be deleted because remotenames doesn't
658 653 # know about them. Let's save them before the pull and restore them after
659 654 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
660 655 result = orig(ui, repo, source, **opts)
661 656 # TODO(stash): race condition is possible
662 657 # if scratch bookmarks were updated right after orig.
663 658 # But that's unlikely and shouldn't be harmful.
664 659 if common.isremotebooksenabled(ui):
665 660 remotescratchbookmarks.update(scratchbookmarks)
666 661 _saveremotebookmarks(repo, remotescratchbookmarks, source)
667 662 else:
668 663 _savelocalbookmarks(repo, scratchbookmarks)
669 664 return result
670 665 finally:
671 666 if scratchbookmarks:
672 667 extensions.unwrapfunction(discovery, 'findcommonincoming')
673 668
674 669 def _readscratchremotebookmarks(ui, repo, other):
675 670 if common.isremotebooksenabled(ui):
676 671 remotenamesext = extensions.find('remotenames')
677 672 remotepath = remotenamesext.activepath(repo.ui, other)
678 673 result = {}
679 674 # Let's refresh remotenames to make sure they are up to date.
680 675 # It seems that `repo.names['remotebookmarks']` may return stale bookmarks,
681 676 # which results in deleting scratch bookmarks. Our best guess at how to
682 677 # fix it is to use `clearnames()`
683 678 repo._remotenames.clearnames()
684 679 for remotebookmark in repo.names['remotebookmarks'].listnames(repo):
685 680 path, bookname = remotenamesext.splitremotename(remotebookmark)
686 681 if path == remotepath and _scratchbranchmatcher(bookname):
687 682 nodes = repo.names['remotebookmarks'].nodes(repo,
688 683 remotebookmark)
689 684 if nodes:
690 685 result[bookname] = hex(nodes[0])
691 686 return result
692 687 else:
693 688 return {}
694 689
695 690 def _saveremotebookmarks(repo, newbookmarks, remote):
696 691 remotenamesext = extensions.find('remotenames')
697 692 remotepath = remotenamesext.activepath(repo.ui, remote)
698 693 branches = collections.defaultdict(list)
699 694 bookmarks = {}
700 695 remotenames = remotenamesext.readremotenames(repo)
701 696 for hexnode, nametype, remote, rname in remotenames:
702 697 if remote != remotepath:
703 698 continue
704 699 if nametype == 'bookmarks':
705 700 if rname in newbookmarks:
706 701 # It's possible that we have a normal bookmark that matches
707 702 # the scratch branch pattern. In this case just use the current
708 703 # bookmark node
709 704 del newbookmarks[rname]
710 705 bookmarks[rname] = hexnode
711 706 elif nametype == 'branches':
712 707 # saveremotenames expects 20 byte binary nodes for branches
713 708 branches[rname].append(bin(hexnode))
714 709
715 710 for bookmark, hexnode in newbookmarks.iteritems():
716 711 bookmarks[bookmark] = hexnode
717 712 remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)
718 713
719 714 def _savelocalbookmarks(repo, bookmarks):
720 715 if not bookmarks:
721 716 return
722 717 with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr:
723 718 changes = []
724 719 for scratchbook, node in bookmarks.iteritems():
725 720 changectx = repo[node]
726 721 changes.append((scratchbook, changectx.node()))
727 722 repo._bookmarks.applychanges(repo, tr, changes)
728 723
729 724 def _findcommonincoming(orig, *args, **kwargs):
730 725 common, inc, remoteheads = orig(*args, **kwargs)
731 726 return common, True, remoteheads
732 727
733 728 def _push(orig, ui, repo, dest=None, *args, **opts):
734 729
735 730 bookmark = opts.get('bookmark')
736 731 # we only support pushing one infinitepush bookmark at once
737 732 if len(bookmark) == 1:
738 733 bookmark = bookmark[0]
739 734 else:
740 735 bookmark = ''
741 736
742 737 oldphasemove = None
743 738 overrides = {(experimental, configbookmark): bookmark}
744 739
745 740 with ui.configoverride(overrides, 'infinitepush'):
746 741 scratchpush = opts.get('bundle_store')
747 742 if _scratchbranchmatcher(bookmark):
748 743 scratchpush = True
749 744 # bundle2 can be sent back after push (for example, bundle2
750 745 # containing `pushkey` part to update bookmarks)
751 746 ui.setconfig(experimental, 'bundle2.pushback', True)
752 747
753 748 ui.setconfig(experimental, confignonforwardmove,
754 749 opts.get('non_forward_move'), '--non-forward-move')
755 750 if scratchpush:
756 751 # this is an infinitepush; we don't want the bookmark to be applied
757 752 # here, it should instead be stored in the bundlestore
758 753 opts['bookmark'] = []
759 754 ui.setconfig(experimental, configscratchpush, True)
760 755 oldphasemove = extensions.wrapfunction(exchange,
761 756 '_localphasemove',
762 757 _phasemove)
763 758 # Copy-paste from `push` command
764 759 path = ui.paths.getpath(dest, default=('default-push', 'default'))
765 760 if not path:
766 761 raise error.Abort(_('default repository not configured!'),
767 762 hint=_("see 'hg help config.paths'"))
768 763 destpath = path.pushloc or path.loc
769 764 # Remote scratch bookmarks will be deleted because remotenames doesn't
770 765 # know about them. Let's save them before the push and restore them after
771 766 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
772 767 result = orig(ui, repo, dest, *args, **opts)
773 768 if common.isremotebooksenabled(ui):
774 769 if bookmark and scratchpush:
775 770 other = hg.peer(repo, opts, destpath)
776 771 fetchedbookmarks = other.listkeyspatterns('bookmarks',
777 772 patterns=[bookmark])
778 773 remotescratchbookmarks.update(fetchedbookmarks)
779 774 _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
780 775 if oldphasemove:
781 776 exchange._localphasemove = oldphasemove
782 777 return result
783 778
784 779 def _deleteinfinitepushbookmarks(ui, repo, path, names):
785 780 """Prune remote names by removing the bookmarks we don't want anymore,
786 781 then writing the result back to disk
787 782 """
788 783 remotenamesext = extensions.find('remotenames')
789 784
790 785 # remotename format is:
791 786 # (node, nametype ("branches" or "bookmarks"), remote, name)
792 787 nametype_idx = 1
793 788 remote_idx = 2
794 789 name_idx = 3
795 790 remotenames = [remotename for remotename in \
796 791 remotenamesext.readremotenames(repo) \
797 792 if remotename[remote_idx] == path]
798 793 remote_bm_names = [remotename[name_idx] for remotename in \
799 794 remotenames if remotename[nametype_idx] == "bookmarks"]
800 795
801 796 for name in names:
802 797 if name not in remote_bm_names:
803 798 raise error.Abort(_("infinitepush bookmark '{}' does not exist "
804 799 "in path '{}'").format(name, path))
805 800
806 801 bookmarks = {}
807 802 branches = collections.defaultdict(list)
808 803 for node, nametype, remote, name in remotenames:
809 804 if nametype == "bookmarks" and name not in names:
810 805 bookmarks[name] = node
811 806 elif nametype == "branches":
812 807 # saveremotenames wants binary nodes for branches
813 808 branches[name].append(bin(node))
814 809
815 810 remotenamesext.saveremotenames(repo, path, branches, bookmarks)
816 811
817 812 def _phasemove(orig, pushop, nodes, phase=phases.public):
818 813 """prevent commits from being marked public
819 814
820 815 Since these are going to a scratch branch, they aren't really being
821 816 published."""
822 817
823 818 if phase != phases.public:
824 819 orig(pushop, nodes, phase)
825 820
826 821 @exchange.b2partsgenerator(scratchbranchparttype)
827 822 def partgen(pushop, bundler):
828 823 bookmark = pushop.ui.config(experimental, configbookmark)
829 824 scratchpush = pushop.ui.configbool(experimental, configscratchpush)
830 825 if 'changesets' in pushop.stepsdone or not scratchpush:
831 826 return
832 827
833 828 if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
834 829 return
835 830
836 831 pushop.stepsdone.add('changesets')
837 832 if not pushop.outgoing.missing:
838 833 pushop.ui.status(_('no changes found\n'))
839 834 pushop.cgresult = 0
840 835 return
841 836
842 837 # This parameter tells the server that the following bundle is an
843 838 # infinitepush. This lets it switch the part processing to our infinitepush
844 839 # code path.
845 840 bundler.addparam("infinitepush", "True")
846 841
847 842 nonforwardmove = pushop.force or pushop.ui.configbool(experimental,
848 843 confignonforwardmove)
849 844 scratchparts = bundleparts.getscratchbranchparts(pushop.repo,
850 845 pushop.remote,
851 846 pushop.outgoing,
852 847 nonforwardmove,
853 848 pushop.ui,
854 849 bookmark)
855 850
856 851 for scratchpart in scratchparts:
857 852 bundler.addpart(scratchpart)
858 853
859 854 def handlereply(op):
860 855 # server either succeeds or aborts; no code to read
861 856 pushop.cgresult = 1
862 857
863 858 return handlereply
864 859
865 860 bundle2.capabilities[bundleparts.scratchbranchparttype] = ()
866 861
867 862 def _getrevs(bundle, oldnode, force, bookmark):
868 863 'extracts and validates the revs to be imported'
869 864 revs = [bundle[r] for r in bundle.revs('sort(bundle())')]
870 865
871 866 # new bookmark
872 867 if oldnode is None:
873 868 return revs
874 869
875 870 # Fast forward update
876 871 if oldnode in bundle and list(bundle.set('bundle() & %s::', oldnode)):
877 872 return revs
878 873
879 874 # Forced non-fast forward update
880 875 if force:
881 876 return revs
882 877 else:
883 878 raise error.Abort(_('non-forward push'),
884 879 hint=_('use --non-forward-move to override'))
885 880
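The checks in _getrevs reduce to a three-way rule; a compact, self-contained sketch:

    def accept(oldnode, fastforward, force):
        # mirrors _getrevs: new bookmarks, fast-forward updates and forced
        # updates pass; everything else aborts as a non-forward push
        return oldnode is None or fastforward or force

    assert accept(None, False, False)        # new bookmark
    assert accept('aa015dd9', True, False)   # fast-forward
    assert accept('aa015dd9', False, True)   # forced
    assert not accept('aa015dd9', False, False)
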
886 881 @contextlib.contextmanager
887 882 def logservicecall(logger, service, **kwargs):
888 883 start = time.time()
889 884 logger(service, eventtype='start', **kwargs)
890 885 try:
891 886 yield
892 887 logger(service, eventtype='success',
893 888 elapsedms=(time.time() - start) * 1000, **kwargs)
894 889 except Exception as e:
895 890 logger(service, eventtype='failure',
896 891 elapsedms=(time.time() - start) * 1000, errormsg=str(e),
897 892 **kwargs)
898 893 raise
899 894
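logservicecall is used later in this module with the partial logger built by _getorcreateinfinitepushlogger; a minimal usage sketch with a stand-in logger:

    def demologger(service, **kwargs):   # stand-in for the ui.log partial
        print(service, kwargs)

    with logservicecall(demologger, 'bundlestore', bundlesize=42):
        pass  # timed work; 'start' then 'success' or 'failure' is logged
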
900 895 def _getorcreateinfinitepushlogger(op):
901 896 logger = op.records['infinitepushlogger']
902 897 if not logger:
903 898 ui = op.repo.ui
904 899 try:
905 900 username = util.getuser()
906 901 except Exception:
907 902 username = 'unknown'
908 903 # Generate random request id to be able to find all logged entries
909 904 # for the same request. Since requestid is pseudo-generated it may
910 905 # not be unique, but we assume that (hostname, username, requestid)
911 906 # is unique.
912 907 random.seed()
913 908 requestid = random.randint(0, 2000000000)
914 909 hostname = socket.gethostname()
915 910 logger = functools.partial(ui.log, 'infinitepush', user=username,
916 911 requestid=requestid, hostname=hostname,
917 912 reponame=ui.config('infinitepush',
918 913 'reponame'))
919 914 op.records.add('infinitepushlogger', logger)
920 915 else:
921 916 logger = logger[0]
922 917 return logger
923 918
924 919 def processparts(orig, repo, op, unbundler):
925 920 if unbundler.params.get('infinitepush') != 'True':
926 921 return orig(repo, op, unbundler)
927 922
928 923 handleallparts = repo.ui.configbool('infinitepush', 'storeallparts')
929 924
930 925 bundler = bundle2.bundle20(repo.ui)
931 926 cgparams = None
932 927 with bundle2.partiterator(repo, op, unbundler) as parts:
933 928 for part in parts:
934 929 bundlepart = None
935 930 if part.type == 'replycaps':
936 931 # This configures the current operation to allow reply parts.
937 932 bundle2._processpart(op, part)
938 933 elif part.type == bundleparts.scratchbranchparttype:
939 934 # Scratch branch parts need to be converted to normal
940 935 # changegroup parts, and the extra parameters stored for later
941 936 # when we upload to the store. Eventually those parameters will
942 937 # be put on the actual bundle instead of this part, then we can
943 938 # send a vanilla changegroup instead of the scratchbranch part.
944 939 cgversion = part.params.get('cgversion', '01')
945 940 bundlepart = bundle2.bundlepart('changegroup', data=part.read())
946 941 bundlepart.addparam('version', cgversion)
947 942 cgparams = part.params
948 943
949 944 # If we're not dumping all parts into the new bundle, we need to
950 945 # alert the future pushkey and phase-heads handler to skip
951 946 # the part.
952 947 if not handleallparts:
953 948 op.records.add(scratchbranchparttype + '_skippushkey', True)
954 949 op.records.add(scratchbranchparttype + '_skipphaseheads',
955 950 True)
956 951 else:
957 952 if handleallparts:
958 953 # Ideally we would not process any parts, and instead just
959 954 # forward them to the bundle for storage, but since this
960 955 # differs from previous behavior, we need to put it behind a
961 956 # config flag for incremental rollout.
962 957 bundlepart = bundle2.bundlepart(part.type, data=part.read())
963 958 for key, value in part.params.iteritems():
964 959 bundlepart.addparam(key, value)
965 960
966 961 # Certain parts require a response
967 962 if part.type == 'pushkey':
968 963 if op.reply is not None:
969 964 rpart = op.reply.newpart('reply:pushkey')
970 965 rpart.addparam('in-reply-to', str(part.id),
971 966 mandatory=False)
972 967 rpart.addparam('return', '1', mandatory=False)
973 968 else:
974 969 bundle2._processpart(op, part)
975 970
976 971 if handleallparts:
977 972 op.records.add(part.type, {
978 973 'return': 1,
979 974 })
980 975 if bundlepart:
981 976 bundler.addpart(bundlepart)
982 977
983 978 # If commits were sent, store them
984 979 if cgparams:
985 980 buf = util.chunkbuffer(bundler.getchunks())
986 981 fd, bundlefile = tempfile.mkstemp()
987 982 try:
988 983 try:
989 984 fp = os.fdopen(fd, 'wb')
990 985 fp.write(buf.read())
991 986 finally:
992 987 fp.close()
993 988 storebundle(op, cgparams, bundlefile)
994 989 finally:
995 990 try:
996 991 os.unlink(bundlefile)
997 992 except Exception:
998 993 # we would rather see the original exception
999 994 pass
1000 995
1001 996 def storebundle(op, params, bundlefile):
1002 997 log = _getorcreateinfinitepushlogger(op)
1003 998 parthandlerstart = time.time()
1004 999 log(scratchbranchparttype, eventtype='start')
1005 1000 index = op.repo.bundlestore.index
1006 1001 store = op.repo.bundlestore.store
1007 1002 op.records.add(scratchbranchparttype + '_skippushkey', True)
1008 1003
1009 1004 bundle = None
1010 1005 try: # guards bundle
1011 1006 bundlepath = "bundle:%s+%s" % (op.repo.root, bundlefile)
1012 1007 bundle = hg.repository(op.repo.ui, bundlepath)
1013 1008
1014 1009 bookmark = params.get('bookmark')
1015 1010 bookprevnode = params.get('bookprevnode', '')
1016 1011 force = params.get('force')
1017 1012
1018 1013 if bookmark:
1019 1014 oldnode = index.getnode(bookmark)
1020 1015 else:
1021 1016 oldnode = None
1022 1017 bundleheads = bundle.revs('heads(bundle())')
1023 1018 if bookmark and len(bundleheads) > 1:
1024 1019 raise error.Abort(
1025 1020 _('cannot push more than one head to a scratch branch'))
1026 1021
1027 1022 revs = _getrevs(bundle, oldnode, force, bookmark)
1028 1023
1029 1024 # Notify the user of what is being pushed
1030 1025 plural = 's' if len(revs) > 1 else ''
1031 1026 op.repo.ui.warn(_("pushing %s commit%s:\n") % (len(revs), plural))
1032 1027 maxoutput = 10
1033 1028 for i in range(0, min(len(revs), maxoutput)):
1034 1029 firstline = bundle[revs[i]].description().split('\n')[0][:50]
1035 1030 op.repo.ui.warn((" %s %s\n") % (revs[i], firstline))
1036 1031
1037 1032 if len(revs) > maxoutput + 1:
1038 1033 op.repo.ui.warn((" ...\n"))
1039 1034 firstline = bundle[revs[-1]].description().split('\n')[0][:50]
1040 1035 op.repo.ui.warn((" %s %s\n") % (revs[-1], firstline))
1041 1036
1042 1037 nodesctx = [bundle[rev] for rev in revs]
1043 1038 inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
1044 1039 if bundleheads:
1045 1040 newheadscount = sum(not inindex(rev) for rev in bundleheads)
1046 1041 else:
1047 1042 newheadscount = 0
1048 1043 # If there's a bookmark specified, there should be only one head,
1049 1044 # so we choose the last node, which will be that head.
1050 1045 # If a bug or malicious client allows there to be a bookmark
1051 1046 # with multiple heads, we will place the bookmark on the last head.
1052 1047 bookmarknode = nodesctx[-1].hex() if nodesctx else None
1053 1048 key = None
1054 1049 if newheadscount:
1055 1050 with open(bundlefile, 'r') as f:
1056 1051 bundledata = f.read()
1057 1052 with logservicecall(log, 'bundlestore',
1058 1053 bundlesize=len(bundledata)):
1059 1054 bundlesizelimit = 100 * 1024 * 1024 # 100 MB
1060 1055 if len(bundledata) > bundlesizelimit:
1061 1056 error_msg = ('bundle is too big: %d bytes. ' +
1062 1057 'max allowed size is 100 MB')
1063 1058 raise error.Abort(error_msg % (len(bundledata),))
1064 1059 key = store.write(bundledata)
1065 1060
1066 1061 with logservicecall(log, 'index', newheadscount=newheadscount), index:
1067 1062 if key:
1068 1063 index.addbundle(key, nodesctx)
1069 1064 if bookmark:
1070 1065 index.addbookmark(bookmark, bookmarknode)
1071 1066 _maybeaddpushbackpart(op, bookmark, bookmarknode,
1072 1067 bookprevnode, params)
1073 1068 log(scratchbranchparttype, eventtype='success',
1074 1069 elapsedms=(time.time() - parthandlerstart) * 1000)
1075 1070
1076 1071 except Exception as e:
1077 1072 log(scratchbranchparttype, eventtype='failure',
1078 1073 elapsedms=(time.time() - parthandlerstart) * 1000,
1079 1074 errormsg=str(e))
1080 1075 raise
1081 1076 finally:
1082 1077 if bundle:
1083 1078 bundle.close()
1084 1079
1085 1080 @bundle2.parthandler(scratchbranchparttype,
1086 1081 ('bookmark', 'bookprevnode', 'force',
1087 1082 'pushbackbookmarks', 'cgversion'))
1088 1083 def bundle2scratchbranch(op, part):
1089 1084 '''unbundle a bundle2 part containing a changegroup to store'''
1090 1085
1091 1086 bundler = bundle2.bundle20(op.repo.ui)
1092 1087 cgversion = part.params.get('cgversion', '01')
1093 1088 cgpart = bundle2.bundlepart('changegroup', data=part.read())
1094 1089 cgpart.addparam('version', cgversion)
1095 1090 bundler.addpart(cgpart)
1096 1091 buf = util.chunkbuffer(bundler.getchunks())
1097 1092
1098 1093 fd, bundlefile = tempfile.mkstemp()
1099 1094 try:
1100 1095 try:
1101 1096 fp = os.fdopen(fd, 'wb')
1102 1097 fp.write(buf.read())
1103 1098 finally:
1104 1099 fp.close()
1105 1100 storebundle(op, part.params, bundlefile)
1106 1101 finally:
1107 1102 try:
1108 1103 os.unlink(bundlefile)
1109 1104 except OSError as e:
1110 1105 if e.errno != errno.ENOENT:
1111 1106 raise
1112 1107
1113 1108 return 1
1114 1109
1115 1110 def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
1116 1111 if params.get('pushbackbookmarks'):
1117 1112 if op.reply and 'pushback' in op.reply.capabilities:
1118 1113 params = {
1119 1114 'namespace': 'bookmarks',
1120 1115 'key': bookmark,
1121 1116 'new': newnode,
1122 1117 'old': oldnode,
1123 1118 }
1124 1119 op.reply.newpart('pushkey', mandatoryparams=params.iteritems())
1125 1120
1126 1121 def bundle2pushkey(orig, op, part):
1127 1122 '''Wrapper of bundle2.handlepushkey()
1128 1123
1129 1124 The only goal is to skip calling the original function if flag is set.
1130 1125 It's set if infinitepush push is happening.
1131 1126 '''
1132 1127 if op.records[scratchbranchparttype + '_skippushkey']:
1133 1128 if op.reply is not None:
1134 1129 rpart = op.reply.newpart('reply:pushkey')
1135 1130 rpart.addparam('in-reply-to', str(part.id), mandatory=False)
1136 1131 rpart.addparam('return', '1', mandatory=False)
1137 1132 return 1
1138 1133
1139 1134 return orig(op, part)
1140 1135
1141 1136 def bundle2handlephases(orig, op, part):
1142 1137 '''Wrapper of bundle2.handlephases()
1143 1138
1144 1139 The only goal is to skip calling the original function if flag is set.
1145 1140 It's set if infinitepush push is happening.
1146 1141 '''
1147 1142
1148 1143 if op.records[scratchbranchparttype + '_skipphaseheads']:
1149 1144 return
1150 1145
1151 1146 return orig(op, part)
1152 1147
1153 1148 def _asyncsavemetadata(root, nodes):
1154 1149 '''starts a separate process that fills metadata for the nodes
1155 1150
1156 1151 This function creates a separate process and doesn't wait for its
1157 1152 completion. This was done to avoid slowing down pushes
1158 1153 '''
1159 1154
1160 1155 maxnodes = 50
1161 1156 if len(nodes) > maxnodes:
1162 1157 return
1163 1158 nodesargs = []
1164 1159 for node in nodes:
1165 1160 nodesargs.append('--node')
1166 1161 nodesargs.append(node)
1167 1162 with open(os.devnull, 'w+b') as devnull:
1168 1163 cmdline = [util.hgexecutable(), 'debugfillinfinitepushmetadata',
1169 1164 '-R', root] + nodesargs
1170 1165 # Process will run in background. We don't care about the return code
1171 1166 subprocess.Popen(cmdline, close_fds=True, shell=False,
1172 1167 stdin=devnull, stdout=devnull, stderr=devnull)
@@ -1,290 +1,213 b''
1 1 Testing the infinitepush extension and the config options provided by it
2 2
3 3 Setup
4 4
5 5 $ . "$TESTDIR/library-infinitepush.sh"
6 6 $ cp $HGRCPATH $TESTTMP/defaulthgrc
7 7 $ setupcommon
8 8 $ hg init repo
9 9 $ cd repo
10 10 $ setupserver
11 11 $ echo initialcommit > initialcommit
12 12 $ hg ci -Aqm "initialcommit"
13 13 $ hg phase --public .
14 14
15 15 $ cd ..
16 16 $ hg clone ssh://user@dummy/repo client -q
17 17
18 18 Create two heads. Push first head alone, then two heads together. Make sure that
19 19 multihead push works.
20 20 $ cd client
21 21 $ echo multihead1 > multihead1
22 22 $ hg add multihead1
23 23 $ hg ci -m "multihead1"
24 24 $ hg up null
25 25 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
26 26 $ echo multihead2 > multihead2
27 27 $ hg ci -Am "multihead2"
28 28 adding multihead2
29 29 created new head
30 30 $ hg push -r . --bundle-store
31 31 pushing to ssh://user@dummy/repo
32 32 searching for changes
33 33 remote: pushing 1 commit:
34 34 remote: ee4802bf6864 multihead2
35 35 $ hg push -r '1:2' --bundle-store
36 36 pushing to ssh://user@dummy/repo
37 37 searching for changes
38 38 remote: pushing 2 commits:
39 39 remote: bc22f9a30a82 multihead1
40 40 remote: ee4802bf6864 multihead2
41 41 $ scratchnodes
42 42 bc22f9a30a821118244deacbd732e394ed0b686c ab1bc557aa090a9e4145512c734b6e8a828393a5
43 43 ee4802bf6864326a6b3dcfff5a03abc2a0a69b8f ab1bc557aa090a9e4145512c734b6e8a828393a5
44 44
45 45 Create two new scratch bookmarks
46 46 $ hg up 0
47 47 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
48 48 $ echo scratchfirstpart > scratchfirstpart
49 49 $ hg ci -Am "scratchfirstpart"
50 50 adding scratchfirstpart
51 51 created new head
52 52 $ hg push -r . -B scratch/firstpart
53 53 pushing to ssh://user@dummy/repo
54 54 searching for changes
55 55 remote: pushing 1 commit:
56 56 remote: 176993b87e39 scratchfirstpart
57 57 $ hg up 0
58 58 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
59 59 $ echo scratchsecondpart > scratchsecondpart
60 60 $ hg ci -Am "scratchsecondpart"
61 61 adding scratchsecondpart
62 62 created new head
63 63 $ hg push -r . -B scratch/secondpart
64 64 pushing to ssh://user@dummy/repo
65 65 searching for changes
66 66 remote: pushing 1 commit:
67 67 remote: 8db3891c220e scratchsecondpart
68 68
69 69 Pull two bookmarks from the second client
70 70 $ cd ..
71 71 $ hg clone ssh://user@dummy/repo client2 -q
72 72 $ cd client2
73 73 $ hg pull -B scratch/firstpart -B scratch/secondpart
74 74 pulling from ssh://user@dummy/repo
75 75 searching for changes
76 76 adding changesets
77 77 adding manifests
78 78 adding file changes
79 79 added 1 changesets with 1 changes to 1 files
80 80 adding changesets
81 81 adding manifests
82 82 adding file changes
83 83 added 1 changesets with 1 changes to 1 files (+1 heads)
84 84 new changesets * (glob)
85 85 (run 'hg heads' to see heads, 'hg merge' to merge)
86 86 $ hg log -r scratch/secondpart -T '{node}'
87 87 8db3891c220e216f6da214e8254bd4371f55efca (no-eol)
88 88 $ hg log -r scratch/firstpart -T '{node}'
89 89 176993b87e39bd88d66a2cccadabe33f0b346339 (no-eol)
90 90 Make two commits to the scratch branch
91 91
92 92 $ echo testpullbycommithash1 > testpullbycommithash1
93 93 $ hg ci -Am "testpullbycommithash1"
94 94 adding testpullbycommithash1
95 95 created new head
96 96 $ hg log -r '.' -T '{node}\n' > ../testpullbycommithash1
97 97 $ echo testpullbycommithash2 > testpullbycommithash2
98 98 $ hg ci -Aqm "testpullbycommithash2"
99 99 $ hg push -r . -B scratch/mybranch -q
100 100
101 101 Create third client and pull by commit hash.
102 102 Make sure testpullbycommithash2 has not been fetched
103 103 $ cd ..
104 104 $ hg clone ssh://user@dummy/repo client3 -q
105 105 $ cd client3
106 106 $ hg pull -r `cat ../testpullbycommithash1`
107 107 pulling from ssh://user@dummy/repo
108 108 searching for changes
109 109 adding changesets
110 110 adding manifests
111 111 adding file changes
112 112 added 1 changesets with 1 changes to 1 files
113 113 new changesets 33910bfe6ffe
114 114 (run 'hg update' to get a working copy)
115 115 $ hg log -G -T '{desc} {phase} {bookmarks}'
116 116 o testpullbycommithash1 draft
117 117 |
118 118 @ initialcommit public
119 119
120 120 Make public commit in the repo and pull it.
121 121 Make sure phase on the client is public.
122 122 $ cd ../repo
123 123 $ echo publiccommit > publiccommit
124 124 $ hg ci -Aqm "publiccommit"
125 125 $ hg phase --public .
126 126 $ cd ../client3
127 127 $ hg pull
128 128 pulling from ssh://user@dummy/repo
129 129 searching for changes
130 130 adding changesets
131 131 adding manifests
132 132 adding file changes
133 133 added 1 changesets with 1 changes to 1 files (+1 heads)
134 134 new changesets a79b6597f322
135 135 (run 'hg heads' to see heads, 'hg merge' to merge)
136 136 $ hg log -G -T '{desc} {phase} {bookmarks} {node|short}'
137 137 o publiccommit public a79b6597f322
138 138 |
139 139 | o testpullbycommithash1 draft 33910bfe6ffe
140 140 |/
141 141 @ initialcommit public 67145f466344
142 142
143 143 $ hg up a79b6597f322
144 144 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
145 145 $ echo scratchontopofpublic > scratchontopofpublic
146 146 $ hg ci -Aqm "scratchontopofpublic"
147 147 $ hg push -r . -B scratch/scratchontopofpublic
148 148 pushing to ssh://user@dummy/repo
149 149 searching for changes
150 150 remote: pushing 1 commit:
151 151 remote: c70aee6da07d scratchontopofpublic
152 152 $ cd ../client2
153 153 $ hg pull -B scratch/scratchontopofpublic
154 154 pulling from ssh://user@dummy/repo
155 155 searching for changes
156 156 adding changesets
157 157 adding manifests
158 158 adding file changes
159 159 added 1 changesets with 1 changes to 1 files (+1 heads)
160 160 adding changesets
161 161 adding manifests
162 162 adding file changes
163 163 added 1 changesets with 1 changes to 1 files
164 164 new changesets a79b6597f322:c70aee6da07d
165 165 (run 'hg heads .' to see heads, 'hg merge' to merge)
166 166 $ hg log -r scratch/scratchontopofpublic -T '{phase}'
167 167 draft (no-eol)
168 168 Strip scratchontopofpublic commit and do hg update
169 169 $ hg log -r tip -T '{node}\n'
170 170 c70aee6da07d7cdb9897375473690df3a8563339
171 171 $ echo "[extensions]" >> .hg/hgrc
172 172 $ echo "strip=" >> .hg/hgrc
173 173 $ hg strip -q tip
174 174 $ hg up c70aee6da07d7cdb9897375473690df3a8563339
175 175 'c70aee6da07d7cdb9897375473690df3a8563339' does not exist locally - looking for it remotely...
176 176 pulling from ssh://user@dummy/repo
177 177 searching for changes
178 178 adding changesets
179 179 adding manifests
180 180 adding file changes
181 181 added 1 changesets with 1 changes to 1 files
182 182 new changesets c70aee6da07d
183 183 (run 'hg update' to get a working copy)
184 184 'c70aee6da07d7cdb9897375473690df3a8563339' found remotely
185 185 2 files updated, 0 files merged, 2 files removed, 0 files unresolved
186 186
187 187 Trying to pull from bad path
188 188 $ hg strip -q tip
189 189 $ hg --config paths.default=badpath up c70aee6da07d7cdb9897375473690df3a8563339
190 190 'c70aee6da07d7cdb9897375473690df3a8563339' does not exist locally - looking for it remotely...
191 191 pulling from $TESTTMP/client2/badpath (glob)
192 192 pull failed: repository $TESTTMP/client2/badpath not found
193 193 abort: unknown revision 'c70aee6da07d7cdb9897375473690df3a8563339'!
194 194 [255]
195 195
196 196 Strip commit and pull it using hg update with bookmark name
197 197 $ hg strip -q d8fde0ddfc96
198 198 $ hg book -d scratch/mybranch
199 199 $ hg up scratch/mybranch
200 200 'scratch/mybranch' does not exist locally - looking for it remotely...
201 201 pulling from ssh://user@dummy/repo
202 202 searching for changes
203 203 adding changesets
204 204 adding manifests
205 205 adding file changes
206 206 added 1 changesets with 1 changes to 2 files
207 207 new changesets d8fde0ddfc96
208 208 (run 'hg update' to get a working copy)
209 209 'scratch/mybranch' found remotely
210 210 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
211 211 (activating bookmark scratch/mybranch)
212 212 $ hg log -r scratch/mybranch -T '{node}'
213 213 d8fde0ddfc962183977f92d2bc52d303b8840f9d (no-eol)
214
215 Test debugfillinfinitepushmetadata
216 $ cd ../repo
217 $ hg debugfillinfinitepushmetadata
218 abort: nodes are not specified
219 [255]
220 $ hg debugfillinfinitepushmetadata --node randomnode
221 abort: node randomnode is not found
222 [255]
223 $ hg debugfillinfinitepushmetadata --node d8fde0ddfc962183977f92d2bc52d303b8840f9d
224 $ cat .hg/scratchbranches/index/nodemetadatamap/d8fde0ddfc962183977f92d2bc52d303b8840f9d
225 {"changed_files": {"testpullbycommithash2": {"adds": 1, "isbinary": false, "removes": 0, "status": "added"}}} (no-eol)
226
227 $ cd ../client
228 $ hg up d8fde0ddfc962183977f92d2bc52d303b8840f9d
229 'd8fde0ddfc962183977f92d2bc52d303b8840f9d' does not exist locally - looking for it remotely...
230 pulling from ssh://user@dummy/repo
231 searching for changes
232 adding changesets
233 adding manifests
234 adding file changes
235 added 2 changesets with 2 changes to 2 files (+1 heads)
236 new changesets 33910bfe6ffe:d8fde0ddfc96
237 (run 'hg heads .' to see heads, 'hg merge' to merge)
238 'd8fde0ddfc962183977f92d2bc52d303b8840f9d' found remotely
239 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
240 $ echo file > file
241 $ hg add file
242 $ hg rm testpullbycommithash2
243 $ hg ci -m 'add and rm files'
244 $ hg log -r . -T '{node}\n'
245 3edfe7e9089ab9f728eb8e0d0c62a5d18cf19239
246 $ hg cp file cpfile
247 $ hg mv file mvfile
248 $ hg ci -m 'cpfile and mvfile'
249 $ hg log -r . -T '{node}\n'
250 c7ac39f638c6b39bcdacf868fa21b6195670f8ae
251 $ hg push -r . --bundle-store
252 pushing to ssh://user@dummy/repo
253 searching for changes
254 remote: pushing 4 commits:
255 remote: 33910bfe6ffe testpullbycommithash1
256 remote: d8fde0ddfc96 testpullbycommithash2
257 remote: 3edfe7e9089a add and rm files
258 remote: c7ac39f638c6 cpfile and mvfile
259 $ cd ../repo
260 $ hg debugfillinfinitepushmetadata --node 3edfe7e9089ab9f728eb8e0d0c62a5d18cf19239 --node c7ac39f638c6b39bcdacf868fa21b6195670f8ae
261 $ cat .hg/scratchbranches/index/nodemetadatamap/3edfe7e9089ab9f728eb8e0d0c62a5d18cf19239
262 {"changed_files": {"file": {"adds": 1, "isbinary": false, "removes": 0, "status": "added"}, "testpullbycommithash2": {"adds": 0, "isbinary": false, "removes": 1, "status": "removed"}}} (no-eol)
263 $ cat .hg/scratchbranches/index/nodemetadatamap/c7ac39f638c6b39bcdacf868fa21b6195670f8ae
264 {"changed_files": {"cpfile": {"adds": 1, "copies": "file", "isbinary": false, "removes": 0, "status": "added"}, "file": {"adds": 0, "isbinary": false, "removes": 1, "status": "removed"}, "mvfile": {"adds": 1, "copies": "file", "isbinary": false, "removes": 0, "status": "added"}}} (no-eol)
265
266 Test infinitepush.metadatafilelimit number
267 $ cd ../client
268 $ echo file > file
269 $ hg add file
270 $ echo file1 > file1
271 $ hg add file1
272 $ echo file2 > file2
273 $ hg add file2
274 $ hg ci -m 'add many files'
275 $ hg log -r . -T '{node}'
276 09904fb20c53ff351bd3b1d47681f569a4dab7e5 (no-eol)
277 $ hg push -r . --bundle-store
278 pushing to ssh://user@dummy/repo
279 searching for changes
280 remote: pushing 5 commits:
281 remote: 33910bfe6ffe testpullbycommithash1
282 remote: d8fde0ddfc96 testpullbycommithash2
283 remote: 3edfe7e9089a add and rm files
284 remote: c7ac39f638c6 cpfile and mvfile
285 remote: 09904fb20c53 add many files
286
287 $ cd ../repo
288 $ hg debugfillinfinitepushmetadata --node 09904fb20c53ff351bd3b1d47681f569a4dab7e5 --config infinitepush.metadatafilelimit=2
289 $ cat .hg/scratchbranches/index/nodemetadatamap/09904fb20c53ff351bd3b1d47681f569a4dab7e5
290 {"changed_files": {"file": {"adds": 1, "isbinary": false, "removes": 0, "status": "added"}, "file1": {"adds": 1, "isbinary": false, "removes": 0, "status": "added"}}, "changed_files_truncated": true} (no-eol)
1 NO CONTENT: file was removed