infinitepush: move the extension to core from fb-hgext...
Pulkit Goyal, r37204:03ff17a4 default
@@ -0,0 +1,23 b''
1 ## What is it?
2
3 This extension adds the ability to save certain pushes to a remote blob store
4 as bundles and to serve commits from the remote blob store.
5 The revisions are stored on disk or in everstore.
6 The metadata is stored in sql or on disk.
7
8 ## Config options
9
10 infinitepush.branchpattern: pattern to detect a scratchbranch, for example
11 're:scratch/.+'
12
13 infinitepush.indextype: disk or sql for the metadata
14 infinitepush.reponame: only relevant for the sql metadata backend; the
15 reponame to store in sql
16
17 infinitepush.indexpath: only relevant for the ondisk metadata backend; the
18 path to store the index on disk. If not set, it will be stored under .hg
19 in a folder named filebundlestore
20
21 infinitepush.storepath: only relevant for the ondisk metadata backend; the
22 path to store the bundles. If not set, it defaults to
23 .hg/filebundlestore
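
Putting these options together, a minimal server-side setup using the on-disk
backends might look like the sketch below (paths and the `server`/`storetype`
options come from the extension's module docstring; all values are
illustrative):

```ini
[infinitepush]
server = True
# treat bookmarks like scratch/* as scratch branches
branchpattern = re:scratch/.+
# keep both the metadata index and the bundles on local disk
indextype = disk
indexpath = /var/hg/infinitepush/index
storetype = disk
storepath = /var/hg/infinitepush/bundles
```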
@@ -0,0 +1,1428 b''
1 # Infinite push
2 #
3 # Copyright 2016 Facebook, Inc.
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7 """ store some pushes in a remote blob store on the server (EXPERIMENTAL)
8
9 [infinitepush]
10 # Server-side and client-side option. Pattern of the infinitepush bookmark
11 branchpattern = PATTERN
12
13 # Server or client
14 server = False
15
16 # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
17 indextype = disk
18
19 # Server-side option. Used only if indextype=sql.
20 # Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
21 sqlhost = IP:PORT:DB_NAME:USER:PASSWORD
22
23 # Server-side option. Used only if indextype=disk.
24 # Filesystem path to the index store
25 indexpath = PATH
26
27 # Server-side option. Possible values: 'disk' or 'external'
28 # Fails if not set
29 storetype = disk
30
31 # Server-side option.
32 # Path to the binary that will save bundle to the bundlestore
33 # Formatted cmd line will be passed to it (see `put_args`)
34 put_binary = put
35
36 # Server-side option. Used only if storetype=external.
37 # Format cmd-line string for put binary. Placeholder: {filename}
38 put_args = {filename}
39
40 # Server-side option.
41 # Path to the binary that gets the bundle from the bundlestore.
42 # Formatted cmd line will be passed to it (see `get_args`)
43 get_binary = get
44
45 # Server-side option. Used only if storetype=external.
46 # Format cmd-line string for get binary. Placeholders: {filename} {handle}
47 get_args = {filename} {handle}
48
49 # Server-side option
50 logfile = FILE
51
52 # Server-side option
53 loglevel = DEBUG
54
55 # Server-side option. Used only if indextype=sql.
56 # Sets mysql wait_timeout option.
57 waittimeout = 300
58
59 # Server-side option. Used only if indextype=sql.
60 # Sets mysql innodb_lock_wait_timeout option.
61 locktimeout = 120
62
63 # Server-side option. Used only if indextype=sql.
64 # Name of the repository
65 reponame = ''
66
67 # Client-side option. Used by --list-remote option. List of remote scratch
68 # patterns to list if no patterns are specified.
69 defaultremotepatterns = ['*']
70
71 # Server-side option. If a bookmark that was pushed matches
72 # `fillmetadatabranchpattern` then a background
73 # `hg debugfillinfinitepushmetadata` process will save metadata
74 # in the infinitepush index for nodes that are ancestors of the bookmark.
75 fillmetadatabranchpattern = ''
76
77 # Instructs infinitepush to forward all received bundle2 parts to the
78 # bundle for storage. Defaults to False.
79 storeallparts = True
80
81 [remotenames]
82 # Client-side option
83 # This option should be set only if remotenames extension is enabled.
84 # Whether remote bookmarks are tracked by remotenames extension.
85 bookmarks = True
86 """
87
88 from __future__ import absolute_import
89
90 import collections
91 import contextlib
92 import errno
93 import functools
94 import json
95 import logging
96 import os
97 import random
98 import re
99 import socket
100 import struct
101 import subprocess
102 import sys
103 import tempfile
104 import time
105
106 from mercurial.node import (
107 bin,
108 hex,
109 )
110
111 from mercurial.i18n import _
112
113 from mercurial import (
114 bundle2,
115 changegroup,
116 commands,
117 discovery,
118 encoding,
119 error,
120 exchange,
121 extensions,
122 hg,
123 localrepo,
124 peer,
125 phases,
126 pushkey,
127 registrar,
128 util,
129 wireproto,
130 )
131
132 from . import (
133 backupcommands,
134 bundleparts,
135 common,
136 infinitepushcommands,
137 )
138
139 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
140 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
141 # be specifying the version(s) of Mercurial they are tested with, or
142 # leave the attribute unspecified.
143 testedwith = 'ships-with-hg-core'
144
145 configtable = {}
146 configitem = registrar.configitem(configtable)
147
148 configitem('infinitepush', 'server',
149 default=False,
150 )
151 configitem('infinitepush', 'storetype',
152 default='',
153 )
154 configitem('infinitepush', 'indextype',
155 default='',
156 )
157 configitem('infinitepush', 'indexpath',
158 default='',
159 )
160 configitem('infinitepush', 'fillmetadatabranchpattern',
161 default='',
162 )
163 configitem('infinitepush', 'storeallparts',
164 default=False,
165 )
166 configitem('infinitepush', 'reponame',
167 default='',
168 )
169 configitem('infinitepush', 'bundle-stream',
170 default=False,
171 )
172 configitem('scratchbranch', 'storepath',
173 default='',
174 )
175 configitem('infinitepush', 'branchpattern',
176 default='',
177 )
178 configitem('infinitepush', 'metadatafilelimit',
179 default=100,
180 )
181 configitem('infinitepushbackup', 'autobackup',
182 default=False,
183 )
184 configitem('experimental', 'server-bundlestore-bookmark',
185 default='',
186 )
187 configitem('experimental', 'server-bundlestore-create',
188 default='',
189 )
190 configitem('experimental', 'infinitepush-scratchpush',
191 default=False,
192 )
193 configitem('experimental', 'non-forward-move',
194 default=False,
195 )
196
197 pushrebaseparttype = 'b2x:rebase'
198 experimental = 'experimental'
199 configbookmark = 'server-bundlestore-bookmark'
200 configcreate = 'server-bundlestore-create'
201 configscratchpush = 'infinitepush-scratchpush'
202 confignonforwardmove = 'non-forward-move'
203
204 scratchbranchparttype = bundleparts.scratchbranchparttype
205 cmdtable = infinitepushcommands.cmdtable
206 revsetpredicate = backupcommands.revsetpredicate
207 templatekeyword = backupcommands.templatekeyword
208 _scratchbranchmatcher = lambda x: False
209 _maybehash = re.compile(r'^[a-f0-9]+$').search
210
211 def _buildexternalbundlestore(ui):
212 put_args = ui.configlist('infinitepush', 'put_args', [])
213 put_binary = ui.config('infinitepush', 'put_binary')
214 if not put_binary:
215 raise error.Abort('put binary is not specified')
216 get_args = ui.configlist('infinitepush', 'get_args', [])
217 get_binary = ui.config('infinitepush', 'get_binary')
218 if not get_binary:
219 raise error.Abort('get binary is not specified')
220 from . import store
221 return store.externalbundlestore(put_binary, put_args, get_binary, get_args)
222
223 def _buildsqlindex(ui):
224 sqlhost = ui.config('infinitepush', 'sqlhost')
225 if not sqlhost:
226 raise error.Abort(_('please set infinitepush.sqlhost'))
227 host, port, db, user, password = sqlhost.split(':')
228 reponame = ui.config('infinitepush', 'reponame')
229 if not reponame:
230 raise error.Abort(_('please set infinitepush.reponame'))
231
232 logfile = ui.config('infinitepush', 'logfile', '')
233 waittimeout = ui.configint('infinitepush', 'waittimeout', 300)
234 locktimeout = ui.configint('infinitepush', 'locktimeout', 120)
235 from . import sqlindexapi
236 return sqlindexapi.sqlindexapi(
237 reponame, host, port, db, user, password,
238 logfile, _getloglevel(ui), waittimeout=waittimeout,
239 locktimeout=locktimeout)
240
241 def _getloglevel(ui):
242 loglevel = ui.config('infinitepush', 'loglevel', 'DEBUG')
243 numeric_loglevel = getattr(logging, loglevel.upper(), None)
244 if not isinstance(numeric_loglevel, int):
245 raise error.Abort(_('invalid log level %s') % loglevel)
246 return numeric_loglevel
247
248 def _tryhoist(ui, remotebookmark):
249 '''returns a bookmark with the hoisted part removed
250
251 The remotenames extension has a 'hoist' config that allows using remote
252 bookmarks without specifying a remote path. For example, 'hg update master'
253 works as well as 'hg update remote/master'. We want to allow the same in
254 infinitepush.
255 '''
256
257 if common.isremotebooksenabled(ui):
258 hoist = ui.config('remotenames', 'hoist') + '/'
259 if remotebookmark.startswith(hoist):
260 return remotebookmark[len(hoist):]
261 return remotebookmark
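
As a quick illustration of the hoisting described in the docstring, the string
manipulation boils down to this self-contained sketch (a literal 'remote'
stands in for the remotenames.hoist config value):

```python
# simplified stand-in for _tryhoist, without the ui/remotenames plumbing
def strip_hoist(remotebookmark, hoist='remote'):
    prefix = hoist + '/'
    if remotebookmark.startswith(prefix):
        return remotebookmark[len(prefix):]
    return remotebookmark

assert strip_hoist('remote/master') == 'master'
assert strip_hoist('master') == 'master'  # no hoist prefix, unchanged
```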
262
263 class bundlestore(object):
264 def __init__(self, repo):
265 self._repo = repo
266 storetype = self._repo.ui.config('infinitepush', 'storetype', '')
267 if storetype == 'disk':
268 from . import store
269 self.store = store.filebundlestore(self._repo.ui, self._repo)
270 elif storetype == 'external':
271 self.store = _buildexternalbundlestore(self._repo.ui)
272 else:
273 raise error.Abort(
274 _('unknown infinitepush store type specified %s') % storetype)
275
276 indextype = self._repo.ui.config('infinitepush', 'indextype', '')
277 if indextype == 'disk':
278 from . import fileindexapi
279 self.index = fileindexapi.fileindexapi(self._repo)
280 elif indextype == 'sql':
281 self.index = _buildsqlindex(self._repo.ui)
282 else:
283 raise error.Abort(
284 _('unknown infinitepush index type specified %s') % indextype)
285
286 def _isserver(ui):
287 return ui.configbool('infinitepush', 'server')
288
289 def reposetup(ui, repo):
290 if _isserver(ui) and repo.local():
291 repo.bundlestore = bundlestore(repo)
292
293 def uisetup(ui):
294 # remotenames circumvents the default push implementation entirely, so make
295 # sure we load after it so that we wrap it.
296 order = extensions._order
297 order.remove('infinitepush')
298 order.append('infinitepush')
299 extensions._order = order
300
301 def extsetup(ui):
302 # Allow writing backup files outside the normal lock
303 localrepo.localrepository._wlockfreeprefix.update([
304 backupcommands._backupstatefile,
305 backupcommands._backupgenerationfile,
306 backupcommands._backuplatestinfofile,
307 ])
308
309 commonsetup(ui)
310 if _isserver(ui):
311 serverextsetup(ui)
312 else:
313 clientextsetup(ui)
314
315 def commonsetup(ui):
316 wireproto.commands['listkeyspatterns'] = (
317 wireprotolistkeyspatterns, 'namespace patterns')
318 scratchbranchpat = ui.config('infinitepush', 'branchpattern')
319 if scratchbranchpat:
320 global _scratchbranchmatcher
321 kind, pat, _scratchbranchmatcher = util.stringmatcher(scratchbranchpat)
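
For intuition, a branchpattern of 're:scratch/.+' yields a matcher roughly
equivalent to the plain `re` sketch below (Mercurial's util.stringmatcher also
supports literal and other pattern kinds; this is only an approximation):

```python
import re

# rough stand-in for the matcher built from branchpattern 're:scratch/.+'
scratchmatch = re.compile(r'scratch/.+').search

assert bool(scratchmatch('scratch/mywork'))
assert not scratchmatch('master')
```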
322
323 def serverextsetup(ui):
324 origpushkeyhandler = bundle2.parthandlermapping['pushkey']
325
326 def newpushkeyhandler(*args, **kwargs):
327 bundle2pushkey(origpushkeyhandler, *args, **kwargs)
328 newpushkeyhandler.params = origpushkeyhandler.params
329 bundle2.parthandlermapping['pushkey'] = newpushkeyhandler
330
331 orighandlephasehandler = bundle2.parthandlermapping['phase-heads']
332 newphaseheadshandler = lambda *args, **kwargs: \
333 bundle2handlephases(orighandlephasehandler, *args, **kwargs)
334 newphaseheadshandler.params = orighandlephasehandler.params
335 bundle2.parthandlermapping['phase-heads'] = newphaseheadshandler
336
337 extensions.wrapfunction(localrepo.localrepository, 'listkeys',
338 localrepolistkeys)
339 wireproto.commands['lookup'] = (
340 _lookupwrap(wireproto.commands['lookup'][0]), 'key')
341 extensions.wrapfunction(exchange, 'getbundlechunks', getbundlechunks)
342
343 extensions.wrapfunction(bundle2, 'processparts', processparts)
344
345 def clientextsetup(ui):
346 entry = extensions.wrapcommand(commands.table, 'push', _push)
347 # Don't add the 'to' arg if it already exists
348 if not any(a for a in entry[1] if a[1] == 'to'):
349 entry[1].append(('', 'to', '', _('push revs to this bookmark')))
350
351 if not any(a for a in entry[1] if a[1] == 'non-forward-move'):
352 entry[1].append(('', 'non-forward-move', None,
353 _('allows moving a remote bookmark to an '
354 'arbitrary place')))
355
356 if not any(a for a in entry[1] if a[1] == 'create'):
357 entry[1].append(
358 ('', 'create', None, _('create a new remote bookmark')))
359
360 entry[1].append(
361 ('', 'bundle-store', None,
362 _('force push to go to bundle store (EXPERIMENTAL)')))
363
364 bookcmd = extensions.wrapcommand(commands.table, 'bookmarks', exbookmarks)
365 bookcmd[1].append(
366 ('', 'list-remote', None,
367 'list remote bookmarks. '
368 'Positional arguments are interpreted as wildcard patterns. '
369 'The only allowed wildcard is \'*\' at the end of the pattern. '
370 'If no positional arguments are specified then it will list '
371 'the most "important" remote bookmarks. '
372 'Otherwise it will list remote bookmarks '
373 'that match at least one pattern '
374 ''))
375 bookcmd[1].append(
376 ('', 'remote-path', '',
377 'name of the remote path to list the bookmarks'))
378
379 extensions.wrapcommand(commands.table, 'pull', _pull)
380 extensions.wrapcommand(commands.table, 'update', _update)
381
382 extensions.wrapfunction(discovery, 'checkheads', _checkheads)
383 extensions.wrapfunction(bundle2, '_addpartsfromopts', _addpartsfromopts)
384
385 wireproto.wirepeer.listkeyspatterns = listkeyspatterns
386
387 # Move infinitepush part before pushrebase part
388 # to avoid generation of both parts.
389 partorder = exchange.b2partsgenorder
390 index = partorder.index('changeset')
391 if pushrebaseparttype in partorder:
392 index = min(index, partorder.index(pushrebaseparttype))
393 partorder.insert(
394 index, partorder.pop(partorder.index(scratchbranchparttype)))
395
396 def wrapsmartlog(loaded):
397 if not loaded:
398 return
399 smartlogmod = extensions.find('smartlog')
400 extensions.wrapcommand(smartlogmod.cmdtable, 'smartlog', _smartlog)
401 extensions.afterloaded('smartlog', wrapsmartlog)
402 backupcommands.extsetup(ui)
403
404 def _smartlog(orig, ui, repo, **opts):
405 res = orig(ui, repo, **opts)
406 backupcommands.smartlogsummary(ui, repo)
407 return res
408
409 def _showbookmarks(ui, bookmarks, **opts):
410 # Copy-paste from commands.py
411 fm = ui.formatter('bookmarks', opts)
412 for bmark, n in sorted(bookmarks.iteritems()):
413 fm.startitem()
414 if not ui.quiet:
415 fm.plain(' ')
416 fm.write('bookmark', '%s', bmark)
417 pad = ' ' * (25 - encoding.colwidth(bmark))
418 fm.condwrite(not ui.quiet, 'node', pad + ' %s', n)
419 fm.plain('\n')
420 fm.end()
421
422 def exbookmarks(orig, ui, repo, *names, **opts):
423 pattern = opts.get('list_remote')
424 delete = opts.get('delete')
425 remotepath = opts.get('remote_path')
426 path = ui.paths.getpath(remotepath or None, default=('default'))
427 if pattern:
428 destpath = path.pushloc or path.loc
429 other = hg.peer(repo, opts, destpath)
430 if not names:
431 raise error.Abort(
432 '--list-remote requires a bookmark pattern',
433 hint='use "hg book" to get a list of your local bookmarks')
434 else:
435 fetchedbookmarks = other.listkeyspatterns('bookmarks',
436 patterns=names)
437 _showbookmarks(ui, fetchedbookmarks, **opts)
438 return
439 elif delete and 'remotenames' in extensions._extensions:
440 existing_local_bms = set(repo._bookmarks.keys())
441 scratch_bms = []
442 other_bms = []
443 for name in names:
444 if _scratchbranchmatcher(name) and name not in existing_local_bms:
445 scratch_bms.append(name)
446 else:
447 other_bms.append(name)
448
449 if len(scratch_bms) > 0:
450 if remotepath == '':
451 remotepath = 'default'
452 _deleteinfinitepushbookmarks(ui,
453 repo,
454 remotepath,
455 scratch_bms)
456
457 if len(other_bms) > 0 or len(scratch_bms) == 0:
458 return orig(ui, repo, *other_bms, **opts)
459 else:
460 return orig(ui, repo, *names, **opts)
461
462 def _checkheads(orig, pushop):
463 if pushop.ui.configbool(experimental, configscratchpush, False):
464 return
465 return orig(pushop)
466
467 def _addpartsfromopts(orig, ui, repo, bundler, *args, **kwargs):
468 """ adds a stream level part to bundle2 storing whether this is an
469 infinitepush bundle or not
470 This functionality is hidden behind a config option:
471
472 [infinitepush]
473 bundle-stream = True
474 """
475 if ui.configbool('infinitepush', 'bundle-stream', False):
476 bundler.addparam('infinitepush', True)
477 return orig(ui, repo, bundler, *args, **kwargs)
478
479 def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
480 patterns = wireproto.decodelist(patterns)
481 d = repo.listkeys(encoding.tolocal(namespace), patterns).iteritems()
482 return pushkey.encodekeys(d)
483
484 def localrepolistkeys(orig, self, namespace, patterns=None):
485 if namespace == 'bookmarks' and patterns:
486 index = self.bundlestore.index
487 results = {}
488 bookmarks = orig(self, namespace)
489 for pattern in patterns:
490 results.update(index.getbookmarks(pattern))
491 if pattern.endswith('*'):
492 pattern = 're:^' + pattern[:-1] + '.*'
493 kind, pat, matcher = util.stringmatcher(pattern)
494 for bookmark, node in bookmarks.iteritems():
495 if matcher(bookmark):
496 results[bookmark] = node
497 return results
498 else:
499 return orig(self, namespace)
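
The pattern rewriting above turns a trailing-'*' glob into an anchored regex
before matching local bookmark names; in isolation:

```python
# a trailing '*' glob becomes an anchored 're:' pattern
pattern = 'infinitepush/backups/*'
if pattern.endswith('*'):
    pattern = 're:^' + pattern[:-1] + '.*'
assert pattern == 're:^infinitepush/backups/.*'
```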
500
501 @peer.batchable
502 def listkeyspatterns(self, namespace, patterns):
503 if not self.capable('pushkey'):
504 yield {}, None
505 f = peer.future()
506 self.ui.debug('preparing listkeys for "%s" with pattern "%s"\n' %
507 (namespace, patterns))
508 yield {
509 'namespace': encoding.fromlocal(namespace),
510 'patterns': wireproto.encodelist(patterns)
511 }, f
512 d = f.value
513 self.ui.debug('received listkey for "%s": %i bytes\n'
514 % (namespace, len(d)))
515 yield pushkey.decodekeys(d)
516
517 def _readbundlerevs(bundlerepo):
518 return list(bundlerepo.revs('bundle()'))
519
520 def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
521 '''Tells remotefilelog to include all changed files in the changegroup
522
523 By default remotefilelog doesn't include file content in the changegroup,
524 but we need to include it if we are fetching from the bundlestore.
525 '''
526 changedfiles = set()
527 cl = bundlerepo.changelog
528 for r in bundlerevs:
529 # [3] means changed files
530 changedfiles.update(cl.read(r)[3])
531 if not changedfiles:
532 return bundlecaps
533
534 changedfiles = '\0'.join(changedfiles)
535 newcaps = []
536 appended = False
537 for cap in (bundlecaps or []):
538 if cap.startswith('excludepattern='):
539 newcaps.append('\0'.join((cap, changedfiles)))
540 appended = True
541 else:
542 newcaps.append(cap)
543 if not appended:
544 # Not found excludepattern cap. Just append it
545 newcaps.append('excludepattern=' + changedfiles)
546
547 return newcaps
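
In isolation, the capability rewriting above behaves like the sketch below:
the NUL-joined file list is appended to an existing 'excludepattern='
capability, or added as a new one (the 'path:secret' value is a made-up
example):

```python
changedfiles = '\0'.join(['a.txt', 'dir/b.txt'])
caps = ['excludepattern=path:secret']
newcaps = ['\0'.join((cap, changedfiles))
           if cap.startswith('excludepattern=') else cap
           for cap in caps]
assert newcaps == ['excludepattern=path:secret\x00a.txt\x00dir/b.txt']
```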
548
549 def _rebundle(bundlerepo, bundleroots, unknownhead):
550 '''
551 A bundle may include more revisions than the user requested. For example,
552 if the user asks for a revision, the bundle may also contain its descendants.
553 This function filters out all revisions that the user did not request.
554 '''
555 parts = []
556
557 version = '02'
558 outgoing = discovery.outgoing(bundlerepo, commonheads=bundleroots,
559 missingheads=[unknownhead])
560 cgstream = changegroup.makestream(bundlerepo, outgoing, version, 'pull')
561 cgstream = util.chunkbuffer(cgstream).read()
562 cgpart = bundle2.bundlepart('changegroup', data=cgstream)
563 cgpart.addparam('version', version)
564 parts.append(cgpart)
565
566 try:
567 treemod = extensions.find('treemanifest')
568 except KeyError:
569 pass
570 else:
571 if treemod._cansendtrees(bundlerepo, outgoing.missing):
572 treepart = treemod.createtreepackpart(bundlerepo, outgoing,
573 treemod.TREEGROUP_PARTTYPE2)
574 parts.append(treepart)
575
576 return parts
577
578 def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
579 cl = bundlerepo.changelog
580 bundleroots = []
581 for rev in bundlerevs:
582 node = cl.node(rev)
583 parents = cl.parents(node)
584 for parent in parents:
585 # include all revs that exist in the main repo
586 # to make sure that the bundle can be applied client-side
587 if parent in oldrepo:
588 bundleroots.append(parent)
589 return bundleroots
590
591 def _needsrebundling(head, bundlerepo):
592 bundleheads = list(bundlerepo.revs('heads(bundle())'))
593 return not (len(bundleheads) == 1 and
594 bundlerepo[bundleheads[0]].node() == head)
595
596 def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
597 '''generates a bundle that will be sent to the user
598
599 returns a list of bundle2 parts
600 '''
601 parts = []
602 if not _needsrebundling(head, bundlerepo):
603 with util.posixfile(bundlefile, "rb") as f:
604 unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
605 if isinstance(unbundler, changegroup.cg1unpacker):
606 part = bundle2.bundlepart('changegroup',
607 data=unbundler._stream.read())
608 part.addparam('version', '01')
609 parts.append(part)
610 elif isinstance(unbundler, bundle2.unbundle20):
611 haschangegroup = False
612 for part in unbundler.iterparts():
613 if part.type == 'changegroup':
614 haschangegroup = True
615 newpart = bundle2.bundlepart(part.type, data=part.read())
616 for key, value in part.params.iteritems():
617 newpart.addparam(key, value)
618 parts.append(newpart)
619
620 if not haschangegroup:
621 raise error.Abort(
622 'unexpected bundle without changegroup part, ' +
623 'head: %s' % hex(head),
624 hint='report to administrator')
625 else:
626 raise error.Abort('unknown bundle type')
627 else:
628 parts = _rebundle(bundlerepo, bundleroots, head)
629
630 return parts
631
632 def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
633 heads = heads or []
634 # newheads are parents of roots of scratch bundles that were requested
635 newphases = {}
636 scratchbundles = []
637 newheads = []
638 scratchheads = []
639 nodestobundle = {}
640 allbundlestocleanup = []
641 try:
642 for head in heads:
643 if head not in repo.changelog.nodemap:
644 if head not in nodestobundle:
645 newbundlefile = common.downloadbundle(repo, head)
646 bundlepath = "bundle:%s+%s" % (repo.root, newbundlefile)
647 bundlerepo = hg.repository(repo.ui, bundlepath)
648
649 allbundlestocleanup.append((bundlerepo, newbundlefile))
650 bundlerevs = set(_readbundlerevs(bundlerepo))
651 bundlecaps = _includefilelogstobundle(
652 bundlecaps, bundlerepo, bundlerevs, repo.ui)
653 cl = bundlerepo.changelog
654 bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
655 for rev in bundlerevs:
656 node = cl.node(rev)
657 newphases[hex(node)] = str(phases.draft)
658 nodestobundle[node] = (bundlerepo, bundleroots,
659 newbundlefile)
660
661 scratchbundles.append(
662 _generateoutputparts(head, *nodestobundle[head]))
663 newheads.extend(bundleroots)
664 scratchheads.append(head)
665 finally:
666 for bundlerepo, bundlefile in allbundlestocleanup:
667 bundlerepo.close()
668 try:
669 os.unlink(bundlefile)
670 except (IOError, OSError):
671 # if we can't cleanup the file then just ignore the error,
672 # no need to fail
673 pass
674
675 pullfrombundlestore = bool(scratchbundles)
676 wrappedchangegrouppart = False
677 wrappedlistkeys = False
678 oldchangegrouppart = exchange.getbundle2partsmapping['changegroup']
679 try:
680 def _changegrouppart(bundler, *args, **kwargs):
681 # Order is important here. First add non-scratch part
682 # and only then add parts with scratch bundles because
683 # non-scratch part contains parents of roots of scratch bundles.
684 result = oldchangegrouppart(bundler, *args, **kwargs)
685 for bundle in scratchbundles:
686 for part in bundle:
687 bundler.addpart(part)
688 return result
689
690 exchange.getbundle2partsmapping['changegroup'] = _changegrouppart
691 wrappedchangegrouppart = True
692
693 def _listkeys(orig, self, namespace):
694 origvalues = orig(self, namespace)
695 if namespace == 'phases' and pullfrombundlestore:
696 if origvalues.get('publishing') == 'True':
697 # Make repo non-publishing to preserve draft phase
698 del origvalues['publishing']
699 origvalues.update(newphases)
700 return origvalues
701
702 extensions.wrapfunction(localrepo.localrepository, 'listkeys',
703 _listkeys)
704 wrappedlistkeys = True
705 heads = list((set(newheads) | set(heads)) - set(scratchheads))
706 result = orig(repo, source, heads=heads,
707 bundlecaps=bundlecaps, **kwargs)
708 finally:
709 if wrappedchangegrouppart:
710 exchange.getbundle2partsmapping['changegroup'] = oldchangegrouppart
711 if wrappedlistkeys:
712 extensions.unwrapfunction(localrepo.localrepository, 'listkeys',
713 _listkeys)
714 return result
715
716 def _lookupwrap(orig):
717 def _lookup(repo, proto, key):
718 localkey = encoding.tolocal(key)
719
720 if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
721 scratchnode = repo.bundlestore.index.getnode(localkey)
722 if scratchnode:
723 return "%s %s\n" % (1, scratchnode)
724 else:
725 return "%s %s\n" % (0, 'scratch branch %s not found' % localkey)
726 else:
727 try:
728 r = hex(repo.lookup(localkey))
729 return "%s %s\n" % (1, r)
730 except Exception as inst:
731 if repo.bundlestore.index.getbundle(localkey):
732 return "%s %s\n" % (1, localkey)
733 else:
734 r = str(inst)
735 return "%s %s\n" % (0, r)
736 return _lookup
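
The replies above keep the wire protocol's lookup shape: '1 <value>\n' on
success and '0 <error message>\n' on failure. A hypothetical client-side parse,
just to make the format concrete:

```python
def parselookupreply(reply):
    # hypothetical helper, not part of the extension
    success, payload = reply.rstrip('\n').split(' ', 1)
    return int(success) == 1, payload

assert parselookupreply('1 deadbeef\n') == (True, 'deadbeef')
assert parselookupreply('0 scratch branch foo not found\n')[0] is False
```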
737
738 def _decodebookmarks(stream):
739 sizeofjsonsize = struct.calcsize('>i')
740 size = struct.unpack('>i', stream.read(sizeofjsonsize))[0]
741 unicodedict = json.loads(stream.read(size))
742 # python json module always returns unicode strings. We need to convert
743 # them back to byte strings
744 result = {}
745 for bookmark, node in unicodedict.iteritems():
746 bookmark = bookmark.encode('ascii')
747 node = node.encode('ascii')
748 result[bookmark] = node
749 return result
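
For reference, the matching encoder (presumably living alongside
scratchbookmarksparttype in bundleparts) emits a big-endian 4-byte JSON length
followed by the JSON itself; a minimal sketch of that wire format:

```python
import json
import struct

def encodebookmarks(bookmarks):
    # mirror of _decodebookmarks: 4-byte big-endian size, then JSON
    encoded = json.dumps(bookmarks)
    return struct.pack('>i', len(encoded)) + encoded
```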
750
751 def _update(orig, ui, repo, node=None, rev=None, **opts):
752 if rev and node:
753 raise error.Abort(_("please specify just one revision"))
754
755 if not opts.get('date') and (rev or node) not in repo:
756 mayberemote = rev or node
757 mayberemote = _tryhoist(ui, mayberemote)
758 dopull = False
759 kwargs = {}
760 if _scratchbranchmatcher(mayberemote):
761 dopull = True
762 kwargs['bookmark'] = [mayberemote]
763 elif len(mayberemote) == 40 and _maybehash(mayberemote):
764 dopull = True
765 kwargs['rev'] = [mayberemote]
766
767 if dopull:
768 ui.warn(
769 _("'%s' does not exist locally - looking for it " +
770 "remotely...\n") % mayberemote)
771 # Try pulling node from remote repo
772 try:
773 cmdname = '^pull'
774 pullcmd = commands.table[cmdname][0]
775 pullopts = dict(opt[1:3] for opt in commands.table[cmdname][1])
776 pullopts.update(kwargs)
777 pullcmd(ui, repo, **pullopts)
778 except Exception:
779 ui.warn(_('pull failed: %s\n') % sys.exc_info()[1])
780 else:
781 ui.warn(_("'%s' found remotely\n") % mayberemote)
782 return orig(ui, repo, node, rev, **opts)
783
784 def _pull(orig, ui, repo, source="default", **opts):
785 # Copy-paste from `pull` command
786 source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
787
788 scratchbookmarks = {}
789 unfi = repo.unfiltered()
790 unknownnodes = []
791 for rev in opts.get('rev', []):
792 if rev not in unfi:
793 unknownnodes.append(rev)
794 if opts.get('bookmark'):
795 bookmarks = []
796 revs = opts.get('rev') or []
797 for bookmark in opts.get('bookmark'):
798 if _scratchbranchmatcher(bookmark):
799 # rev is not known yet
800 # it will be fetched with listkeyspatterns next
801 scratchbookmarks[bookmark] = 'REVTOFETCH'
802 else:
803 bookmarks.append(bookmark)
804
805 if scratchbookmarks:
806 other = hg.peer(repo, opts, source)
807 fetchedbookmarks = other.listkeyspatterns(
808 'bookmarks', patterns=scratchbookmarks)
809 for bookmark in scratchbookmarks:
810 if bookmark not in fetchedbookmarks:
811 raise error.Abort('remote bookmark %s not found!' %
812 bookmark)
813 scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
814 revs.append(fetchedbookmarks[bookmark])
815 opts['bookmark'] = bookmarks
816 opts['rev'] = revs
817
818 try:
819 inhibitmod = extensions.find('inhibit')
820 except KeyError:
821 # Ignore if inhibit is not enabled
822 pass
823 else:
824 # Pulling revisions that were filtered results in an error.
825 # Let's inhibit them
826 unfi = repo.unfiltered()
827 for rev in opts.get('rev', []):
828 try:
829 repo[rev]
830 except error.FilteredRepoLookupError:
831 node = unfi[rev].node()
832 inhibitmod.revive([repo.unfiltered()[node]])
833 except error.RepoLookupError:
834 pass
835
836 if scratchbookmarks or unknownnodes:
837 # Set anyincoming to True
838 extensions.wrapfunction(discovery, 'findcommonincoming',
839 _findcommonincoming)
840 try:
841 # Remote scratch bookmarks will be deleted because remotenames doesn't
842 # know about them. Let's save them before the pull and restore after
843 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
844 result = orig(ui, repo, source, **opts)
845 # TODO(stash): race condition is possible
846 # if scratch bookmarks were updated right after orig.
847 # But that's unlikely and shouldn't be harmful.
848 if common.isremotebooksenabled(ui):
849 remotescratchbookmarks.update(scratchbookmarks)
850 _saveremotebookmarks(repo, remotescratchbookmarks, source)
851 else:
852 _savelocalbookmarks(repo, scratchbookmarks)
853 return result
854 finally:
855 if scratchbookmarks:
856 extensions.unwrapfunction(discovery, 'findcommonincoming')
857
858 def _readscratchremotebookmarks(ui, repo, other):
859 if common.isremotebooksenabled(ui):
860 remotenamesext = extensions.find('remotenames')
861 remotepath = remotenamesext.activepath(repo.ui, other)
862 result = {}
863 # Let's refresh remotenames to make sure we have it up to date
864 # Seems that `repo.names['remotebookmarks']` may return stale bookmarks
865 # and it results in deleting scratch bookmarks. Our best guess at how to
866 # fix it is to use `clearnames()`
867 repo._remotenames.clearnames()
868 for remotebookmark in repo.names['remotebookmarks'].listnames(repo):
869 path, bookname = remotenamesext.splitremotename(remotebookmark)
870 if path == remotepath and _scratchbranchmatcher(bookname):
871 nodes = repo.names['remotebookmarks'].nodes(repo,
872 remotebookmark)
873 if nodes:
874 result[bookname] = hex(nodes[0])
875 return result
876 else:
877 return {}
878
879 def _saveremotebookmarks(repo, newbookmarks, remote):
880 remotenamesext = extensions.find('remotenames')
881 remotepath = remotenamesext.activepath(repo.ui, remote)
882 branches = collections.defaultdict(list)
883 bookmarks = {}
884 remotenames = remotenamesext.readremotenames(repo)
885 for hexnode, nametype, remote, rname in remotenames:
886 if remote != remotepath:
887 continue
888 if nametype == 'bookmarks':
889 if rname in newbookmarks:
890 # It's possible that we have a normal bookmark that matches the
891 # scratch branch pattern. In this case just use the current
892 # bookmark node
893 del newbookmarks[rname]
894 bookmarks[rname] = hexnode
895 elif nametype == 'branches':
896 # saveremotenames expects 20 byte binary nodes for branches
897 branches[rname].append(bin(hexnode))
898
899 for bookmark, hexnode in newbookmarks.iteritems():
900 bookmarks[bookmark] = hexnode
901 remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)
902
903 def _savelocalbookmarks(repo, bookmarks):
904 if not bookmarks:
905 return
906 with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr:
907 changes = []
908 for scratchbook, node in bookmarks.iteritems():
909 changectx = repo[node]
910 changes.append((scratchbook, changectx.node()))
911 repo._bookmarks.applychanges(repo, tr, changes)
912
913 def _findcommonincoming(orig, *args, **kwargs):
914 common, inc, remoteheads = orig(*args, **kwargs)
915 return common, True, remoteheads
916
917 def _push(orig, ui, repo, dest=None, *args, **opts):
918 bookmark = opts.get('to') or ''
919 create = opts.get('create') or False
920
921 oldphasemove = None
922 overrides = {(experimental, configbookmark): bookmark,
923 (experimental, configcreate): create}
924
925 with ui.configoverride(overrides, 'infinitepush'):
926 scratchpush = opts.get('bundle_store')
927 if _scratchbranchmatcher(bookmark):
928 # Hack to fix interaction with remotenames. Remotenames pushes the
929 # '--to' bookmark to the server but we don't want to push a scratch
930 # bookmark to the server. Let's delete '--to' and '--create' and
931 # also set allow_anon to True (because if --to is not set
932 # remotenames will think that we are pushing an anonymous head)
933 if 'to' in opts:
934 del opts['to']
935 if 'create' in opts:
936 del opts['create']
937 opts['allow_anon'] = True
938 scratchpush = True
939 # bundle2 can be sent back after push (for example, bundle2
940 # containing `pushkey` part to update bookmarks)
941 ui.setconfig(experimental, 'bundle2.pushback', True)
942
943 ui.setconfig(experimental, confignonforwardmove,
944 opts.get('non_forward_move'), '--non-forward-move')
945 if scratchpush:
946 ui.setconfig(experimental, configscratchpush, True)
947 oldphasemove = extensions.wrapfunction(exchange,
948 '_localphasemove',
949 _phasemove)
950 # Copy-paste from `push` command
951 path = ui.paths.getpath(dest, default=('default-push', 'default'))
952 if not path:
953 raise error.Abort(_('default repository not configured!'),
954 hint=_("see 'hg help config.paths'"))
955 destpath = path.pushloc or path.loc
956 if destpath.startswith('svn+') and scratchpush:
957 raise error.Abort('infinite push does not work with svn repo',
958 hint='did you forget to `hg push default`?')
959 # Remote scratch bookmarks will be deleted because remotenames doesn't
960 # know about them. Let's save them before the push and restore after
961 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
962 result = orig(ui, repo, dest, *args, **opts)
963 if common.isremotebooksenabled(ui):
964 if bookmark and scratchpush:
965 other = hg.peer(repo, opts, destpath)
966 fetchedbookmarks = other.listkeyspatterns('bookmarks',
967 patterns=[bookmark])
968 remotescratchbookmarks.update(fetchedbookmarks)
969 _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
970 if oldphasemove:
971 exchange._localphasemove = oldphasemove
972 return result
973
974 def _deleteinfinitepushbookmarks(ui, repo, path, names):
975 """Prune remote names by removing the bookmarks we don't want anymore,
976 then writing the result back to disk
977 """
978 remotenamesext = extensions.find('remotenames')
979
980 # remotename format is:
981 # (node, nametype ("branches" or "bookmarks"), remote, name)
982 nametype_idx = 1
983 remote_idx = 2
984 name_idx = 3
985 remotenames = [remotename for remotename in \
986 remotenamesext.readremotenames(repo) \
987 if remotename[remote_idx] == path]
988 remote_bm_names = [remotename[name_idx] for remotename in \
989 remotenames if remotename[nametype_idx] == "bookmarks"]
990
991 for name in names:
992 if name not in remote_bm_names:
993 raise error.Abort(_("infinitepush bookmark '{}' does not exist "
994 "in path '{}'").format(name, path))
995
996 bookmarks = {}
997 branches = collections.defaultdict(list)
998 for node, nametype, remote, name in remotenames:
999 if nametype == "bookmarks" and name not in names:
1000 bookmarks[name] = node
1001 elif nametype == "branches":
1002 # saveremotenames wants binary nodes for branches
1003 branches[name].append(bin(node))
1004
1005 remotenamesext.saveremotenames(repo, path, branches, bookmarks)
1006
1007 def _phasemove(orig, pushop, nodes, phase=phases.public):
1008 """prevent commits from being marked public
1009
1010 Since these are going to a scratch branch, they aren't really being
1011 published."""
1012
1013 if phase != phases.public:
1014 orig(pushop, nodes, phase)
1015
1016 @exchange.b2partsgenerator(scratchbranchparttype)
1017 def partgen(pushop, bundler):
1018 bookmark = pushop.ui.config(experimental, configbookmark)
1019 create = pushop.ui.configbool(experimental, configcreate)
1020 scratchpush = pushop.ui.configbool(experimental, configscratchpush)
1021 if 'changesets' in pushop.stepsdone or not scratchpush:
1022 return
1023
1024 if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
1025 return
1026
1027 pushop.stepsdone.add('changesets')
1028 pushop.stepsdone.add('treepack')
1029 if not pushop.outgoing.missing:
1030 pushop.ui.status(_('no changes found\n'))
1031 pushop.cgresult = 0
1032 return
1033
1034 # This parameter tells the server that the following bundle is an
1035 # infinitepush. This lets it switch the part processing to our infinitepush
1036 # code path.
1037 bundler.addparam("infinitepush", "True")
1038
1039 nonforwardmove = pushop.force or pushop.ui.configbool(experimental,
1040 confignonforwardmove)
1041 scratchparts = bundleparts.getscratchbranchparts(pushop.repo,
1042 pushop.remote,
1043 pushop.outgoing,
1044 nonforwardmove,
1045 pushop.ui,
1046 bookmark,
1047 create)
1048
1049 for scratchpart in scratchparts:
1050 bundler.addpart(scratchpart)
1051
1052 def handlereply(op):
1053 # server either succeeds or aborts; no code to read
1054 pushop.cgresult = 1
1055
1056 return handlereply
1057
1058 bundle2.capabilities[bundleparts.scratchbranchparttype] = ()
1059 bundle2.capabilities[bundleparts.scratchbookmarksparttype] = ()
1060
1061 def _getrevs(bundle, oldnode, force, bookmark):
1062 'extracts and validates the revs to be imported'
1063 revs = [bundle[r] for r in bundle.revs('sort(bundle())')]
1064
1065 # new bookmark
1066 if oldnode is None:
1067 return revs
1068
1069 # Fast forward update
1070 if oldnode in bundle and list(bundle.set('bundle() & %s::', oldnode)):
1071 return revs
1072
1073 # Forced non-fast forward update
1074 if force:
1075 return revs
1076 else:
1077 raise error.Abort(_('non-forward push'),
1078 hint=_('use --non-forward-move to override'))
1079
1080 @contextlib.contextmanager
1081 def logservicecall(logger, service, **kwargs):
1082 start = time.time()
1083 logger(service, eventtype='start', **kwargs)
1084 try:
1085 yield
1086 logger(service, eventtype='success',
1087 elapsedms=(time.time() - start) * 1000, **kwargs)
1088 except Exception as e:
1089 logger(service, eventtype='failure',
1090 elapsedms=(time.time() - start) * 1000, errormsg=str(e),
1091 **kwargs)
1092 raise
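
A toy use of logservicecall with a list-backed logger standing in for the
functools.partial-wrapped ui.log built below in _getorcreateinfinitepushlogger:

```python
events = []
def toylog(service, **kwargs):
    events.append((service, kwargs))

# records a 'start' event, then 'success' with elapsedms
# (or 'failure' with errormsg if the block raises)
with logservicecall(toylog, 'examplestore', bundlesize=42):
    pass
assert [kw['eventtype'] for svc, kw in events] == ['start', 'success']
```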
1093
1094 def _getorcreateinfinitepushlogger(op):
1095 logger = op.records['infinitepushlogger']
1096 if not logger:
1097 ui = op.repo.ui
1098 try:
1099 username = util.getuser()
1100 except Exception:
1101 username = 'unknown'
1102 # Generate random request id to be able to find all logged entries
1103 # for the same request. Since requestid is pseudo-generated it may
1104 # not be unique, but we assume that (hostname, username, requestid)
1105 # is unique.
1106 random.seed()
1107 requestid = random.randint(0, 2000000000)
1108 hostname = socket.gethostname()
1109 logger = functools.partial(ui.log, 'infinitepush', user=username,
1110 requestid=requestid, hostname=hostname,
1111 reponame=ui.config('infinitepush',
1112 'reponame'))
1113 op.records.add('infinitepushlogger', logger)
1114 else:
1115 logger = logger[0]
1116 return logger
1117
1118 def processparts(orig, repo, op, unbundler):
1119 if unbundler.params.get('infinitepush') != 'True':
1120 return orig(repo, op, unbundler)
1121
1122 handleallparts = repo.ui.configbool('infinitepush', 'storeallparts')
1123
1124 partforwardingwhitelist = []
1125 try:
1126 treemfmod = extensions.find('treemanifest')
1127 partforwardingwhitelist.append(treemfmod.TREEGROUP_PARTTYPE2)
1128 except KeyError:
1129 pass
1130
1131 bundler = bundle2.bundle20(repo.ui)
1132 cgparams = None
1133 scratchbookpart = None
1134 with bundle2.partiterator(repo, op, unbundler) as parts:
1135 for part in parts:
1136 bundlepart = None
1137 if part.type == 'replycaps':
1138 # This configures the current operation to allow reply parts.
1139 bundle2._processpart(op, part)
1140 elif part.type == bundleparts.scratchbranchparttype:
1141 # Scratch branch parts need to be converted to normal
1142 # changegroup parts, and the extra parameters stored for later
1143 # when we upload to the store. Eventually those parameters will
1144 # be put on the actual bundle instead of this part, then we can
1145 # send a vanilla changegroup instead of the scratchbranch part.
1146 cgversion = part.params.get('cgversion', '01')
1147 bundlepart = bundle2.bundlepart('changegroup', data=part.read())
1148 bundlepart.addparam('version', cgversion)
1149 cgparams = part.params
1150
1151 # If we're not dumping all parts into the new bundle, we need to
1152 # alert the future pushkey and phase-heads handler to skip
1153 # the part.
1154 if not handleallparts:
1155 op.records.add(scratchbranchparttype + '_skippushkey', True)
1156 op.records.add(scratchbranchparttype + '_skipphaseheads',
1157 True)
1158 elif part.type == bundleparts.scratchbookmarksparttype:
1159 # Save this for later processing. Details below.
1160 #
1161 # Upstream https://phab.mercurial-scm.org/D1389 and its
1162 # follow-ups stop part.seek support to reduce memory usage
1163 # (https://bz.mercurial-scm.org/5691). So we need to copy
1164 # the part so it can be consumed later.
1165 scratchbookpart = bundleparts.copiedpart(part)
1166 else:
1167 if handleallparts or part.type in partforwardingwhitelist:
1168 # Ideally we would not process any parts, and instead just
1169 # forward them to the bundle for storage, but since this
1170 # differs from previous behavior, we need to put it behind a
1171 # config flag for incremental rollout.
1172 bundlepart = bundle2.bundlepart(part.type, data=part.read())
1173 for key, value in part.params.iteritems():
1174 bundlepart.addparam(key, value)
1175
1176 # Certain parts require a response
1177 if part.type == 'pushkey':
1178 if op.reply is not None:
1179 rpart = op.reply.newpart('reply:pushkey')
1180 rpart.addparam('in-reply-to', str(part.id),
1181 mandatory=False)
1182 rpart.addparam('return', '1', mandatory=False)
1183 else:
1184 bundle2._processpart(op, part)
1185
1186 if handleallparts:
1187 op.records.add(part.type, {
1188 'return': 1,
1189 })
1190 if bundlepart:
1191 bundler.addpart(bundlepart)
1192
1193 # If commits were sent, store them
1194 if cgparams:
1195 buf = util.chunkbuffer(bundler.getchunks())
1196 fd, bundlefile = tempfile.mkstemp()
1197 try:
1198 try:
1199 fp = os.fdopen(fd, 'wb')
1200 fp.write(buf.read())
1201 finally:
1202 fp.close()
1203 storebundle(op, cgparams, bundlefile)
1204 finally:
1205 try:
1206 os.unlink(bundlefile)
1207 except Exception:
1208 # we would rather see the original exception
1209 pass
1210
1211 # The scratch bookmark part is sent as part of a push backup. It needs to be
1212 # processed after the main bundle has been stored, so that any commits it
1213 # references are available in the store.
1214 if scratchbookpart:
1215 bundle2._processpart(op, scratchbookpart)
1216
1217 def storebundle(op, params, bundlefile):
1218 log = _getorcreateinfinitepushlogger(op)
1219 parthandlerstart = time.time()
1220 log(scratchbranchparttype, eventtype='start')
1221 index = op.repo.bundlestore.index
1222 store = op.repo.bundlestore.store
1223 op.records.add(scratchbranchparttype + '_skippushkey', True)
1224
1225 bundle = None
1226 try: # guards bundle
1227 bundlepath = "bundle:%s+%s" % (op.repo.root, bundlefile)
1228 bundle = hg.repository(op.repo.ui, bundlepath)
1229
1230 bookmark = params.get('bookmark')
1231 bookprevnode = params.get('bookprevnode', '')
1232 create = params.get('create')
1233 force = params.get('force')
1234
1235 if bookmark:
1236 oldnode = index.getnode(bookmark)
1237
1238 if not oldnode and not create:
1239 raise error.Abort("unknown bookmark %s" % bookmark,
1240 hint="use --create if you want to create one")
1241 else:
1242 oldnode = None
1243 bundleheads = bundle.revs('heads(bundle())')
1244 if bookmark and len(bundleheads) > 1:
1245 raise error.Abort(
1246 _('cannot push more than one head to a scratch branch'))
1247
1248 revs = _getrevs(bundle, oldnode, force, bookmark)
1249
1250 # Notify the user of what is being pushed
1251 plural = 's' if len(revs) > 1 else ''
1252 op.repo.ui.warn(_("pushing %s commit%s:\n") % (len(revs), plural))
1253 maxoutput = 10
1254 for i in range(0, min(len(revs), maxoutput)):
1255 firstline = bundle[revs[i]].description().split('\n')[0][:50]
1256 op.repo.ui.warn((" %s %s\n") % (revs[i], firstline))
1257
1258 if len(revs) > maxoutput + 1:
1259 op.repo.ui.warn((" ...\n"))
1260 firstline = bundle[revs[-1]].description().split('\n')[0][:50]
1261 op.repo.ui.warn((" %s %s\n") % (revs[-1], firstline))
1262
1263 nodesctx = [bundle[rev] for rev in revs]
1264 inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
1265 if bundleheads:
1266 newheadscount = sum(not inindex(rev) for rev in bundleheads)
1267 else:
1268 newheadscount = 0
1269 # If there's a bookmark specified, there should be only one head,
1270 # so we choose the last node, which will be that head.
1271 # If a bug or malicious client allows there to be a bookmark
1272 # with multiple heads, we will place the bookmark on the last head.
1273 bookmarknode = nodesctx[-1].hex() if nodesctx else None
1274 key = None
1275 if newheadscount:
1276 with open(bundlefile, 'r') as f:
1277 bundledata = f.read()
1278 with logservicecall(log, 'bundlestore',
1279 bundlesize=len(bundledata)):
1280 bundlesizelimit = 100 * 1024 * 1024 # 100 MB
1281 if len(bundledata) > bundlesizelimit:
1282 error_msg = ('bundle is too big: %d bytes. ' +
1283 'max allowed size is 100 MB')
1284 raise error.Abort(error_msg % (len(bundledata),))
1285 key = store.write(bundledata)
1286
1287 with logservicecall(log, 'index', newheadscount=newheadscount), index:
1288 if key:
1289 index.addbundle(key, nodesctx)
1290 if bookmark:
1291 index.addbookmark(bookmark, bookmarknode)
1292 _maybeaddpushbackpart(op, bookmark, bookmarknode,
1293 bookprevnode, params)
1294 log(scratchbranchparttype, eventtype='success',
1295 elapsedms=(time.time() - parthandlerstart) * 1000)
1296
1297 fillmetadatabranchpattern = op.repo.ui.config(
1298 'infinitepush', 'fillmetadatabranchpattern', '')
1299 if bookmark and fillmetadatabranchpattern:
1300 __, __, matcher = util.stringmatcher(fillmetadatabranchpattern)
1301 if matcher(bookmark):
1302 _asyncsavemetadata(op.repo.root,
1303 [ctx.hex() for ctx in nodesctx])
1304 except Exception as e:
1305 log(scratchbranchparttype, eventtype='failure',
1306 elapsedms=(time.time() - parthandlerstart) * 1000,
1307 errormsg=str(e))
1308 raise
1309 finally:
1310 if bundle:
1311 bundle.close()
1312
1313 @bundle2.b2streamparamhandler('infinitepush')
1314 def processinfinitepush(unbundler, param, value):
1315 """ process the bundle2 stream level parameter containing whether this push
1316 is an infinitepush or not. """
1317 if value and unbundler.ui.configbool('infinitepush',
1318 'bundle-stream', False):
1319 pass
1320
1321 @bundle2.parthandler(scratchbranchparttype,
1322 ('bookmark', 'bookprevnode', 'create', 'force',
1323 'pushbackbookmarks', 'cgversion'))
1324 def bundle2scratchbranch(op, part):
1325 '''unbundle a bundle2 part containing a changegroup to store'''
1326
1327 bundler = bundle2.bundle20(op.repo.ui)
1328 cgversion = part.params.get('cgversion', '01')
1329 cgpart = bundle2.bundlepart('changegroup', data=part.read())
1330 cgpart.addparam('version', cgversion)
1331 bundler.addpart(cgpart)
1332 buf = util.chunkbuffer(bundler.getchunks())
1333
1334 fd, bundlefile = tempfile.mkstemp()
1335 try:
1336 try:
1337 fp = os.fdopen(fd, 'wb')
1338 fp.write(buf.read())
1339 finally:
1340 fp.close()
1341 storebundle(op, part.params, bundlefile)
1342 finally:
1343 try:
1344 os.unlink(bundlefile)
1345 except OSError as e:
1346 if e.errno != errno.ENOENT:
1347 raise
1348
1349 return 1
1350
1351 @bundle2.parthandler(bundleparts.scratchbookmarksparttype)
1352 def bundle2scratchbookmarks(op, part):
1353 '''Handler deletes bookmarks first then adds new bookmarks.
1354 '''
1355 index = op.repo.bundlestore.index
1356 decodedbookmarks = _decodebookmarks(part)
1357 toinsert = {}
1358 todelete = []
1359 for bookmark, node in decodedbookmarks.iteritems():
1360 if node:
1361 toinsert[bookmark] = node
1362 else:
1363 todelete.append(bookmark)
1364 log = _getorcreateinfinitepushlogger(op)
1365 with logservicecall(log, bundleparts.scratchbookmarksparttype), index:
1366 if todelete:
1367 index.deletebookmarks(todelete)
1368 if toinsert:
1369 index.addmanybookmarks(toinsert)
1370
1371 def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
1372 if params.get('pushbackbookmarks'):
1373 if op.reply and 'pushback' in op.reply.capabilities:
1374 params = {
1375 'namespace': 'bookmarks',
1376 'key': bookmark,
1377 'new': newnode,
1378 'old': oldnode,
1379 }
1380 op.reply.newpart('pushkey', mandatoryparams=params.iteritems())
1381
1382 def bundle2pushkey(orig, op, part):
1383 '''Wrapper of bundle2.handlepushkey()
1384
1385 The only goal is to skip calling the original function if the flag is set.
1386 It's set when an infinitepush push is happening.
1387 '''
1388 if op.records[scratchbranchparttype + '_skippushkey']:
1389 if op.reply is not None:
1390 rpart = op.reply.newpart('reply:pushkey')
1391 rpart.addparam('in-reply-to', str(part.id), mandatory=False)
1392 rpart.addparam('return', '1', mandatory=False)
1393 return 1
1394
1395 return orig(op, part)
1396
1397 def bundle2handlephases(orig, op, part):
1398 '''Wrapper of bundle2.handlephases()
1399
1400 The only goal is to skip calling the original function if the flag is set.
1401 It's set when an infinitepush push is happening.
1402 '''
1403
1404 if op.records[scratchbranchparttype + '_skipphaseheads']:
1405 return
1406
1407 return orig(op, part)
1408
1409 def _asyncsavemetadata(root, nodes):
1410 '''starts a separate process that fills metadata for the nodes
1411
1412 This function creates a separate process and doesn't wait for its
1413 completion. This was done to avoid slowing down pushes.
1414 '''
1415
1416 maxnodes = 50
1417 if len(nodes) > maxnodes:
1418 return
1419 nodesargs = []
1420 for node in nodes:
1421 nodesargs.append('--node')
1422 nodesargs.append(node)
1423 with open(os.devnull, 'w+b') as devnull:
1424 cmdline = [util.hgexecutable(), 'debugfillinfinitepushmetadata',
1425 '-R', root] + nodesargs
1426 # Process will run in background. We don't care about the return code
1427 subprocess.Popen(cmdline, close_fds=True, shell=False,
1428 stdin=devnull, stdout=devnull, stderr=devnull)
@@ -0,0 +1,992 b''
1 # Copyright 2017 Facebook, Inc.
2 #
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
5 """
6 [infinitepushbackup]
7 # Whether to enable automatic backups. If this option is True then a backup
8 # process will be started after every mercurial command that modifies the
9 # repo, for example, commit, amend, histedit, rebase etc.
10 autobackup = False
11
12 # path to the directory where pushbackup logs should be stored
13 logdir = path/to/dir
14
15 # Backup at most maxheadstobackup heads, other heads are ignored.
16 # Negative number means backup everything.
17 maxheadstobackup = -1
18
19 # Nodes that should not be backed up. Ancestors of these nodes won't be
20 # backed up either
21 dontbackupnodes = []
22
23 # Special option that may be used to trigger a re-backup. For example,
24 # if there was a bug in infinitepush backups, then changing the value of
25 # this option will force all clients to make a "clean" backup
26 backupgeneration = 0
27
28 # Hostname value to use. If not specified then socket.gethostname() will
29 # be used
30 hostname = ''
31
32 # Enable reporting of infinitepush backup status as a summary at the end
33 # of smartlog.
34 enablestatus = False
35
36 # Whether or not to save information about the latest successful backup.
37 # This information includes the local revision number and unix timestamp
38 # of the last time we successfully made a backup.
39 savelatestbackupinfo = False
40 """
41
42 from __future__ import absolute_import
43
44 import collections
45 import errno
46 import json
47 import os
48 import re
49 import socket
50 import stat
51 import subprocess
52 import time
53
54 from mercurial.node import (
55 bin,
56 hex,
57 nullrev,
58 short,
59 )
60
61 from mercurial.i18n import _
62
63 from mercurial import (
64 bundle2,
65 changegroup,
66 commands,
67 discovery,
68 dispatch,
69 encoding,
70 error,
71 extensions,
72 hg,
73 localrepo,
74 lock as lockmod,
75 phases,
76 policy,
77 registrar,
78 scmutil,
79 util,
80 )
81
82 from . import bundleparts
83
84 getscratchbookmarkspart = bundleparts.getscratchbookmarkspart
85 getscratchbranchparts = bundleparts.getscratchbranchparts
86
87 from hgext3rd import shareutil
88
89 osutil = policy.importmod(r'osutil')
90
91 cmdtable = {}
92 command = registrar.command(cmdtable)
93 revsetpredicate = registrar.revsetpredicate()
94 templatekeyword = registrar.templatekeyword()
95
96 backupbookmarktuple = collections.namedtuple('backupbookmarktuple',
97 ['hostname', 'reporoot', 'localbookmark'])
98
99 class backupstate(object):
100 def __init__(self):
101 self.heads = set()
102 self.localbookmarks = {}
103
104 def empty(self):
105 return not self.heads and not self.localbookmarks
106
107 class WrongPermissionsException(Exception):
108 def __init__(self, logdir):
109 self.logdir = logdir
110
111 restoreoptions = [
112 ('', 'reporoot', '', 'root of the repo to restore'),
113 ('', 'user', '', 'user who ran the backup'),
114 ('', 'hostname', '', 'hostname of the repo to restore'),
115 ]
116
117 _backuplockname = 'infinitepushbackup.lock'
118
119 def extsetup(ui):
120 if ui.configbool('infinitepushbackup', 'autobackup', False):
121 extensions.wrapfunction(dispatch, 'runcommand',
122 _autobackupruncommandwrapper)
123 extensions.wrapfunction(localrepo.localrepository, 'transaction',
124 _transaction)
125
126 @command('pushbackup',
127 [('', 'background', None, 'run backup in background')])
128 def backup(ui, repo, dest=None, **opts):
129 """
130 Pushes commits, bookmarks and heads to infinitepush.
131 New non-extinct commits are saved since the last `hg pushbackup`
132 or since revision 0 if this is the first backup.
133 Local bookmarks are saved remotely as:
134 infinitepush/backups/USERNAME/HOST/REPOROOT/bookmarks/LOCAL_BOOKMARK
135 Local heads are saved remotely as:
136 infinitepush/backups/USERNAME/HOST/REPOROOT/heads/HEAD_HASH
137 """
138
139 if opts.get('background'):
140 _dobackgroundbackup(ui, repo, dest)
141 return 0
142
143 try:
144 # Wait at most 30 seconds, because that's the average backup time
145 timeout = 30
146 srcrepo = shareutil.getsrcrepo(repo)
147 with lockmod.lock(srcrepo.vfs, _backuplockname, timeout=timeout):
148 return _dobackup(ui, repo, dest, **opts)
149 except error.LockHeld as e:
150 if e.errno == errno.ETIMEDOUT:
151 ui.warn(_('timeout waiting on backup lock\n'))
152 return 0
153 else:
154 raise
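
The remote path layout named in the pushbackup docstring can be sketched as a
pure function (a hypothetical helper; the real construction happens in
BackupBookmarkNamingManager further down):

```python
def backupheadpath(username, hostname, reporoot, headhash):
    # reporoot is an absolute path, so it contributes its own leading '/'
    return 'infinitepush/backups/%s/%s%s/heads/%s' % (
        username, hostname, reporoot, headhash)

# backupheadpath('alice', 'devbox', '/home/alice/repo', 'ab12...')
# -> 'infinitepush/backups/alice/devbox/home/alice/repo/heads/ab12...'
```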
155
156 @command('pullbackup', restoreoptions)
157 def restore(ui, repo, dest=None, **opts):
158 """
159 Pulls commits from infinitepush that were previously saved with
160 `hg pushbackup`.
161 If the user has only one backup for the `dest` repo then it will be restored.
162 But the user may have backed up many local repos that point to the `dest`
163 repo. These local repos may reside on different hosts or in different
164 repo roots. That makes restore ambiguous; the `--reporoot` and `--hostname`
165 options are used to disambiguate.
166 """
167
168 other = _getremote(repo, ui, dest, **opts)
169
170 sourcereporoot = opts.get('reporoot')
171 sourcehostname = opts.get('hostname')
172 namingmgr = BackupBookmarkNamingManager(ui, repo, opts.get('user'))
173 allbackupstates = _downloadbackupstate(ui, other, sourcereporoot,
174 sourcehostname, namingmgr)
175 if len(allbackupstates) == 0:
176 ui.warn(_('no backups found!'))
177 return 1
178 _checkbackupstates(allbackupstates)
179
180 __, backupstate = allbackupstates.popitem()
181 pullcmd, pullopts = _getcommandandoptions('^pull')
182 # pull backed-up heads and nodes that are pointed to by bookmarks
183 pullopts['rev'] = list(backupstate.heads |
184 set(backupstate.localbookmarks.values()))
185 if dest:
186 pullopts['source'] = dest
187 result = pullcmd(ui, repo, **pullopts)
188
189 with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr:
190 changes = []
191 for book, hexnode in backupstate.localbookmarks.iteritems():
192 if hexnode in repo:
193 changes.append((book, bin(hexnode)))
194 else:
195 ui.warn(_('%s not found, not creating %s bookmark') %
196 (hexnode, book))
197 repo._bookmarks.applychanges(repo, tr, changes)
198
199 # manually write local backup state and flag to not autobackup
200 # just after we restored, which would be pointless
201 _writelocalbackupstate(repo.vfs,
202 list(backupstate.heads),
203 backupstate.localbookmarks)
204 repo.ignoreautobackup = True
205
206 return result
207
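# Illustrative usage (hypothetical host and repo root; assumes backups exist
# on the default infinitepush server):
#
#   $ hg pullbackup
#   $ hg pullbackup --hostname devhost --reporoot /home/jdoe/repo
#
# The second form disambiguates when several backed-up repos share `dest`.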
208 @command('getavailablebackups',
209 [('', 'user', '', _('username, defaults to current user')),
210 ('', 'json', None, _('print available backups in json format'))])
211 def getavailablebackups(ui, repo, dest=None, **opts):
212 other = _getremote(repo, ui, dest, **opts)
213
214 sourcereporoot = opts.get('reporoot')
215 sourcehostname = opts.get('hostname')
216
217 namingmgr = BackupBookmarkNamingManager(ui, repo, opts.get('user'))
218 allbackupstates = _downloadbackupstate(ui, other, sourcereporoot,
219 sourcehostname, namingmgr)
220
221 if opts.get('json'):
222 jsondict = collections.defaultdict(list)
223 for hostname, reporoot in allbackupstates.keys():
224 jsondict[hostname].append(reporoot)
225 # make sure the output is sorted. This is not an efficient way to
226 # keep the list sorted, but we don't have that many backups.
227 jsondict[hostname].sort()
228 ui.write('%s\n' % json.dumps(jsondict))
229 else:
230 if not allbackupstates:
231 ui.write(_('no backups available for %s\n') % namingmgr.username)
232
233 ui.write(_('user %s has %d available backups:\n') %
234 (namingmgr.username, len(allbackupstates)))
235
236 for hostname, reporoot in sorted(allbackupstates.keys()):
237 ui.write(_('%s on %s\n') % (reporoot, hostname))
238
239 @command('debugcheckbackup',
240 [('', 'all', None, _('check all backups that the user has')),
241 ] + restoreoptions)
242 def checkbackup(ui, repo, dest=None, **opts):
243 """
244 Checks that all the nodes a backup needs are available in the bundlestore.
245 This command can check either a specific backup (see restoreoptions) or
246 all backups for the user.
247 """
248
249 sourcereporoot = opts.get('reporoot')
250 sourcehostname = opts.get('hostname')
251
252 other = _getremote(repo, ui, dest, **opts)
253 namingmgr = BackupBookmarkNamingManager(ui, repo, opts.get('user'))
254 allbackupstates = _downloadbackupstate(ui, other, sourcereporoot,
255 sourcehostname, namingmgr)
256 if not opts.get('all'):
257 _checkbackupstates(allbackupstates)
258
259 ret = 0
260 while allbackupstates:
261 key, bkpstate = allbackupstates.popitem()
262 ui.status(_('checking %s on %s\n') % (key[1], key[0]))
263 if not _dobackupcheck(bkpstate, ui, repo, dest, **opts):
264 ret = 255
265 return ret
266
267 @command('debugwaitbackup', [('', 'timeout', '', 'timeout value')])
268 def waitbackup(ui, repo, timeout):
269 try:
270 if timeout:
271 timeout = int(timeout)
272 else:
273 timeout = -1
274 except ValueError:
275 raise error.Abort('timeout should be an integer')
276
277 try:
278 repo = shareutil.getsrcrepo(repo)
279 with lockmod.lock(repo.vfs, _backuplockname, timeout=timeout):
280 pass
281 except error.LockHeld as e:
282 if e.errno == errno.ETIMEDOUT:
283 raise error.Abort(_('timeout while waiting for backup'))
284 raise
285
286 @command('isbackedup',
287 [('r', 'rev', [], _('show the specified revision or revset'), _('REV'))])
288 def isbackedup(ui, repo, **opts):
289 """checks if commit was backed up to infinitepush
290
291 If no revision is specified then it checks the working copy parent
292 """
293
294 revs = opts.get('rev')
295 if not revs:
296 revs = ['.']
297 bkpstate = _readlocalbackupstate(ui, repo)
298 unfi = repo.unfiltered()
299 backeduprevs = unfi.revs('draft() and ::%ls', bkpstate.heads)
300 for r in scmutil.revrange(unfi, revs):
301 ui.write(unfi[r].hex() + ' ')
302 ui.write(_('backed up') if r in backeduprevs else _('not backed up'))
303 ui.write(_('\n'))
304
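# Illustrative usage (hypothetical revset; assumes a local backup state file
# exists):
#
#   $ hg isbackedup                # report on the working copy parent
#   $ hg isbackedup -r 'draft()'   # report on every draft changeset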
305 @revsetpredicate('backedup')
306 def backedup(repo, subset, x):
307 """Draft changesets that have been backed up by infinitepush"""
308 unfi = repo.unfiltered()
309 bkpstate = _readlocalbackupstate(repo.ui, repo)
310 return subset & unfi.revs('draft() and ::%ls and not hidden()',
311 bkpstate.heads)
312
313 @revsetpredicate('notbackedup')
314 def notbackedup(repo, subset, x):
315 """Changesets that have not yet been backed up by infinitepush"""
316 bkpstate = _readlocalbackupstate(repo.ui, repo)
317 bkpheads = set(bkpstate.heads)
318 candidates = set(_backupheads(repo.ui, repo))
319 notbackeduprevs = set()
320 # Find all revisions that are ancestors of the expected backup heads,
321 # stopping when we reach either a public commit or a known backup head.
322 while candidates:
323 candidate = candidates.pop()
324 if candidate not in bkpheads:
325 ctx = repo[candidate]
326 rev = ctx.rev()
327 if rev not in notbackeduprevs and ctx.phase() != phases.public:
328 # This rev may not have been backed up. Record it, and add its
329 # parents as candidates.
330 notbackeduprevs.add(rev)
331 candidates.update([p.hex() for p in ctx.parents()])
332 if notbackeduprevs:
333 # Some revisions in this set may actually have been backed up by
334 # virtue of being an ancestor of a different backup head, which may
335 # have been hidden since the backup was made. Find these and remove
336 # them from the set.
337 unfi = repo.unfiltered()
338 candidates = bkpheads
339 while candidates:
340 candidate = candidates.pop()
341 if candidate in unfi:
342 ctx = unfi[candidate]
343 if ctx.phase() != phases.public:
344 notbackeduprevs.discard(ctx.rev())
345 candidates.update([p.hex() for p in ctx.parents()])
346 return subset & notbackeduprevs
347
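# The two predicates above can be used anywhere a revset is accepted, for
# example (assuming a local backup state file exists):
#
#   $ hg log -r 'backedup()'      # draft commits that are already backed up
#   $ hg log -r 'notbackedup()'   # draft commits still awaiting backup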
348 @templatekeyword('backingup')
349 def backingup(repo, ctx, **args):
350 """Whether infinitepush is currently backing up commits."""
351 # If the backup lock exists then a backup should be in progress.
352 srcrepo = shareutil.getsrcrepo(repo)
353 return srcrepo.vfs.lexists(_backuplockname)
354
355 def smartlogsummary(ui, repo):
356 if not ui.configbool('infinitepushbackup', 'enablestatus'):
357 return
358
359 # Don't output the summary if a backup is currently in progress.
360 srcrepo = shareutil.getsrcrepo(repo)
361 if srcrepo.vfs.lexists(_backuplockname):
362 return
363
364 unbackeduprevs = repo.revs('notbackedup()')
365
366 # Count the number of changesets that haven't been backed up for 10 minutes.
367 # If there is only one, also print out its hash.
368 backuptime = time.time() - 10 * 60 # 10 minutes ago
369 count = 0
370 singleunbackeduprev = None
371 for rev in unbackeduprevs:
372 if repo[rev].date()[0] <= backuptime:
373 singleunbackeduprev = rev
374 count += 1
375 if count > 0:
376 if count > 1:
377 ui.warn(_('note: %d changesets are not backed up.\n') % count)
378 else:
379 ui.warn(_('note: changeset %s is not backed up.\n') %
380 short(repo[singleunbackeduprev].node()))
381 ui.warn(_('Run `hg pushbackup` to perform a backup. If this fails,\n'
382 'please report to the Source Control @ FB group.\n'))
383
384 def _autobackupruncommandwrapper(orig, lui, repo, cmd, fullargs, *args):
385 '''
386 If this wrapper is enabled then an auto backup is started after every
387 command that modifies a repository.
388 Since we don't want to start an auto backup after read-only commands,
389 this wrapper checks whether the command opened at least one transaction.
390 If it did, a background backup is started.
391 '''
392
393 # For chg, do not wrap the "serve" runcommand call
394 if 'CHGINTERNALMARK' in encoding.environ:
395 return orig(lui, repo, cmd, fullargs, *args)
396
397 try:
398 return orig(lui, repo, cmd, fullargs, *args)
399 finally:
400 if getattr(repo, 'txnwasopened', False) \
401 and not getattr(repo, 'ignoreautobackup', False):
402 lui.debug("starting infinitepush autobackup in the background\n")
403 _dobackgroundbackup(lui, repo)
404
405 def _transaction(orig, self, *args, **kwargs):
406 ''' Wrapper that records if a transaction was opened.
407
408 If a transaction was opened then we want to start a background backup
409 process, so this hook records the fact that a transaction was opened.
410 '''
411 self.txnwasopened = True
412 return orig(self, *args, **kwargs)
413
414 def _backupheads(ui, repo):
415 """Returns the set of heads that should be backed up in this repo."""
416 maxheadstobackup = ui.configint('infinitepushbackup',
417 'maxheadstobackup', -1)
418
419 revset = 'heads(draft()) & not obsolete()'
420
421 backupheads = [ctx.hex() for ctx in repo.set(revset)]
422 if maxheadstobackup > 0:
423 backupheads = backupheads[-maxheadstobackup:]
424 elif maxheadstobackup == 0:
425 backupheads = []
426 return set(backupheads)
427
428 def _dobackup(ui, repo, dest, **opts):
429 ui.status(_('starting backup %s\n') % time.strftime('%H:%M:%S %d %b %Y %Z'))
430 start = time.time()
431 # to handle multiple working copies correctly
432 repo = shareutil.getsrcrepo(repo)
433 currentbkpgenerationvalue = _readbackupgenerationfile(repo.vfs)
434 newbkpgenerationvalue = ui.configint('infinitepushbackup',
435 'backupgeneration', 0)
436 if currentbkpgenerationvalue != newbkpgenerationvalue:
437 # Unlinking the local backup state will trigger a full re-backup
438 _deletebackupstate(repo)
439 _writebackupgenerationfile(repo.vfs, newbkpgenerationvalue)
440 bkpstate = _readlocalbackupstate(ui, repo)
441
442 # this variable stores the local store info (tip numeric revision and date)
443 # which we use to quickly tell if our backup is stale
444 afterbackupinfo = _getlocalinfo(repo)
445
446 # This variable will store what heads will be saved in backup state file
447 # if backup finishes successfully
448 afterbackupheads = _backupheads(ui, repo)
449 other = _getremote(repo, ui, dest, **opts)
450 outgoing, badhexnodes = _getrevstobackup(repo, ui, other,
451 afterbackupheads - bkpstate.heads)
452 # If the remotefilelog extension is enabled then there can be nodes that
453 # we can't back up. In this case let's remove them from afterbackupheads
454 afterbackupheads.difference_update(badhexnodes)
455
456 # Like afterbackupheads, this variable stores the bookmarks that will be
457 # saved in the backup state file if the backup finishes successfully
458 afterbackuplocalbooks = _getlocalbookmarks(repo)
459 afterbackuplocalbooks = _filterbookmarks(
460 afterbackuplocalbooks, repo, afterbackupheads)
461
462 newheads = afterbackupheads - bkpstate.heads
463 removedheads = bkpstate.heads - afterbackupheads
464 newbookmarks = _dictdiff(afterbackuplocalbooks, bkpstate.localbookmarks)
465 removedbookmarks = _dictdiff(bkpstate.localbookmarks, afterbackuplocalbooks)
466
467 namingmgr = BackupBookmarkNamingManager(ui, repo)
468 bookmarkstobackup = _getbookmarkstobackup(
469 repo, newbookmarks, removedbookmarks,
470 newheads, removedheads, namingmgr)
471
472 # Special case: if the backup state is empty, clean all backup bookmarks
473 # from the server.
474 if bkpstate.empty():
475 bookmarkstobackup[namingmgr.getbackupheadprefix()] = ''
476 bookmarkstobackup[namingmgr.getbackupbookmarkprefix()] = ''
477
478 # Wrap deltaparent function to make sure that bundle takes less space
479 # See _deltaparent comments for details
480 extensions.wrapfunction(changegroup.cg2packer, 'deltaparent', _deltaparent)
481 try:
482 bundler = _createbundler(ui, repo, other)
483 bundler.addparam("infinitepush", "True")
484 backup = False
485 if outgoing and outgoing.missing:
486 backup = True
487 parts = getscratchbranchparts(repo, other, outgoing,
488 confignonforwardmove=False,
489 ui=ui, bookmark=None,
490 create=False)
491 for part in parts:
492 bundler.addpart(part)
493
494 if bookmarkstobackup:
495 backup = True
496 bundler.addpart(getscratchbookmarkspart(other, bookmarkstobackup))
497
498 if backup:
499 _sendbundle(bundler, other)
500 _writelocalbackupstate(repo.vfs, afterbackupheads,
501 afterbackuplocalbooks)
502 if ui.config('infinitepushbackup', 'savelatestbackupinfo'):
503 _writelocalbackupinfo(repo.vfs, **afterbackupinfo)
504 else:
505 ui.status(_('nothing to backup\n'))
506 finally:
507 # cleanup ensures that all pipes are flushed
508 cleanup = getattr(other, '_cleanup', None) or getattr(other, 'cleanup')
509 try:
510 cleanup()
511 except Exception:
512 ui.warn(_('remote connection cleanup failed\n'))
513 ui.status(_('finished in %f seconds\n') % (time.time() - start))
514 extensions.unwrapfunction(changegroup.cg2packer, 'deltaparent',
515 _deltaparent)
516 return 0
517
518 def _dobackgroundbackup(ui, repo, dest=None):
519 background_cmd = ['hg', 'pushbackup']
520 if dest:
521 background_cmd.append(dest)
522 logfile = None
523 logdir = ui.config('infinitepushbackup', 'logdir')
524 if logdir:
525 # make newly created files and dirs non-writable
526 oldumask = os.umask(0o022)
527 try:
528 try:
529 username = util.shortuser(ui.username())
530 except Exception:
531 username = 'unknown'
532
533 if not _checkcommonlogdir(logdir):
534 raise WrongPermissionsException(logdir)
535
536 userlogdir = os.path.join(logdir, username)
537 util.makedirs(userlogdir)
538
539 if not _checkuserlogdir(userlogdir):
540 raise WrongPermissionsException(userlogdir)
541
542 reporoot = repo.origroot
543 reponame = os.path.basename(reporoot)
544 _removeoldlogfiles(userlogdir, reponame)
545 logfile = _getlogfilename(logdir, username, reponame)
546 except (OSError, IOError) as e:
547 ui.debug('infinitepush backup log is disabled: %s\n' % e)
548 except WrongPermissionsException as e:
549 ui.debug(('%s directory has incorrect permission, ' +
550 'infinitepush backup logging will be disabled\n') %
551 e.logdir)
552 finally:
553 os.umask(oldumask)
554
555 if not logfile:
556 logfile = os.devnull
557
558 with open(logfile, 'a') as f:
559 subprocess.Popen(background_cmd, shell=False, stdout=f,
560 stderr=subprocess.STDOUT)
561
562 def _dobackupcheck(bkpstate, ui, repo, dest, **opts):
563 remotehexnodes = sorted(
564 set(bkpstate.heads).union(bkpstate.localbookmarks.values()))
565 if not remotehexnodes:
566 return True
567 other = _getremote(repo, ui, dest, **opts)
568 batch = other.iterbatch()
569 for hexnode in remotehexnodes:
570 batch.lookup(hexnode)
571 batch.submit()
572 lookupresults = batch.results()
573 i = 0
574 try:
575 for i, r in enumerate(lookupresults):
576 # iterate over results to make it throw if revision
577 # was not found
578 pass
579 return True
580 except error.RepoError:
581 ui.warn(_('unknown revision %r\n') % remotehexnodes[i])
582 return False
583
584 _backuplatestinfofile = 'infinitepushlatestbackupinfo'
585 _backupstatefile = 'infinitepushbackupstate'
586 _backupgenerationfile = 'infinitepushbackupgeneration'
587
588 # Common helper functions
589 def _getlocalinfo(repo):
590 localinfo = {}
591 localinfo['rev'] = repo[repo.changelog.tip()].rev()
592 localinfo['time'] = int(time.time())
593 return localinfo
594
595 def _getlocalbookmarks(repo):
596 localbookmarks = {}
597 for bookmark, node in repo._bookmarks.iteritems():
598 hexnode = hex(node)
599 localbookmarks[bookmark] = hexnode
600 return localbookmarks
601
602 def _filterbookmarks(localbookmarks, repo, headstobackup):
603 '''Filters out some bookmarks from being backed up
604
605 Filters out bookmarks that point neither to ancestors of headstobackup
606 nor to public commits
607 '''
608
609 headrevstobackup = [repo[hexhead].rev() for hexhead in headstobackup]
610 ancestors = repo.changelog.ancestors(headrevstobackup, inclusive=True)
611 filteredbooks = {}
612 for bookmark, hexnode in localbookmarks.iteritems():
613 if (repo[hexnode].rev() in ancestors or
614 repo[hexnode].phase() == phases.public):
615 filteredbooks[bookmark] = hexnode
616 return filteredbooks
617
618 def _downloadbackupstate(ui, other, sourcereporoot, sourcehostname, namingmgr):
619 pattern = namingmgr.getcommonuserprefix()
620 fetchedbookmarks = other.listkeyspatterns('bookmarks', patterns=[pattern])
621 allbackupstates = collections.defaultdict(backupstate)
622 for book, hexnode in fetchedbookmarks.iteritems():
623 parsed = _parsebackupbookmark(book, namingmgr)
624 if parsed:
625 if sourcereporoot and sourcereporoot != parsed.reporoot:
626 continue
627 if sourcehostname and sourcehostname != parsed.hostname:
628 continue
629 key = (parsed.hostname, parsed.reporoot)
630 if parsed.localbookmark:
631 bookname = parsed.localbookmark
632 allbackupstates[key].localbookmarks[bookname] = hexnode
633 else:
634 allbackupstates[key].heads.add(hexnode)
635 else:
636 ui.warn(_('wrong format of backup bookmark: %s\n') % book)
637
638 return allbackupstates
639
640 def _checkbackupstates(allbackupstates):
641 if len(allbackupstates) == 0:
642 raise error.Abort('no backups found!')
643
644 hostnames = set(key[0] for key in allbackupstates.iterkeys())
645 reporoots = set(key[1] for key in allbackupstates.iterkeys())
646
647 if len(hostnames) > 1:
648 raise error.Abort(
649 _('ambiguous hostname to restore: %s') % sorted(hostnames),
650 hint=_('set --hostname to disambiguate'))
651
652 if len(reporoots) > 1:
653 raise error.Abort(
654 _('ambiguous repo root to restore: %s') % sorted(reporoots),
655 hint=_('set --reporoot to disambiguate'))
656
657 class BackupBookmarkNamingManager(object):
658 def __init__(self, ui, repo, username=None):
659 self.ui = ui
660 self.repo = repo
661 if not username:
662 username = util.shortuser(ui.username())
663 self.username = username
664
665 self.hostname = self.ui.config('infinitepushbackup', 'hostname')
666 if not self.hostname:
667 self.hostname = socket.gethostname()
668
669 def getcommonuserprefix(self):
670 return '/'.join((self._getcommonuserprefix(), '*'))
671
672 def getcommonprefix(self):
673 return '/'.join((self._getcommonprefix(), '*'))
674
675 def getbackupbookmarkprefix(self):
676 return '/'.join((self._getbackupbookmarkprefix(), '*'))
677
678 def getbackupbookmarkname(self, bookmark):
679 bookmark = _escapebookmark(bookmark)
680 return '/'.join((self._getbackupbookmarkprefix(), bookmark))
681
682 def getbackupheadprefix(self):
683 return '/'.join((self._getbackupheadprefix(), '*'))
684
685 def getbackupheadname(self, hexhead):
686 return '/'.join((self._getbackupheadprefix(), hexhead))
687
688 def _getbackupbookmarkprefix(self):
689 return '/'.join((self._getcommonprefix(), 'bookmarks'))
690
691 def _getbackupheadprefix(self):
692 return '/'.join((self._getcommonprefix(), 'heads'))
693
694 def _getcommonuserprefix(self):
695 return '/'.join(('infinitepush', 'backups', self.username))
696
697 def _getcommonprefix(self):
698 reporoot = self.repo.origroot
699
700 result = '/'.join((self._getcommonuserprefix(), self.hostname))
701 if not reporoot.startswith('/'):
702 result += '/'
703 result += reporoot
704 if result.endswith('/'):
705 result = result[:-1]
706 return result
707
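# Illustrative names produced by the manager above, for a hypothetical user
# 'jdoe' on host 'devhost' with repo root '/home/jdoe/repo':
#
#   getcommonuserprefix()  -> 'infinitepush/backups/jdoe/*'
#   getbackupheadprefix()  -> 'infinitepush/backups/jdoe/devhost/home/jdoe/repo/heads/*'
#   getbackupbookmarkname('foo')
#       -> 'infinitepush/backups/jdoe/devhost/home/jdoe/repo/bookmarks/foo'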
708 def _escapebookmark(bookmark):
709 '''
710 If `bookmark` contains "bookmarks" as a substring then replace it with
711 "bookmarksbookmarks". This will make parsing remote bookmark name
712 unambiguous.
713 '''
714
715 bookmark = encoding.fromlocal(bookmark)
716 return bookmark.replace('bookmarks', 'bookmarksbookmarks')
717
718 def _unescapebookmark(bookmark):
719 bookmark = encoding.tolocal(bookmark)
720 return bookmark.replace('bookmarksbookmarks', 'bookmarks')
721
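# Illustrative round-trip (hypothetical bookmark name):
#
#   _escapebookmark('feature/bookmarks')            -> 'feature/bookmarksbookmarks'
#   _unescapebookmark('feature/bookmarksbookmarks') -> 'feature/bookmarks'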
722 def _getremote(repo, ui, dest, **opts):
723 path = ui.paths.getpath(dest, default=('infinitepush', 'default'))
724 if not path:
725 raise error.Abort(_('default repository not configured!'),
726 hint=_("see 'hg help config.paths'"))
727 dest = path.pushloc or path.loc
728 return hg.peer(repo, opts, dest)
729
730 def _getcommandandoptions(command):
731 cmd = commands.table[command][0]
732 opts = dict(opt[1:3] for opt in commands.table[command][1])
733 return cmd, opts
734
735 # Backup helper functions
736
737 def _deltaparent(orig, self, revlog, rev, p1, p2, prev):
738 # This version of deltaparent prefers p1 over prev to use less space
739 dp = revlog.deltaparent(rev)
740 if dp == nullrev and not revlog.storedeltachains:
741 # send full snapshot only if revlog configured to do so
742 return nullrev
743 return p1
744
745 def _getbookmarkstobackup(repo, newbookmarks, removedbookmarks,
746 newheads, removedheads, namingmgr):
747 bookmarkstobackup = {}
748
749 for bookmark, hexnode in removedbookmarks.items():
750 backupbookmark = namingmgr.getbackupbookmarkname(bookmark)
751 bookmarkstobackup[backupbookmark] = ''
752
753 for bookmark, hexnode in newbookmarks.items():
754 backupbookmark = namingmgr.getbackupbookmarkname(bookmark)
755 bookmarkstobackup[backupbookmark] = hexnode
756
757 for hexhead in removedheads:
758 headbookmarksname = namingmgr.getbackupheadname(hexhead)
759 bookmarkstobackup[headbookmarksname] = ''
760
761 for hexhead in newheads:
762 headbookmarksname = namingmgr.getbackupheadname(hexhead)
763 bookmarkstobackup[headbookmarksname] = hexhead
764
765 return bookmarkstobackup
766
767 def _createbundler(ui, repo, other):
768 bundler = bundle2.bundle20(ui, bundle2.bundle2caps(other))
769 # Disallow pushback because we want to avoid taking repo locks.
770 # And we don't need pushback anyway
771 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo,
772 allowpushback=False))
773 bundler.newpart('replycaps', data=capsblob)
774 return bundler
775
776 def _sendbundle(bundler, other):
777 stream = util.chunkbuffer(bundler.getchunks())
778 try:
779 other.unbundle(stream, ['force'], other.url())
780 except error.BundleValueError as exc:
781 raise error.Abort(_('missing support for %s') % exc)
782
783 def findcommonoutgoing(repo, ui, other, heads):
784 if heads:
785 # Avoid using remotenames fastheaddiscovery heuristic. It uses
786 # remotenames file to quickly find commonoutgoing set, but it can
787 # result in sending public commits to infinitepush servers.
788 # For example:
789 #
790 # o draft
791 # /
792 # o C1
793 # |
794 # ...
795 # |
796 # o remote/master
797 #
798 # pushbackup in that case results in sending to the infinitepush server
799 # all public commits from 'remote/master' to C1. It increases size of
800 # the bundle + it may result in storing data about public commits
801 # in infinitepush table.
802
803 with ui.configoverride({("remotenames", "fastheaddiscovery"): False}):
804 nodes = map(repo.changelog.node, heads)
805 return discovery.findcommonoutgoing(repo, other, onlyheads=nodes)
806 else:
807 return None
808
809 def _getrevstobackup(repo, ui, other, headstobackup):
810 # In rare cases it's possible to have a local node without filelogs.
811 # This is possible if remotefilelog is enabled and if the node was
812 # stripped server-side. We want to filter out these bad nodes and all
813 # of their descendants.
814 badnodes = ui.configlist('infinitepushbackup', 'dontbackupnodes', [])
815 badnodes = [node for node in badnodes if node in repo]
816 badrevs = [repo[node].rev() for node in badnodes]
817 badnodesdescendants = repo.set('%ld::', badrevs) if badrevs else set()
818 badnodesdescendants = set(ctx.hex() for ctx in badnodesdescendants)
819 filteredheads = filter(lambda head: head in badnodesdescendants,
820 headstobackup)
821
822 if filteredheads:
823 ui.warn(_('filtering nodes: %s\n') % filteredheads)
824 ui.log('infinitepushbackup', 'corrupted nodes found',
825 infinitepushbackupcorruptednodes='failure')
826 headstobackup = filter(lambda head: head not in badnodesdescendants,
827 headstobackup)
828
829 revs = list(repo[hexnode].rev() for hexnode in headstobackup)
830 outgoing = findcommonoutgoing(repo, ui, other, revs)
831 nodeslimit = 1000
832 if outgoing and len(outgoing.missing) > nodeslimit:
833 # trying to push too many nodes usually means that there is a bug
834 # somewhere. Let's be safe and avoid pushing too many nodes at once
835 raise error.Abort('trying to back up too many nodes: %d' %
836 (len(outgoing.missing),))
837 return outgoing, set(filteredheads)
838
839 def _localbackupstateexists(repo):
840 return repo.vfs.exists(_backupstatefile)
841
842 def _deletebackupstate(repo):
843 return repo.vfs.tryunlink(_backupstatefile)
844
845 def _readlocalbackupstate(ui, repo):
846 repo = shareutil.getsrcrepo(repo)
847 if not _localbackupstateexists(repo):
848 return backupstate()
849
850 with repo.vfs(_backupstatefile) as f:
851 try:
852 state = json.loads(f.read())
853 if (not isinstance(state['bookmarks'], dict) or
854 not isinstance(state['heads'], list)):
855 raise ValueError('bad types of bookmarks or heads')
856
857 result = backupstate()
858 result.heads = set(map(str, state['heads']))
859 result.localbookmarks = state['bookmarks']
860 return result
861 except (ValueError, KeyError, TypeError) as e:
862 ui.warn(_('corrupt file: %s (%s)\n') % (_backupstatefile, e))
863 return backupstate()
864 return backupstate()
865
866 def _writelocalbackupstate(vfs, heads, bookmarks):
867 with vfs(_backupstatefile, 'w') as f:
868 f.write(json.dumps({'heads': list(heads), 'bookmarks': bookmarks}))
869
870 def _readbackupgenerationfile(vfs):
871 try:
872 with vfs(_backupgenerationfile) as f:
873 return int(f.read())
874 except (IOError, OSError, ValueError):
875 return 0
876
877 def _writebackupgenerationfile(vfs, backupgenerationvalue):
878 with vfs(_backupgenerationfile, 'w', atomictemp=True) as f:
879 f.write(str(backupgenerationvalue))
880
881 def _writelocalbackupinfo(vfs, rev, time):
882 with vfs(_backuplatestinfofile, 'w', atomictemp=True) as f:
883 f.write(('backuprevision=%d\nbackuptime=%d\n') % (rev, time))
884
885 # Restore helper functions
886 def _parsebackupbookmark(backupbookmark, namingmgr):
887 '''Parses backup bookmark and returns info about it
888
889 Backup bookmark may represent either a local bookmark or a head.
890 Returns None if the backup bookmark has the wrong format, otherwise a
891 tuple. The first entry is the hostname this bookmark came from.
892 The second entry is the root of the repo this bookmark came from.
893 The third entry is the local bookmark name if the backup bookmark
894 represents a local bookmark, and None otherwise.
895 '''
896
897 backupbookmarkprefix = namingmgr._getcommonuserprefix()
898 commonre = '^{0}/([-\w.]+)(/.*)'.format(re.escape(backupbookmarkprefix))
899 bookmarkre = commonre + '/bookmarks/(.*)$'
900 headsre = commonre + '/heads/[a-f0-9]{40}$'
901
902 match = re.search(bookmarkre, backupbookmark)
903 if not match:
904 match = re.search(headsre, backupbookmark)
905 if not match:
906 return None
907 # It's a local head not a local bookmark.
908 # That's why localbookmark is None
909 return backupbookmarktuple(hostname=match.group(1),
910 reporoot=match.group(2),
911 localbookmark=None)
912
913 return backupbookmarktuple(hostname=match.group(1),
914 reporoot=match.group(2),
915 localbookmark=_unescapebookmark(match.group(3)))
916
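# Illustrative parse (hypothetical user/host/path; namingmgr built for user
# 'jdoe'):
#
#   _parsebackupbookmark(
#       'infinitepush/backups/jdoe/devhost/home/jdoe/repo/bookmarks/foo',
#       namingmgr)
#   -> backupbookmarktuple(hostname='devhost',
#                          reporoot='/home/jdoe/repo',
#                          localbookmark='foo')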
917 _timeformat = '%Y%m%d'
918
919 def _getlogfilename(logdir, username, reponame):
920 '''Returns the name of the log file for a particular user and repo
921
922 Different users have different directories inside logdir. Log filename
923 consists of reponame (basename of repo path) and current day
924 (see _timeformat). That means that two different repos with the same name
925 can share the same log file. This is not a big problem so we ignore it.
926 '''
927
928 currentday = time.strftime(_timeformat)
929 return os.path.join(logdir, username, reponame + currentday)
930
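# Illustrative result (hypothetical values): for username 'jdoe', reponame
# 'repo' and current day 2017-01-15, the log file is
# '<logdir>/jdoe/repo20170115'.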
931 def _removeoldlogfiles(userlogdir, reponame):
932 existinglogfiles = []
933 for entry in osutil.listdir(userlogdir):
934 filename = entry[0]
935 fullpath = os.path.join(userlogdir, filename)
936 if filename.startswith(reponame) and os.path.isfile(fullpath):
937 try:
938 time.strptime(filename[len(reponame):], _timeformat)
939 except ValueError:
940 continue
941 existinglogfiles.append(filename)
942
943 # _timeformat has the property that sorting log file names in
944 # descending order puts newer files at the beginning
945 existinglogfiles = sorted(existinglogfiles, reverse=True)
946 # Keep only the 5 newest log files and delete the rest
947 maxlogfilenumber = 5
948 if len(existinglogfiles) > maxlogfilenumber:
949 for filename in existinglogfiles[maxlogfilenumber:]:
950 os.unlink(os.path.join(userlogdir, filename))
951
952 def _checkcommonlogdir(logdir):
953 '''Checks permissions of the log directory
954
955 We want the log directory to actually be a directory and to have the
956 restricted deletion flag (sticky bit) set
957 '''
958
959 try:
960 st = os.stat(logdir)
961 return stat.S_ISDIR(st.st_mode) and st.st_mode & stat.S_ISVTX
962 except OSError:
963 # is raised by os.stat()
964 return False
965
966 def _checkuserlogdir(userlogdir):
967 '''Checks permissions of the user log directory
968
969 We want the user log directory to be writable only by the user who
970 created it and to be owned by the current user
971 '''
972
973 try:
974 st = os.stat(userlogdir)
975 # Check that `userlogdir` is owned by the current user
976 if os.getuid() != st.st_uid:
977 return False
978 return ((st.st_mode & (stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)) ==
979 stat.S_IWUSR)
980 except OSError:
981 # is raised by os.stat()
982 return False
983
984 def _dictdiff(first, second):
985 '''Returns a new dict containing the items from the first dict whose
986 values are missing from or different in the second dict.
987 '''
988 result = {}
989 for book, hexnode in first.items():
990 if second.get(book) != hexnode:
991 result[book] = hexnode
992 return result
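# Illustrative behaviour (hypothetical dicts):
#
#   _dictdiff({'a': '1', 'b': '2'}, {'a': '1', 'b': '3'}) -> {'b': '2'}
#   _dictdiff({'a': '1'}, {})                             -> {'a': '1'}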
@@ -0,0 +1,143 b''
1 # Copyright 2017 Facebook, Inc.
2 #
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
5
6 from __future__ import absolute_import
7
8 from mercurial.i18n import _
9
10 from mercurial import (
11 bundle2,
12 changegroup,
13 error,
14 extensions,
15 revsetlang,
16 util,
17 )
18
19 from . import common
20
21 encodebookmarks = common.encodebookmarks
22 isremotebooksenabled = common.isremotebooksenabled
23
24 scratchbranchparttype = 'b2x:infinitepush'
25 scratchbookmarksparttype = 'b2x:infinitepushscratchbookmarks'
26
27 def getscratchbranchparts(repo, peer, outgoing, confignonforwardmove,
28 ui, bookmark, create):
29 if not outgoing.missing:
30 raise error.Abort(_('no commits to push'))
31
32 if scratchbranchparttype not in bundle2.bundle2caps(peer):
33 raise error.Abort(_('no server support for %r') % scratchbranchparttype)
34
35 _validaterevset(repo, revsetlang.formatspec('%ln', outgoing.missing),
36 bookmark)
37
38 supportedversions = changegroup.supportedoutgoingversions(repo)
39 # Explicitly avoid using '01' changegroup version in infinitepush to
40 # support general delta
41 supportedversions.discard('01')
42 cgversion = min(supportedversions)
43 _handlelfs(repo, outgoing.missing)
44 cg = changegroup.makestream(repo, outgoing, cgversion, 'push')
45
46 params = {}
47 params['cgversion'] = cgversion
48 if bookmark:
49 params['bookmark'] = bookmark
50 # 'bookprevnode' is necessary for the pushkey reply part
51 params['bookprevnode'] = ''
52 if bookmark in repo:
53 params['bookprevnode'] = repo[bookmark].hex()
54 if create:
55 params['create'] = '1'
56 if confignonforwardmove:
57 params['force'] = '1'
58
59 # Do not send pushback bundle2 part with bookmarks if remotenames extension
60 # is enabled. It will be handled manually in `_push()`
61 if not isremotebooksenabled(ui):
62 params['pushbackbookmarks'] = '1'
63
64 parts = []
65
66 # .upper() marks this as a mandatory part: server will abort if there's no
67 # handler
68 parts.append(bundle2.bundlepart(
69 scratchbranchparttype.upper(),
70 advisoryparams=params.iteritems(),
71 data=cg))
72
73 try:
74 treemod = extensions.find('treemanifest')
75 mfnodes = []
76 for node in outgoing.missing:
77 mfnodes.append(('', repo[node].manifestnode()))
78
79 # Only include the tree parts if they all exist
80 if not repo.manifestlog.datastore.getmissing(mfnodes):
81 parts.append(treemod.createtreepackpart(
82 repo, outgoing, treemod.TREEGROUP_PARTTYPE2))
83 except KeyError:
84 pass
85
86 return parts
87
88 def getscratchbookmarkspart(peer, bookmarks):
89 if scratchbookmarksparttype not in bundle2.bundle2caps(peer):
90 raise error.Abort(
91 _('no server support for %r') % scratchbookmarksparttype)
92
93 return bundle2.bundlepart(
94 scratchbookmarksparttype.upper(),
95 data=encodebookmarks(bookmarks))
96
97 def _validaterevset(repo, revset, bookmark):
98 """Abort if the revs to be pushed aren't valid for a scratch branch."""
99 if not repo.revs(revset):
100 raise error.Abort(_('nothing to push'))
101 if bookmark:
102 # Allow bundle with many heads only if no bookmark is specified
103 heads = repo.revs('heads(%r)', revset)
104 if len(heads) > 1:
105 raise error.Abort(
106 _('cannot push more than one head to a scratch branch'))
107
108 def _handlelfs(repo, missing):
109 '''Special case if lfs is enabled
110
111 If lfs is enabled then we need to call the prepush hook
112 to make sure large files are uploaded to lfs
113 '''
114 try:
115 lfsmod = extensions.find('lfs')
116 lfsmod.wrapper.uploadblobsfromrevs(repo, missing)
117 except KeyError:
118 # Ignore if lfs extension is not enabled
119 return
120
121 class copiedpart(object):
122 """a copy of unbundlepart content that can be consumed later"""
123
124 def __init__(self, part):
125 # copy "public properties"
126 self.type = part.type
127 self.id = part.id
128 self.mandatory = part.mandatory
129 self.mandatoryparams = part.mandatoryparams
130 self.advisoryparams = part.advisoryparams
131 self.params = part.params
132 self.mandatorykeys = part.mandatorykeys
133 # copy the buffer
134 self._io = util.stringio(part.read())
135
136 def consume(self):
137 return
138
139 def read(self, size=None):
140 if size is None:
141 return self._io.read()
142 else:
143 return self._io.read(size)
@@ -0,0 +1,58 b''
1 # Copyright 2017 Facebook, Inc.
2 #
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
5
6 from __future__ import absolute_import
7
8 import json
9 import os
10 import struct
11 import tempfile
12
13 from mercurial.node import hex
14
15 from mercurial import (
16 error,
17 extensions,
18 )
19
20 def isremotebooksenabled(ui):
21 return ('remotenames' in extensions._extensions and
22 ui.configbool('remotenames', 'bookmarks'))
23
24 def encodebookmarks(bookmarks):
25 encoded = {}
26 for bookmark, node in bookmarks.iteritems():
27 encoded[bookmark] = node
28 dumped = json.dumps(encoded)
29 result = struct.pack('>i', len(dumped)) + dumped
30 return result
31
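# A minimal decoding sketch for the format produced above (a big-endian
# 4-byte length prefix followed by a JSON payload); `stream` is a
# hypothetical file-like object:
#
#   def decodebookmarks(stream):
#       rawlen = stream.read(4)
#       length = struct.unpack('>i', rawlen)[0]
#       return json.loads(stream.read(length))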
32 def downloadbundle(repo, unknownbinhead):
33 index = repo.bundlestore.index
34 store = repo.bundlestore.store
35 bundleid = index.getbundle(hex(unknownbinhead))
36 if bundleid is None:
37 raise error.Abort('%s head is not known' % hex(unknownbinhead))
38 bundleraw = store.read(bundleid)
39 return _makebundlefromraw(bundleraw)
40
41 def _makebundlefromraw(data):
42 fp = None
43 fd, bundlefile = tempfile.mkstemp()
44 try: # guards bundlefile
45 try: # guards fp
46 fp = os.fdopen(fd, 'wb')
47 fp.write(data)
48 finally:
49 fp.close()
50 except Exception:
51 try:
52 os.unlink(bundlefile)
53 except Exception:
54 # we would rather see the original exception
55 pass
56 raise
57
58 return bundlefile
@@ -0,0 +1,107 b''
1 # Infinite push
2 #
3 # Copyright 2016 Facebook, Inc.
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7 """
8 [infinitepush]
9 # Server-side option. Used only if indextype=disk.
10 # Filesystem path to the index store
11 indexpath = PATH
12 """
13
14 from __future__ import absolute_import
15
16 import os
17
18 from mercurial import util
19
20 from . import indexapi
21
22 class fileindexapi(indexapi.indexapi):
23 def __init__(self, repo):
24 super(fileindexapi, self).__init__()
25 self._repo = repo
26 root = repo.ui.config('infinitepush', 'indexpath')
27 if not root:
28 root = os.path.join('scratchbranches', 'index')
29
30 self._nodemap = os.path.join(root, 'nodemap')
31 self._bookmarkmap = os.path.join(root, 'bookmarkmap')
32 self._metadatamap = os.path.join(root, 'nodemetadatamap')
33 self._lock = None
34
35 def __enter__(self):
36 self._lock = self._repo.wlock()
37 return self
38
39 def __exit__(self, exc_type, exc_val, exc_tb):
40 if self._lock:
41 self._lock.__exit__(exc_type, exc_val, exc_tb)
42
43 def addbundle(self, bundleid, nodesctx):
44 for node in nodesctx:
45 nodepath = os.path.join(self._nodemap, node.hex())
46 self._write(nodepath, bundleid)
47
48 def addbookmark(self, bookmark, node):
49 bookmarkpath = os.path.join(self._bookmarkmap, bookmark)
50 self._write(bookmarkpath, node)
51
52 def addmanybookmarks(self, bookmarks):
53 for bookmark, node in bookmarks.items():
54 self.addbookmark(bookmark, node)
55
56 def deletebookmarks(self, patterns):
57 for pattern in patterns:
58 for bookmark, _ in self._listbookmarks(pattern):
59 bookmarkpath = os.path.join(self._bookmarkmap, bookmark)
60 self._delete(bookmarkpath)
61
62 def getbundle(self, node):
63 nodepath = os.path.join(self._nodemap, node)
64 return self._read(nodepath)
65
66 def getnode(self, bookmark):
67 bookmarkpath = os.path.join(self._bookmarkmap, bookmark)
68 return self._read(bookmarkpath)
69
70 def getbookmarks(self, query):
71 return dict(self._listbookmarks(query))
72
73 def saveoptionaljsonmetadata(self, node, jsonmetadata):
74 vfs = self._repo.vfs
75 vfs.write(os.path.join(self._metadatamap, node), jsonmetadata)
76
77 def _listbookmarks(self, pattern):
78 if pattern.endswith('*'):
79 pattern = 're:^' + pattern[:-1] + '.*'
80 kind, pat, matcher = util.stringmatcher(pattern)
81 prefixlen = len(self._bookmarkmap) + 1
82 for dirpath, _, books in self._repo.vfs.walk(self._bookmarkmap):
83 for book in books:
84 bookmark = os.path.join(dirpath, book)[prefixlen:]
85 if not matcher(bookmark):
86 continue
87 yield bookmark, self._read(os.path.join(dirpath, book))
88
89 def _write(self, path, value):
90 vfs = self._repo.vfs
91 dirname = vfs.dirname(path)
92 if not vfs.exists(dirname):
93 vfs.makedirs(dirname)
94
95 vfs.write(path, value)
96
97 def _read(self, path):
98 vfs = self._repo.vfs
99 if not vfs.exists(path):
100 return None
101 return vfs.read(path)
102
103 def _delete(self, path):
104 vfs = self._repo.vfs
105 if not vfs.exists(path):
106 return
107 return vfs.unlink(path)
@@ -0,0 +1,70 b''
1 # Infinite push
2 #
3 # Copyright 2016 Facebook, Inc.
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from __future__ import absolute_import
9
10 class indexapi(object):
11 """Class that manages access to infinitepush index.
12
13 This class is a context manager and all write operations (like
14 deletebookmarks, addbookmark etc) should use `with` statement:
15
16 with index:
17 index.deletebookmarks(...)
18 ...
19 """
20
21 def __init__(self):
22 """Initializes the metadata store connection."""
23
24 def close(self):
25 """Cleans up the metadata store connection."""
26
27 def __enter__(self):
28 return self
29
30 def __exit__(self, exc_type, exc_val, exc_tb):
31 pass
32
33 def addbundle(self, bundleid, nodesctx):
34 """Takes a bundleid and a list of node contexts for each node
35 in that bundle and records that."""
36 raise NotImplementedError()
37
38 def addbookmark(self, bookmark, node):
39 """Takes a bookmark name and hash, and records mapping in the metadata
40 store."""
41 raise NotImplementedError()
42
43 def addmanybookmarks(self, bookmarks):
44 """Takes a dict with mapping from bookmark to hash and records mapping
45 in the metadata store."""
46 raise NotImplementedError()
47
48 def deletebookmarks(self, patterns):
49 """Accepts list of bookmarks and deletes them.
50 """
51 raise NotImplementedError()
52
53 def getbundle(self, node):
54 """Returns the bundleid for the bundle that contains the given node."""
55 raise NotImplementedError()
56
57 def getnode(self, bookmark):
58 """Returns the node for the given bookmark. None if it doesn't exist."""
59 raise NotImplementedError()
60
61 def getbookmarks(self, query):
62 """Returns bookmarks that match the query"""
63 raise NotImplementedError()
64
65 def saveoptionaljsonmetadata(self, node, jsonmetadata):
66 """Saves optional metadata for a given node"""
67 raise NotImplementedError()
68
69 class indexexception(Exception):
70 pass
@@ -0,0 +1,102 b''
1 # Copyright 2016 Facebook, Inc.
2 #
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
5 """
6 config::
7
8 [infinitepush]
9 # limit number of files in the node metadata. This is to make sure we don't
10 # waste too much space on huge codemod commits.
11 metadatafilelimit = 100
12 """
13
14 from __future__ import absolute_import
15
16 import json
17
18 from mercurial.node import bin
19 from mercurial.i18n import _
20
21 from mercurial import (
22 copies as copiesmod,
23 encoding,
24 error,
25 hg,
26 patch,
27 registrar,
28 scmutil,
29 util,
30 )
31
32 from . import (
33 backupcommands,
34 common,
35 )
36
37 downloadbundle = common.downloadbundle
38
39 cmdtable = backupcommands.cmdtable
40 command = registrar.command(cmdtable)
41
42 @command('debugfillinfinitepushmetadata',
43 [('', 'node', [], 'node to fill metadata for')])
44 def debugfillinfinitepushmetadata(ui, repo, **opts):
45 '''Special command that fills infinitepush metadata for a node
46 '''
47
48 nodes = opts['node']
49 if not nodes:
50 raise error.Abort(_('nodes are not specified'))
51
52 filelimit = ui.configint('infinitepush', 'metadatafilelimit', 100)
53 nodesmetadata = {}
54 for node in nodes:
55 index = repo.bundlestore.index
56 if not bool(index.getbundle(node)):
57 raise error.Abort(_('node %s is not found') % node)
58
59 if node not in repo:
60 newbundlefile = downloadbundle(repo, bin(node))
61 bundlepath = "bundle:%s+%s" % (repo.root, newbundlefile)
62 bundlerepo = hg.repository(ui, bundlepath)
63 repo = bundlerepo
64
65 p1 = repo[node].p1().node()
66 diffopts = patch.diffallopts(ui, {})
67 match = scmutil.matchall(repo)
68 chunks = patch.diff(repo, p1, node, match, None, diffopts, relroot='')
69 difflines = util.iterlines(chunks)
70
71 states = 'modified added removed deleted unknown ignored clean'.split()
72 status = repo.status(p1, node)
73 status = zip(states, status)
74
75 filestatus = {}
76 for state, files in status:
77 for f in files:
78 filestatus[f] = state
79
80 diffstat = patch.diffstatdata(difflines)
81 changed_files = {}
82 copies = copiesmod.pathcopies(repo[p1], repo[node])
83 for filename, adds, removes, isbinary in diffstat[:filelimit]:
84 # use special encoding that allows non-utf8 filenames
85 filename = encoding.jsonescape(filename, paranoid=True)
86 changed_files[filename] = {
87 'adds': adds, 'removes': removes, 'isbinary': isbinary,
88 'status': filestatus.get(filename, 'unknown')
89 }
90 if filename in copies:
91 changed_files[filename]['copies'] = copies[filename]
92
93 output = {}
94 output['changed_files'] = changed_files
95 if len(diffstat) > filelimit:
96 output['changed_files_truncated'] = True
97 nodesmetadata[node] = output
98
99 with index:
100 for node, metadata in nodesmetadata.iteritems():
101 dumped = json.dumps(metadata, sort_keys=True)
102 index.saveoptionaljsonmetadata(node, dumped)
@@ -0,0 +1,33 b''
1 CREATE TABLE `bookmarkstonode` (
2 `node` varbinary(64) NOT NULL,
3 `bookmark` varbinary(512) NOT NULL,
4 `reponame` varbinary(255) NOT NULL,
5 PRIMARY KEY (`reponame`,`bookmark`)
6 ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
7
8 CREATE TABLE `bundles` (
9 `bundle` varbinary(512) NOT NULL,
10 `reponame` varbinary(255) NOT NULL,
11 PRIMARY KEY (`bundle`,`reponame`)
12 ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
13
14 CREATE TABLE `nodestobundle` (
15 `node` varbinary(64) NOT NULL,
16 `bundle` varbinary(512) NOT NULL,
17 `reponame` varbinary(255) NOT NULL,
18 PRIMARY KEY (`node`,`reponame`)
19 ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
20
21 CREATE TABLE `nodesmetadata` (
22 `node` varbinary(64) NOT NULL,
23 `message` mediumblob NOT NULL,
24 `p1` varbinary(64) NOT NULL,
25 `p2` varbinary(64) DEFAULT NULL,
26 `author` varbinary(255) NOT NULL,
27 `committer` varbinary(255) DEFAULT NULL,
28 `author_date` bigint(20) NOT NULL,
29 `committer_date` bigint(20) DEFAULT NULL,
30 `reponame` varbinary(255) NOT NULL,
31 `optional_json_metadata` mediumblob,
32 PRIMARY KEY (`reponame`,`node`)
33 ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
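-- Illustrative lookups matching the queries issued by sqlindexapi (the node,
-- bookmark and reponame values are hypothetical):
--
--   SELECT bundle FROM nodestobundle
--   WHERE node = '20759b6926ce827d5a8c73eb1fa9726d6f7defb2' AND reponame = 'babar';
--
--   SELECT bookmark, node FROM bookmarkstonode
--   WHERE reponame = 'babar' AND bookmark LIKE 'scratch/%';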
@@ -0,0 +1,257 b''
1 # Infinite push
2 #
3 # Copyright 2016 Facebook, Inc.
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from __future__ import absolute_import
9
10 import logging
11 import os
12 import time
13
14 import warnings
15 import mysql.connector
16
17 from . import indexapi
18
19 def _convertbookmarkpattern(pattern):
20 pattern = pattern.replace('_', '\\_')
21 pattern = pattern.replace('%', '\\%')
22 if pattern.endswith('*'):
23 pattern = pattern[:-1] + '%'
24 return pattern
25
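# Illustrative conversions (hypothetical patterns); the backslash escapes a
# literal '_' so it is not treated as a SQL LIKE wildcard:
#
#   _convertbookmarkpattern('scratch/*')       -> 'scratch/%'
#   _convertbookmarkpattern('scratch/my_book') -> 'scratch/my\\_book'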
26 class sqlindexapi(indexapi.indexapi):
27 '''
28 Sql backend for infinitepush index. See schema.sql
29 '''
30
31 def __init__(self, reponame, host, port,
32 database, user, password, logfile, loglevel,
33 waittimeout=300, locktimeout=120):
34 super(sqlindexapi, self).__init__()
35 self.reponame = reponame
36 self.sqlargs = {
37 'host': host,
38 'port': port,
39 'database': database,
40 'user': user,
41 'password': password,
42 }
43 self.sqlconn = None
44 self.sqlcursor = None
45 if not logfile:
46 logfile = os.devnull
47 logging.basicConfig(filename=logfile)
48 self.log = logging.getLogger()
49 self.log.setLevel(loglevel)
50 self._connected = False
51 self._waittimeout = waittimeout
52 self._locktimeout = locktimeout
53
54 def sqlconnect(self):
55 if self.sqlconn:
56 raise indexapi.indexexception("SQL connection already open")
57 if self.sqlcursor:
58 raise indexapi.indexexception("SQL cursor already open without"
59 " connection")
60 retry = 3
61 while True:
62 try:
63 self.sqlconn = mysql.connector.connect(
64 force_ipv6=True, **self.sqlargs)
65
66 # Code is copy-pasted from hgsql. Bug fixes need to be
67 # back-ported!
68 # The default behavior is to return byte arrays, when we
69 # need strings. This custom convert returns strings.
70 self.sqlconn.set_converter_class(CustomConverter)
71 self.sqlconn.autocommit = False
72 break
73 except mysql.connector.errors.Error:
74 # mysql can be flaky occasionally, so do some minimal
75 # retrying.
76 retry -= 1
77 if retry == 0:
78 raise
79 time.sleep(0.2)
80
81 waittimeout = self.sqlconn.converter.escape('%s' % self._waittimeout)
82
83 self.sqlcursor = self.sqlconn.cursor()
84 self.sqlcursor.execute("SET wait_timeout=%s" % waittimeout)
85 self.sqlcursor.execute("SET innodb_lock_wait_timeout=%s" %
86 self._locktimeout)
87 self._connected = True
88
89 def close(self):
90 """Cleans up the metadata store connection."""
91 with warnings.catch_warnings():
92 warnings.simplefilter("ignore")
93 self.sqlcursor.close()
94 self.sqlconn.close()
95 self.sqlcursor = None
96 self.sqlconn = None
97
98 def __enter__(self):
99 if not self._connected:
100 self.sqlconnect()
101 return self
102
103 def __exit__(self, exc_type, exc_val, exc_tb):
104 if exc_type is None:
105 self.sqlconn.commit()
106 else:
107 self.sqlconn.rollback()
108
109 def addbundle(self, bundleid, nodesctx):
110 if not self._connected:
111 self.sqlconnect()
112 self.log.info("ADD BUNDLE %r %r" % (self.reponame, bundleid))
113 self.sqlcursor.execute(
114 "INSERT INTO bundles(bundle, reponame) VALUES "
115 "(%s, %s)", params=(bundleid, self.reponame))
116 for ctx in nodesctx:
117 self.sqlcursor.execute(
118 "INSERT INTO nodestobundle(node, bundle, reponame) "
119 "VALUES (%s, %s, %s) ON DUPLICATE KEY UPDATE "
120 "bundle=VALUES(bundle)",
121 params=(ctx.hex(), bundleid, self.reponame))
122
123 extra = ctx.extra()
124 author_name = ctx.user()
125 committer_name = extra.get('committer', ctx.user())
126 author_date = int(ctx.date()[0])
127 committer_date = int(extra.get('committer_date', author_date))
128 self.sqlcursor.execute(
129 "INSERT IGNORE INTO nodesmetadata(node, message, p1, p2, "
130 "author, committer, author_date, committer_date, "
131 "reponame) VALUES "
132 "(%s, %s, %s, %s, %s, %s, %s, %s, %s)",
133 params=(ctx.hex(), ctx.description(),
134 ctx.p1().hex(), ctx.p2().hex(), author_name,
135 committer_name, author_date, committer_date,
136 self.reponame)
137 )
138
139 def addbookmark(self, bookmark, node):
140 """Takes a bookmark name and hash, and records mapping in the metadata
141 store."""
142 if not self._connected:
143 self.sqlconnect()
144 self.log.info(
145 "ADD BOOKMARKS %r bookmark: %r node: %r" %
146 (self.reponame, bookmark, node))
147 self.sqlcursor.execute(
148 "INSERT INTO bookmarkstonode(bookmark, node, reponame) "
149 "VALUES (%s, %s, %s) ON DUPLICATE KEY UPDATE node=VALUES(node)",
150 params=(bookmark, node, self.reponame))
151
152 def addmanybookmarks(self, bookmarks):
153 if not self._connected:
154 self.sqlconnect()
155 args = []
156 values = []
157 for bookmark, node in bookmarks.iteritems():
158 args.append('(%s, %s, %s)')
159 values.extend((bookmark, node, self.reponame))
160 args = ','.join(args)
161
162 self.sqlcursor.execute(
163 "INSERT INTO bookmarkstonode(bookmark, node, reponame) "
164 "VALUES %s ON DUPLICATE KEY UPDATE node=VALUES(node)" % args,
165 params=values)
166
167 def deletebookmarks(self, patterns):
168 """Accepts list of bookmark patterns and deletes them.
169 The deletion only takes effect when the surrounding transaction
170 commits (see __exit__).
171 """
172 if not self._connected:
173 self.sqlconnect()
174 self.log.info("DELETE BOOKMARKS: %s" % patterns)
175 for pattern in patterns:
176 pattern = _convertbookmarkpattern(pattern)
177 self.sqlcursor.execute(
178 "DELETE from bookmarkstonode WHERE bookmark LIKE (%s) "
179 "and reponame = %s",
180 params=(pattern, self.reponame))
181
182 def getbundle(self, node):
183 """Returns the bundleid for the bundle that contains the given node."""
184 if not self._connected:
185 self.sqlconnect()
186 self.log.info("GET BUNDLE %r %r" % (self.reponame, node))
187 self.sqlcursor.execute(
188 "SELECT bundle from nodestobundle "
189 "WHERE node = %s AND reponame = %s", params=(node, self.reponame))
190 result = self.sqlcursor.fetchall()
191 if len(result) != 1 or len(result[0]) != 1:
192 self.log.info("No matching node")
193 return None
194 bundle = result[0][0]
195 self.log.info("Found bundle %r" % bundle)
196 return bundle
197
198 def getnode(self, bookmark):
199 """Returns the node for the given bookmark. None if it doesn't exist."""
200 if not self._connected:
201 self.sqlconnect()
202 self.log.info(
203 "GET NODE reponame: %r bookmark: %r" % (self.reponame, bookmark))
204 self.sqlcursor.execute(
205 "SELECT node from bookmarkstonode WHERE "
206 "bookmark = %s AND reponame = %s", params=(bookmark, self.reponame))
207 result = self.sqlcursor.fetchall()
208 if len(result) != 1 or len(result[0]) != 1:
209 self.log.info("No matching bookmark")
210 return None
211 node = result[0][0]
212 self.log.info("Found node %r" % node)
213 return node
214
215 def getbookmarks(self, query):
216 if not self._connected:
217 self.sqlconnect()
218 self.log.info(
219 "QUERY BOOKMARKS reponame: %r query: %r" % (self.reponame, query))
220 query = _convertbookmarkpattern(query)
221 self.sqlcursor.execute(
222 "SELECT bookmark, node from bookmarkstonode WHERE "
223 "reponame = %s AND bookmark LIKE %s",
224 params=(self.reponame, query))
225 result = self.sqlcursor.fetchall()
226 bookmarks = {}
227 for row in result:
228 if len(row) != 2:
229 self.log.info("Bad row returned: %s" % row)
230 continue
231 bookmarks[row[0]] = row[1]
232 return bookmarks
233
234 def saveoptionaljsonmetadata(self, node, jsonmetadata):
235 if not self._connected:
236 self.sqlconnect()
237 self.log.info(
238 ("INSERT METADATA reponame: %r " +
239 "node: %r, jsonmetadata: %s") %
240 (self.reponame, node, jsonmetadata))
241
242 self.sqlcursor.execute(
243 "UPDATE nodesmetadata SET optional_json_metadata=%s WHERE "
244 "reponame=%s AND node=%s",
245 params=(jsonmetadata, self.reponame, node))
246
247 class CustomConverter(mysql.connector.conversion.MySQLConverter):
248 """Ensure that all values being returned are returned as python string
249 (versus the default byte arrays)."""
250 def _STRING_to_python(self, value, dsc=None):
251 return str(value)
252
253 def _VAR_STRING_to_python(self, value, dsc=None):
254 return str(value)
255
256 def _BLOB_to_python(self, value, dsc=None):
257 return str(value)
@@ -0,0 +1,155 b''
1 # This software may be used and distributed according to the terms of the
2 # GNU General Public License version 2 or any later version.
3
4 # based on bundleheads extension by Gregory Szorc <gps@mozilla.com>
5
6 from __future__ import absolute_import
7
8 import abc
9 import hashlib
10 import os
11 import subprocess
12 import tempfile
13
14 NamedTemporaryFile = tempfile.NamedTemporaryFile
15
16 class BundleWriteException(Exception):
17 pass
18
19 class BundleReadException(Exception):
20 pass
21
22 class abstractbundlestore(object):
23 """Defines the interface for bundle stores.
24
25 A bundle store is an entity that stores raw bundle data. It is a simple
26 key-value store. However, the keys are chosen by the store. The keys can
27 be any Python object understood by the corresponding bundle index (see
28 ``abstractbundleindex`` below).
29 """
30 __metaclass__ = abc.ABCMeta
31
32 @abc.abstractmethod
33 def write(self, data):
34 """Write bundle data to the store.
35
36 This function receives the raw data to be written as a str.
37 Throws BundleWriteException
38 The key of the written data MUST be returned.
39 """
40
41 @abc.abstractmethod
42 def read(self, key):
43 """Obtain bundle data for a key.
44
45 Returns None if the bundle isn't known.
46 Throws BundleReadException
47 The returned object should be a file object supporting read()
48 and close().
49 """
50
51 class filebundlestore(object):
52 """bundle store in filesystem
53
54 meant for storing bundles somewhere on disk and on network filesystems
55 """
56 def __init__(self, ui, repo):
57 self.ui = ui
58 self.repo = repo
59 self.storepath = ui.configpath('scratchbranch', 'storepath')
60 if not self.storepath:
61 self.storepath = self.repo.vfs.join("scratchbranches",
62 "filebundlestore")
63 if not os.path.exists(self.storepath):
64 os.makedirs(self.storepath)
65
66 def _dirpath(self, hashvalue):
67 """The first two characters of the hash name the upper
68 level directory, and the next two characters name the
69 next level directory"""
70 return os.path.join(self.storepath, hashvalue[0:2], hashvalue[2:4])
71
72 def _filepath(self, filename):
73 return os.path.join(self._dirpath(filename), filename)
74
75 def write(self, data):
76 filename = hashlib.sha1(data).hexdigest()
77 dirpath = self._dirpath(filename)
78
79 if not os.path.exists(dirpath):
80 os.makedirs(dirpath)
81
82 with open(self._filepath(filename), 'w') as f:
83 f.write(data)
84
85 return filename
86
87 def read(self, key):
88 try:
89 f = open(self._filepath(key), 'r')
90 except IOError:
91 return None
92
93 return f.read()
94
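# Illustrative layout: a bundle whose sha1 is
# 'b9e1ee5f93fb6d7c42496fc176c09839639dd9cc' is written to
# <storepath>/b9/e1/b9e1ee5f93fb6d7c42496fc176c09839639dd9cc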
95 class externalbundlestore(abstractbundlestore):
96 def __init__(self, put_binary, put_args, get_binary, get_args):
97 """
98 `put_binary` - path to binary file which uploads bundle to external
99 storage and prints key to stdout
100 `put_args` - format string with additional args to `put_binary`
101 {filename} replacement field can be used.
102 `get_binary` - path to binary file which accepts filename and key
103 (in that order), downloads bundle from store and saves it to file
104 `get_args` - format string with additional args to `get_binary`.
105 {filename} and {handle} replacement field can be used.
106 """
107
108 self.put_args = put_args
109 self.get_args = get_args
110 self.put_binary = put_binary
111 self.get_binary = get_binary
112
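# Illustrative construction (hypothetical helper binaries, mirroring the
# docstring above):
#
#   store = externalbundlestore('put', ['{filename}'],
#                               'get', ['{filename}', '{handle}'])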
113 def _call_binary(self, args):
114 p = subprocess.Popen(
115 args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
116 close_fds=True)
117 stdout, stderr = p.communicate()
118 returncode = p.returncode
119 return returncode, stdout, stderr
120
121 def write(self, data):
122 # Won't work on windows because you can't open a file a second time
123 # without closing it
124 with NamedTemporaryFile() as temp:
125 temp.write(data)
126 temp.flush()
127 temp.seek(0)
128 formatted_args = [arg.format(filename=temp.name)
129 for arg in self.put_args]
130 returncode, stdout, stderr = self._call_binary(
131 [self.put_binary] + formatted_args)
132
133 if returncode != 0:
134 raise BundleWriteException(
135 'Failed to upload to external store: %s' % stderr)
136 stdout_lines = stdout.splitlines()
137 if len(stdout_lines) == 1:
138 return stdout_lines[0]
139 else:
140 raise BundleWriteException(
141 'Bad output from %s: %s' % (self.put_binary, stdout))
142
143 def read(self, handle):
144 # Won't work on windows because you can't open a file a second time
145 # without closing it
146 with NamedTemporaryFile() as temp:
147 formatted_args = [arg.format(filename=temp.name, handle=handle)
148 for arg in self.get_args]
149 returncode, stdout, stderr = self._call_binary(
150 [self.get_binary] + formatted_args)
151
152 if returncode != 0:
153 raise BundleReadException(
154 'Failed to download from external store: %s' % stderr)
155 return temp.read()
@@ -0,0 +1,49 b''
1 scratchnodes() {
2 for node in `find ../repo/.hg/scratchbranches/index/nodemap/* | sort`; do
3 echo ${node##*/} `cat $node`
4 done
5 }
6
7 scratchbookmarks() {
8 for bookmark in `find ../repo/.hg/scratchbranches/index/bookmarkmap/* -type f | sort`; do
9 echo "${bookmark##*/bookmarkmap/} `cat $bookmark`"
10 done
11 }
12
13 setupcommon() {
14 cat >> $HGRCPATH << EOF
15 [extensions]
16 infinitepush=
17 [ui]
18 ssh = python "$TESTDIR/dummyssh"
19 [infinitepush]
20 branchpattern=re:scratch/.*
21 EOF
22 }
23
24 setupserver() {
25 cat >> .hg/hgrc << EOF
26 [infinitepush]
27 server=yes
28 indextype=disk
29 storetype=disk
30 reponame=babar
31 EOF
32 }
33
34 waitbgbackup() {
35 sleep 1
36 hg debugwaitbackup
37 }
38
39 mkcommitautobackup() {
40 echo $1 > $1
41 hg add $1
42 hg ci -m $1 --config infinitepushbackup.autobackup=True
43 }
44
45 setuplogdir() {
46 mkdir $TESTTMP/logs
47 chmod 0755 $TESTTMP/logs
48 chmod +t $TESTTMP/logs
49 }
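The two index helpers above simply walk the on-disk index; an equivalent
Python sketch, assuming the .hg/scratchbranches layout produced by
indextype=disk (the function name is illustrative):

    import os

    def scratch_index(repopath):
        # nodemap/<node> files hold the key of the bundle containing that
        # commit; bookmarkmap/<bookmark path> files hold a node hash.
        index = os.path.join(repopath, '.hg', 'scratchbranches', 'index')
        for kind in ('nodemap', 'bookmarkmap'):
            root = os.path.join(index, kind)
            for dirpath, _dirs, files in os.walk(root):
                for name in sorted(files):
                    path = os.path.join(dirpath, name)
                    with open(path) as f:
                        yield kind, os.path.relpath(path, root), f.read()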
@@ -0,0 +1,417 b''
1
2 Create an ondisk bundlestore in .hg/scratchbranches
3 $ . "$TESTDIR/library-infinitepush.sh"
4 $ cp $HGRCPATH $TESTTMP/defaulthgrc
5 $ setupcommon
6 $ mkcommit() {
7 > echo "$1" > "$1"
8 > hg add "$1"
9 > hg ci -m "$1"
10 > }
11 $ hg init repo
12 $ cd repo
13
14 Check that we can send a scratch branch to the server: it does not show up
15 in the server's history but is stored on disk
16 $ setupserver
17 $ cd ..
18 $ hg clone ssh://user@dummy/repo client -q
19 $ cd client
20 $ mkcommit initialcommit
21 $ hg push -r . --create
22 pushing to ssh://user@dummy/repo
23 searching for changes
24 remote: adding changesets
25 remote: adding manifests
26 remote: adding file changes
27 remote: added 1 changesets with 1 changes to 1 files
28 $ mkcommit scratchcommit
29 $ hg push -r . --to scratch/mybranch --create
30 pushing to ssh://user@dummy/repo
31 searching for changes
32 remote: pushing 1 commit:
33 remote: 20759b6926ce scratchcommit
34 $ hg log -G
35 @ changeset: 1:20759b6926ce
36 | bookmark: scratch/mybranch
37 | tag: tip
38 | user: test
39 | date: Thu Jan 01 00:00:00 1970 +0000
40 | summary: scratchcommit
41 |
42 o changeset: 0:67145f466344
43 user: test
44 date: Thu Jan 01 00:00:00 1970 +0000
45 summary: initialcommit
46
47 $ hg log -G -R ../repo
48 o changeset: 0:67145f466344
49 tag: tip
50 user: test
51 date: Thu Jan 01 00:00:00 1970 +0000
52 summary: initialcommit
53
54 $ find ../repo/.hg/scratchbranches | sort
55 ../repo/.hg/scratchbranches
56 ../repo/.hg/scratchbranches/filebundlestore
57 ../repo/.hg/scratchbranches/filebundlestore/b9
58 ../repo/.hg/scratchbranches/filebundlestore/b9/e1
59 ../repo/.hg/scratchbranches/filebundlestore/b9/e1/b9e1ee5f93fb6d7c42496fc176c09839639dd9cc
60 ../repo/.hg/scratchbranches/index
61 ../repo/.hg/scratchbranches/index/bookmarkmap
62 ../repo/.hg/scratchbranches/index/bookmarkmap/scratch
63 ../repo/.hg/scratchbranches/index/bookmarkmap/scratch/mybranch
64 ../repo/.hg/scratchbranches/index/nodemap
65 ../repo/.hg/scratchbranches/index/nodemap/20759b6926ce827d5a8c73eb1fa9726d6f7defb2
66
67 From another client we can get the scratch branch if we ask for it explicitly
68
69 $ cd ..
70 $ hg clone ssh://user@dummy/repo client2 -q
71 $ cd client2
72 $ hg pull -B scratch/mybranch --traceback
73 pulling from ssh://user@dummy/repo
74 searching for changes
75 adding changesets
76 adding manifests
77 adding file changes
78 added 1 changesets with 1 changes to 1 files
79 new changesets 20759b6926ce
80 (run 'hg update' to get a working copy)
81 $ hg log -G
82 o changeset: 1:20759b6926ce
83 | bookmark: scratch/mybranch
84 | tag: tip
85 | user: test
86 | date: Thu Jan 01 00:00:00 1970 +0000
87 | summary: scratchcommit
88 |
89 @ changeset: 0:67145f466344
90 user: test
91 date: Thu Jan 01 00:00:00 1970 +0000
92 summary: initialcommit
93
94 $ cd ..
95
96 Push to non-scratch bookmark
97
98 $ cd client
99 $ hg up 0
100 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
101 $ mkcommit newcommit
102 created new head
103 $ hg push -r .
104 pushing to ssh://user@dummy/repo
105 searching for changes
106 remote: adding changesets
107 remote: adding manifests
108 remote: adding file changes
109 remote: added 1 changesets with 1 changes to 1 files
110 $ hg log -G -T '{desc} {phase} {bookmarks}'
111 @ newcommit public
112 |
113 | o scratchcommit draft scratch/mybranch
114 |/
115 o initialcommit public
116
117
118 Push to scratch branch
119 $ cd ../client2
120 $ hg up -q scratch/mybranch
121 $ mkcommit 'new scratch commit'
122 $ hg push -r . --to scratch/mybranch
123 pushing to ssh://user@dummy/repo
124 searching for changes
125 remote: pushing 2 commits:
126 remote: 20759b6926ce scratchcommit
127 remote: 1de1d7d92f89 new scratch commit
128 $ hg log -G -T '{desc} {phase} {bookmarks}'
129 @ new scratch commit draft scratch/mybranch
130 |
131 o scratchcommit draft
132 |
133 o initialcommit public
134
135 $ scratchnodes
136 1de1d7d92f8965260391d0513fe8a8d5973d3042 bed63daed3beba97fff2e819a148cf415c217a85
137 20759b6926ce827d5a8c73eb1fa9726d6f7defb2 bed63daed3beba97fff2e819a148cf415c217a85
138
139 $ scratchbookmarks
140 scratch/mybranch 1de1d7d92f8965260391d0513fe8a8d5973d3042
141
142 Push scratch bookmark with no new revs
143 $ hg push -r . --to scratch/anotherbranch --create
144 pushing to ssh://user@dummy/repo
145 searching for changes
146 remote: pushing 2 commits:
147 remote: 20759b6926ce scratchcommit
148 remote: 1de1d7d92f89 new scratch commit
149 $ hg log -G -T '{desc} {phase} {bookmarks}'
150 @ new scratch commit draft scratch/anotherbranch scratch/mybranch
151 |
152 o scratchcommit draft
153 |
154 o initialcommit public
155
156 $ scratchbookmarks
157 scratch/anotherbranch 1de1d7d92f8965260391d0513fe8a8d5973d3042
158 scratch/mybranch 1de1d7d92f8965260391d0513fe8a8d5973d3042
159
160 Pull scratch and non-scratch bookmark at the same time
161
162 $ hg -R ../repo book newbook
163 $ cd ../client
164 $ hg pull -B newbook -B scratch/mybranch --traceback
165 pulling from ssh://user@dummy/repo
166 searching for changes
167 adding changesets
168 adding manifests
169 adding file changes
170 added 1 changesets with 1 changes to 2 files
171 adding remote bookmark newbook
172 new changesets 1de1d7d92f89
173 (run 'hg update' to get a working copy)
174 $ hg log -G -T '{desc} {phase} {bookmarks}'
175 o new scratch commit draft scratch/mybranch
176 |
177 | @ newcommit public
178 | |
179 o | scratchcommit draft
180 |/
181 o initialcommit public
182
183
184 Push a scratch revision without a bookmark using --bundle-store
185
186 $ hg up -q tip
187 $ mkcommit scratchcommitnobook
188 $ hg log -G -T '{desc} {phase} {bookmarks}'
189 @ scratchcommitnobook draft
190 |
191 o new scratch commit draft scratch/mybranch
192 |
193 | o newcommit public
194 | |
195 o | scratchcommit draft
196 |/
197 o initialcommit public
198
199 $ hg push -r . --bundle-store
200 pushing to ssh://user@dummy/repo
201 searching for changes
202 remote: pushing 3 commits:
203 remote: 20759b6926ce scratchcommit
204 remote: 1de1d7d92f89 new scratch commit
205 remote: 2b5d271c7e0d scratchcommitnobook
206 $ hg -R ../repo log -G -T '{desc} {phase}'
207 o newcommit public
208 |
209 o initialcommit public
210
211
212 $ scratchnodes
213 1de1d7d92f8965260391d0513fe8a8d5973d3042 66fa08ff107451320512817bed42b7f467a1bec3
214 20759b6926ce827d5a8c73eb1fa9726d6f7defb2 66fa08ff107451320512817bed42b7f467a1bec3
215 2b5d271c7e0d25d811359a314d413ebcc75c9524 66fa08ff107451320512817bed42b7f467a1bec3
216
217 Test with pushrebase
218 $ mkcommit scratchcommitwithpushrebase
219 $ hg push -r . --to scratch/mybranch
220 pushing to ssh://user@dummy/repo
221 searching for changes
222 remote: pushing 4 commits:
223 remote: 20759b6926ce scratchcommit
224 remote: 1de1d7d92f89 new scratch commit
225 remote: 2b5d271c7e0d scratchcommitnobook
226 remote: d8c4f54ab678 scratchcommitwithpushrebase
227 $ hg -R ../repo log -G -T '{desc} {phase}'
228 o newcommit public
229 |
230 o initialcommit public
231
232 $ scratchnodes
233 1de1d7d92f8965260391d0513fe8a8d5973d3042 e3cb2ac50f9e1e6a5ead3217fc21236c84af4397
234 20759b6926ce827d5a8c73eb1fa9726d6f7defb2 e3cb2ac50f9e1e6a5ead3217fc21236c84af4397
235 2b5d271c7e0d25d811359a314d413ebcc75c9524 e3cb2ac50f9e1e6a5ead3217fc21236c84af4397
236 d8c4f54ab678fd67cb90bb3f272a2dc6513a59a7 e3cb2ac50f9e1e6a5ead3217fc21236c84af4397
237
238 Change the order of pushrebase and infinitepush
239 $ mkcommit scratchcommitwithpushrebase2
240 $ hg push -r . --to scratch/mybranch
241 pushing to ssh://user@dummy/repo
242 searching for changes
243 remote: pushing 5 commits:
244 remote: 20759b6926ce scratchcommit
245 remote: 1de1d7d92f89 new scratch commit
246 remote: 2b5d271c7e0d scratchcommitnobook
247 remote: d8c4f54ab678 scratchcommitwithpushrebase
248 remote: 6c10d49fe927 scratchcommitwithpushrebase2
249 $ hg -R ../repo log -G -T '{desc} {phase}'
250 o newcommit public
251 |
252 o initialcommit public
253
254 $ scratchnodes
255 1de1d7d92f8965260391d0513fe8a8d5973d3042 cd0586065eaf8b483698518f5fc32531e36fd8e0
256 20759b6926ce827d5a8c73eb1fa9726d6f7defb2 cd0586065eaf8b483698518f5fc32531e36fd8e0
257 2b5d271c7e0d25d811359a314d413ebcc75c9524 cd0586065eaf8b483698518f5fc32531e36fd8e0
258 6c10d49fe92751666c40263f96721b918170d3da cd0586065eaf8b483698518f5fc32531e36fd8e0
259 d8c4f54ab678fd67cb90bb3f272a2dc6513a59a7 cd0586065eaf8b483698518f5fc32531e36fd8e0
260
261 Non-fast-forward scratch bookmark push
262
263 $ hg log -GT "{rev}:{node} {desc}\n"
264 @ 6:6c10d49fe92751666c40263f96721b918170d3da scratchcommitwithpushrebase2
265 |
266 o 5:d8c4f54ab678fd67cb90bb3f272a2dc6513a59a7 scratchcommitwithpushrebase
267 |
268 o 4:2b5d271c7e0d25d811359a314d413ebcc75c9524 scratchcommitnobook
269 |
270 o 3:1de1d7d92f8965260391d0513fe8a8d5973d3042 new scratch commit
271 |
272 | o 2:91894e11e8255bf41aa5434b7b98e8b2aa2786eb newcommit
273 | |
274 o | 1:20759b6926ce827d5a8c73eb1fa9726d6f7defb2 scratchcommit
275 |/
276 o 0:67145f4663446a9580364f70034fea6e21293b6f initialcommit
277
278 $ hg up 6c10d49fe927
279 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
280 $ echo 1 > amend
281 $ hg add amend
282 $ hg ci --amend -m 'scratch amended commit'
283 saved backup bundle to $TESTTMP/client/.hg/strip-backup/6c10d49fe927-c99ffec5-amend.hg (glob)
284 $ hg log -G -T '{desc} {phase} {bookmarks}'
285 @ scratch amended commit draft scratch/mybranch
286 |
287 o scratchcommitwithpushrebase draft
288 |
289 o scratchcommitnobook draft
290 |
291 o new scratch commit draft
292 |
293 | o newcommit public
294 | |
295 o | scratchcommit draft
296 |/
297 o initialcommit public
298
299
300 $ scratchbookmarks
301 scratch/anotherbranch 1de1d7d92f8965260391d0513fe8a8d5973d3042
302 scratch/mybranch 6c10d49fe92751666c40263f96721b918170d3da
303 $ hg push -r . --to scratch/mybranch
304 pushing to ssh://user@dummy/repo
305 searching for changes
306 remote: non-forward push
307 remote: (use --non-forward-move to override)
308 abort: push failed on remote
309 [255]
310
311 $ hg push -r . --to scratch/mybranch --non-forward-move
312 pushing to ssh://user@dummy/repo
313 searching for changes
314 remote: pushing 5 commits:
315 remote: 20759b6926ce scratchcommit
316 remote: 1de1d7d92f89 new scratch commit
317 remote: 2b5d271c7e0d scratchcommitnobook
318 remote: d8c4f54ab678 scratchcommitwithpushrebase
319 remote: 8872775dd97a scratch amended commit
320 $ scratchbookmarks
321 scratch/anotherbranch 1de1d7d92f8965260391d0513fe8a8d5973d3042
322 scratch/mybranch 8872775dd97a750e1533dc1fbbca665644b32547
323 $ hg log -G -T '{desc} {phase} {bookmarks}'
324 @ scratch amended commit draft scratch/mybranch
325 |
326 o scratchcommitwithpushrebase draft
327 |
328 o scratchcommitnobook draft
329 |
330 o new scratch commit draft
331 |
332 | o newcommit public
333 | |
334 o | scratchcommit draft
335 |/
336 o initialcommit public
337
338 Check that the push path is not ignored. Add a new path to the hgrc
339 $ cat >> .hg/hgrc << EOF
340 > [paths]
341 > peer=ssh://user@dummy/client2
342 > EOF
343
344 Check out the last non-scratch commit
345 $ hg up 91894e11e8255
346 1 files updated, 0 files merged, 6 files removed, 0 files unresolved
347 $ mkcommit peercommit
348 Use --force because this push creates a new head
349 $ hg push peer -r . -f
350 pushing to ssh://user@dummy/client2
351 searching for changes
352 remote: adding changesets
353 remote: adding manifests
354 remote: adding file changes
355 remote: added 2 changesets with 2 changes to 2 files (+1 heads)
356 $ hg -R ../repo log -G -T '{desc} {phase} {bookmarks}'
357 o newcommit public
358 |
359 o initialcommit public
360
361 $ hg -R ../client2 log -G -T '{desc} {phase} {bookmarks}'
362 o peercommit public
363 |
364 o newcommit public
365 |
366 | @ new scratch commit draft scratch/anotherbranch scratch/mybranch
367 | |
368 | o scratchcommit draft
369 |/
370 o initialcommit public
371
372 $ hg book --list-remote scratch/*
373 scratch/anotherbranch 1de1d7d92f8965260391d0513fe8a8d5973d3042
374 scratch/mybranch 8872775dd97a750e1533dc1fbbca665644b32547
375 $ hg book --list-remote
376 abort: --list-remote requires a bookmark pattern
377 (use "hg book" to get a list of your local bookmarks)
378 [255]
379 $ hg book --config infinitepush.defaultremotepatterns=scratch/another* --list-remote
380 abort: --list-remote requires a bookmark pattern
381 (use "hg book" to get a list of your local bookmarks)
382 [255]
383 $ hg book --list-remote scratch/my
384 $ hg book --list-remote scratch/my*
385 scratch/mybranch 8872775dd97a750e1533dc1fbbca665644b32547
386 $ hg book --list-remote scratch/my* -T json
387 [
388 {
389 "bookmark": "scratch/mybranch",
390 "node": "8872775dd97a750e1533dc1fbbca665644b32547"
391 }
392 ]
393 $ cd ../repo
394 $ hg book scratch/serversidebook
395 $ hg book serversidebook
396 $ cd ../client
397 $ hg book --list-remote scratch/* -T json
398 [
399 {
400 "bookmark": "scratch/anotherbranch",
401 "node": "1de1d7d92f8965260391d0513fe8a8d5973d3042"
402 },
403 {
404 "bookmark": "scratch/mybranch",
405 "node": "8872775dd97a750e1533dc1fbbca665644b32547"
406 },
407 {
408 "bookmark": "scratch/serversidebook",
409 "node": "0000000000000000000000000000000000000000"
410 }
411 ]
412
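Since `hg book --list-remote` accepts `-T json` (as shown above), the scratch
bookmark list is easy to consume from scripts; a small sketch, assuming it
runs inside a configured client:

    import json
    import subprocess

    out = subprocess.check_output(
        ['hg', 'book', '--list-remote', 'scratch/*', '-T', 'json'])
    for entry in json.loads(out):
        # Each entry carries the bookmark name and the node it points to.
        print(entry['bookmark'], entry['node'])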
413 Pushing to an svn server should fail
414 $ hg push svn+ssh://svn.vip.facebook.com/svnroot/tfb/trunk/www -r . --to scratch/serversidebook
415 abort: infinite push does not work with svn repo
416 (did you forget to `hg push default`?)
417 [255]
@@ -0,0 +1,318 b''
1 Testing the infinitepush extension and the config options provided by it
2
3 Setup
4
5 $ . "$TESTDIR/library-infinitepush.sh"
6 $ cp $HGRCPATH $TESTTMP/defaulthgrc
7 $ setupcommon
8 $ hg init repo
9 $ cd repo
10 $ setupserver
11 $ echo initialcommit > initialcommit
12 $ hg ci -Aqm "initialcommit"
13 $ hg phase --public .
14
15 $ cd ..
16 $ hg clone ssh://user@dummy/repo client -q
17
18 Create two heads. Push the first head alone, then both heads together. Make
19 sure that multi-head push works.
20 $ cd client
21 $ echo multihead1 > multihead1
22 $ hg add multihead1
23 $ hg ci -m "multihead1"
24 $ hg up null
25 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
26 $ echo multihead2 > multihead2
27 $ hg ci -Am "multihead2"
28 adding multihead2
29 created new head
30 $ hg push -r . --bundle-store
31 pushing to ssh://user@dummy/repo
32 searching for changes
33 remote: pushing 1 commit:
34 remote: ee4802bf6864 multihead2
35 $ hg push -r '1:2' --bundle-store
36 pushing to ssh://user@dummy/repo
37 searching for changes
38 remote: pushing 2 commits:
39 remote: bc22f9a30a82 multihead1
40 remote: ee4802bf6864 multihead2
41 $ scratchnodes
42 bc22f9a30a821118244deacbd732e394ed0b686c ab1bc557aa090a9e4145512c734b6e8a828393a5
43 ee4802bf6864326a6b3dcfff5a03abc2a0a69b8f ab1bc557aa090a9e4145512c734b6e8a828393a5
44
45 Create two new scratch bookmarks
46 $ hg up 0
47 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
48 $ echo scratchfirstpart > scratchfirstpart
49 $ hg ci -Am "scratchfirstpart"
50 adding scratchfirstpart
51 created new head
52 $ hg push -r . --to scratch/firstpart --create
53 pushing to ssh://user@dummy/repo
54 searching for changes
55 remote: pushing 1 commit:
56 remote: 176993b87e39 scratchfirstpart
57 $ hg up 0
58 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
59 $ echo scratchsecondpart > scratchsecondpart
60 $ hg ci -Am "scratchsecondpart"
61 adding scratchsecondpart
62 created new head
63 $ hg push -r . --to scratch/secondpart --create
64 pushing to ssh://user@dummy/repo
65 searching for changes
66 remote: pushing 1 commit:
67 remote: 8db3891c220e scratchsecondpart
68
69 Pull the two bookmarks in the second client
70 $ cd ..
71 $ hg clone ssh://user@dummy/repo client2 -q
72 $ cd client2
73 $ hg pull -B scratch/firstpart -B scratch/secondpart
74 pulling from ssh://user@dummy/repo
75 searching for changes
76 adding changesets
77 adding manifests
78 adding file changes
79 added 1 changesets with 1 changes to 1 files
80 adding changesets
81 adding manifests
82 adding file changes
83 added 1 changesets with 1 changes to 1 files (+1 heads)
84 new changesets * (glob)
85 (run 'hg heads' to see heads, 'hg merge' to merge)
86 $ hg log -r scratch/secondpart -T '{node}'
87 8db3891c220e216f6da214e8254bd4371f55efca (no-eol)
88 $ hg log -r scratch/firstpart -T '{node}'
89 176993b87e39bd88d66a2cccadabe33f0b346339 (no-eol)
90 Make two commits to the scratch branch
91
92 $ echo testpullbycommithash1 > testpullbycommithash1
93 $ hg ci -Am "testpullbycommithash1"
94 adding testpullbycommithash1
95 created new head
96 $ hg log -r '.' -T '{node}\n' > ../testpullbycommithash1
97 $ echo testpullbycommithash2 > testpullbycommithash2
98 $ hg ci -Aqm "testpullbycommithash2"
99 $ hg push -r . --to scratch/mybranch --create -q
100
101 Create a third client and pull by commit hash.
102 Make sure testpullbycommithash2 has not been fetched
103 $ cd ..
104 $ hg clone ssh://user@dummy/repo client3 -q
105 $ cd client3
106 $ hg pull -r `cat ../testpullbycommithash1`
107 pulling from ssh://user@dummy/repo
108 searching for changes
109 adding changesets
110 adding manifests
111 adding file changes
112 added 1 changesets with 1 changes to 1 files
113 new changesets 33910bfe6ffe
114 (run 'hg update' to get a working copy)
115 $ hg log -G -T '{desc} {phase} {bookmarks}'
116 o testpullbycommithash1 draft
117 |
118 @ initialcommit public
119
120 Make a public commit in the repo and pull it.
121 Make sure its phase on the client is public.
122 $ cd ../repo
123 $ echo publiccommit > publiccommit
124 $ hg ci -Aqm "publiccommit"
125 $ hg phase --public .
126 $ cd ../client3
127 $ hg pull
128 pulling from ssh://user@dummy/repo
129 searching for changes
130 adding changesets
131 adding manifests
132 adding file changes
133 added 1 changesets with 1 changes to 1 files (+1 heads)
134 new changesets a79b6597f322
135 (run 'hg heads' to see heads, 'hg merge' to merge)
136 $ hg log -G -T '{desc} {phase} {bookmarks} {node|short}'
137 o publiccommit public a79b6597f322
138 |
139 | o testpullbycommithash1 draft 33910bfe6ffe
140 |/
141 @ initialcommit public 67145f466344
142
143 $ hg up a79b6597f322
144 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
145 $ echo scratchontopofpublic > scratchontopofpublic
146 $ hg ci -Aqm "scratchontopofpublic"
147 $ hg push -r . --to scratch/scratchontopofpublic --create
148 pushing to ssh://user@dummy/repo
149 searching for changes
150 remote: pushing 1 commit:
151 remote: c70aee6da07d scratchontopofpublic
152 $ cd ../client2
153 $ hg pull -B scratch/scratchontopofpublic
154 pulling from ssh://user@dummy/repo
155 searching for changes
156 adding changesets
157 adding manifests
158 adding file changes
159 added 1 changesets with 1 changes to 1 files (+1 heads)
160 adding changesets
161 adding manifests
162 adding file changes
163 added 1 changesets with 1 changes to 1 files
164 new changesets a79b6597f322:c70aee6da07d
165 (run 'hg heads .' to see heads, 'hg merge' to merge)
166 $ hg log -r scratch/scratchontopofpublic -T '{phase}'
167 draft (no-eol)
168 Strip the scratchontopofpublic commit and do hg update
169 $ hg log -r tip -T '{node}\n'
170 c70aee6da07d7cdb9897375473690df3a8563339
171 $ echo "[extensions]" >> .hg/hgrc
172 $ echo "strip=" >> .hg/hgrc
173 $ hg strip -q tip
174 $ hg up c70aee6da07d7cdb9897375473690df3a8563339
175 'c70aee6da07d7cdb9897375473690df3a8563339' does not exist locally - looking for it remotely...
176 pulling from ssh://user@dummy/repo
177 searching for changes
178 adding changesets
179 adding manifests
180 adding file changes
181 added 1 changesets with 1 changes to 1 files
182 new changesets c70aee6da07d
183 (run 'hg update' to get a working copy)
184 'c70aee6da07d7cdb9897375473690df3a8563339' found remotely
185 2 files updated, 0 files merged, 2 files removed, 0 files unresolved
186
187 Trying to pull from a bad path
188 $ hg strip -q tip
189 $ hg --config paths.default=badpath up c70aee6da07d7cdb9897375473690df3a8563339
190 'c70aee6da07d7cdb9897375473690df3a8563339' does not exist locally - looking for it remotely...
191 pulling from $TESTTMP/client2/badpath (glob)
192 pull failed: repository $TESTTMP/client2/badpath not found
193 abort: unknown revision 'c70aee6da07d7cdb9897375473690df3a8563339'!
194 [255]
195
196 Strip the commit and pull it back using hg update with the bookmark name
197 $ hg strip -q d8fde0ddfc96
198 $ hg book -d scratch/mybranch
199 $ hg up scratch/mybranch
200 'scratch/mybranch' does not exist locally - looking for it remotely...
201 pulling from ssh://user@dummy/repo
202 searching for changes
203 adding changesets
204 adding manifests
205 adding file changes
206 added 1 changesets with 1 changes to 2 files
207 new changesets d8fde0ddfc96
208 (run 'hg update' to get a working copy)
209 'scratch/mybranch' found remotely
210 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
211 (activating bookmark scratch/mybranch)
212 $ hg log -r scratch/mybranch -T '{node}'
213 d8fde0ddfc962183977f92d2bc52d303b8840f9d (no-eol)
214
215 Test debugfillinfinitepushmetadata
216 $ cd ../repo
217 $ hg debugfillinfinitepushmetadata
218 abort: nodes are not specified
219 [255]
220 $ hg debugfillinfinitepushmetadata --node randomnode
221 abort: node randomnode is not found
222 [255]
223 $ hg debugfillinfinitepushmetadata --node d8fde0ddfc962183977f92d2bc52d303b8840f9d
224 $ cat .hg/scratchbranches/index/nodemetadatamap/d8fde0ddfc962183977f92d2bc52d303b8840f9d
225 {"changed_files": {"testpullbycommithash2": {"adds": 1, "isbinary": false, "removes": 0, "status": "added"}}} (no-eol)
226
227 $ cd ../client
228 $ hg up d8fde0ddfc962183977f92d2bc52d303b8840f9d
229 'd8fde0ddfc962183977f92d2bc52d303b8840f9d' does not exist locally - looking for it remotely...
230 pulling from ssh://user@dummy/repo
231 searching for changes
232 adding changesets
233 adding manifests
234 adding file changes
235 added 2 changesets with 2 changes to 2 files (+1 heads)
236 new changesets 33910bfe6ffe:d8fde0ddfc96
237 (run 'hg heads .' to see heads, 'hg merge' to merge)
238 'd8fde0ddfc962183977f92d2bc52d303b8840f9d' found remotely
239 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
240 $ echo file > file
241 $ hg add file
242 $ hg rm testpullbycommithash2
243 $ hg ci -m 'add and rm files'
244 $ hg log -r . -T '{node}\n'
245 3edfe7e9089ab9f728eb8e0d0c62a5d18cf19239
246 $ hg cp file cpfile
247 $ hg mv file mvfile
248 $ hg ci -m 'cpfile and mvfile'
249 $ hg log -r . -T '{node}\n'
250 c7ac39f638c6b39bcdacf868fa21b6195670f8ae
251 $ hg push -r . --bundle-store
252 pushing to ssh://user@dummy/repo
253 searching for changes
254 remote: pushing 4 commits:
255 remote: 33910bfe6ffe testpullbycommithash1
256 remote: d8fde0ddfc96 testpullbycommithash2
257 remote: 3edfe7e9089a add and rm files
258 remote: c7ac39f638c6 cpfile and mvfile
259 $ cd ../repo
260 $ hg debugfillinfinitepushmetadata --node 3edfe7e9089ab9f728eb8e0d0c62a5d18cf19239 --node c7ac39f638c6b39bcdacf868fa21b6195670f8ae
261 $ cat .hg/scratchbranches/index/nodemetadatamap/3edfe7e9089ab9f728eb8e0d0c62a5d18cf19239
262 {"changed_files": {"file": {"adds": 1, "isbinary": false, "removes": 0, "status": "added"}, "testpullbycommithash2": {"adds": 0, "isbinary": false, "removes": 1, "status": "removed"}}} (no-eol)
263 $ cat .hg/scratchbranches/index/nodemetadatamap/c7ac39f638c6b39bcdacf868fa21b6195670f8ae
264 {"changed_files": {"cpfile": {"adds": 1, "copies": "file", "isbinary": false, "removes": 0, "status": "added"}, "file": {"adds": 0, "isbinary": false, "removes": 1, "status": "removed"}, "mvfile": {"adds": 1, "copies": "file", "isbinary": false, "removes": 0, "status": "added"}}} (no-eol)
265
266 Test the infinitepush.metadatafilelimit option
267 $ cd ../client
268 $ echo file > file
269 $ hg add file
270 $ echo file1 > file1
271 $ hg add file1
272 $ echo file2 > file2
273 $ hg add file2
274 $ hg ci -m 'add many files'
275 $ hg log -r . -T '{node}'
276 09904fb20c53ff351bd3b1d47681f569a4dab7e5 (no-eol)
277 $ hg push -r . --bundle-store
278 pushing to ssh://user@dummy/repo
279 searching for changes
280 remote: pushing 5 commits:
281 remote: 33910bfe6ffe testpullbycommithash1
282 remote: d8fde0ddfc96 testpullbycommithash2
283 remote: 3edfe7e9089a add and rm files
284 remote: c7ac39f638c6 cpfile and mvfile
285 remote: 09904fb20c53 add many files
286
287 $ cd ../repo
288 $ hg debugfillinfinitepushmetadata --node 09904fb20c53ff351bd3b1d47681f569a4dab7e5 --config infinitepush.metadatafilelimit=2
289 $ cat .hg/scratchbranches/index/nodemetadatamap/09904fb20c53ff351bd3b1d47681f569a4dab7e5
290 {"changed_files": {"file": {"adds": 1, "isbinary": false, "removes": 0, "status": "added"}, "file1": {"adds": 1, "isbinary": false, "removes": 0, "status": "added"}}, "changed_files_truncated": true} (no-eol)
291
292 Test infinitepush.fillmetadatabranchpattern
293 $ cd ../repo
294 $ cat >> .hg/hgrc << EOF
295 > [infinitepush]
296 > fillmetadatabranchpattern=re:scratch/fillmetadata/.*
297 > EOF
298 $ cd ../client
299 $ echo tofillmetadata > tofillmetadata
300 $ hg ci -Aqm "tofillmetadata"
301 $ hg log -r . -T '{node}\n'
302 d2b0410d4da084bc534b1d90df0de9eb21583496
303 $ hg push -r . --to scratch/fillmetadata/fill --create
304 pushing to ssh://user@dummy/repo
305 searching for changes
306 remote: pushing 6 commits:
307 remote: 33910bfe6ffe testpullbycommithash1
308 remote: d8fde0ddfc96 testpullbycommithash2
309 remote: 3edfe7e9089a add and rm files
310 remote: c7ac39f638c6 cpfile and mvfile
311 remote: 09904fb20c53 add many files
312 remote: d2b0410d4da0 tofillmetadata
313
314 Make sure the background process has finished
315 $ sleep 3
316 $ cd ../repo
317 $ cat .hg/scratchbranches/index/nodemetadatamap/d2b0410d4da084bc534b1d90df0de9eb21583496
318 {"changed_files": {"tofillmetadata": {"adds": 1, "isbinary": false, "removes": 0, "status": "added"}}} (no-eol)