##// END OF EJS Templates
phases: do not exchange secret changesets...
Pierre-Yves David -
r15713:cff25e4b default
parent child Browse files
Show More
@@ -1,202 +1,218 b''
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, short
9 9 from i18n import _
10 10 import util, setdiscovery, treediscovery
11 11
12 12 def findcommonincoming(repo, remote, heads=None, force=False):
13 13 """Return a tuple (common, anyincoming, heads) used to identify the common
14 14 subset of nodes between repo and remote.
15 15
16 16 "common" is a list of (at least) the heads of the common subset.
17 17 "anyincoming" is testable as a boolean indicating if any nodes are missing
18 18 locally. If remote does not support getbundle, this actually is a list of
19 19 roots of the nodes that would be incoming, to be supplied to
20 20 changegroupsubset. No code except for pull should be relying on this fact
21 21 any longer.
22 22 "heads" is either the supplied heads, or else the remote's heads.
23 23
 24 24 If you pass heads and they are all known locally, the response lists just
25 25 these heads in "common" and in "heads".
26 26
27 27 Please use findcommonoutgoing to compute the set of outgoing nodes to give
28 28 extensions a good hook into outgoing.
29 29 """
30 30
31 31 if not remote.capable('getbundle'):
32 32 return treediscovery.findcommonincoming(repo, remote, heads, force)
33 33
34 34 if heads:
35 35 allknown = True
36 36 nm = repo.changelog.nodemap
37 37 for h in heads:
38 38 if nm.get(h) is None:
39 39 allknown = False
40 40 break
41 41 if allknown:
42 42 return (heads, False, heads)
43 43
44 44 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
45 45 abortwhenunrelated=not force)
46 46 common, anyinc, srvheads = res
47 47 return (list(common), anyinc, heads or list(srvheads))
48 48
49 49 def findcommonoutgoing(repo, other, onlyheads=None, force=False, commoninc=None):
50 50 '''Return a tuple (common, anyoutgoing, heads) used to identify the set
51 51 of nodes present in repo but not in other.
52 52
53 53 If onlyheads is given, only nodes ancestral to nodes in onlyheads (inclusive)
54 54 are included. If you already know the local repo's heads, passing them in
55 55 onlyheads is faster than letting them be recomputed here.
56 56
 57 57 If commoninc is given, it must be the result of a prior call to
58 58 findcommonincoming(repo, other, force) to avoid recomputing it here.
59 59
60 60 The returned tuple is meant to be passed to changelog.findmissing.'''
61 61 common, _any, _hds = commoninc or findcommonincoming(repo, other, force=force)
62 62 return (common, onlyheads or repo.heads())
63 63
64 64 def prepush(repo, remote, force, revs, newbranch):
65 65 '''Analyze the local and remote repositories and determine which
66 66 changesets need to be pushed to the remote. Return value depends
67 67 on circumstances:
68 68
69 69 If we are not going to push anything, return a tuple (None,
70 70 outgoing, common) where outgoing is 0 if there are no outgoing
71 71 changesets and 1 if there are, but we refuse to push them
72 72 (e.g. would create new remote heads). The third element "common"
73 73 is the list of heads of the common set between local and remote.
74 74
75 75 Otherwise, return a tuple (changegroup, remoteheads, futureheads),
76 76 where changegroup is a readable file-like object whose read()
77 77 returns successive changegroup chunks ready to be sent over the
78 78 wire, remoteheads is the list of remote heads and futureheads is
79 79 the list of heads of the common set between local and remote to
80 80 be after push completion.
81 81 '''
82 82 commoninc = findcommonincoming(repo, remote, force=force)
83 83 common, revs = findcommonoutgoing(repo, remote, onlyheads=revs,
84 84 commoninc=commoninc, force=force)
85 85 _common, inc, remoteheads = commoninc
86 86
87 87 cl = repo.changelog
88 outg = cl.findmissing(common, revs)
88 alloutg = cl.findmissing(common, revs)
89 outg = []
90 secret = []
91 for o in alloutg:
92 if repo[o].phase() >= 2:
93 secret.append(o)
94 else:
95 outg.append(o)
89 96
90 97 if not outg:
91 repo.ui.status(_("no changes found\n"))
98 if secret:
99 repo.ui.status(_("no changes to push but %i secret changesets\n")
100 % len(secret))
101 else:
102 repo.ui.status(_("no changes found\n"))
92 103 return None, 1, common
93 104
105 if secret:
106 # recompute target revs
107 revs = [ctx.node() for ctx in repo.set('heads(::(%ld))',
108 map(repo.changelog.rev, outg))]
109
94 110 if not force and remoteheads != [nullid]:
95 111 if remote.capable('branchmap'):
96 112 # Check for each named branch if we're creating new remote heads.
97 113 # To be a remote head after push, node must be either:
98 114 # - unknown locally
99 115 # - a local outgoing head descended from update
100 116 # - a remote head that's known locally and not
101 117 # ancestral to an outgoing head
102 118
103 119 # 1. Create set of branches involved in the push.
104 120 branches = set(repo[n].branch() for n in outg)
105 121
106 122 # 2. Check for new branches on the remote.
107 123 remotemap = remote.branchmap()
108 124 newbranches = branches - set(remotemap)
109 125 if newbranches and not newbranch: # new branch requires --new-branch
110 126 branchnames = ', '.join(sorted(newbranches))
111 127 raise util.Abort(_("push creates new remote branches: %s!")
112 128 % branchnames,
113 129 hint=_("use 'hg push --new-branch' to create"
114 130 " new remote branches"))
115 131 branches.difference_update(newbranches)
116 132
117 133 # 3. Construct the initial oldmap and newmap dicts.
118 134 # They contain information about the remote heads before and
119 135 # after the push, respectively.
120 136 # Heads not found locally are not included in either dict,
121 137 # since they won't be affected by the push.
122 138 # unsynced contains all branches with incoming changesets.
123 139 oldmap = {}
124 140 newmap = {}
125 141 unsynced = set()
126 142 for branch in branches:
127 143 remotebrheads = remotemap[branch]
128 144 prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
129 145 oldmap[branch] = prunedbrheads
130 146 newmap[branch] = list(prunedbrheads)
131 147 if len(remotebrheads) > len(prunedbrheads):
132 148 unsynced.add(branch)
133 149
134 150 # 4. Update newmap with outgoing changes.
135 151 # This will possibly add new heads and remove existing ones.
136 152 ctxgen = (repo[n] for n in outg)
137 153 repo._updatebranchcache(newmap, ctxgen)
138 154
139 155 else:
140 156 # 1-4b. old servers: Check for new topological heads.
141 157 # Construct {old,new}map with branch = None (topological branch).
142 158 # (code based on _updatebranchcache)
143 159 oldheads = set(h for h in remoteheads if h in cl.nodemap)
144 160 newheads = oldheads.union(outg)
145 161 if len(newheads) > 1:
146 162 for latest in reversed(outg):
147 163 if latest not in newheads:
148 164 continue
149 165 minhrev = min(cl.rev(h) for h in newheads)
150 166 reachable = cl.reachable(latest, cl.node(minhrev))
151 167 reachable.remove(latest)
152 168 newheads.difference_update(reachable)
153 169 branches = set([None])
154 170 newmap = {None: newheads}
155 171 oldmap = {None: oldheads}
156 172 unsynced = inc and branches or set()
157 173
158 174 # 5. Check for new heads.
159 175 # If there are more heads after the push than before, a suitable
160 176 # error message, depending on unsynced status, is displayed.
161 177 error = None
162 178 for branch in branches:
163 179 newhs = set(newmap[branch])
164 180 oldhs = set(oldmap[branch])
165 181 if len(newhs) > len(oldhs):
166 182 dhs = list(newhs - oldhs)
167 183 if error is None:
168 184 if branch not in ('default', None):
169 185 error = _("push creates new remote head %s "
170 186 "on branch '%s'!") % (short(dhs[0]), branch)
171 187 else:
172 188 error = _("push creates new remote head %s!"
173 189 ) % short(dhs[0])
174 190 if branch in unsynced:
175 191 hint = _("you should pull and merge or "
176 192 "use push -f to force")
177 193 else:
178 194 hint = _("did you forget to merge? "
179 195 "use push -f to force")
180 196 if branch is not None:
181 197 repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
182 198 for h in dhs:
183 199 repo.ui.note(_("new remote head %s\n") % short(h))
184 200 if error:
185 201 raise util.Abort(error, hint=hint)
186 202
187 203 # 6. Check for unsynced changes on involved branches.
188 204 if unsynced:
189 205 repo.ui.warn(_("note: unsynced remote changes!\n"))
190 206
191 207 if revs is None:
192 208 # use the fast path, no race possible on push
193 209 cg = repo._changegroup(outg, 'push')
194 210 else:
195 211 cg = repo.getbundle('push', heads=revs, common=common)
196 212 # no need to compute outg ancestor. All node in outg have either:
197 213 # - parents in outg
198 214 # - parents in common
199 215 # - nullid parent
200 216 rset = repo.set('heads(%ln + %ln)', common, outg)
201 217 futureheads = [ctx.node() for ctx in rset]
202 218 return cg, remoteheads, futureheads
@@ -1,194 +1,195 b''
1 1 # setdiscovery.py - improved discovery of common nodeset for mercurial
2 2 #
3 3 # Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
4 4 # and Peter Arrenbrecht <peter@arrenbrecht.ch>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from node import nullid
10 10 from i18n import _
11 11 import random, collections, util, dagutil
12 import phases
12 13
13 14 def _updatesample(dag, nodes, sample, always, quicksamplesize=0):
14 15 # if nodes is empty we scan the entire graph
15 16 if nodes:
16 17 heads = dag.headsetofconnecteds(nodes)
17 18 else:
18 19 heads = dag.heads()
19 20 dist = {}
20 21 visit = collections.deque(heads)
21 22 seen = set()
22 23 factor = 1
23 24 while visit:
24 25 curr = visit.popleft()
25 26 if curr in seen:
26 27 continue
27 28 d = dist.setdefault(curr, 1)
28 29 if d > factor:
29 30 factor *= 2
30 31 if d == factor:
31 32 if curr not in always: # need this check for the early exit below
32 33 sample.add(curr)
33 34 if quicksamplesize and (len(sample) >= quicksamplesize):
34 35 return
35 36 seen.add(curr)
36 37 for p in dag.parents(curr):
37 38 if not nodes or p in nodes:
38 39 dist.setdefault(p, d + 1)
39 40 visit.append(p)
40 41
41 42 def _setupsample(dag, nodes, size):
42 43 if len(nodes) <= size:
43 44 return set(nodes), None, 0
44 45 always = dag.headsetofconnecteds(nodes)
45 46 desiredlen = size - len(always)
46 47 if desiredlen <= 0:
47 48 # This could be bad if there are very many heads, all unknown to the
48 49 # server. We're counting on long request support here.
49 50 return always, None, desiredlen
50 51 return always, set(), desiredlen
51 52
52 53 def _takequicksample(dag, nodes, size, initial):
53 54 always, sample, desiredlen = _setupsample(dag, nodes, size)
54 55 if sample is None:
55 56 return always
56 57 if initial:
57 58 fromset = None
58 59 else:
59 60 fromset = nodes
60 61 _updatesample(dag, fromset, sample, always, quicksamplesize=desiredlen)
61 62 sample.update(always)
62 63 return sample
63 64
64 65 def _takefullsample(dag, nodes, size):
65 66 always, sample, desiredlen = _setupsample(dag, nodes, size)
66 67 if sample is None:
67 68 return always
68 69 # update from heads
69 70 _updatesample(dag, nodes, sample, always)
70 71 # update from roots
71 72 _updatesample(dag.inverse(), nodes, sample, always)
72 73 assert sample
73 74 if len(sample) > desiredlen:
74 75 sample = set(random.sample(sample, desiredlen))
75 76 elif len(sample) < desiredlen:
76 77 more = desiredlen - len(sample)
77 78 sample.update(random.sample(list(nodes - sample - always), more))
78 79 sample.update(always)
79 80 return sample
80 81
81 82 def findcommonheads(ui, local, remote,
82 83 initialsamplesize=100,
83 84 fullsamplesize=200,
84 85 abortwhenunrelated=True):
85 86 '''Return a tuple (common, anyincoming, remoteheads) used to identify
86 87 missing nodes from or in remote.
87 88
88 89 shortcutlocal determines whether we try use direct access to localrepo if
89 90 remote is actually local.
90 91 '''
91 92 roundtrips = 0
92 93 cl = local.changelog
93 94 dag = dagutil.revlogdag(cl)
94 95
95 96 # early exit if we know all the specified remote heads already
96 97 ui.debug("query 1; heads\n")
97 98 roundtrips += 1
98 99 ownheads = dag.heads()
99 100 sample = ownheads
100 101 if remote.local():
101 102 # stopgap until we have a proper localpeer that supports batch()
102 srvheadhashes = remote.heads()
103 srvheadhashes = phases.visibleheads(remote)
103 104 yesno = remote.known(dag.externalizeall(sample))
104 105 elif remote.capable('batch'):
105 106 batch = remote.batch()
106 107 srvheadhashesref = batch.heads()
107 108 yesnoref = batch.known(dag.externalizeall(sample))
108 109 batch.submit()
109 110 srvheadhashes = srvheadhashesref.value
110 111 yesno = yesnoref.value
111 112 else:
 112 113 # compatibility with pre-batch, but post-known remotes during 1.9 devel
113 114 srvheadhashes = remote.heads()
114 115 sample = []
115 116
116 117 if cl.tip() == nullid:
117 118 if srvheadhashes != [nullid]:
118 119 return [nullid], True, srvheadhashes
119 120 return [nullid], False, []
120 121
121 122 # start actual discovery (we note this before the next "if" for
122 123 # compatibility reasons)
123 124 ui.status(_("searching for changes\n"))
124 125
125 126 srvheads = dag.internalizeall(srvheadhashes, filterunknown=True)
126 127 if len(srvheads) == len(srvheadhashes):
127 128 ui.debug("all remote heads known locally\n")
128 129 return (srvheadhashes, False, srvheadhashes,)
129 130
130 131 if sample and util.all(yesno):
131 132 ui.note(_("all local heads known remotely\n"))
132 133 ownheadhashes = dag.externalizeall(ownheads)
133 134 return (ownheadhashes, True, srvheadhashes,)
134 135
135 136 # full blown discovery
136 137 undecided = dag.nodeset() # own nodes where I don't know if remote knows them
137 138 common = set() # own nodes I know we both know
138 139 missing = set() # own nodes I know remote lacks
139 140
140 141 # treat remote heads (and maybe own heads) as a first implicit sample response
141 142 common.update(dag.ancestorset(srvheads))
142 143 undecided.difference_update(common)
143 144
144 145 full = False
145 146 while undecided:
146 147
147 148 if sample:
148 149 commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
149 150 common.update(dag.ancestorset(commoninsample, common))
150 151
151 152 missinginsample = [n for i, n in enumerate(sample) if not yesno[i]]
152 153 missing.update(dag.descendantset(missinginsample, missing))
153 154
154 155 undecided.difference_update(missing)
155 156 undecided.difference_update(common)
156 157
157 158 if not undecided:
158 159 break
159 160
160 161 if full:
161 162 ui.note(_("sampling from both directions\n"))
162 163 sample = _takefullsample(dag, undecided, size=fullsamplesize)
163 164 elif common:
164 165 # use cheapish initial sample
165 166 ui.debug("taking initial sample\n")
166 167 sample = _takefullsample(dag, undecided, size=fullsamplesize)
167 168 else:
168 169 # use even cheaper initial sample
169 170 ui.debug("taking quick initial sample\n")
170 171 sample = _takequicksample(dag, undecided, size=initialsamplesize,
171 172 initial=True)
172 173
173 174 roundtrips += 1
174 175 ui.progress(_('searching'), roundtrips, unit=_('queries'))
175 176 ui.debug("query %i; still undecided: %i, sample size is: %i\n"
176 177 % (roundtrips, len(undecided), len(sample)))
177 178 # indices between sample and externalized version must match
178 179 sample = list(sample)
179 180 yesno = remote.known(dag.externalizeall(sample))
180 181 full = True
181 182
182 183 result = dag.headsetofconnecteds(common)
183 184 ui.progress(_('searching'), None)
184 185 ui.debug("%d total queries\n" % roundtrips)
185 186
186 187 if not result and srvheadhashes != [nullid]:
187 188 if abortwhenunrelated:
188 189 raise util.Abort(_("repository is unrelated"))
189 190 else:
190 191 ui.warn(_("warning: repository is unrelated\n"))
191 192 return (set([nullid]), True, srvheadhashes,)
192 193
193 194 anyincoming = (srvheadhashes != [nullid])
194 195 return dag.externalizeall(result), anyincoming, srvheadhashes
@@ -1,609 +1,610 b''
1 1 # wireproto.py - generic wire protocol support functions
2 2 #
3 3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import urllib, tempfile, os, sys
9 9 from i18n import _
10 10 from node import bin, hex
11 11 import changegroup as changegroupmod
12 12 import repo, error, encoding, util, store
13 import phases
13 14
14 15 # abstract batching support
15 16
16 17 class future(object):
17 18 '''placeholder for a value to be set later'''
18 19 def set(self, value):
19 20 if util.safehasattr(self, 'value'):
20 21 raise error.RepoError("future is already set")
21 22 self.value = value
22 23
23 24 class batcher(object):
24 25 '''base class for batches of commands submittable in a single request
25 26
26 27 All methods invoked on instances of this class are simply queued and return a
27 28 a future for the result. Once you call submit(), all the queued calls are
28 29 performed and the results set in their respective futures.
29 30 '''
30 31 def __init__(self):
31 32 self.calls = []
32 33 def __getattr__(self, name):
33 34 def call(*args, **opts):
34 35 resref = future()
35 36 self.calls.append((name, args, opts, resref,))
36 37 return resref
37 38 return call
38 39 def submit(self):
39 40 pass
40 41
41 42 class localbatch(batcher):
42 43 '''performs the queued calls directly'''
43 44 def __init__(self, local):
44 45 batcher.__init__(self)
45 46 self.local = local
46 47 def submit(self):
47 48 for name, args, opts, resref in self.calls:
48 49 resref.set(getattr(self.local, name)(*args, **opts))
49 50
50 51 class remotebatch(batcher):
51 52 '''batches the queued calls; uses as few roundtrips as possible'''
52 53 def __init__(self, remote):
53 54 '''remote must support _submitbatch(encbatch) and _submitone(op, encargs)'''
54 55 batcher.__init__(self)
55 56 self.remote = remote
56 57 def submit(self):
57 58 req, rsp = [], []
58 59 for name, args, opts, resref in self.calls:
59 60 mtd = getattr(self.remote, name)
60 61 batchablefn = getattr(mtd, 'batchable', None)
61 62 if batchablefn is not None:
62 63 batchable = batchablefn(mtd.im_self, *args, **opts)
63 64 encargsorres, encresref = batchable.next()
64 65 if encresref:
65 66 req.append((name, encargsorres,))
66 67 rsp.append((batchable, encresref, resref,))
67 68 else:
68 69 resref.set(encargsorres)
69 70 else:
70 71 if req:
71 72 self._submitreq(req, rsp)
72 73 req, rsp = [], []
73 74 resref.set(mtd(*args, **opts))
74 75 if req:
75 76 self._submitreq(req, rsp)
76 77 def _submitreq(self, req, rsp):
77 78 encresults = self.remote._submitbatch(req)
78 79 for encres, r in zip(encresults, rsp):
79 80 batchable, encresref, resref = r
80 81 encresref.set(encres)
81 82 resref.set(batchable.next())
82 83
83 84 def batchable(f):
84 85 '''annotation for batchable methods
85 86
86 87 Such methods must implement a coroutine as follows:
87 88
88 89 @batchable
89 90 def sample(self, one, two=None):
90 91 # Handle locally computable results first:
91 92 if not one:
92 93 yield "a local result", None
93 94 # Build list of encoded arguments suitable for your wire protocol:
94 95 encargs = [('one', encode(one),), ('two', encode(two),)]
95 96 # Create future for injection of encoded result:
96 97 encresref = future()
97 98 # Return encoded arguments and future:
98 99 yield encargs, encresref
99 100 # Assuming the future to be filled with the result from the batched request
100 101 # now. Decode it:
101 102 yield decode(encresref.value)
102 103
103 104 The decorator returns a function which wraps this coroutine as a plain method,
104 105 but adds the original method as an attribute called "batchable", which is
105 106 used by remotebatch to split the call into separate encoding and decoding
106 107 phases.
107 108 '''
108 109 def plain(*args, **opts):
109 110 batchable = f(*args, **opts)
110 111 encargsorres, encresref = batchable.next()
111 112 if not encresref:
112 113 return encargsorres # a local result in this case
113 114 self = args[0]
114 115 encresref.set(self._submitone(f.func_name, encargsorres))
115 116 return batchable.next()
116 117 setattr(plain, 'batchable', f)
117 118 return plain
118 119
119 120 # list of nodes encoding / decoding
120 121
121 122 def decodelist(l, sep=' '):
122 123 if l:
123 124 return map(bin, l.split(sep))
124 125 return []
125 126
126 127 def encodelist(l, sep=' '):
127 128 return sep.join(map(hex, l))
128 129
129 130 # batched call argument encoding
130 131
131 132 def escapearg(plain):
132 133 return (plain
133 134 .replace(':', '::')
134 135 .replace(',', ':,')
135 136 .replace(';', ':;')
136 137 .replace('=', ':='))
137 138
138 139 def unescapearg(escaped):
139 140 return (escaped
140 141 .replace(':=', '=')
141 142 .replace(':;', ';')
142 143 .replace(':,', ',')
143 144 .replace('::', ':'))
144 145
145 146 # client side
146 147
147 148 def todict(**args):
148 149 return args
149 150
150 151 class wirerepository(repo.repository):
151 152
152 153 def batch(self):
153 154 return remotebatch(self)
154 155 def _submitbatch(self, req):
155 156 cmds = []
156 157 for op, argsdict in req:
157 158 args = ','.join('%s=%s' % p for p in argsdict.iteritems())
158 159 cmds.append('%s %s' % (op, args))
159 160 rsp = self._call("batch", cmds=';'.join(cmds))
160 161 return rsp.split(';')
161 162 def _submitone(self, op, args):
162 163 return self._call(op, **args)
163 164
164 165 @batchable
165 166 def lookup(self, key):
166 167 self.requirecap('lookup', _('look up remote revision'))
167 168 f = future()
168 169 yield todict(key=encoding.fromlocal(key)), f
169 170 d = f.value
170 171 success, data = d[:-1].split(" ", 1)
171 172 if int(success):
172 173 yield bin(data)
173 174 self._abort(error.RepoError(data))
174 175
175 176 @batchable
176 177 def heads(self):
177 178 f = future()
178 179 yield {}, f
179 180 d = f.value
180 181 try:
181 182 yield decodelist(d[:-1])
182 183 except ValueError:
183 184 self._abort(error.ResponseError(_("unexpected response:"), d))
184 185
185 186 @batchable
186 187 def known(self, nodes):
187 188 f = future()
188 189 yield todict(nodes=encodelist(nodes)), f
189 190 d = f.value
190 191 try:
191 192 yield [bool(int(f)) for f in d]
192 193 except ValueError:
193 194 self._abort(error.ResponseError(_("unexpected response:"), d))
194 195
195 196 @batchable
196 197 def branchmap(self):
197 198 f = future()
198 199 yield {}, f
199 200 d = f.value
200 201 try:
201 202 branchmap = {}
202 203 for branchpart in d.splitlines():
203 204 branchname, branchheads = branchpart.split(' ', 1)
204 205 branchname = encoding.tolocal(urllib.unquote(branchname))
205 206 branchheads = decodelist(branchheads)
206 207 branchmap[branchname] = branchheads
207 208 yield branchmap
208 209 except TypeError:
209 210 self._abort(error.ResponseError(_("unexpected response:"), d))
210 211
211 212 def branches(self, nodes):
212 213 n = encodelist(nodes)
213 214 d = self._call("branches", nodes=n)
214 215 try:
215 216 br = [tuple(decodelist(b)) for b in d.splitlines()]
216 217 return br
217 218 except ValueError:
218 219 self._abort(error.ResponseError(_("unexpected response:"), d))
219 220
220 221 def between(self, pairs):
221 222 batch = 8 # avoid giant requests
222 223 r = []
223 224 for i in xrange(0, len(pairs), batch):
224 225 n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
225 226 d = self._call("between", pairs=n)
226 227 try:
227 228 r.extend(l and decodelist(l) or [] for l in d.splitlines())
228 229 except ValueError:
229 230 self._abort(error.ResponseError(_("unexpected response:"), d))
230 231 return r
231 232
232 233 @batchable
233 234 def pushkey(self, namespace, key, old, new):
234 235 if not self.capable('pushkey'):
235 236 yield False, None
236 237 f = future()
237 238 yield todict(namespace=encoding.fromlocal(namespace),
238 239 key=encoding.fromlocal(key),
239 240 old=encoding.fromlocal(old),
240 241 new=encoding.fromlocal(new)), f
241 242 d = f.value
242 243 d, output = d.split('\n', 1)
243 244 try:
244 245 d = bool(int(d))
245 246 except ValueError:
246 247 raise error.ResponseError(
247 248 _('push failed (unexpected response):'), d)
248 249 for l in output.splitlines(True):
249 250 self.ui.status(_('remote: '), l)
250 251 yield d
251 252
252 253 @batchable
253 254 def listkeys(self, namespace):
254 255 if not self.capable('pushkey'):
255 256 yield {}, None
256 257 f = future()
257 258 yield todict(namespace=encoding.fromlocal(namespace)), f
258 259 d = f.value
259 260 r = {}
260 261 for l in d.splitlines():
261 262 k, v = l.split('\t')
262 263 r[encoding.tolocal(k)] = encoding.tolocal(v)
263 264 yield r
264 265
265 266 def stream_out(self):
266 267 return self._callstream('stream_out')
267 268
268 269 def changegroup(self, nodes, kind):
269 270 n = encodelist(nodes)
270 271 f = self._callstream("changegroup", roots=n)
271 272 return changegroupmod.unbundle10(self._decompress(f), 'UN')
272 273
273 274 def changegroupsubset(self, bases, heads, kind):
274 275 self.requirecap('changegroupsubset', _('look up remote changes'))
275 276 bases = encodelist(bases)
276 277 heads = encodelist(heads)
277 278 f = self._callstream("changegroupsubset",
278 279 bases=bases, heads=heads)
279 280 return changegroupmod.unbundle10(self._decompress(f), 'UN')
280 281
281 282 def getbundle(self, source, heads=None, common=None):
282 283 self.requirecap('getbundle', _('look up remote changes'))
283 284 opts = {}
284 285 if heads is not None:
285 286 opts['heads'] = encodelist(heads)
286 287 if common is not None:
287 288 opts['common'] = encodelist(common)
288 289 f = self._callstream("getbundle", **opts)
289 290 return changegroupmod.unbundle10(self._decompress(f), 'UN')
290 291
291 292 def unbundle(self, cg, heads, source):
292 293 '''Send cg (a readable file-like object representing the
293 294 changegroup to push, typically a chunkbuffer object) to the
294 295 remote server as a bundle. Return an integer indicating the
295 296 result of the push (see localrepository.addchangegroup()).'''
296 297
297 298 if heads != ['force'] and self.capable('unbundlehash'):
298 299 heads = encodelist(['hashed',
299 300 util.sha1(''.join(sorted(heads))).digest()])
300 301 else:
301 302 heads = encodelist(heads)
302 303
303 304 ret, output = self._callpush("unbundle", cg, heads=heads)
304 305 if ret == "":
305 306 raise error.ResponseError(
306 307 _('push failed:'), output)
307 308 try:
308 309 ret = int(ret)
309 310 except ValueError:
310 311 raise error.ResponseError(
311 312 _('push failed (unexpected response):'), ret)
312 313
313 314 for l in output.splitlines(True):
314 315 self.ui.status(_('remote: '), l)
315 316 return ret
316 317
317 318 def debugwireargs(self, one, two, three=None, four=None, five=None):
318 319 # don't pass optional arguments left at their default value
319 320 opts = {}
320 321 if three is not None:
321 322 opts['three'] = three
322 323 if four is not None:
323 324 opts['four'] = four
324 325 return self._call('debugwireargs', one=one, two=two, **opts)
325 326
326 327 # server side
327 328
328 329 class streamres(object):
329 330 def __init__(self, gen):
330 331 self.gen = gen
331 332
332 333 class pushres(object):
333 334 def __init__(self, res):
334 335 self.res = res
335 336
336 337 class pusherr(object):
337 338 def __init__(self, res):
338 339 self.res = res
339 340
340 341 class ooberror(object):
341 342 def __init__(self, message):
342 343 self.message = message
343 344
344 345 def dispatch(repo, proto, command):
345 346 func, spec = commands[command]
346 347 args = proto.getargs(spec)
347 348 return func(repo, proto, *args)
348 349
349 350 def options(cmd, keys, others):
350 351 opts = {}
351 352 for k in keys:
352 353 if k in others:
353 354 opts[k] = others[k]
354 355 del others[k]
355 356 if others:
356 357 sys.stderr.write("abort: %s got unexpected arguments %s\n"
357 358 % (cmd, ",".join(others)))
358 359 return opts
359 360
360 361 def batch(repo, proto, cmds, others):
361 362 res = []
362 363 for pair in cmds.split(';'):
363 364 op, args = pair.split(' ', 1)
364 365 vals = {}
365 366 for a in args.split(','):
366 367 if a:
367 368 n, v = a.split('=')
368 369 vals[n] = unescapearg(v)
369 370 func, spec = commands[op]
370 371 if spec:
371 372 keys = spec.split()
372 373 data = {}
373 374 for k in keys:
374 375 if k == '*':
375 376 star = {}
376 377 for key in vals.keys():
377 378 if key not in keys:
378 379 star[key] = vals[key]
379 380 data['*'] = star
380 381 else:
381 382 data[k] = vals[k]
382 383 result = func(repo, proto, *[data[k] for k in keys])
383 384 else:
384 385 result = func(repo, proto)
385 386 if isinstance(result, ooberror):
386 387 return result
387 388 res.append(escapearg(result))
388 389 return ';'.join(res)
389 390
390 391 def between(repo, proto, pairs):
391 392 pairs = [decodelist(p, '-') for p in pairs.split(" ")]
392 393 r = []
393 394 for b in repo.between(pairs):
394 395 r.append(encodelist(b) + "\n")
395 396 return "".join(r)
396 397
397 398 def branchmap(repo, proto):
398 399 branchmap = repo.branchmap()
399 400 heads = []
400 401 for branch, nodes in branchmap.iteritems():
401 402 branchname = urllib.quote(encoding.fromlocal(branch))
402 403 branchnodes = encodelist(nodes)
403 404 heads.append('%s %s' % (branchname, branchnodes))
404 405 return '\n'.join(heads)
405 406
406 407 def branches(repo, proto, nodes):
407 408 nodes = decodelist(nodes)
408 409 r = []
409 410 for b in repo.branches(nodes):
410 411 r.append(encodelist(b) + "\n")
411 412 return "".join(r)
412 413
413 414 def capabilities(repo, proto):
414 415 caps = ('lookup changegroupsubset branchmap pushkey known getbundle '
415 416 'unbundlehash batch').split()
416 417 if _allowstream(repo.ui):
417 418 requiredformats = repo.requirements & repo.supportedformats
418 419 # if our local revlogs are just revlogv1, add 'stream' cap
419 420 if not requiredformats - set(('revlogv1',)):
420 421 caps.append('stream')
421 422 # otherwise, add 'streamreqs' detailing our local revlog format
422 423 else:
423 424 caps.append('streamreqs=%s' % ','.join(requiredformats))
424 425 caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
425 426 caps.append('httpheader=1024')
426 427 return ' '.join(caps)
427 428
428 429 def changegroup(repo, proto, roots):
429 430 nodes = decodelist(roots)
430 431 cg = repo.changegroup(nodes, 'serve')
431 432 return streamres(proto.groupchunks(cg))
432 433
433 434 def changegroupsubset(repo, proto, bases, heads):
434 435 bases = decodelist(bases)
435 436 heads = decodelist(heads)
436 437 cg = repo.changegroupsubset(bases, heads, 'serve')
437 438 return streamres(proto.groupchunks(cg))
438 439
439 440 def debugwireargs(repo, proto, one, two, others):
440 441 # only accept optional args from the known set
441 442 opts = options('debugwireargs', ['three', 'four'], others)
442 443 return repo.debugwireargs(one, two, **opts)
443 444
444 445 def getbundle(repo, proto, others):
445 446 opts = options('getbundle', ['heads', 'common'], others)
446 447 for k, v in opts.iteritems():
447 448 opts[k] = decodelist(v)
448 449 cg = repo.getbundle('serve', **opts)
449 450 return streamres(proto.groupchunks(cg))
450 451
451 452 def heads(repo, proto):
452 h = repo.heads()
453 h = phases.visibleheads(repo)
453 454 return encodelist(h) + "\n"
454 455
def hello(repo, proto):
    '''Describe the server in an RFC822-like format.

    Each reply line has the form "<field>: <value>". The only field
    currently emitted is "capabilities", whose value is a space-separated
    list of tokens.
    '''
    return "capabilities: %s\n" % (capabilities(repo, proto))
464 465
def listkeys(repo, proto, namespace):
    '''List all (key, value) pairs in a pushkey namespace.

    Keys and values are converted back to UTF-8 and tab-separated; pairs
    are joined with newlines.
    '''
    pairs = repo.listkeys(encoding.tolocal(namespace)).items()
    lines = ['%s\t%s' % (encoding.fromlocal(key), encoding.fromlocal(value))
             for key, value in pairs]
    return '\n'.join(lines)
470 471
471 472 def lookup(repo, proto, key):
472 473 try:
473 474 r = hex(repo.lookup(encoding.tolocal(key)))
474 475 success = 1
475 476 except Exception, inst:
476 477 r = str(inst)
477 478 success = 0
478 479 return "%s %s\n" % (success, r)
479 480
def known(repo, proto, nodes, others):
    '''Report which of the requested nodes the repository knows about.

    The reply contains one character per node, '1' for known and '0' for
    unknown, in request order.
    '''
    flags = repo.known(decodelist(nodes))
    return ''.join(['1' if present else '0' for present in flags])
482 483
def pushkey(repo, proto, namespace, key, old, new):
    """Set *key* to *new* in *namespace*, expecting its current value *old*.

    Replies "1" (accepted) or "0" (refused), newline-terminated.
    """
    # compatibility with pre-1.8 clients which were accidentally
    # sending raw binary nodes rather than utf-8-encoded hex
    if len(new) == 20 and new.encode('string-escape') != new:
        # looks like it could be a binary node
        try:
            new.decode('utf-8')
            new = encoding.tolocal(new) # but cleanly decodes as UTF-8
        except UnicodeDecodeError:
            pass # binary, leave unmodified
    else:
        new = encoding.tolocal(new) # normal path

    r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
                     encoding.tolocal(old), new)
    return '%s\n' % int(r)
499 500
500 501 def _allowstream(ui):
501 502 return ui.configbool('server', 'uncompressed', True, untrusted=True)
502 503
def stream(repo, proto):
    '''If the server supports streaming clone, it advertises the "stream"
    capability with a value representing the version and flags of the repo
    it is serving. Client checks to see if it understands the format.

    The format is simple: the server writes out a line with the amount
    of files, then the total amount of bytes to be transferred (separated
    by a space). Then, for each file, the server first writes the filename
    and filesize (separated by the null character), then the file contents.

    Replies '1\\n' when streaming is disabled by configuration and '2\\n'
    when the repository lock cannot be acquired.
    '''

    if not _allowstream(repo.ui):
        return '1\n'

    entries = []
    total_bytes = 0
    try:
        # get consistent snapshot of repo, lock during scan
        lock = repo.lock()
        try:
            repo.ui.debug('scanning\n')
            for name, ename, size in repo.store.walk():
                entries.append((name, size))
                total_bytes += size
        finally:
            lock.release()
    except error.LockError:
        return '2\n' # error: 2

    def streamer(repo, entries, total):
        '''stream out all metadata files in repository.'''
        yield '0\n' # success
        # NOTE: use the 'total' parameter rather than reaching back into
        # the enclosing scope's total_bytes (same value, less confusing)
        repo.ui.debug('%d files, %d bytes to transfer\n' %
                      (len(entries), total))
        yield '%d %d\n' % (len(entries), total)
        for name, size in entries:
            repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
            # partially encode name over the wire for backwards compat
            yield '%s\0%d\n' % (store.encodedir(name), size)
            for chunk in util.filechunkiter(repo.sopener(name), limit=size):
                yield chunk

    return streamres(streamer(repo, entries, total_bytes))
546 547
def unbundle(repo, proto, heads):
    """Receive a bundle pushed by a client and add it to the repository.

    *heads* is the client's encoded view of the server's heads: 'force'
    (skip the check), the literal head list, or ('hashed', sha1-of-heads).
    If the server's heads changed in the meantime the push is rejected
    with 'unsynced changes'.
    """
    their_heads = decodelist(heads)

    def check_heads():
        # True when the client's view of our heads is still current
        heads = repo.heads()
        heads_hash = util.sha1(''.join(sorted(heads))).digest()
        return (their_heads == ['force'] or their_heads == heads or
                their_heads == ['hashed', heads_hash])

    proto.redirect()

    # fail early if possible
    if not check_heads():
        return pusherr('unsynced changes')

    # write bundle data to temporary file because it can be big
    fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
    fp = os.fdopen(fd, 'wb+')
    r = 0
    try:
        proto.getfile(fp)
        lock = repo.lock()
        try:
            if not check_heads():
                # someone else committed/pushed/unbundled while we
                # were transferring data
                return pusherr('unsynced changes')

            # push can proceed
            fp.seek(0)
            gen = changegroupmod.readbundle(fp, None)

            try:
                r = repo.addchangegroup(gen, 'serve', proto._client())
            except util.Abort, inst:
                # report the abort to the client instead of crashing the server
                sys.stderr.write("abort: %s\n" % inst)
        finally:
            lock.release()
        return pushres(r)

    finally:
        # always remove the temporary bundle file
        fp.close()
        os.unlink(tempname)
590 591
# Wire protocol command table: maps a command name to (handler, argspec).
# In the argspec, each name is a positional string argument taken from the
# request; a trailing '*' collects any remaining optional arguments into a
# dict passed as the handler's last parameter.
commands = {
    'batch': (batch, 'cmds *'),
    'between': (between, 'pairs'),
    'branchmap': (branchmap, ''),
    'branches': (branches, 'nodes'),
    'capabilities': (capabilities, ''),
    'changegroup': (changegroup, 'roots'),
    'changegroupsubset': (changegroupsubset, 'bases heads'),
    'debugwireargs': (debugwireargs, 'one two *'),
    'getbundle': (getbundle, '*'),
    'heads': (heads, ''),
    'hello': (hello, ''),
    'known': (known, 'nodes *'),
    'listkeys': (listkeys, 'namespace'),
    'lookup': (lookup, 'key'),
    'pushkey': (pushkey, 'namespace key old new'),
    'stream_out': (stream, ''),
    'unbundle': (unbundle, 'heads'),
}
@@ -1,508 +1,570 b''
1 1 $ cat >> $HGRCPATH <<EOF
2 2 > [extensions]
3 3 > graphlog=
4 4 > EOF
5 5 $ alias hgph='hg log --template "{rev} {phase} {desc} - {node|short}\n"'
6 6
7 7 $ mkcommit() {
8 8 > echo "$1" > "$1"
9 9 > hg add "$1"
10 > hg ci -m "$1"
10 > message="$1"
11 > shift
12 > hg ci -m "$message" $*
11 13 > }
12 14
13 15 $ hg init alpha
14 16 $ cd alpha
15 17 $ mkcommit a-A
16 18 $ mkcommit a-B
17 19 $ mkcommit a-C
18 20 $ mkcommit a-D
19 21 $ hgph
20 22 3 1 a-D - b555f63b6063
21 23 2 1 a-C - 54acac6f23ab
22 24 1 1 a-B - 548a3d25dbf0
23 25 0 1 a-A - 054250a37db4
24 26
25 27 $ hg init ../beta
26 28 $ hg push -r 1 ../beta
27 29 pushing to ../beta
28 30 searching for changes
29 31 adding changesets
30 32 adding manifests
31 33 adding file changes
32 34 added 2 changesets with 2 changes to 2 files
33 35 $ hgph
34 36 3 1 a-D - b555f63b6063
35 37 2 1 a-C - 54acac6f23ab
36 38 1 0 a-B - 548a3d25dbf0
37 39 0 0 a-A - 054250a37db4
38 40
39 41 $ cd ../beta
40 42 $ hgph
41 43 1 0 a-B - 548a3d25dbf0
42 44 0 0 a-A - 054250a37db4
43 45 $ hg up -q
44 46 $ mkcommit b-A
45 47 $ hgph
46 48 2 1 b-A - f54f1bb90ff3
47 49 1 0 a-B - 548a3d25dbf0
48 50 0 0 a-A - 054250a37db4
49 51 $ hg pull ../alpha
50 52 pulling from ../alpha
51 53 searching for changes
52 54 adding changesets
53 55 adding manifests
54 56 adding file changes
55 57 added 2 changesets with 2 changes to 2 files (+1 heads)
56 58 (run 'hg heads' to see heads, 'hg merge' to merge)
57 59 $ hgph
58 60 4 0 a-D - b555f63b6063
59 61 3 0 a-C - 54acac6f23ab
60 62 2 1 b-A - f54f1bb90ff3
61 63 1 0 a-B - 548a3d25dbf0
62 64 0 0 a-A - 054250a37db4
63 65
64 66 pull did not update ../alpha state.
65 67 push from alpha to beta should update phase even if nothing is transferred
66 68
67 69 $ cd ../alpha
68 70 $ hgph # not updated by remote pull
69 71 3 1 a-D - b555f63b6063
70 72 2 1 a-C - 54acac6f23ab
71 73 1 0 a-B - 548a3d25dbf0
72 74 0 0 a-A - 054250a37db4
73 75 $ hg push ../beta
74 76 pushing to ../beta
75 77 searching for changes
76 78 no changes found
77 79 $ hgph
78 80 3 0 a-D - b555f63b6063
79 81 2 0 a-C - 54acac6f23ab
80 82 1 0 a-B - 548a3d25dbf0
81 83 0 0 a-A - 054250a37db4
82 84
83 85 update must update phase of common changeset too
84 86
85 87 $ hg pull ../beta # getting b-A
86 88 pulling from ../beta
87 89 searching for changes
88 90 adding changesets
89 91 adding manifests
90 92 adding file changes
91 93 added 1 changesets with 1 changes to 1 files (+1 heads)
92 94 (run 'hg heads' to see heads, 'hg merge' to merge)
93 95
94 96 $ cd ../beta
95 97 $ hgph # not updated by remote pull
96 98 4 0 a-D - b555f63b6063
97 99 3 0 a-C - 54acac6f23ab
98 100 2 1 b-A - f54f1bb90ff3
99 101 1 0 a-B - 548a3d25dbf0
100 102 0 0 a-A - 054250a37db4
101 103 $ hg pull ../alpha
102 104 pulling from ../alpha
103 105 searching for changes
104 106 no changes found
105 107 $ hgph
106 108 4 0 a-D - b555f63b6063
107 109 3 0 a-C - 54acac6f23ab
108 110 2 0 b-A - f54f1bb90ff3
109 111 1 0 a-B - 548a3d25dbf0
110 112 0 0 a-A - 054250a37db4
111 113
112 114 Publish configuration option
113 115 ----------------------------
114 116
115 117 Pull
116 118 ````
117 119
118 120 changegroup are added without phase movement
119 121
120 122 $ hg bundle -a ../base.bundle
121 123 5 changesets found
122 124 $ cd ..
123 125 $ hg init mu
124 126 $ cd mu
125 127 $ cat > .hg/hgrc << EOF
126 128 > [phases]
127 129 > publish=0
128 130 > EOF
129 131 $ hg unbundle ../base.bundle
130 132 adding changesets
131 133 adding manifests
132 134 adding file changes
133 135 added 5 changesets with 5 changes to 5 files (+1 heads)
134 136 (run 'hg heads' to see heads, 'hg merge' to merge)
135 137 $ hgph
136 138 4 1 a-D - b555f63b6063
137 139 3 1 a-C - 54acac6f23ab
138 140 2 1 b-A - f54f1bb90ff3
139 141 1 1 a-B - 548a3d25dbf0
140 142 0 1 a-A - 054250a37db4
141 143 $ cd ..
142 144
143 145 Pulling from publish=False to publish=False does not move boundary.
144 146
145 147 $ hg init nu
146 148 $ cd nu
147 149 $ cat > .hg/hgrc << EOF
148 150 > [phases]
149 151 > publish=0
150 152 > EOF
151 153 $ hg pull ../mu -r 54acac6f23ab
152 154 pulling from ../mu
153 155 adding changesets
154 156 adding manifests
155 157 adding file changes
156 158 added 3 changesets with 3 changes to 3 files
157 159 (run 'hg update' to get a working copy)
158 160 $ hgph
159 161 2 1 a-C - 54acac6f23ab
160 162 1 1 a-B - 548a3d25dbf0
161 163 0 1 a-A - 054250a37db4
162 164
163 165 Even for common
164 166
165 167 $ hg pull ../mu -r f54f1bb90ff3
166 168 pulling from ../mu
167 169 searching for changes
168 170 adding changesets
169 171 adding manifests
170 172 adding file changes
171 173 added 1 changesets with 1 changes to 1 files (+1 heads)
172 174 (run 'hg heads' to see heads, 'hg merge' to merge)
173 175 $ hgph
174 176 3 1 b-A - f54f1bb90ff3
175 177 2 1 a-C - 54acac6f23ab
176 178 1 1 a-B - 548a3d25dbf0
177 179 0 1 a-A - 054250a37db4
178 180
179 181
180 182 Pulling from Publish=True to Publish=False move boundary in common set.
181 183 we are in nu
182 184
183 185 $ hg pull ../alpha -r b555f63b6063
184 186 pulling from ../alpha
185 187 searching for changes
186 188 adding changesets
187 189 adding manifests
188 190 adding file changes
189 191 added 1 changesets with 1 changes to 1 files
190 192 (run 'hg update' to get a working copy)
191 193 $ hgph
192 194 4 0 a-D - b555f63b6063
193 195 3 0 b-A - f54f1bb90ff3
194 196 2 0 a-C - 54acac6f23ab
195 197 1 0 a-B - 548a3d25dbf0
196 198 0 0 a-A - 054250a37db4
197 199
198 200 pulling from Publish=False to publish=False with some public
199 201
200 202 $ hg up -q f54f1bb90ff3
201 203 $ mkcommit n-A
202 204 $ mkcommit n-B
203 205 $ hgph
204 206 6 1 n-B - 145e75495359
205 207 5 1 n-A - d6bcb4f74035
206 208 4 0 a-D - b555f63b6063
207 209 3 0 b-A - f54f1bb90ff3
208 210 2 0 a-C - 54acac6f23ab
209 211 1 0 a-B - 548a3d25dbf0
210 212 0 0 a-A - 054250a37db4
211 213 $ cd ../mu
212 214 $ hg pull ../nu
213 215 pulling from ../nu
214 216 searching for changes
215 217 adding changesets
216 218 adding manifests
217 219 adding file changes
218 220 added 2 changesets with 2 changes to 2 files
219 221 (run 'hg update' to get a working copy)
220 222 $ hgph
221 223 6 1 n-B - 145e75495359
222 224 5 1 n-A - d6bcb4f74035
223 225 4 0 a-D - b555f63b6063
224 226 3 0 a-C - 54acac6f23ab
225 227 2 0 b-A - f54f1bb90ff3
226 228 1 0 a-B - 548a3d25dbf0
227 229 0 0 a-A - 054250a37db4
228 230 $ cd ..
229 231
230 232 pulling into publish=True
231 233
232 234 $ cd alpha
233 235 $ hgph
234 236 4 0 b-A - f54f1bb90ff3
235 237 3 0 a-D - b555f63b6063
236 238 2 0 a-C - 54acac6f23ab
237 239 1 0 a-B - 548a3d25dbf0
238 240 0 0 a-A - 054250a37db4
239 241 $ hg pull ../mu
240 242 pulling from ../mu
241 243 searching for changes
242 244 adding changesets
243 245 adding manifests
244 246 adding file changes
245 247 added 2 changesets with 2 changes to 2 files
246 248 (run 'hg update' to get a working copy)
247 249 $ hgph
248 250 6 1 n-B - 145e75495359
249 251 5 1 n-A - d6bcb4f74035
250 252 4 0 b-A - f54f1bb90ff3
251 253 3 0 a-D - b555f63b6063
252 254 2 0 a-C - 54acac6f23ab
253 255 1 0 a-B - 548a3d25dbf0
254 256 0 0 a-A - 054250a37db4
255 257 $ cd ..
256 258
257 259 pulling back into original repo
258 260
259 261 $ cd nu
260 262 $ hg pull ../alpha
261 263 pulling from ../alpha
262 264 searching for changes
263 265 no changes found
264 266 $ hgph
265 267 6 0 n-B - 145e75495359
266 268 5 0 n-A - d6bcb4f74035
267 269 4 0 a-D - b555f63b6063
268 270 3 0 b-A - f54f1bb90ff3
269 271 2 0 a-C - 54acac6f23ab
270 272 1 0 a-B - 548a3d25dbf0
271 273 0 0 a-A - 054250a37db4
272 274
273 275 Push
274 276 ````
275 277
276 278 (inserted)
277 279
278 280 Test that phases are pushed even when there is nothing to push
279 281 (this might be tested later but it is very convenient to not alter the tests too much)
280 282
281 283 Push back to alpha
282 284
283 285 $ hg push ../alpha # from nu
284 286 pushing to ../alpha
285 287 searching for changes
286 288 no changes found
287 289 $ cd ..
288 290 $ cd alpha
289 291 $ hgph
290 292 6 0 n-B - 145e75495359
291 293 5 0 n-A - d6bcb4f74035
292 294 4 0 b-A - f54f1bb90ff3
293 295 3 0 a-D - b555f63b6063
294 296 2 0 a-C - 54acac6f23ab
295 297 1 0 a-B - 548a3d25dbf0
296 298 0 0 a-A - 054250a37db4
297 299
298 300 (end insertion)
299 301
300 302
301 303 initial setup
302 304
303 305 $ hg glog # of alpha
304 306 o changeset: 6:145e75495359
305 307 | tag: tip
306 308 | user: test
307 309 | date: Thu Jan 01 00:00:00 1970 +0000
308 310 | summary: n-B
309 311 |
310 312 o changeset: 5:d6bcb4f74035
311 313 | user: test
312 314 | date: Thu Jan 01 00:00:00 1970 +0000
313 315 | summary: n-A
314 316 |
315 317 o changeset: 4:f54f1bb90ff3
316 318 | parent: 1:548a3d25dbf0
317 319 | user: test
318 320 | date: Thu Jan 01 00:00:00 1970 +0000
319 321 | summary: b-A
320 322 |
321 323 | @ changeset: 3:b555f63b6063
322 324 | | user: test
323 325 | | date: Thu Jan 01 00:00:00 1970 +0000
324 326 | | summary: a-D
325 327 | |
326 328 | o changeset: 2:54acac6f23ab
327 329 |/ user: test
328 330 | date: Thu Jan 01 00:00:00 1970 +0000
329 331 | summary: a-C
330 332 |
331 333 o changeset: 1:548a3d25dbf0
332 334 | user: test
333 335 | date: Thu Jan 01 00:00:00 1970 +0000
334 336 | summary: a-B
335 337 |
336 338 o changeset: 0:054250a37db4
337 339 user: test
338 340 date: Thu Jan 01 00:00:00 1970 +0000
339 341 summary: a-A
340 342
341 343 $ mkcommit a-E
342 344 $ mkcommit a-F
343 345 $ mkcommit a-G
344 346 $ hg up d6bcb4f74035 -q
345 347 $ mkcommit a-H
346 348 created new head
347 349 $ hgph
348 350 10 1 a-H - 967b449fbc94
349 351 9 1 a-G - 3e27b6f1eee1
350 352 8 1 a-F - b740e3e5c05d
351 353 7 1 a-E - e9f537e46dea
352 354 6 0 n-B - 145e75495359
353 355 5 0 n-A - d6bcb4f74035
354 356 4 0 b-A - f54f1bb90ff3
355 357 3 0 a-D - b555f63b6063
356 358 2 0 a-C - 54acac6f23ab
357 359 1 0 a-B - 548a3d25dbf0
358 360 0 0 a-A - 054250a37db4
359 361
360 362 Pushing to Publish=False (unknown changeset)
361 363
362 364 $ hg push ../mu -r b740e3e5c05d # a-F
363 365 pushing to ../mu
364 366 searching for changes
365 367 adding changesets
366 368 adding manifests
367 369 adding file changes
368 370 added 2 changesets with 2 changes to 2 files
369 371 $ hgph
370 372 10 1 a-H - 967b449fbc94
371 373 9 1 a-G - 3e27b6f1eee1
372 374 8 1 a-F - b740e3e5c05d
373 375 7 1 a-E - e9f537e46dea
374 376 6 0 n-B - 145e75495359
375 377 5 0 n-A - d6bcb4f74035
376 378 4 0 b-A - f54f1bb90ff3
377 379 3 0 a-D - b555f63b6063
378 380 2 0 a-C - 54acac6f23ab
379 381 1 0 a-B - 548a3d25dbf0
380 382 0 0 a-A - 054250a37db4
381 383
382 384 $ cd ../mu
383 385 $ hgph # d6bcb4f74035 and 145e75495359 changed because common is too smart
384 386 8 1 a-F - b740e3e5c05d
385 387 7 1 a-E - e9f537e46dea
386 388 6 0 n-B - 145e75495359
387 389 5 0 n-A - d6bcb4f74035
388 390 4 0 a-D - b555f63b6063
389 391 3 0 a-C - 54acac6f23ab
390 392 2 0 b-A - f54f1bb90ff3
391 393 1 0 a-B - 548a3d25dbf0
392 394 0 0 a-A - 054250a37db4
393 395
394 396 Pushing to Publish=True (unknown changeset)
395 397
396 398 $ hg push ../beta -r b740e3e5c05d
397 399 pushing to ../beta
398 400 searching for changes
399 401 adding changesets
400 402 adding manifests
401 403 adding file changes
402 404 added 2 changesets with 2 changes to 2 files
403 405 $ hgph # again d6bcb4f74035 and 145e75495359 changed because common is too smart
404 406 8 0 a-F - b740e3e5c05d
405 407 7 0 a-E - e9f537e46dea
406 408 6 0 n-B - 145e75495359
407 409 5 0 n-A - d6bcb4f74035
408 410 4 0 a-D - b555f63b6063
409 411 3 0 a-C - 54acac6f23ab
410 412 2 0 b-A - f54f1bb90ff3
411 413 1 0 a-B - 548a3d25dbf0
412 414 0 0 a-A - 054250a37db4
413 415
414 416 Pushing to Publish=True (common changeset)
415 417
416 418 $ cd ../beta
417 419 $ hg push ../alpha
418 420 pushing to ../alpha
419 421 searching for changes
420 422 no changes found
421 423 $ hgph
422 424 6 0 a-F - b740e3e5c05d
423 425 5 0 a-E - e9f537e46dea
424 426 4 0 a-D - b555f63b6063
425 427 3 0 a-C - 54acac6f23ab
426 428 2 0 b-A - f54f1bb90ff3
427 429 1 0 a-B - 548a3d25dbf0
428 430 0 0 a-A - 054250a37db4
429 431 $ cd ../alpha
430 432 $ hgph # e9f537e46dea and b740e3e5c05d should have been sync to 0
431 433 10 1 a-H - 967b449fbc94
432 434 9 1 a-G - 3e27b6f1eee1
433 435 8 0 a-F - b740e3e5c05d
434 436 7 0 a-E - e9f537e46dea
435 437 6 0 n-B - 145e75495359
436 438 5 0 n-A - d6bcb4f74035
437 439 4 0 b-A - f54f1bb90ff3
438 440 3 0 a-D - b555f63b6063
439 441 2 0 a-C - 54acac6f23ab
440 442 1 0 a-B - 548a3d25dbf0
441 443 0 0 a-A - 054250a37db4
442 444
443 445 Pushing to Publish=False (common changeset that change phase + unknown one)
444 446
445 447 $ hg push ../mu -r 967b449fbc94 -f
446 448 pushing to ../mu
447 449 searching for changes
448 450 adding changesets
449 451 adding manifests
450 452 adding file changes
451 453 added 1 changesets with 1 changes to 1 files (+1 heads)
452 454 $ hgph
453 455 10 1 a-H - 967b449fbc94
454 456 9 1 a-G - 3e27b6f1eee1
455 457 8 0 a-F - b740e3e5c05d
456 458 7 0 a-E - e9f537e46dea
457 459 6 0 n-B - 145e75495359
458 460 5 0 n-A - d6bcb4f74035
459 461 4 0 b-A - f54f1bb90ff3
460 462 3 0 a-D - b555f63b6063
461 463 2 0 a-C - 54acac6f23ab
462 464 1 0 a-B - 548a3d25dbf0
463 465 0 0 a-A - 054250a37db4
464 466 $ cd ../mu
465 467 $ hgph # d6bcb4f74035 should have changed phase
466 468 > # again d6bcb4f74035 and 145e75495359 changed because common was too smart
467 469 9 1 a-H - 967b449fbc94
468 470 8 0 a-F - b740e3e5c05d
469 471 7 0 a-E - e9f537e46dea
470 472 6 0 n-B - 145e75495359
471 473 5 0 n-A - d6bcb4f74035
472 474 4 0 a-D - b555f63b6063
473 475 3 0 a-C - 54acac6f23ab
474 476 2 0 b-A - f54f1bb90ff3
475 477 1 0 a-B - 548a3d25dbf0
476 478 0 0 a-A - 054250a37db4
477 479
478 480
479 481 Pushing to Publish=True (common changeset from publish=False)
480 482
483 (in mu)
481 484 $ hg push ../alpha
482 485 pushing to ../alpha
483 486 searching for changes
484 487 no changes found
485 488 $ hgph
486 489 9 0 a-H - 967b449fbc94
487 490 8 0 a-F - b740e3e5c05d
488 491 7 0 a-E - e9f537e46dea
489 492 6 0 n-B - 145e75495359
490 493 5 0 n-A - d6bcb4f74035
491 494 4 0 a-D - b555f63b6063
492 495 3 0 a-C - 54acac6f23ab
493 496 2 0 b-A - f54f1bb90ff3
494 497 1 0 a-B - 548a3d25dbf0
495 498 0 0 a-A - 054250a37db4
496 499 $ hgph -R ../alpha # a-H should have been synced to 0
497 500 10 0 a-H - 967b449fbc94
498 501 9 1 a-G - 3e27b6f1eee1
499 502 8 0 a-F - b740e3e5c05d
500 503 7 0 a-E - e9f537e46dea
501 504 6 0 n-B - 145e75495359
502 505 5 0 n-A - d6bcb4f74035
503 506 4 0 b-A - f54f1bb90ff3
504 507 3 0 a-D - b555f63b6063
505 508 2 0 a-C - 54acac6f23ab
506 509 1 0 a-B - 548a3d25dbf0
507 510 0 0 a-A - 054250a37db4
508 511
512
513 Discovery of a locally secret changeset on a remote repository:
514
515 - should make it non-secret
516
517 $ cd ../alpha
518 $ mkcommit A-secret --config phases.new-commit=2
519 $ hgph
520 11 2 A-secret - 435b5d83910c
521 10 0 a-H - 967b449fbc94
522 9 1 a-G - 3e27b6f1eee1
523 8 0 a-F - b740e3e5c05d
524 7 0 a-E - e9f537e46dea
525 6 0 n-B - 145e75495359
526 5 0 n-A - d6bcb4f74035
527 4 0 b-A - f54f1bb90ff3
528 3 0 a-D - b555f63b6063
529 2 0 a-C - 54acac6f23ab
530 1 0 a-B - 548a3d25dbf0
531 0 0 a-A - 054250a37db4
532 $ hg bundle --base 'parents(.)' -r . ../secret-bundle.hg
533 1 changesets found
534 $ hg -R ../mu unbundle ../secret-bundle.hg
535 adding changesets
536 adding manifests
537 adding file changes
538 added 1 changesets with 1 changes to 1 files
539 (run 'hg update' to get a working copy)
540 $ hgph -R ../mu
541 10 1 A-secret - 435b5d83910c
542 9 0 a-H - 967b449fbc94
543 8 0 a-F - b740e3e5c05d
544 7 0 a-E - e9f537e46dea
545 6 0 n-B - 145e75495359
546 5 0 n-A - d6bcb4f74035
547 4 0 a-D - b555f63b6063
548 3 0 a-C - 54acac6f23ab
549 2 0 b-A - f54f1bb90ff3
550 1 0 a-B - 548a3d25dbf0
551 0 0 a-A - 054250a37db4
552 $ hg pull ../mu
553 pulling from ../mu
554 searching for changes
555 no changes found
556 $ hgph
557 11 1 A-secret - 435b5d83910c
558 10 0 a-H - 967b449fbc94
559 9 1 a-G - 3e27b6f1eee1
560 8 0 a-F - b740e3e5c05d
561 7 0 a-E - e9f537e46dea
562 6 0 n-B - 145e75495359
563 5 0 n-A - d6bcb4f74035
564 4 0 b-A - f54f1bb90ff3
565 3 0 a-D - b555f63b6063
566 2 0 a-C - 54acac6f23ab
567 1 0 a-B - 548a3d25dbf0
568 0 0 a-A - 054250a37db4
569
570
@@ -1,90 +1,136 b''
1 1 $ alias hglog='hg log --template "{rev} {phase} {desc}\n"'
2 2 $ mkcommit() {
3 3 > echo "$1" > "$1"
4 4 > hg add "$1"
5 5 > message="$1"
6 6 > shift
7 7 > hg ci -m "$message" $*
8 8 > }
9 9
10 10 $ hg init initialrepo
11 11 $ cd initialrepo
12 12 $ mkcommit A
13 13
14 14 New commit are draft by default
15 15
16 16 $ hglog
17 17 0 1 A
18 18
19 19 Following commit are draft too
20 20
21 21 $ mkcommit B
22 22
23 23 $ hglog
24 24 1 1 B
25 25 0 1 A
26 26
27 27 Draft commit are properly created over public one:
28 28
29 29 $ hg pull -q . # XXX use the dedicated phase command once available
30 30 $ hglog
31 31 1 0 B
32 32 0 0 A
33 33
34 34 $ mkcommit C
35 35 $ mkcommit D
36 36
37 37 $ hglog
38 38 3 1 D
39 39 2 1 C
40 40 1 0 B
41 41 0 0 A
42 42
43 43 Test creating changeset as secret
44 44
45 45 $ mkcommit E --config phases.new-commit=2
46 46 $ hglog
47 47 4 2 E
48 48 3 1 D
49 49 2 1 C
50 50 1 0 B
51 51 0 0 A
52 52
53 53 Test the secret property is inherited
54 54
55 55 $ mkcommit H
56 56 $ hglog
57 57 5 2 H
58 58 4 2 E
59 59 3 1 D
60 60 2 1 C
61 61 1 0 B
62 62 0 0 A
63 63
64 64 Even on merge
65 65
66 66 $ hg up -q 1
67 67 $ mkcommit "B'"
68 68 created new head
69 69 $ hglog
70 70 6 1 B'
71 71 5 2 H
72 72 4 2 E
73 73 3 1 D
74 74 2 1 C
75 75 1 0 B
76 76 0 0 A
77 77 $ hg merge 4 # E
78 78 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
79 79 (branch merge, don't forget to commit)
80 80 $ hg ci -m "merge B' and E"
81 81 $ hglog
82 82 7 2 merge B' and E
83 83 6 1 B'
84 84 5 2 H
85 85 4 2 E
86 86 3 1 D
87 87 2 1 C
88 88 1 0 B
89 89 0 0 A
90 90
91 Test that secret changesets are not pushed
92
93 $ hg init ../push-dest
94 $ hg push ../push-dest -f # force because we push multiple heads
95 pushing to ../push-dest
96 searching for changes
97 adding changesets
98 adding manifests
99 adding file changes
100 added 5 changesets with 5 changes to 5 files (+1 heads)
101 $ hglog
102 7 2 merge B' and E
103 6 0 B'
104 5 2 H
105 4 2 E
106 3 0 D
107 2 0 C
108 1 0 B
109 0 0 A
110 $ cd ../push-dest
111 $ hglog
112 4 0 B'
113 3 0 D
114 2 0 C
115 1 0 B
116 0 0 A
117 $ cd ..
118
119 Test that secret changesets are not pulled
120
121 $ hg init pull-dest
122 $ cd pull-dest
123 $ hg pull ../initialrepo
124 pulling from ../initialrepo
125 requesting all changes
126 adding changesets
127 adding manifests
128 adding file changes
129 added 5 changesets with 5 changes to 5 files (+1 heads)
130 (run 'hg heads' to see heads, 'hg merge' to merge)
131 $ hglog
132 4 0 B'
133 3 0 D
134 2 0 C
135 1 0 B
136 0 0 A
General Comments 0
You need to be logged in to leave comments. Login now