obsolete: sort obsmarkers during exchange...
Pierre-Yves David
r25118:e632a242 default
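What changed, in short: pushop.outobsmarkers is a set, so iterating it yields markers in arbitrary order; both exchange paths below now call sorted() on it before encoding, making the marker stream handed to obsolete.encodemarkers() / obsolete._pushkeyescape() deterministic. A minimal standalone sketch of that idea (the two-element tuples and the encode() helper are simplified stand-ins, not Mercurial's real obsmarker format or API):

# Sketch only: two-element tuples stand in for real obsmarkers and
# encode() for obsolete.encodemarkers(); both are hypothetical here.
def encode(markers):
    return '\n'.join('%s -> %s' % (prec, succ) for prec, succ in markers)

outobsmarkers = set([('b2', 'c3'), ('a1', 'b2')])  # a set: unordered

# Unsorted, encode(outobsmarkers) may emit either marker first, so two
# pushes of identical markers can put different bytes on the wire.
# Sorting first pins the payload down across runs and platforms.
payload = encode(sorted(outobsmarkers))
print payload
# a1 -> b2
# b2 -> c3

The same one-line fix appears twice in the diff below: in _pushb2obsmarkers() for the bundle2 path and in _pushobsolete() for the pushkey fallback.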
@@ -1,1332 +1,1334 b''
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import hex, nullid
9 from node import hex, nullid
10 import errno, urllib
10 import errno, urllib
11 import util, scmutil, changegroup, base85, error
11 import util, scmutil, changegroup, base85, error
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
12 import discovery, phases, obsolete, bookmarks as bookmod, bundle2, pushkey
13 import lock as lockmod
13 import lock as lockmod
14
14
15 def readbundle(ui, fh, fname, vfs=None):
15 def readbundle(ui, fh, fname, vfs=None):
16 header = changegroup.readexactly(fh, 4)
16 header = changegroup.readexactly(fh, 4)
17
17
18 alg = None
18 alg = None
19 if not fname:
19 if not fname:
20 fname = "stream"
20 fname = "stream"
21 if not header.startswith('HG') and header.startswith('\0'):
21 if not header.startswith('HG') and header.startswith('\0'):
22 fh = changegroup.headerlessfixup(fh, header)
22 fh = changegroup.headerlessfixup(fh, header)
23 header = "HG10"
23 header = "HG10"
24 alg = 'UN'
24 alg = 'UN'
25 elif vfs:
25 elif vfs:
26 fname = vfs.join(fname)
26 fname = vfs.join(fname)
27
27
28 magic, version = header[0:2], header[2:4]
28 magic, version = header[0:2], header[2:4]
29
29
30 if magic != 'HG':
30 if magic != 'HG':
31 raise util.Abort(_('%s: not a Mercurial bundle') % fname)
31 raise util.Abort(_('%s: not a Mercurial bundle') % fname)
32 if version == '10':
32 if version == '10':
33 if alg is None:
33 if alg is None:
34 alg = changegroup.readexactly(fh, 2)
34 alg = changegroup.readexactly(fh, 2)
35 return changegroup.cg1unpacker(fh, alg)
35 return changegroup.cg1unpacker(fh, alg)
36 elif version.startswith('2'):
36 elif version.startswith('2'):
37 return bundle2.getunbundler(ui, fh, header=magic + version)
37 return bundle2.getunbundler(ui, fh, header=magic + version)
38 else:
38 else:
39 raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
39 raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
40
40
41 def buildobsmarkerspart(bundler, markers):
41 def buildobsmarkerspart(bundler, markers):
42 """add an obsmarker part to the bundler with <markers>
42 """add an obsmarker part to the bundler with <markers>
43
43
44 No part is created if markers is empty.
44 No part is created if markers is empty.
45 Raises ValueError if the bundler doesn't support any known obsmarker format.
45 Raises ValueError if the bundler doesn't support any known obsmarker format.
46 """
46 """
47 if markers:
47 if markers:
48 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
48 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
49 version = obsolete.commonversion(remoteversions)
49 version = obsolete.commonversion(remoteversions)
50 if version is None:
50 if version is None:
51 raise ValueError('bundler do not support common obsmarker format')
51 raise ValueError('bundler do not support common obsmarker format')
52 stream = obsolete.encodemarkers(markers, True, version=version)
52 stream = obsolete.encodemarkers(markers, True, version=version)
53 return bundler.newpart('obsmarkers', data=stream)
53 return bundler.newpart('obsmarkers', data=stream)
54 return None
54 return None
55
55
56 def _canusebundle2(op):
56 def _canusebundle2(op):
57 """return true if a pull/push can use bundle2
57 """return true if a pull/push can use bundle2
58
58
59 Feel free to nuke this function when we drop the experimental option"""
59 Feel free to nuke this function when we drop the experimental option"""
60 return (op.repo.ui.configbool('experimental', 'bundle2-exp', False)
60 return (op.repo.ui.configbool('experimental', 'bundle2-exp', False)
61 and op.remote.capable('bundle2'))
61 and op.remote.capable('bundle2'))
62
62
63
63
64 class pushoperation(object):
64 class pushoperation(object):
65 """A object that represent a single push operation
65 """A object that represent a single push operation
66
66
67 It purpose is to carry push related state and very common operation.
67 It purpose is to carry push related state and very common operation.
68
68
69 A new should be created at the beginning of each push and discarded
69 A new should be created at the beginning of each push and discarded
70 afterward.
70 afterward.
71 """
71 """
72
72
73 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
73 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
74 bookmarks=()):
74 bookmarks=()):
75 # repo we push from
75 # repo we push from
76 self.repo = repo
76 self.repo = repo
77 self.ui = repo.ui
77 self.ui = repo.ui
78 # repo we push to
78 # repo we push to
79 self.remote = remote
79 self.remote = remote
80 # force option provided
80 # force option provided
81 self.force = force
81 self.force = force
82 # revs to be pushed (None is "all")
82 # revs to be pushed (None is "all")
83 self.revs = revs
83 self.revs = revs
84 # bookmark explicitly pushed
84 # bookmark explicitly pushed
85 self.bookmarks = bookmarks
85 self.bookmarks = bookmarks
86 # allow push of new branch
86 # allow push of new branch
87 self.newbranch = newbranch
87 self.newbranch = newbranch
88 # did a local lock get acquired?
88 # did a local lock get acquired?
89 self.locallocked = None
89 self.locallocked = None
90 # step already performed
90 # step already performed
91 # (used to check what steps have been already performed through bundle2)
91 # (used to check what steps have been already performed through bundle2)
92 self.stepsdone = set()
92 self.stepsdone = set()
93 # Integer version of the changegroup push result
93 # Integer version of the changegroup push result
94 # - None means nothing to push
94 # - None means nothing to push
95 # - 0 means HTTP error
95 # - 0 means HTTP error
96 # - 1 means we pushed and remote head count is unchanged *or*
96 # - 1 means we pushed and remote head count is unchanged *or*
97 # we have outgoing changesets but refused to push
97 # we have outgoing changesets but refused to push
98 # - other values as described by addchangegroup()
98 # - other values as described by addchangegroup()
99 self.cgresult = None
99 self.cgresult = None
100 # Boolean value for the bookmark push
100 # Boolean value for the bookmark push
101 self.bkresult = None
101 self.bkresult = None
102 # discover.outgoing object (contains common and outgoing data)
102 # discover.outgoing object (contains common and outgoing data)
103 self.outgoing = None
103 self.outgoing = None
104 # all remote heads before the push
104 # all remote heads before the push
105 self.remoteheads = None
105 self.remoteheads = None
106 # testable as a boolean indicating if any nodes are missing locally.
106 # testable as a boolean indicating if any nodes are missing locally.
107 self.incoming = None
107 self.incoming = None
108 # phases changes that must be pushed along side the changesets
108 # phases changes that must be pushed along side the changesets
109 self.outdatedphases = None
109 self.outdatedphases = None
110 # phases changes that must be pushed if changeset push fails
110 # phases changes that must be pushed if changeset push fails
111 self.fallbackoutdatedphases = None
111 self.fallbackoutdatedphases = None
112 # outgoing obsmarkers
112 # outgoing obsmarkers
113 self.outobsmarkers = set()
113 self.outobsmarkers = set()
114 # outgoing bookmarks
114 # outgoing bookmarks
115 self.outbookmarks = []
115 self.outbookmarks = []
116 # transaction manager
116 # transaction manager
117 self.trmanager = None
117 self.trmanager = None
118
118
119 @util.propertycache
119 @util.propertycache
120 def futureheads(self):
120 def futureheads(self):
121 """future remote heads if the changeset push succeeds"""
121 """future remote heads if the changeset push succeeds"""
122 return self.outgoing.missingheads
122 return self.outgoing.missingheads
123
123
124 @util.propertycache
124 @util.propertycache
125 def fallbackheads(self):
125 def fallbackheads(self):
126 """future remote heads if the changeset push fails"""
126 """future remote heads if the changeset push fails"""
127 if self.revs is None:
127 if self.revs is None:
128 # not target to push, all common are relevant
128 # not target to push, all common are relevant
129 return self.outgoing.commonheads
129 return self.outgoing.commonheads
130 unfi = self.repo.unfiltered()
130 unfi = self.repo.unfiltered()
131 # I want cheads = heads(::missingheads and ::commonheads)
131 # I want cheads = heads(::missingheads and ::commonheads)
132 # (missingheads is revs with secret changeset filtered out)
132 # (missingheads is revs with secret changeset filtered out)
133 #
133 #
134 # This can be expressed as:
134 # This can be expressed as:
135 # cheads = ( (missingheads and ::commonheads)
135 # cheads = ( (missingheads and ::commonheads)
136 # + (commonheads and ::missingheads))"
136 # + (commonheads and ::missingheads))"
137 # )
137 # )
138 #
138 #
139 # while trying to push we already computed the following:
139 # while trying to push we already computed the following:
140 # common = (::commonheads)
140 # common = (::commonheads)
141 # missing = ((commonheads::missingheads) - commonheads)
141 # missing = ((commonheads::missingheads) - commonheads)
142 #
142 #
143 # We can pick:
143 # We can pick:
144 # * missingheads part of common (::commonheads)
144 # * missingheads part of common (::commonheads)
145 common = set(self.outgoing.common)
145 common = set(self.outgoing.common)
146 nm = self.repo.changelog.nodemap
146 nm = self.repo.changelog.nodemap
147 cheads = [node for node in self.revs if nm[node] in common]
147 cheads = [node for node in self.revs if nm[node] in common]
148 # and
148 # and
149 # * commonheads parents on missing
149 # * commonheads parents on missing
150 revset = unfi.set('%ln and parents(roots(%ln))',
150 revset = unfi.set('%ln and parents(roots(%ln))',
151 self.outgoing.commonheads,
151 self.outgoing.commonheads,
152 self.outgoing.missing)
152 self.outgoing.missing)
153 cheads.extend(c.node() for c in revset)
153 cheads.extend(c.node() for c in revset)
154 return cheads
154 return cheads
155
155
156 @property
156 @property
157 def commonheads(self):
157 def commonheads(self):
158 """set of all common heads after changeset bundle push"""
158 """set of all common heads after changeset bundle push"""
159 if self.cgresult:
159 if self.cgresult:
160 return self.futureheads
160 return self.futureheads
161 else:
161 else:
162 return self.fallbackheads
162 return self.fallbackheads
163
163
164 # mapping of message used when pushing bookmark
164 # mapping of message used when pushing bookmark
165 bookmsgmap = {'update': (_("updating bookmark %s\n"),
165 bookmsgmap = {'update': (_("updating bookmark %s\n"),
166 _('updating bookmark %s failed!\n')),
166 _('updating bookmark %s failed!\n')),
167 'export': (_("exporting bookmark %s\n"),
167 'export': (_("exporting bookmark %s\n"),
168 _('exporting bookmark %s failed!\n')),
168 _('exporting bookmark %s failed!\n')),
169 'delete': (_("deleting remote bookmark %s\n"),
169 'delete': (_("deleting remote bookmark %s\n"),
170 _('deleting remote bookmark %s failed!\n')),
170 _('deleting remote bookmark %s failed!\n')),
171 }
171 }
172
172
173
173
174 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
174 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=()):
175 '''Push outgoing changesets (limited by revs) from a local
175 '''Push outgoing changesets (limited by revs) from a local
176 repository to remote. Return an integer:
176 repository to remote. Return an integer:
177 - None means nothing to push
177 - None means nothing to push
178 - 0 means HTTP error
178 - 0 means HTTP error
179 - 1 means we pushed and remote head count is unchanged *or*
179 - 1 means we pushed and remote head count is unchanged *or*
180 we have outgoing changesets but refused to push
180 we have outgoing changesets but refused to push
181 - other values as described by addchangegroup()
181 - other values as described by addchangegroup()
182 '''
182 '''
183 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
183 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks)
184 if pushop.remote.local():
184 if pushop.remote.local():
185 missing = (set(pushop.repo.requirements)
185 missing = (set(pushop.repo.requirements)
186 - pushop.remote.local().supported)
186 - pushop.remote.local().supported)
187 if missing:
187 if missing:
188 msg = _("required features are not"
188 msg = _("required features are not"
189 " supported in the destination:"
189 " supported in the destination:"
190 " %s") % (', '.join(sorted(missing)))
190 " %s") % (', '.join(sorted(missing)))
191 raise util.Abort(msg)
191 raise util.Abort(msg)
192
192
193 # there are two ways to push to remote repo:
193 # there are two ways to push to remote repo:
194 #
194 #
195 # addchangegroup assumes local user can lock remote
195 # addchangegroup assumes local user can lock remote
196 # repo (local filesystem, old ssh servers).
196 # repo (local filesystem, old ssh servers).
197 #
197 #
198 # unbundle assumes local user cannot lock remote repo (new ssh
198 # unbundle assumes local user cannot lock remote repo (new ssh
199 # servers, http servers).
199 # servers, http servers).
200
200
201 if not pushop.remote.canpush():
201 if not pushop.remote.canpush():
202 raise util.Abort(_("destination does not support push"))
202 raise util.Abort(_("destination does not support push"))
203 # get local lock as we might write phase data
203 # get local lock as we might write phase data
204 localwlock = locallock = None
204 localwlock = locallock = None
205 try:
205 try:
206 # bundle2 push may receive a reply bundle touching bookmarks or other
206 # bundle2 push may receive a reply bundle touching bookmarks or other
207 # things requiring the wlock. Take it now to ensure proper ordering.
207 # things requiring the wlock. Take it now to ensure proper ordering.
208 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
208 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
209 if _canusebundle2(pushop) and maypushback:
209 if _canusebundle2(pushop) and maypushback:
210 localwlock = pushop.repo.wlock()
210 localwlock = pushop.repo.wlock()
211 locallock = pushop.repo.lock()
211 locallock = pushop.repo.lock()
212 pushop.locallocked = True
212 pushop.locallocked = True
213 except IOError, err:
213 except IOError, err:
214 pushop.locallocked = False
214 pushop.locallocked = False
215 if err.errno != errno.EACCES:
215 if err.errno != errno.EACCES:
216 raise
216 raise
217 # source repo cannot be locked.
217 # source repo cannot be locked.
218 # We do not abort the push, but just disable the local phase
218 # We do not abort the push, but just disable the local phase
219 # synchronisation.
219 # synchronisation.
220 msg = 'cannot lock source repository: %s\n' % err
220 msg = 'cannot lock source repository: %s\n' % err
221 pushop.ui.debug(msg)
221 pushop.ui.debug(msg)
222 try:
222 try:
223 if pushop.locallocked:
223 if pushop.locallocked:
224 pushop.trmanager = transactionmanager(repo,
224 pushop.trmanager = transactionmanager(repo,
225 'push-response',
225 'push-response',
226 pushop.remote.url())
226 pushop.remote.url())
227 pushop.repo.checkpush(pushop)
227 pushop.repo.checkpush(pushop)
228 lock = None
228 lock = None
229 unbundle = pushop.remote.capable('unbundle')
229 unbundle = pushop.remote.capable('unbundle')
230 if not unbundle:
230 if not unbundle:
231 lock = pushop.remote.lock()
231 lock = pushop.remote.lock()
232 try:
232 try:
233 _pushdiscovery(pushop)
233 _pushdiscovery(pushop)
234 if _canusebundle2(pushop):
234 if _canusebundle2(pushop):
235 _pushbundle2(pushop)
235 _pushbundle2(pushop)
236 _pushchangeset(pushop)
236 _pushchangeset(pushop)
237 _pushsyncphase(pushop)
237 _pushsyncphase(pushop)
238 _pushobsolete(pushop)
238 _pushobsolete(pushop)
239 _pushbookmark(pushop)
239 _pushbookmark(pushop)
240 finally:
240 finally:
241 if lock is not None:
241 if lock is not None:
242 lock.release()
242 lock.release()
243 if pushop.trmanager:
243 if pushop.trmanager:
244 pushop.trmanager.close()
244 pushop.trmanager.close()
245 finally:
245 finally:
246 if pushop.trmanager:
246 if pushop.trmanager:
247 pushop.trmanager.release()
247 pushop.trmanager.release()
248 if locallock is not None:
248 if locallock is not None:
249 locallock.release()
249 locallock.release()
250 if localwlock is not None:
250 if localwlock is not None:
251 localwlock.release()
251 localwlock.release()
252
252
253 return pushop
253 return pushop
254
254
255 # list of steps to perform discovery before push
255 # list of steps to perform discovery before push
256 pushdiscoveryorder = []
256 pushdiscoveryorder = []
257
257
258 # Mapping between step name and function
258 # Mapping between step name and function
259 #
259 #
260 # This exists to help extensions wrap steps if necessary
260 # This exists to help extensions wrap steps if necessary
261 pushdiscoverymapping = {}
261 pushdiscoverymapping = {}
262
262
263 def pushdiscovery(stepname):
263 def pushdiscovery(stepname):
264 """decorator for function performing discovery before push
264 """decorator for function performing discovery before push
265
265
266 The function is added to the step -> function mapping and appended to the
266 The function is added to the step -> function mapping and appended to the
267 list of steps. Beware that decorated function will be added in order (this
267 list of steps. Beware that decorated function will be added in order (this
268 may matter).
268 may matter).
269
269
270 You can only use this decorator for a new step, if you want to wrap a step
270 You can only use this decorator for a new step, if you want to wrap a step
271 from an extension, change the pushdiscovery dictionary directly."""
271 from an extension, change the pushdiscovery dictionary directly."""
272 def dec(func):
272 def dec(func):
273 assert stepname not in pushdiscoverymapping
273 assert stepname not in pushdiscoverymapping
274 pushdiscoverymapping[stepname] = func
274 pushdiscoverymapping[stepname] = func
275 pushdiscoveryorder.append(stepname)
275 pushdiscoveryorder.append(stepname)
276 return func
276 return func
277 return dec
277 return dec
278
278
279 def _pushdiscovery(pushop):
279 def _pushdiscovery(pushop):
280 """Run all discovery steps"""
280 """Run all discovery steps"""
281 for stepname in pushdiscoveryorder:
281 for stepname in pushdiscoveryorder:
282 step = pushdiscoverymapping[stepname]
282 step = pushdiscoverymapping[stepname]
283 step(pushop)
283 step(pushop)
284
284
285 @pushdiscovery('changeset')
285 @pushdiscovery('changeset')
286 def _pushdiscoverychangeset(pushop):
286 def _pushdiscoverychangeset(pushop):
287 """discover the changeset that need to be pushed"""
287 """discover the changeset that need to be pushed"""
288 fci = discovery.findcommonincoming
288 fci = discovery.findcommonincoming
289 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
289 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
290 common, inc, remoteheads = commoninc
290 common, inc, remoteheads = commoninc
291 fco = discovery.findcommonoutgoing
291 fco = discovery.findcommonoutgoing
292 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
292 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
293 commoninc=commoninc, force=pushop.force)
293 commoninc=commoninc, force=pushop.force)
294 pushop.outgoing = outgoing
294 pushop.outgoing = outgoing
295 pushop.remoteheads = remoteheads
295 pushop.remoteheads = remoteheads
296 pushop.incoming = inc
296 pushop.incoming = inc
297
297
298 @pushdiscovery('phase')
298 @pushdiscovery('phase')
299 def _pushdiscoveryphase(pushop):
299 def _pushdiscoveryphase(pushop):
300 """discover the phase that needs to be pushed
300 """discover the phase that needs to be pushed
301
301
302 (computed for both success and failure case for changesets push)"""
302 (computed for both success and failure case for changesets push)"""
303 outgoing = pushop.outgoing
303 outgoing = pushop.outgoing
304 unfi = pushop.repo.unfiltered()
304 unfi = pushop.repo.unfiltered()
305 remotephases = pushop.remote.listkeys('phases')
305 remotephases = pushop.remote.listkeys('phases')
306 publishing = remotephases.get('publishing', False)
306 publishing = remotephases.get('publishing', False)
307 ana = phases.analyzeremotephases(pushop.repo,
307 ana = phases.analyzeremotephases(pushop.repo,
308 pushop.fallbackheads,
308 pushop.fallbackheads,
309 remotephases)
309 remotephases)
310 pheads, droots = ana
310 pheads, droots = ana
311 extracond = ''
311 extracond = ''
312 if not publishing:
312 if not publishing:
313 extracond = ' and public()'
313 extracond = ' and public()'
314 revset = 'heads((%%ln::%%ln) %s)' % extracond
314 revset = 'heads((%%ln::%%ln) %s)' % extracond
315 # Get the list of all revs draft on remote by public here.
315 # Get the list of all revs draft on remote by public here.
316 # XXX Beware that revset break if droots is not strictly
316 # XXX Beware that revset break if droots is not strictly
317 # XXX root we may want to ensure it is but it is costly
317 # XXX root we may want to ensure it is but it is costly
318 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
318 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
319 if not outgoing.missing:
319 if not outgoing.missing:
320 future = fallback
320 future = fallback
321 else:
321 else:
322 # adds changeset we are going to push as draft
322 # adds changeset we are going to push as draft
323 #
323 #
324 # should not be necessary for publishing server, but because of an
324 # should not be necessary for publishing server, but because of an
325 # issue fixed in xxxxx we have to do it anyway.
325 # issue fixed in xxxxx we have to do it anyway.
326 fdroots = list(unfi.set('roots(%ln + %ln::)',
326 fdroots = list(unfi.set('roots(%ln + %ln::)',
327 outgoing.missing, droots))
327 outgoing.missing, droots))
328 fdroots = [f.node() for f in fdroots]
328 fdroots = [f.node() for f in fdroots]
329 future = list(unfi.set(revset, fdroots, pushop.futureheads))
329 future = list(unfi.set(revset, fdroots, pushop.futureheads))
330 pushop.outdatedphases = future
330 pushop.outdatedphases = future
331 pushop.fallbackoutdatedphases = fallback
331 pushop.fallbackoutdatedphases = fallback
332
332
333 @pushdiscovery('obsmarker')
333 @pushdiscovery('obsmarker')
334 def _pushdiscoveryobsmarkers(pushop):
334 def _pushdiscoveryobsmarkers(pushop):
335 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
335 if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
336 and pushop.repo.obsstore
336 and pushop.repo.obsstore
337 and 'obsolete' in pushop.remote.listkeys('namespaces')):
337 and 'obsolete' in pushop.remote.listkeys('namespaces')):
338 repo = pushop.repo
338 repo = pushop.repo
339 # very naive computation, that can be quite expensive on big repo.
339 # very naive computation, that can be quite expensive on big repo.
340 # However: evolution is currently slow on them anyway.
340 # However: evolution is currently slow on them anyway.
341 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
341 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
342 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
342 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
343
343
344 @pushdiscovery('bookmarks')
344 @pushdiscovery('bookmarks')
345 def _pushdiscoverybookmarks(pushop):
345 def _pushdiscoverybookmarks(pushop):
346 ui = pushop.ui
346 ui = pushop.ui
347 repo = pushop.repo.unfiltered()
347 repo = pushop.repo.unfiltered()
348 remote = pushop.remote
348 remote = pushop.remote
349 ui.debug("checking for updated bookmarks\n")
349 ui.debug("checking for updated bookmarks\n")
350 ancestors = ()
350 ancestors = ()
351 if pushop.revs:
351 if pushop.revs:
352 revnums = map(repo.changelog.rev, pushop.revs)
352 revnums = map(repo.changelog.rev, pushop.revs)
353 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
353 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
354 remotebookmark = remote.listkeys('bookmarks')
354 remotebookmark = remote.listkeys('bookmarks')
355
355
356 explicit = set(pushop.bookmarks)
356 explicit = set(pushop.bookmarks)
357
357
358 comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
358 comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
359 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
359 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
360 for b, scid, dcid in advsrc:
360 for b, scid, dcid in advsrc:
361 if b in explicit:
361 if b in explicit:
362 explicit.remove(b)
362 explicit.remove(b)
363 if not ancestors or repo[scid].rev() in ancestors:
363 if not ancestors or repo[scid].rev() in ancestors:
364 pushop.outbookmarks.append((b, dcid, scid))
364 pushop.outbookmarks.append((b, dcid, scid))
365 # search added bookmark
365 # search added bookmark
366 for b, scid, dcid in addsrc:
366 for b, scid, dcid in addsrc:
367 if b in explicit:
367 if b in explicit:
368 explicit.remove(b)
368 explicit.remove(b)
369 pushop.outbookmarks.append((b, '', scid))
369 pushop.outbookmarks.append((b, '', scid))
370 # search for overwritten bookmark
370 # search for overwritten bookmark
371 for b, scid, dcid in advdst + diverge + differ:
371 for b, scid, dcid in advdst + diverge + differ:
372 if b in explicit:
372 if b in explicit:
373 explicit.remove(b)
373 explicit.remove(b)
374 pushop.outbookmarks.append((b, dcid, scid))
374 pushop.outbookmarks.append((b, dcid, scid))
375 # search for bookmark to delete
375 # search for bookmark to delete
376 for b, scid, dcid in adddst:
376 for b, scid, dcid in adddst:
377 if b in explicit:
377 if b in explicit:
378 explicit.remove(b)
378 explicit.remove(b)
379 # treat as "deleted locally"
379 # treat as "deleted locally"
380 pushop.outbookmarks.append((b, dcid, ''))
380 pushop.outbookmarks.append((b, dcid, ''))
381 # identical bookmarks shouldn't get reported
381 # identical bookmarks shouldn't get reported
382 for b, scid, dcid in same:
382 for b, scid, dcid in same:
383 if b in explicit:
383 if b in explicit:
384 explicit.remove(b)
384 explicit.remove(b)
385
385
386 if explicit:
386 if explicit:
387 explicit = sorted(explicit)
387 explicit = sorted(explicit)
388 # we should probably list all of them
388 # we should probably list all of them
389 ui.warn(_('bookmark %s does not exist on the local '
389 ui.warn(_('bookmark %s does not exist on the local '
390 'or remote repository!\n') % explicit[0])
390 'or remote repository!\n') % explicit[0])
391 pushop.bkresult = 2
391 pushop.bkresult = 2
392
392
393 pushop.outbookmarks.sort()
393 pushop.outbookmarks.sort()
394
394
395 def _pushcheckoutgoing(pushop):
395 def _pushcheckoutgoing(pushop):
396 outgoing = pushop.outgoing
396 outgoing = pushop.outgoing
397 unfi = pushop.repo.unfiltered()
397 unfi = pushop.repo.unfiltered()
398 if not outgoing.missing:
398 if not outgoing.missing:
399 # nothing to push
399 # nothing to push
400 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
400 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
401 return False
401 return False
402 # something to push
402 # something to push
403 if not pushop.force:
403 if not pushop.force:
404 # if repo.obsstore == False --> no obsolete
404 # if repo.obsstore == False --> no obsolete
405 # then, save the iteration
405 # then, save the iteration
406 if unfi.obsstore:
406 if unfi.obsstore:
407 # this message are here for 80 char limit reason
407 # this message are here for 80 char limit reason
408 mso = _("push includes obsolete changeset: %s!")
408 mso = _("push includes obsolete changeset: %s!")
409 mst = {"unstable": _("push includes unstable changeset: %s!"),
409 mst = {"unstable": _("push includes unstable changeset: %s!"),
410 "bumped": _("push includes bumped changeset: %s!"),
410 "bumped": _("push includes bumped changeset: %s!"),
411 "divergent": _("push includes divergent changeset: %s!")}
411 "divergent": _("push includes divergent changeset: %s!")}
412 # If we are to push if there is at least one
412 # If we are to push if there is at least one
413 # obsolete or unstable changeset in missing, at
413 # obsolete or unstable changeset in missing, at
414 # least one of the missinghead will be obsolete or
414 # least one of the missinghead will be obsolete or
415 # unstable. So checking heads only is ok
415 # unstable. So checking heads only is ok
416 for node in outgoing.missingheads:
416 for node in outgoing.missingheads:
417 ctx = unfi[node]
417 ctx = unfi[node]
418 if ctx.obsolete():
418 if ctx.obsolete():
419 raise util.Abort(mso % ctx)
419 raise util.Abort(mso % ctx)
420 elif ctx.troubled():
420 elif ctx.troubled():
421 raise util.Abort(mst[ctx.troubles()[0]] % ctx)
421 raise util.Abort(mst[ctx.troubles()[0]] % ctx)
422 newbm = pushop.ui.configlist('bookmarks', 'pushing')
422 newbm = pushop.ui.configlist('bookmarks', 'pushing')
423 discovery.checkheads(unfi, pushop.remote, outgoing,
423 discovery.checkheads(unfi, pushop.remote, outgoing,
424 pushop.remoteheads,
424 pushop.remoteheads,
425 pushop.newbranch,
425 pushop.newbranch,
426 bool(pushop.incoming),
426 bool(pushop.incoming),
427 newbm)
427 newbm)
428 return True
428 return True
429
429
430 # List of names of steps to perform for an outgoing bundle2, order matters.
430 # List of names of steps to perform for an outgoing bundle2, order matters.
431 b2partsgenorder = []
431 b2partsgenorder = []
432
432
433 # Mapping between step name and function
433 # Mapping between step name and function
434 #
434 #
435 # This exists to help extensions wrap steps if necessary
435 # This exists to help extensions wrap steps if necessary
436 b2partsgenmapping = {}
436 b2partsgenmapping = {}
437
437
438 def b2partsgenerator(stepname, idx=None):
438 def b2partsgenerator(stepname, idx=None):
439 """decorator for function generating bundle2 part
439 """decorator for function generating bundle2 part
440
440
441 The function is added to the step -> function mapping and appended to the
441 The function is added to the step -> function mapping and appended to the
442 list of steps. Beware that decorated functions will be added in order
442 list of steps. Beware that decorated functions will be added in order
443 (this may matter).
443 (this may matter).
444
444
445 You can only use this decorator for new steps, if you want to wrap a step
445 You can only use this decorator for new steps, if you want to wrap a step
446 from an extension, attack the b2partsgenmapping dictionary directly."""
446 from an extension, attack the b2partsgenmapping dictionary directly."""
447 def dec(func):
447 def dec(func):
448 assert stepname not in b2partsgenmapping
448 assert stepname not in b2partsgenmapping
449 b2partsgenmapping[stepname] = func
449 b2partsgenmapping[stepname] = func
450 if idx is None:
450 if idx is None:
451 b2partsgenorder.append(stepname)
451 b2partsgenorder.append(stepname)
452 else:
452 else:
453 b2partsgenorder.insert(idx, stepname)
453 b2partsgenorder.insert(idx, stepname)
454 return func
454 return func
455 return dec
455 return dec
456
456
457 @b2partsgenerator('changeset')
457 @b2partsgenerator('changeset')
458 def _pushb2ctx(pushop, bundler):
458 def _pushb2ctx(pushop, bundler):
459 """handle changegroup push through bundle2
459 """handle changegroup push through bundle2
460
460
461 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
461 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
462 """
462 """
463 if 'changesets' in pushop.stepsdone:
463 if 'changesets' in pushop.stepsdone:
464 return
464 return
465 pushop.stepsdone.add('changesets')
465 pushop.stepsdone.add('changesets')
466 # Send known heads to the server for race detection.
466 # Send known heads to the server for race detection.
467 if not _pushcheckoutgoing(pushop):
467 if not _pushcheckoutgoing(pushop):
468 return
468 return
469 pushop.repo.prepushoutgoinghooks(pushop.repo,
469 pushop.repo.prepushoutgoinghooks(pushop.repo,
470 pushop.remote,
470 pushop.remote,
471 pushop.outgoing)
471 pushop.outgoing)
472 if not pushop.force:
472 if not pushop.force:
473 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
473 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
474 b2caps = bundle2.bundle2caps(pushop.remote)
474 b2caps = bundle2.bundle2caps(pushop.remote)
475 version = None
475 version = None
476 cgversions = b2caps.get('changegroup')
476 cgversions = b2caps.get('changegroup')
477 if not cgversions: # 3.1 and 3.2 ship with an empty value
477 if not cgversions: # 3.1 and 3.2 ship with an empty value
478 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
478 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
479 pushop.outgoing)
479 pushop.outgoing)
480 else:
480 else:
481 cgversions = [v for v in cgversions if v in changegroup.packermap]
481 cgversions = [v for v in cgversions if v in changegroup.packermap]
482 if not cgversions:
482 if not cgversions:
483 raise ValueError(_('no common changegroup version'))
483 raise ValueError(_('no common changegroup version'))
484 version = max(cgversions)
484 version = max(cgversions)
485 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
485 cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
486 pushop.outgoing,
486 pushop.outgoing,
487 version=version)
487 version=version)
488 cgpart = bundler.newpart('changegroup', data=cg)
488 cgpart = bundler.newpart('changegroup', data=cg)
489 if version is not None:
489 if version is not None:
490 cgpart.addparam('version', version)
490 cgpart.addparam('version', version)
491 def handlereply(op):
491 def handlereply(op):
492 """extract addchangegroup returns from server reply"""
492 """extract addchangegroup returns from server reply"""
493 cgreplies = op.records.getreplies(cgpart.id)
493 cgreplies = op.records.getreplies(cgpart.id)
494 assert len(cgreplies['changegroup']) == 1
494 assert len(cgreplies['changegroup']) == 1
495 pushop.cgresult = cgreplies['changegroup'][0]['return']
495 pushop.cgresult = cgreplies['changegroup'][0]['return']
496 return handlereply
496 return handlereply
497
497
498 @b2partsgenerator('phase')
498 @b2partsgenerator('phase')
499 def _pushb2phases(pushop, bundler):
499 def _pushb2phases(pushop, bundler):
500 """handle phase push through bundle2"""
500 """handle phase push through bundle2"""
501 if 'phases' in pushop.stepsdone:
501 if 'phases' in pushop.stepsdone:
502 return
502 return
503 b2caps = bundle2.bundle2caps(pushop.remote)
503 b2caps = bundle2.bundle2caps(pushop.remote)
504 if not 'pushkey' in b2caps:
504 if not 'pushkey' in b2caps:
505 return
505 return
506 pushop.stepsdone.add('phases')
506 pushop.stepsdone.add('phases')
507 part2node = []
507 part2node = []
508 enc = pushkey.encode
508 enc = pushkey.encode
509 for newremotehead in pushop.outdatedphases:
509 for newremotehead in pushop.outdatedphases:
510 part = bundler.newpart('pushkey')
510 part = bundler.newpart('pushkey')
511 part.addparam('namespace', enc('phases'))
511 part.addparam('namespace', enc('phases'))
512 part.addparam('key', enc(newremotehead.hex()))
512 part.addparam('key', enc(newremotehead.hex()))
513 part.addparam('old', enc(str(phases.draft)))
513 part.addparam('old', enc(str(phases.draft)))
514 part.addparam('new', enc(str(phases.public)))
514 part.addparam('new', enc(str(phases.public)))
515 part2node.append((part.id, newremotehead))
515 part2node.append((part.id, newremotehead))
516 def handlereply(op):
516 def handlereply(op):
517 for partid, node in part2node:
517 for partid, node in part2node:
518 partrep = op.records.getreplies(partid)
518 partrep = op.records.getreplies(partid)
519 results = partrep['pushkey']
519 results = partrep['pushkey']
520 assert len(results) <= 1
520 assert len(results) <= 1
521 msg = None
521 msg = None
522 if not results:
522 if not results:
523 msg = _('server ignored update of %s to public!\n') % node
523 msg = _('server ignored update of %s to public!\n') % node
524 elif not int(results[0]['return']):
524 elif not int(results[0]['return']):
525 msg = _('updating %s to public failed!\n') % node
525 msg = _('updating %s to public failed!\n') % node
526 if msg is not None:
526 if msg is not None:
527 pushop.ui.warn(msg)
527 pushop.ui.warn(msg)
528 return handlereply
528 return handlereply
529
529
530 @b2partsgenerator('obsmarkers')
530 @b2partsgenerator('obsmarkers')
531 def _pushb2obsmarkers(pushop, bundler):
531 def _pushb2obsmarkers(pushop, bundler):
532 if 'obsmarkers' in pushop.stepsdone:
532 if 'obsmarkers' in pushop.stepsdone:
533 return
533 return
534 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
534 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
535 if obsolete.commonversion(remoteversions) is None:
535 if obsolete.commonversion(remoteversions) is None:
536 return
536 return
537 pushop.stepsdone.add('obsmarkers')
537 pushop.stepsdone.add('obsmarkers')
538 if pushop.outobsmarkers:
538 if pushop.outobsmarkers:
539 buildobsmarkerspart(bundler, pushop.outobsmarkers)
539 markers = sorted(pushop.outobsmarkers)
540 buildobsmarkerspart(bundler, markers)
540
541
541 @b2partsgenerator('bookmarks')
542 @b2partsgenerator('bookmarks')
542 def _pushb2bookmarks(pushop, bundler):
543 def _pushb2bookmarks(pushop, bundler):
543 """handle phase push through bundle2"""
544 """handle phase push through bundle2"""
544 if 'bookmarks' in pushop.stepsdone:
545 if 'bookmarks' in pushop.stepsdone:
545 return
546 return
546 b2caps = bundle2.bundle2caps(pushop.remote)
547 b2caps = bundle2.bundle2caps(pushop.remote)
547 if 'pushkey' not in b2caps:
548 if 'pushkey' not in b2caps:
548 return
549 return
549 pushop.stepsdone.add('bookmarks')
550 pushop.stepsdone.add('bookmarks')
550 part2book = []
551 part2book = []
551 enc = pushkey.encode
552 enc = pushkey.encode
552 for book, old, new in pushop.outbookmarks:
553 for book, old, new in pushop.outbookmarks:
553 part = bundler.newpart('pushkey')
554 part = bundler.newpart('pushkey')
554 part.addparam('namespace', enc('bookmarks'))
555 part.addparam('namespace', enc('bookmarks'))
555 part.addparam('key', enc(book))
556 part.addparam('key', enc(book))
556 part.addparam('old', enc(old))
557 part.addparam('old', enc(old))
557 part.addparam('new', enc(new))
558 part.addparam('new', enc(new))
558 action = 'update'
559 action = 'update'
559 if not old:
560 if not old:
560 action = 'export'
561 action = 'export'
561 elif not new:
562 elif not new:
562 action = 'delete'
563 action = 'delete'
563 part2book.append((part.id, book, action))
564 part2book.append((part.id, book, action))
564
565
565
566
566 def handlereply(op):
567 def handlereply(op):
567 ui = pushop.ui
568 ui = pushop.ui
568 for partid, book, action in part2book:
569 for partid, book, action in part2book:
569 partrep = op.records.getreplies(partid)
570 partrep = op.records.getreplies(partid)
570 results = partrep['pushkey']
571 results = partrep['pushkey']
571 assert len(results) <= 1
572 assert len(results) <= 1
572 if not results:
573 if not results:
573 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
574 pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
574 else:
575 else:
575 ret = int(results[0]['return'])
576 ret = int(results[0]['return'])
576 if ret:
577 if ret:
577 ui.status(bookmsgmap[action][0] % book)
578 ui.status(bookmsgmap[action][0] % book)
578 else:
579 else:
579 ui.warn(bookmsgmap[action][1] % book)
580 ui.warn(bookmsgmap[action][1] % book)
580 if pushop.bkresult is not None:
581 if pushop.bkresult is not None:
581 pushop.bkresult = 1
582 pushop.bkresult = 1
582 return handlereply
583 return handlereply
583
584
584
585
585 def _pushbundle2(pushop):
586 def _pushbundle2(pushop):
586 """push data to the remote using bundle2
587 """push data to the remote using bundle2
587
588
588 The only currently supported type of data is changegroup but this will
589 The only currently supported type of data is changegroup but this will
589 evolve in the future."""
590 evolve in the future."""
590 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
591 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
591 pushback = (pushop.trmanager
592 pushback = (pushop.trmanager
592 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
593 and pushop.ui.configbool('experimental', 'bundle2.pushback'))
593
594
594 # create reply capability
595 # create reply capability
595 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
596 capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
596 allowpushback=pushback))
597 allowpushback=pushback))
597 bundler.newpart('replycaps', data=capsblob)
598 bundler.newpart('replycaps', data=capsblob)
598 replyhandlers = []
599 replyhandlers = []
599 for partgenname in b2partsgenorder:
600 for partgenname in b2partsgenorder:
600 partgen = b2partsgenmapping[partgenname]
601 partgen = b2partsgenmapping[partgenname]
601 ret = partgen(pushop, bundler)
602 ret = partgen(pushop, bundler)
602 if callable(ret):
603 if callable(ret):
603 replyhandlers.append(ret)
604 replyhandlers.append(ret)
604 # do not push if nothing to push
605 # do not push if nothing to push
605 if bundler.nbparts <= 1:
606 if bundler.nbparts <= 1:
606 return
607 return
607 stream = util.chunkbuffer(bundler.getchunks())
608 stream = util.chunkbuffer(bundler.getchunks())
608 try:
609 try:
609 reply = pushop.remote.unbundle(stream, ['force'], 'push')
610 reply = pushop.remote.unbundle(stream, ['force'], 'push')
610 except error.BundleValueError, exc:
611 except error.BundleValueError, exc:
611 raise util.Abort('missing support for %s' % exc)
612 raise util.Abort('missing support for %s' % exc)
612 try:
613 try:
613 trgetter = None
614 trgetter = None
614 if pushback:
615 if pushback:
615 trgetter = pushop.trmanager.transaction
616 trgetter = pushop.trmanager.transaction
616 op = bundle2.processbundle(pushop.repo, reply, trgetter)
617 op = bundle2.processbundle(pushop.repo, reply, trgetter)
617 except error.BundleValueError, exc:
618 except error.BundleValueError, exc:
618 raise util.Abort('missing support for %s' % exc)
619 raise util.Abort('missing support for %s' % exc)
619 for rephand in replyhandlers:
620 for rephand in replyhandlers:
620 rephand(op)
621 rephand(op)
621
622
622 def _pushchangeset(pushop):
623 def _pushchangeset(pushop):
623 """Make the actual push of changeset bundle to remote repo"""
624 """Make the actual push of changeset bundle to remote repo"""
624 if 'changesets' in pushop.stepsdone:
625 if 'changesets' in pushop.stepsdone:
625 return
626 return
626 pushop.stepsdone.add('changesets')
627 pushop.stepsdone.add('changesets')
627 if not _pushcheckoutgoing(pushop):
628 if not _pushcheckoutgoing(pushop):
628 return
629 return
629 pushop.repo.prepushoutgoinghooks(pushop.repo,
630 pushop.repo.prepushoutgoinghooks(pushop.repo,
630 pushop.remote,
631 pushop.remote,
631 pushop.outgoing)
632 pushop.outgoing)
632 outgoing = pushop.outgoing
633 outgoing = pushop.outgoing
633 unbundle = pushop.remote.capable('unbundle')
634 unbundle = pushop.remote.capable('unbundle')
634 # TODO: get bundlecaps from remote
635 # TODO: get bundlecaps from remote
635 bundlecaps = None
636 bundlecaps = None
636 # create a changegroup from local
637 # create a changegroup from local
637 if pushop.revs is None and not (outgoing.excluded
638 if pushop.revs is None and not (outgoing.excluded
638 or pushop.repo.changelog.filteredrevs):
639 or pushop.repo.changelog.filteredrevs):
639 # push everything,
640 # push everything,
640 # use the fast path, no race possible on push
641 # use the fast path, no race possible on push
641 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
642 bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
642 cg = changegroup.getsubset(pushop.repo,
643 cg = changegroup.getsubset(pushop.repo,
643 outgoing,
644 outgoing,
644 bundler,
645 bundler,
645 'push',
646 'push',
646 fastpath=True)
647 fastpath=True)
647 else:
648 else:
648 cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
649 cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
649 bundlecaps)
650 bundlecaps)
650
651
651 # apply changegroup to remote
652 # apply changegroup to remote
652 if unbundle:
653 if unbundle:
653 # local repo finds heads on server, finds out what
654 # local repo finds heads on server, finds out what
654 # revs it must push. once revs transferred, if server
655 # revs it must push. once revs transferred, if server
655 # finds it has different heads (someone else won
656 # finds it has different heads (someone else won
656 # commit/push race), server aborts.
657 # commit/push race), server aborts.
657 if pushop.force:
658 if pushop.force:
658 remoteheads = ['force']
659 remoteheads = ['force']
659 else:
660 else:
660 remoteheads = pushop.remoteheads
661 remoteheads = pushop.remoteheads
661 # ssh: return remote's addchangegroup()
662 # ssh: return remote's addchangegroup()
662 # http: return remote's addchangegroup() or 0 for error
663 # http: return remote's addchangegroup() or 0 for error
663 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
664 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
664 pushop.repo.url())
665 pushop.repo.url())
665 else:
666 else:
666 # we return an integer indicating remote head count
667 # we return an integer indicating remote head count
667 # change
668 # change
668 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
669 pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
669 pushop.repo.url())
670 pushop.repo.url())
670
671
671 def _pushsyncphase(pushop):
672 def _pushsyncphase(pushop):
672 """synchronise phase information locally and remotely"""
673 """synchronise phase information locally and remotely"""
673 cheads = pushop.commonheads
674 cheads = pushop.commonheads
674 # even when we don't push, exchanging phase data is useful
675 # even when we don't push, exchanging phase data is useful
675 remotephases = pushop.remote.listkeys('phases')
676 remotephases = pushop.remote.listkeys('phases')
676 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
677 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
677 and remotephases # server supports phases
678 and remotephases # server supports phases
678 and pushop.cgresult is None # nothing was pushed
679 and pushop.cgresult is None # nothing was pushed
679 and remotephases.get('publishing', False)):
680 and remotephases.get('publishing', False)):
680 # When:
681 # When:
681 # - this is a subrepo push
682 # - this is a subrepo push
682 # - and remote support phase
683 # - and remote support phase
683 # - and no changeset was pushed
684 # - and no changeset was pushed
684 # - and remote is publishing
685 # - and remote is publishing
685 # We may be in issue 3871 case!
686 # We may be in issue 3871 case!
686 # We drop the possible phase synchronisation done by
687 # We drop the possible phase synchronisation done by
687 # courtesy to publish changesets possibly locally draft
688 # courtesy to publish changesets possibly locally draft
688 # on the remote.
689 # on the remote.
689 remotephases = {'publishing': 'True'}
690 remotephases = {'publishing': 'True'}
690 if not remotephases: # old server or public only reply from non-publishing
691 if not remotephases: # old server or public only reply from non-publishing
691 _localphasemove(pushop, cheads)
692 _localphasemove(pushop, cheads)
692 # don't push any phase data as there is nothing to push
693 # don't push any phase data as there is nothing to push
693 else:
694 else:
694 ana = phases.analyzeremotephases(pushop.repo, cheads,
695 ana = phases.analyzeremotephases(pushop.repo, cheads,
695 remotephases)
696 remotephases)
696 pheads, droots = ana
697 pheads, droots = ana
697 ### Apply remote phase on local
698 ### Apply remote phase on local
698 if remotephases.get('publishing', False):
699 if remotephases.get('publishing', False):
699 _localphasemove(pushop, cheads)
700 _localphasemove(pushop, cheads)
700 else: # publish = False
701 else: # publish = False
701 _localphasemove(pushop, pheads)
702 _localphasemove(pushop, pheads)
702 _localphasemove(pushop, cheads, phases.draft)
703 _localphasemove(pushop, cheads, phases.draft)
703 ### Apply local phase on remote
704 ### Apply local phase on remote
704
705
705 if pushop.cgresult:
706 if pushop.cgresult:
706 if 'phases' in pushop.stepsdone:
707 if 'phases' in pushop.stepsdone:
707 # phases already pushed though bundle2
708 # phases already pushed though bundle2
708 return
709 return
709 outdated = pushop.outdatedphases
710 outdated = pushop.outdatedphases
710 else:
711 else:
711 outdated = pushop.fallbackoutdatedphases
712 outdated = pushop.fallbackoutdatedphases
712
713
713 pushop.stepsdone.add('phases')
714 pushop.stepsdone.add('phases')
714
715
715 # filter heads already turned public by the push
716 # filter heads already turned public by the push
716 outdated = [c for c in outdated if c.node() not in pheads]
717 outdated = [c for c in outdated if c.node() not in pheads]
717 # fallback to independent pushkey command
718 # fallback to independent pushkey command
718 for newremotehead in outdated:
719 for newremotehead in outdated:
719 r = pushop.remote.pushkey('phases',
720 r = pushop.remote.pushkey('phases',
720 newremotehead.hex(),
721 newremotehead.hex(),
721 str(phases.draft),
722 str(phases.draft),
722 str(phases.public))
723 str(phases.public))
723 if not r:
724 if not r:
724 pushop.ui.warn(_('updating %s to public failed!\n')
725 pushop.ui.warn(_('updating %s to public failed!\n')
725 % newremotehead)
726 % newremotehead)
726
727
727 def _localphasemove(pushop, nodes, phase=phases.public):
728 def _localphasemove(pushop, nodes, phase=phases.public):
728 """move <nodes> to <phase> in the local source repo"""
729 """move <nodes> to <phase> in the local source repo"""
729 if pushop.trmanager:
730 if pushop.trmanager:
730 phases.advanceboundary(pushop.repo,
731 phases.advanceboundary(pushop.repo,
731 pushop.trmanager.transaction(),
732 pushop.trmanager.transaction(),
732 phase,
733 phase,
733 nodes)
734 nodes)
734 else:
735 else:
735 # repo is not locked, do not change any phases!
736 # repo is not locked, do not change any phases!
736 # Informs the user that phases should have been moved when
737 # Informs the user that phases should have been moved when
737 # applicable.
738 # applicable.
738 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
739 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
739 phasestr = phases.phasenames[phase]
740 phasestr = phases.phasenames[phase]
740 if actualmoves:
741 if actualmoves:
741 pushop.ui.status(_('cannot lock source repo, skipping '
742 pushop.ui.status(_('cannot lock source repo, skipping '
742 'local %s phase update\n') % phasestr)
743 'local %s phase update\n') % phasestr)
743
744
744 def _pushobsolete(pushop):
745 def _pushobsolete(pushop):
745 """utility function to push obsolete markers to a remote"""
746 """utility function to push obsolete markers to a remote"""
746 if 'obsmarkers' in pushop.stepsdone:
747 if 'obsmarkers' in pushop.stepsdone:
747 return
748 return
748 pushop.ui.debug('try to push obsolete markers to remote\n')
749 pushop.ui.debug('try to push obsolete markers to remote\n')
749 repo = pushop.repo
750 repo = pushop.repo
750 remote = pushop.remote
751 remote = pushop.remote
751 pushop.stepsdone.add('obsmarkers')
752 pushop.stepsdone.add('obsmarkers')
752 if pushop.outobsmarkers:
753 if pushop.outobsmarkers:
753 rslts = []
754 rslts = []
754 remotedata = obsolete._pushkeyescape(pushop.outobsmarkers)
755 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
755 for key in sorted(remotedata, reverse=True):
756 for key in sorted(remotedata, reverse=True):
756 # reverse sort to ensure we end with dump0
757 # reverse sort to ensure we end with dump0
757 data = remotedata[key]
758 data = remotedata[key]
758 rslts.append(remote.pushkey('obsolete', key, '', data))
759 rslts.append(remote.pushkey('obsolete', key, '', data))
759 if [r for r in rslts if not r]:
760 if [r for r in rslts if not r]:
760 msg = _('failed to push some obsolete markers!\n')
761 msg = _('failed to push some obsolete markers!\n')
761 repo.ui.warn(msg)
762 repo.ui.warn(msg)
762
763
763 def _pushbookmark(pushop):
764 def _pushbookmark(pushop):
764 """Update bookmark position on remote"""
765 """Update bookmark position on remote"""
765 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
766 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
766 return
767 return
767 pushop.stepsdone.add('bookmarks')
768 pushop.stepsdone.add('bookmarks')
768 ui = pushop.ui
769 ui = pushop.ui
769 remote = pushop.remote
770 remote = pushop.remote
770
771
771 for b, old, new in pushop.outbookmarks:
772 for b, old, new in pushop.outbookmarks:
772 action = 'update'
773 action = 'update'
773 if not old:
774 if not old:
774 action = 'export'
775 action = 'export'
775 elif not new:
776 elif not new:
776 action = 'delete'
777 action = 'delete'
777 if remote.pushkey('bookmarks', b, old, new):
778 if remote.pushkey('bookmarks', b, old, new):
778 ui.status(bookmsgmap[action][0] % b)
779 ui.status(bookmsgmap[action][0] % b)
779 else:
780 else:
780 ui.warn(bookmsgmap[action][1] % b)
781 ui.warn(bookmsgmap[action][1] % b)
781 # discovery can have set the value form invalid entry
782 # discovery can have set the value form invalid entry
782 if pushop.bkresult is not None:
783 if pushop.bkresult is not None:
783 pushop.bkresult = 1
784 pushop.bkresult = 1
784
785
785 class pulloperation(object):
786 class pulloperation(object):
786 """A object that represent a single pull operation
787 """A object that represent a single pull operation
787
788
788 It purpose is to carry pull related state and very common operation.
789 It purpose is to carry pull related state and very common operation.
789
790
790 A new should be created at the beginning of each pull and discarded
791 A new should be created at the beginning of each pull and discarded
791 afterward.
792 afterward.
792 """
793 """
793
794
794 def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
795 def __init__(self, repo, remote, heads=None, force=False, bookmarks=()):
795 # repo we pull into
796 # repo we pull into
796 self.repo = repo
797 self.repo = repo
797 # repo we pull from
798 # repo we pull from
798 self.remote = remote
799 self.remote = remote
799 # revision we try to pull (None is "all")
800 # revision we try to pull (None is "all")
800 self.heads = heads
801 self.heads = heads
801 # bookmark pulled explicitly
802 # bookmark pulled explicitly
802 self.explicitbookmarks = bookmarks
803 self.explicitbookmarks = bookmarks
803 # do we force pull?
804 # do we force pull?
804 self.force = force
805 self.force = force
805 # transaction manager
806 # transaction manager
806 self.trmanager = None
807 self.trmanager = None
807 # set of common changeset between local and remote before pull
808 # set of common changeset between local and remote before pull
808 self.common = None
809 self.common = None
809 # set of pulled head
810 # set of pulled head
810 self.rheads = None
811 self.rheads = None
811 # list of missing changeset to fetch remotely
812 # list of missing changeset to fetch remotely
812 self.fetch = None
813 self.fetch = None
813 # remote bookmarks data
814 # remote bookmarks data
814 self.remotebookmarks = None
815 self.remotebookmarks = None
815 # result of changegroup pulling (used as return code by pull)
816 # result of changegroup pulling (used as return code by pull)
816 self.cgresult = None
817 self.cgresult = None
817 # list of step already done
818 # list of step already done
818 self.stepsdone = set()
819 self.stepsdone = set()
819
820
820 @util.propertycache
821 @util.propertycache
821 def pulledsubset(self):
822 def pulledsubset(self):
822 """heads of the set of changeset target by the pull"""
823 """heads of the set of changeset target by the pull"""
823 # compute target subset
824 # compute target subset
824 if self.heads is None:
825 if self.heads is None:
825 # We pulled everything possible
826 # We pulled everything possible
826 # sync on everything common
827 # sync on everything common
827 c = set(self.common)
828 c = set(self.common)
828 ret = list(self.common)
829 ret = list(self.common)
829 for n in self.rheads:
830 for n in self.rheads:
830 if n not in c:
831 if n not in c:
831 ret.append(n)
832 ret.append(n)
832 return ret
833 return ret
833 else:
834 else:
834 # We pulled a specific subset
835 # We pulled a specific subset
835 # sync on this subset
836 # sync on this subset
836 return self.heads
837 return self.heads
837
838
838 def gettransaction(self):
839 def gettransaction(self):
839 # deprecated; talk to trmanager directly
840 # deprecated; talk to trmanager directly
840 return self.trmanager.transaction()
841 return self.trmanager.transaction()
841
842
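The pulledsubset property above reduces to a small set computation; a standalone sketch (not part of this change), using plain strings as stand-ins for binary node ids:

    def pulledsubset(heads, common, rheads):
        # no explicit heads: sync on everything common plus new remote heads
        if heads is None:
            c = set(common)
            return list(common) + [n for n in rheads if n not in c]
        # explicit heads: sync on exactly that subset
        return heads

    assert pulledsubset(None, ['a', 'b'], ['b', 'c']) == ['a', 'b', 'c']
    assert pulledsubset(['x'], ['a', 'b'], ['b', 'c']) == ['x']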
842 class transactionmanager(object):
843 class transactionmanager(object):
843 """An object to manage the life cycle of a transaction
844 """An object to manage the life cycle of a transaction
844
845
845 It creates the transaction on demand and calls the appropriate hooks when
846 It creates the transaction on demand and calls the appropriate hooks when
846 closing the transaction."""
847 closing the transaction."""
847 def __init__(self, repo, source, url):
848 def __init__(self, repo, source, url):
848 self.repo = repo
849 self.repo = repo
849 self.source = source
850 self.source = source
850 self.url = url
851 self.url = url
851 self._tr = None
852 self._tr = None
852
853
853 def transaction(self):
854 def transaction(self):
854 """Return an open transaction object, constructing if necessary"""
855 """Return an open transaction object, constructing if necessary"""
855 if not self._tr:
856 if not self._tr:
856 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
857 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
857 self._tr = self.repo.transaction(trname)
858 self._tr = self.repo.transaction(trname)
858 self._tr.hookargs['source'] = self.source
859 self._tr.hookargs['source'] = self.source
859 self._tr.hookargs['url'] = self.url
860 self._tr.hookargs['url'] = self.url
860 return self._tr
861 return self._tr
861
862
862 def close(self):
863 def close(self):
863 """close transaction if created"""
864 """close transaction if created"""
864 if self._tr is not None:
865 if self._tr is not None:
865 self._tr.close()
866 self._tr.close()
866
867
867 def release(self):
868 def release(self):
868 """release transaction if created"""
869 """release transaction if created"""
869 if self._tr is not None:
870 if self._tr is not None:
870 self._tr.release()
871 self._tr.release()
871
872
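A standalone sketch of the lazy create / close / release life cycle that transactionmanager implements; the toy classes below are hypothetical stand-ins for the real transaction API, shown only to make the contract explicit:

    class FakeTransaction(object):
        def __init__(self):
            self.state = 'open'
        def close(self):
            self.state = 'committed'
        def release(self):
            if self.state == 'open':    # abort only if never committed
                self.state = 'aborted'

    class LazyTxManager(object):
        def __init__(self):
            self._tr = None
        def transaction(self):
            if self._tr is None:        # construct on first use only
                self._tr = FakeTransaction()
            return self._tr
        def close(self):
            if self._tr is not None:
                self._tr.close()
        def release(self):
            if self._tr is not None:
                self._tr.release()

    mgr = LazyTxManager()
    mgr.release()                       # no-op: nothing was ever created
    mgr.transaction()
    mgr.close()
    assert mgr._tr.state == 'committed'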
872 def pull(repo, remote, heads=None, force=False, bookmarks=()):
873 def pull(repo, remote, heads=None, force=False, bookmarks=()):
873 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
874 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks)
874 if pullop.remote.local():
875 if pullop.remote.local():
875 missing = set(pullop.remote.requirements) - pullop.repo.supported
876 missing = set(pullop.remote.requirements) - pullop.repo.supported
876 if missing:
877 if missing:
877 msg = _("required features are not"
878 msg = _("required features are not"
878 " supported in the destination:"
879 " supported in the destination:"
879 " %s") % (', '.join(sorted(missing)))
880 " %s") % (', '.join(sorted(missing)))
880 raise util.Abort(msg)
881 raise util.Abort(msg)
881
882
882 pullop.remotebookmarks = remote.listkeys('bookmarks')
883 pullop.remotebookmarks = remote.listkeys('bookmarks')
883 lock = pullop.repo.lock()
884 lock = pullop.repo.lock()
884 try:
885 try:
885 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
886 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
886 _pulldiscovery(pullop)
887 _pulldiscovery(pullop)
887 if _canusebundle2(pullop):
888 if _canusebundle2(pullop):
888 _pullbundle2(pullop)
889 _pullbundle2(pullop)
889 _pullchangeset(pullop)
890 _pullchangeset(pullop)
890 _pullphase(pullop)
891 _pullphase(pullop)
891 _pullbookmarks(pullop)
892 _pullbookmarks(pullop)
892 _pullobsolete(pullop)
893 _pullobsolete(pullop)
893 pullop.trmanager.close()
894 pullop.trmanager.close()
894 finally:
895 finally:
895 pullop.trmanager.release()
896 pullop.trmanager.release()
896 lock.release()
897 lock.release()
897
898
898 return pullop
899 return pullop
899
900
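Note that pull() always runs the legacy steps, but each one returns early if a bundle2 exchange already recorded it in pullop.stepsdone; a sketch of that guard (step names are the ones used in this file):

    legacy_steps = ['changegroup', 'phases', 'bookmarks', 'obsmarkers']

    def remaining_steps(stepsdone):
        # steps that still need the old, one-request-per-item code path
        return [s for s in legacy_steps if s not in stepsdone]

    assert remaining_steps(set(legacy_steps)) == []    # bundle2 did it all
    assert remaining_steps(set()) == legacy_steps      # old-server fallback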
900 # list of steps to perform discovery before pull
901 # list of steps to perform discovery before pull
901 pulldiscoveryorder = []
902 pulldiscoveryorder = []
902
903
903 # Mapping between step name and function
904 # Mapping between step name and function
904 #
905 #
905 # This exists to help extensions wrap steps if necessary
906 # This exists to help extensions wrap steps if necessary
906 pulldiscoverymapping = {}
907 pulldiscoverymapping = {}
907
908
908 def pulldiscovery(stepname):
909 def pulldiscovery(stepname):
909 """decorator for function performing discovery before pull
910 """decorator for function performing discovery before pull
910
911
911 The function is added to the step -> function mapping and appended to the
912 The function is added to the step -> function mapping and appended to the
912 list of steps. Beware that decorated functions will be added in order (this
913 list of steps. Beware that decorated functions will be added in order (this
913 may matter).
914 may matter).
914
915
915 You can only use this decorator for a new step; if you want to wrap a step
916 You can only use this decorator for a new step; if you want to wrap a step
916 from an extension, change the pulldiscoverymapping dictionary directly."""
917 from an extension, change the pulldiscoverymapping dictionary directly."""
917 def dec(func):
918 def dec(func):
918 assert stepname not in pulldiscoverymapping
919 assert stepname not in pulldiscoverymapping
919 pulldiscoverymapping[stepname] = func
920 pulldiscoverymapping[stepname] = func
920 pulldiscoveryorder.append(stepname)
921 pulldiscoveryorder.append(stepname)
921 return func
922 return func
922 return dec
923 return dec
923
924
924 def _pulldiscovery(pullop):
925 def _pulldiscovery(pullop):
925 """Run all discovery steps"""
926 """Run all discovery steps"""
926 for stepname in pulldiscoveryorder:
927 for stepname in pulldiscoveryorder:
927 step = pulldiscoverymapping[stepname]
928 step = pulldiscoverymapping[stepname]
928 step(pullop)
929 step(pullop)
929
930
930 @pulldiscovery('changegroup')
931 @pulldiscovery('changegroup')
931 def _pulldiscoverychangegroup(pullop):
932 def _pulldiscoverychangegroup(pullop):
932 """discovery phase for the pull
933 """discovery phase for the pull
933
934
934 Currently handles changeset discovery only; it will handle all discovery
935 Currently handles changeset discovery only; it will handle all discovery
935 at some point."""
936 at some point."""
936 tmp = discovery.findcommonincoming(pullop.repo,
937 tmp = discovery.findcommonincoming(pullop.repo,
937 pullop.remote,
938 pullop.remote,
938 heads=pullop.heads,
939 heads=pullop.heads,
939 force=pullop.force)
940 force=pullop.force)
940 common, fetch, rheads = tmp
941 common, fetch, rheads = tmp
941 nm = pullop.repo.unfiltered().changelog.nodemap
942 nm = pullop.repo.unfiltered().changelog.nodemap
942 if fetch and rheads:
943 if fetch and rheads:
943 # If a remote head is filtered locally, let's drop it from the unknown
944 # If a remote head is filtered locally, let's drop it from the unknown
944 # remote heads and put it back in common.
945 # remote heads and put it back in common.
945 #
946 #
946 # This is a hackish solution to catch most of the "common but locally
947 # This is a hackish solution to catch most of the "common but locally
947 # hidden" situations. We do not perform discovery on the unfiltered
948 # hidden" situations. We do not perform discovery on the unfiltered
948 # repository because it ends up doing a pathological amount of round
949 # repository because it ends up doing a pathological amount of round
949 # trips for a huge amount of changesets we do not care about.
950 # trips for a huge amount of changesets we do not care about.
950 #
951 #
951 # If a set of such "common but filtered" changesets exists on the server
952 # If a set of such "common but filtered" changesets exists on the server
952 # but does not include a remote head, we'll not be able to detect it.
953 # but does not include a remote head, we'll not be able to detect it.
953 scommon = set(common)
954 scommon = set(common)
954 filteredrheads = []
955 filteredrheads = []
955 for n in rheads:
956 for n in rheads:
956 if n in nm:
957 if n in nm:
957 if n not in scommon:
958 if n not in scommon:
958 common.append(n)
959 common.append(n)
959 else:
960 else:
960 filteredrheads.append(n)
961 filteredrheads.append(n)
961 if not filteredrheads:
962 if not filteredrheads:
962 fetch = []
963 fetch = []
963 rheads = filteredrheads
964 rheads = filteredrheads
964 pullop.common = common
965 pullop.common = common
965 pullop.fetch = fetch
966 pullop.fetch = fetch
966 pullop.rheads = rheads
967 pullop.rheads = rheads
967
968
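A hedged, hypothetical example (not part of this change) of how an extension could register an extra discovery step with the pulldiscovery decorator above; registration order is execution order, so this one would run after the built-in 'changegroup' step:

    @pulldiscovery('mystep')
    def _pulldiscoverymystep(pullop):
        # a real step would record its findings on pullop
        pullop.repo.ui.debug('running my discovery step\n')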
968 def _pullbundle2(pullop):
969 def _pullbundle2(pullop):
969 """pull data using bundle2
970 """pull data using bundle2
970
971
971 For now, the only supported data is the changegroup."""
972 For now, the only supported data is the changegroup."""
972 remotecaps = bundle2.bundle2caps(pullop.remote)
973 remotecaps = bundle2.bundle2caps(pullop.remote)
973 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
974 kwargs = {'bundlecaps': caps20to10(pullop.repo)}
974 # pulling changegroup
975 # pulling changegroup
975 pullop.stepsdone.add('changegroup')
976 pullop.stepsdone.add('changegroup')
976
977
977 kwargs['common'] = pullop.common
978 kwargs['common'] = pullop.common
978 kwargs['heads'] = pullop.heads or pullop.rheads
979 kwargs['heads'] = pullop.heads or pullop.rheads
979 kwargs['cg'] = pullop.fetch
980 kwargs['cg'] = pullop.fetch
980 if 'listkeys' in remotecaps:
981 if 'listkeys' in remotecaps:
981 kwargs['listkeys'] = ['phase', 'bookmarks']
982 kwargs['listkeys'] = ['phase', 'bookmarks']
982 if not pullop.fetch:
983 if not pullop.fetch:
983 pullop.repo.ui.status(_("no changes found\n"))
984 pullop.repo.ui.status(_("no changes found\n"))
984 pullop.cgresult = 0
985 pullop.cgresult = 0
985 else:
986 else:
986 if pullop.heads is None and list(pullop.common) == [nullid]:
987 if pullop.heads is None and list(pullop.common) == [nullid]:
987 pullop.repo.ui.status(_("requesting all changes\n"))
988 pullop.repo.ui.status(_("requesting all changes\n"))
988 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
989 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
989 remoteversions = bundle2.obsmarkersversion(remotecaps)
990 remoteversions = bundle2.obsmarkersversion(remotecaps)
990 if obsolete.commonversion(remoteversions) is not None:
991 if obsolete.commonversion(remoteversions) is not None:
991 kwargs['obsmarkers'] = True
992 kwargs['obsmarkers'] = True
992 pullop.stepsdone.add('obsmarkers')
993 pullop.stepsdone.add('obsmarkers')
993 _pullbundle2extraprepare(pullop, kwargs)
994 _pullbundle2extraprepare(pullop, kwargs)
994 bundle = pullop.remote.getbundle('pull', **kwargs)
995 bundle = pullop.remote.getbundle('pull', **kwargs)
995 try:
996 try:
996 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
997 op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
997 except error.BundleValueError, exc:
998 except error.BundleValueError, exc:
998 raise util.Abort('missing support for %s' % exc)
999 raise util.Abort('missing support for %s' % exc)
999
1000
1000 if pullop.fetch:
1001 if pullop.fetch:
1001 results = [cg['return'] for cg in op.records['changegroup']]
1002 results = [cg['return'] for cg in op.records['changegroup']]
1002 pullop.cgresult = changegroup.combineresults(results)
1003 pullop.cgresult = changegroup.combineresults(results)
1003
1004
1004 # processing phases change
1005 # processing phases change
1005 for namespace, value in op.records['listkeys']:
1006 for namespace, value in op.records['listkeys']:
1006 if namespace == 'phases':
1007 if namespace == 'phases':
1007 _pullapplyphases(pullop, value)
1008 _pullapplyphases(pullop, value)
1008
1009
1009 # processing bookmark update
1010 # processing bookmark update
1010 for namespace, value in op.records['listkeys']:
1011 for namespace, value in op.records['listkeys']:
1011 if namespace == 'bookmarks':
1012 if namespace == 'bookmarks':
1012 pullop.remotebookmarks = value
1013 pullop.remotebookmarks = value
1013 _pullbookmarks(pullop)
1014 _pullbookmarks(pullop)
1014
1015
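The kwargs assembled above end up as a single getbundle call; an illustrative, non-literal argument set for a bundle2 pull from a listkeys-capable server (values are placeholders):

    example_kwargs = {
        'bundlecaps': set(['HG20']),         # plus a quoted 'bundle2=' blob
        'common': [],                        # nodes from discovery
        'heads': [],                         # explicit or remote heads
        'cg': True,                          # request a changegroup part
        'listkeys': ['phase', 'bookmarks'],  # piggy-backed pushkey namespaces
        'obsmarkers': True,                  # only if both sides share a format
    }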
1015 def _pullbundle2extraprepare(pullop, kwargs):
1016 def _pullbundle2extraprepare(pullop, kwargs):
1016 """hook function so that extensions can extend the getbundle call"""
1017 """hook function so that extensions can extend the getbundle call"""
1017 pass
1018 pass
1018
1019
1019 def _pullchangeset(pullop):
1020 def _pullchangeset(pullop):
1020 """pull changeset from unbundle into the local repo"""
1021 """pull changeset from unbundle into the local repo"""
1021 # We delay opening the transaction as late as possible so we
1022 # We delay opening the transaction as late as possible so we
1022 # don't open a transaction for nothing, which would break a future
1023 # don't open a transaction for nothing, which would break a future
1023 # useful rollback call
1024 # useful rollback call
1024 if 'changegroup' in pullop.stepsdone:
1025 if 'changegroup' in pullop.stepsdone:
1025 return
1026 return
1026 pullop.stepsdone.add('changegroup')
1027 pullop.stepsdone.add('changegroup')
1027 if not pullop.fetch:
1028 if not pullop.fetch:
1028 pullop.repo.ui.status(_("no changes found\n"))
1029 pullop.repo.ui.status(_("no changes found\n"))
1029 pullop.cgresult = 0
1030 pullop.cgresult = 0
1030 return
1031 return
1031 pullop.gettransaction()
1032 pullop.gettransaction()
1032 if pullop.heads is None and list(pullop.common) == [nullid]:
1033 if pullop.heads is None and list(pullop.common) == [nullid]:
1033 pullop.repo.ui.status(_("requesting all changes\n"))
1034 pullop.repo.ui.status(_("requesting all changes\n"))
1034 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1035 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1035 # issue1320, avoid a race if remote changed after discovery
1036 # issue1320, avoid a race if remote changed after discovery
1036 pullop.heads = pullop.rheads
1037 pullop.heads = pullop.rheads
1037
1038
1038 if pullop.remote.capable('getbundle'):
1039 if pullop.remote.capable('getbundle'):
1039 # TODO: get bundlecaps from remote
1040 # TODO: get bundlecaps from remote
1040 cg = pullop.remote.getbundle('pull', common=pullop.common,
1041 cg = pullop.remote.getbundle('pull', common=pullop.common,
1041 heads=pullop.heads or pullop.rheads)
1042 heads=pullop.heads or pullop.rheads)
1042 elif pullop.heads is None:
1043 elif pullop.heads is None:
1043 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1044 cg = pullop.remote.changegroup(pullop.fetch, 'pull')
1044 elif not pullop.remote.capable('changegroupsubset'):
1045 elif not pullop.remote.capable('changegroupsubset'):
1045 raise util.Abort(_("partial pull cannot be done because "
1046 raise util.Abort(_("partial pull cannot be done because "
1046 "other repository doesn't support "
1047 "other repository doesn't support "
1047 "changegroupsubset."))
1048 "changegroupsubset."))
1048 else:
1049 else:
1049 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1050 cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
1050 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1051 pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull',
1051 pullop.remote.url())
1052 pullop.remote.url())
1052
1053
1053 def _pullphase(pullop):
1054 def _pullphase(pullop):
1054 # Get remote phases data from remote
1055 # Get remote phases data from remote
1055 if 'phases' in pullop.stepsdone:
1056 if 'phases' in pullop.stepsdone:
1056 return
1057 return
1057 remotephases = pullop.remote.listkeys('phases')
1058 remotephases = pullop.remote.listkeys('phases')
1058 _pullapplyphases(pullop, remotephases)
1059 _pullapplyphases(pullop, remotephases)
1059
1060
1060 def _pullapplyphases(pullop, remotephases):
1061 def _pullapplyphases(pullop, remotephases):
1061 """apply phase movement from observed remote state"""
1062 """apply phase movement from observed remote state"""
1062 if 'phases' in pullop.stepsdone:
1063 if 'phases' in pullop.stepsdone:
1063 return
1064 return
1064 pullop.stepsdone.add('phases')
1065 pullop.stepsdone.add('phases')
1065 publishing = bool(remotephases.get('publishing', False))
1066 publishing = bool(remotephases.get('publishing', False))
1066 if remotephases and not publishing:
1067 if remotephases and not publishing:
1067 # remote is new and unpublishing
1068 # remote is new and unpublishing
1068 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1069 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1069 pullop.pulledsubset,
1070 pullop.pulledsubset,
1070 remotephases)
1071 remotephases)
1071 dheads = pullop.pulledsubset
1072 dheads = pullop.pulledsubset
1072 else:
1073 else:
1073 # Remote is old or publishing all common changesets
1074 # Remote is old or publishing all common changesets
1074 # should be seen as public
1075 # should be seen as public
1075 pheads = pullop.pulledsubset
1076 pheads = pullop.pulledsubset
1076 dheads = []
1077 dheads = []
1077 unfi = pullop.repo.unfiltered()
1078 unfi = pullop.repo.unfiltered()
1078 phase = unfi._phasecache.phase
1079 phase = unfi._phasecache.phase
1079 rev = unfi.changelog.nodemap.get
1080 rev = unfi.changelog.nodemap.get
1080 public = phases.public
1081 public = phases.public
1081 draft = phases.draft
1082 draft = phases.draft
1082
1083
1083 # exclude changesets already public locally and update the others
1084 # exclude changesets already public locally and update the others
1084 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1085 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1085 if pheads:
1086 if pheads:
1086 tr = pullop.gettransaction()
1087 tr = pullop.gettransaction()
1087 phases.advanceboundary(pullop.repo, tr, public, pheads)
1088 phases.advanceboundary(pullop.repo, tr, public, pheads)
1088
1089
1089 # exclude changesets already draft locally and update the others
1090 # exclude changesets already draft locally and update the others
1090 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1091 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1091 if dheads:
1092 if dheads:
1092 tr = pullop.gettransaction()
1093 tr = pullop.gettransaction()
1093 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1094 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1094
1095
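A standalone sketch of the filtering above: phases are ordered public(0) < draft(1) < secret(2), and only nodes strictly behind the target phase are advanced, so no transaction is opened when there is nothing to do:

    public, draft = 0, 1

    def needs_advance(localphase, heads, target):
        return [n for n in heads if localphase[n] > target]

    localphase = {'a': 0, 'b': 1, 'c': 2}
    assert needs_advance(localphase, ['a', 'b'], public) == ['b']
    assert needs_advance(localphase, ['a'], public) == []    # already public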
1095 def _pullbookmarks(pullop):
1096 def _pullbookmarks(pullop):
1096 """process the remote bookmark information to update the local one"""
1097 """process the remote bookmark information to update the local one"""
1097 if 'bookmarks' in pullop.stepsdone:
1098 if 'bookmarks' in pullop.stepsdone:
1098 return
1099 return
1099 pullop.stepsdone.add('bookmarks')
1100 pullop.stepsdone.add('bookmarks')
1100 repo = pullop.repo
1101 repo = pullop.repo
1101 remotebookmarks = pullop.remotebookmarks
1102 remotebookmarks = pullop.remotebookmarks
1102 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1103 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1103 pullop.remote.url(),
1104 pullop.remote.url(),
1104 pullop.gettransaction,
1105 pullop.gettransaction,
1105 explicit=pullop.explicitbookmarks)
1106 explicit=pullop.explicitbookmarks)
1106
1107
1107 def _pullobsolete(pullop):
1108 def _pullobsolete(pullop):
1108 """utility function to pull obsolete markers from a remote
1109 """utility function to pull obsolete markers from a remote
1109
1110
1110 The `gettransaction` function returns the pull transaction, creating
1111 The `gettransaction` function returns the pull transaction, creating
1111 one if necessary. We return the transaction to inform the calling code that
1112 one if necessary. We return the transaction to inform the calling code that
1112 a new transaction has been created (when applicable).
1113 a new transaction has been created (when applicable).
1113
1114
1114 Exists mostly to allow overriding for experimentation purposes"""
1115 Exists mostly to allow overriding for experimentation purposes"""
1115 if 'obsmarkers' in pullop.stepsdone:
1116 if 'obsmarkers' in pullop.stepsdone:
1116 return
1117 return
1117 pullop.stepsdone.add('obsmarkers')
1118 pullop.stepsdone.add('obsmarkers')
1118 tr = None
1119 tr = None
1119 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1120 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1120 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1121 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1121 remoteobs = pullop.remote.listkeys('obsolete')
1122 remoteobs = pullop.remote.listkeys('obsolete')
1122 if 'dump0' in remoteobs:
1123 if 'dump0' in remoteobs:
1123 tr = pullop.gettransaction()
1124 tr = pullop.gettransaction()
1124 for key in sorted(remoteobs, reverse=True):
1125 for key in sorted(remoteobs, reverse=True):
1125 if key.startswith('dump'):
1126 if key.startswith('dump'):
1126 data = base85.b85decode(remoteobs[key])
1127 data = base85.b85decode(remoteobs[key])
1127 pullop.repo.obsstore.mergemarkers(tr, data)
1128 pullop.repo.obsstore.mergemarkers(tr, data)
1128 pullop.repo.invalidatevolatilesets()
1129 pullop.repo.invalidatevolatilesets()
1129 return tr
1130 return tr
1130
1131
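In the legacy path above, markers travel in the 'obsolete' pushkey namespace under keys 'dump0', 'dump1', ..., with base85-encoded obsstore bytes as values. A round-trip sketch, using the Python 3 stdlib b85 functions purely as a stand-in for Mercurial's own base85 module:

    import base64  # stand-in; the codebase uses its base85 C module

    payload = base64.b85encode(b'raw obsstore bytes')
    assert base64.b85decode(payload) == b'raw obsstore bytes'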
1131 def caps20to10(repo):
1132 def caps20to10(repo):
1132 """return a set with appropriate options to use bundle20 during getbundle"""
1133 """return a set with appropriate options to use bundle20 during getbundle"""
1133 caps = set(['HG20'])
1134 caps = set(['HG20'])
1134 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1135 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
1135 caps.add('bundle2=' + urllib.quote(capsblob))
1136 caps.add('bundle2=' + urllib.quote(capsblob))
1136 return caps
1137 return caps
1137
1138
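caps20to10() above URL-quotes the bundle2 capability blob so it can ride inside bundlecaps, and getbundle() below unquotes it to rebuild b2caps; the symmetry in a short sketch (the blob content is illustrative):

    import urllib

    blob = 'HG20\nchangegroup=01'              # illustrative capability blob
    cap = 'bundle2=' + urllib.quote(blob)
    assert urllib.unquote(cap[len('bundle2='):]) == blob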
1138 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1139 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1139 getbundle2partsorder = []
1140 getbundle2partsorder = []
1140
1141
1141 # Mapping between step name and function
1142 # Mapping between step name and function
1142 #
1143 #
1143 # This exists to help extensions wrap steps if necessary
1144 # This exists to help extensions wrap steps if necessary
1144 getbundle2partsmapping = {}
1145 getbundle2partsmapping = {}
1145
1146
1146 def getbundle2partsgenerator(stepname, idx=None):
1147 def getbundle2partsgenerator(stepname, idx=None):
1147 """decorator for function generating bundle2 part for getbundle
1148 """decorator for function generating bundle2 part for getbundle
1148
1149
1149 The function is added to the step -> function mapping and appended to the
1150 The function is added to the step -> function mapping and appended to the
1150 list of steps. Beware that decorated functions will be added in order
1151 list of steps. Beware that decorated functions will be added in order
1151 (this may matter).
1152 (this may matter).
1152
1153
1153 You can only use this decorator for new steps; if you want to wrap a step
1154 You can only use this decorator for new steps; if you want to wrap a step
1154 from an extension, change the getbundle2partsmapping dictionary directly."""
1155 from an extension, change the getbundle2partsmapping dictionary directly."""
1155 def dec(func):
1156 def dec(func):
1156 assert stepname not in getbundle2partsmapping
1157 assert stepname not in getbundle2partsmapping
1157 getbundle2partsmapping[stepname] = func
1158 getbundle2partsmapping[stepname] = func
1158 if idx is None:
1159 if idx is None:
1159 getbundle2partsorder.append(stepname)
1160 getbundle2partsorder.append(stepname)
1160 else:
1161 else:
1161 getbundle2partsorder.insert(idx, stepname)
1162 getbundle2partsorder.insert(idx, stepname)
1162 return func
1163 return func
1163 return dec
1164 return dec
1164
1165
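A hedged, hypothetical example (not part of this change) of registering an extra bundle2 part generator; with idx=0 it is inserted at the front of getbundle2partsorder instead of appended:

    @getbundle2partsgenerator('myextrapart', idx=0)
    def _getbundlemyextrapart(bundler, repo, source, **kwargs):
        # a real generator would call bundler.newpart(...) here
        pass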
1165 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1166 def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
1166 **kwargs):
1167 **kwargs):
1167 """return a full bundle (with potentially multiple kind of parts)
1168 """return a full bundle (with potentially multiple kind of parts)
1168
1169
1169 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1170 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1170 passed. For now, the bundle can contain only a changegroup, but this will
1171 passed. For now, the bundle can contain only a changegroup, but this will
1171 change when more part types become available for bundle2.
1172 change when more part types become available for bundle2.
1172
1173
1173 This is different from changegroup.getchangegroup that only returns an HG10
1174 This is different from changegroup.getchangegroup that only returns an HG10
1174 changegroup bundle. They may eventually get reunited in the future when we
1175 changegroup bundle. They may eventually get reunited in the future when we
1175 have a clearer idea of the API we want to use to query different data.
1176 have a clearer idea of the API we want to use to query different data.
1176
1177
1177 The implementation is at a very early stage and will get massive rework
1178 The implementation is at a very early stage and will get massive rework
1178 when the bundle API is refined.
1179 when the bundle API is refined.
1179 """
1180 """
1180 # bundle10 case
1181 # bundle10 case
1181 usebundle2 = False
1182 usebundle2 = False
1182 if bundlecaps is not None:
1183 if bundlecaps is not None:
1183 usebundle2 = util.any((cap.startswith('HG2') for cap in bundlecaps))
1184 usebundle2 = util.any((cap.startswith('HG2') for cap in bundlecaps))
1184 if not usebundle2:
1185 if not usebundle2:
1185 if bundlecaps and not kwargs.get('cg', True):
1186 if bundlecaps and not kwargs.get('cg', True):
1186 raise ValueError(_('request for bundle10 must include changegroup'))
1187 raise ValueError(_('request for bundle10 must include changegroup'))
1187
1188
1188 if kwargs:
1189 if kwargs:
1189 raise ValueError(_('unsupported getbundle arguments: %s')
1190 raise ValueError(_('unsupported getbundle arguments: %s')
1190 % ', '.join(sorted(kwargs.keys())))
1191 % ', '.join(sorted(kwargs.keys())))
1191 return changegroup.getchangegroup(repo, source, heads=heads,
1192 return changegroup.getchangegroup(repo, source, heads=heads,
1192 common=common, bundlecaps=bundlecaps)
1193 common=common, bundlecaps=bundlecaps)
1193
1194
1194 # bundle20 case
1195 # bundle20 case
1195 b2caps = {}
1196 b2caps = {}
1196 for bcaps in bundlecaps:
1197 for bcaps in bundlecaps:
1197 if bcaps.startswith('bundle2='):
1198 if bcaps.startswith('bundle2='):
1198 blob = urllib.unquote(bcaps[len('bundle2='):])
1199 blob = urllib.unquote(bcaps[len('bundle2='):])
1199 b2caps.update(bundle2.decodecaps(blob))
1200 b2caps.update(bundle2.decodecaps(blob))
1200 bundler = bundle2.bundle20(repo.ui, b2caps)
1201 bundler = bundle2.bundle20(repo.ui, b2caps)
1201
1202
1202 kwargs['heads'] = heads
1203 kwargs['heads'] = heads
1203 kwargs['common'] = common
1204 kwargs['common'] = common
1204
1205
1205 for name in getbundle2partsorder:
1206 for name in getbundle2partsorder:
1206 func = getbundle2partsmapping[name]
1207 func = getbundle2partsmapping[name]
1207 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1208 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1208 **kwargs)
1209 **kwargs)
1209
1210
1210 return util.chunkbuffer(bundler.getchunks())
1211 return util.chunkbuffer(bundler.getchunks())
1211
1212
1212 @getbundle2partsgenerator('changegroup')
1213 @getbundle2partsgenerator('changegroup')
1213 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1214 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1214 b2caps=None, heads=None, common=None, **kwargs):
1215 b2caps=None, heads=None, common=None, **kwargs):
1215 """add a changegroup part to the requested bundle"""
1216 """add a changegroup part to the requested bundle"""
1216 cg = None
1217 cg = None
1217 if kwargs.get('cg', True):
1218 if kwargs.get('cg', True):
1218 # build changegroup bundle here.
1219 # build changegroup bundle here.
1219 version = None
1220 version = None
1220 cgversions = b2caps.get('changegroup')
1221 cgversions = b2caps.get('changegroup')
1221 if not cgversions: # 3.1 and 3.2 ship with an empty value
1222 if not cgversions: # 3.1 and 3.2 ship with an empty value
1222 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1223 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1223 common=common,
1224 common=common,
1224 bundlecaps=bundlecaps)
1225 bundlecaps=bundlecaps)
1225 else:
1226 else:
1226 cgversions = [v for v in cgversions if v in changegroup.packermap]
1227 cgversions = [v for v in cgversions if v in changegroup.packermap]
1227 if not cgversions:
1228 if not cgversions:
1228 raise ValueError(_('no common changegroup version'))
1229 raise ValueError(_('no common changegroup version'))
1229 version = max(cgversions)
1230 version = max(cgversions)
1230 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1231 cg = changegroup.getchangegroupraw(repo, source, heads=heads,
1231 common=common,
1232 common=common,
1232 bundlecaps=bundlecaps,
1233 bundlecaps=bundlecaps,
1233 version=version)
1234 version=version)
1234
1235
1235 if cg:
1236 if cg:
1236 part = bundler.newpart('changegroup', data=cg)
1237 part = bundler.newpart('changegroup', data=cg)
1237 if version is not None:
1238 if version is not None:
1238 part.addparam('version', version)
1239 part.addparam('version', version)
1239
1240
1240 @getbundle2partsgenerator('listkeys')
1241 @getbundle2partsgenerator('listkeys')
1241 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1242 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1242 b2caps=None, **kwargs):
1243 b2caps=None, **kwargs):
1243 """add parts containing listkeys namespaces to the requested bundle"""
1244 """add parts containing listkeys namespaces to the requested bundle"""
1244 listkeys = kwargs.get('listkeys', ())
1245 listkeys = kwargs.get('listkeys', ())
1245 for namespace in listkeys:
1246 for namespace in listkeys:
1246 part = bundler.newpart('listkeys')
1247 part = bundler.newpart('listkeys')
1247 part.addparam('namespace', namespace)
1248 part.addparam('namespace', namespace)
1248 keys = repo.listkeys(namespace).items()
1249 keys = repo.listkeys(namespace).items()
1249 part.data = pushkey.encodekeys(keys)
1250 part.data = pushkey.encodekeys(keys)
1250
1251
1251 @getbundle2partsgenerator('obsmarkers')
1252 @getbundle2partsgenerator('obsmarkers')
1252 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1253 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1253 b2caps=None, heads=None, **kwargs):
1254 b2caps=None, heads=None, **kwargs):
1254 """add an obsolescence markers part to the requested bundle"""
1255 """add an obsolescence markers part to the requested bundle"""
1255 if kwargs.get('obsmarkers', False):
1256 if kwargs.get('obsmarkers', False):
1256 if heads is None:
1257 if heads is None:
1257 heads = repo.heads()
1258 heads = repo.heads()
1258 subset = [c.node() for c in repo.set('::%ln', heads)]
1259 subset = [c.node() for c in repo.set('::%ln', heads)]
1259 markers = repo.obsstore.relevantmarkers(subset)
1260 markers = repo.obsstore.relevantmarkers(subset)
1261 markers = sorted(markers)
1260 buildobsmarkerspart(bundler, markers)
1262 buildobsmarkerspart(bundler, markers)
1261
1263
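The sorted() call added above is the point of this change: relevantmarkers() returns a set, so without sorting two servers holding identical markers could emit differently ordered (and hence differently hashed) obsmarker parts. Markers are plain tuples, so sorting needs no key function; a sketch with illustrative marker values:

    markers = set([
        ('prec2', ('suc2',), 0, (), (0.0, 0), None),
        ('prec1', ('suc1',), 0, (), (0.0, 0), None),
    ])
    assert [m[0] for m in sorted(markers)] == ['prec1', 'prec2']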
1262 def check_heads(repo, their_heads, context):
1264 def check_heads(repo, their_heads, context):
1263 """check if the heads of a repo have been modified
1265 """check if the heads of a repo have been modified
1264
1266
1265 Used by peer for unbundling.
1267 Used by peer for unbundling.
1266 """
1268 """
1267 heads = repo.heads()
1269 heads = repo.heads()
1268 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1270 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1269 if not (their_heads == ['force'] or their_heads == heads or
1271 if not (their_heads == ['force'] or their_heads == heads or
1270 their_heads == ['hashed', heads_hash]):
1272 their_heads == ['hashed', heads_hash]):
1271 # someone else committed/pushed/unbundled while we
1273 # someone else committed/pushed/unbundled while we
1272 # were transferring data
1274 # were transferring data
1273 raise error.PushRaced('repository changed while %s - '
1275 raise error.PushRaced('repository changed while %s - '
1274 'please try again' % context)
1276 'please try again' % context)
1275
1277
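A sketch of the race check above: the client may send either the literal heads it saw or ['hashed', sha1-of-sorted-heads], and the server recomputes the digest (util.sha1 wraps hashlib's sha1). The toy values below are illustrative:

    import hashlib

    def heads_hash(heads):
        return hashlib.sha1(''.join(sorted(heads))).digest()

    assert heads_hash(['aa', 'bb']) == heads_hash(['bb', 'aa'])  # order-free
    assert heads_hash(['aa', 'bb']) != heads_hash(['aa', 'cc'])  # change seen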
1276 def unbundle(repo, cg, heads, source, url):
1278 def unbundle(repo, cg, heads, source, url):
1277 """Apply a bundle to a repo.
1279 """Apply a bundle to a repo.
1278
1280
1279 this function makes sure the repo is locked during the application and has
1281 this function makes sure the repo is locked during the application and has
1280 a mechanism to check that no push race occurred between the creation of the
1282 a mechanism to check that no push race occurred between the creation of the
1281 bundle and its application.
1283 bundle and its application.
1282
1284
1283 If the push was raced, a PushRaced exception is raised."""
1285 If the push was raced, a PushRaced exception is raised."""
1284 r = 0
1286 r = 0
1285 # need a transaction when processing a bundle2 stream
1287 # need a transaction when processing a bundle2 stream
1286 wlock = lock = tr = None
1288 wlock = lock = tr = None
1287 recordout = None
1289 recordout = None
1288 # quick fix for output mismatch with bundle2 in 3.4
1290 # quick fix for output mismatch with bundle2 in 3.4
1289 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1291 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
1290 False)
1292 False)
1291 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1293 if url.startswith('remote:http:') or url.startswith('remote:https:'):
1292 captureoutput = True
1294 captureoutput = True
1293 try:
1295 try:
1294 check_heads(repo, heads, 'uploading changes')
1296 check_heads(repo, heads, 'uploading changes')
1295 # push can proceed
1297 # push can proceed
1296 if util.safehasattr(cg, 'params'):
1298 if util.safehasattr(cg, 'params'):
1297 r = None
1299 r = None
1298 try:
1300 try:
1299 wlock = repo.wlock()
1301 wlock = repo.wlock()
1300 lock = repo.lock()
1302 lock = repo.lock()
1301 tr = repo.transaction(source)
1303 tr = repo.transaction(source)
1302 tr.hookargs['source'] = source
1304 tr.hookargs['source'] = source
1303 tr.hookargs['url'] = url
1305 tr.hookargs['url'] = url
1304 tr.hookargs['bundle2'] = '1'
1306 tr.hookargs['bundle2'] = '1'
1305 op = bundle2.bundleoperation(repo, lambda: tr,
1307 op = bundle2.bundleoperation(repo, lambda: tr,
1306 captureoutput=captureoutput)
1308 captureoutput=captureoutput)
1307 try:
1309 try:
1308 r = bundle2.processbundle(repo, cg, op=op)
1310 r = bundle2.processbundle(repo, cg, op=op)
1309 finally:
1311 finally:
1310 r = op.reply
1312 r = op.reply
1311 if captureoutput and r is not None:
1313 if captureoutput and r is not None:
1312 repo.ui.pushbuffer(error=True, subproc=True)
1314 repo.ui.pushbuffer(error=True, subproc=True)
1313 def recordout(output):
1315 def recordout(output):
1314 r.newpart('output', data=output, mandatory=False)
1316 r.newpart('output', data=output, mandatory=False)
1315 tr.close()
1317 tr.close()
1316 except Exception, exc:
1318 except Exception, exc:
1317 exc.duringunbundle2 = True
1319 exc.duringunbundle2 = True
1318 if captureoutput and r is not None:
1320 if captureoutput and r is not None:
1319 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1321 parts = exc._bundle2salvagedoutput = r.salvageoutput()
1320 def recordout(output):
1322 def recordout(output):
1321 part = bundle2.bundlepart('output', data=output,
1323 part = bundle2.bundlepart('output', data=output,
1322 mandatory=False)
1324 mandatory=False)
1323 parts.append(part)
1325 parts.append(part)
1324 raise
1326 raise
1325 else:
1327 else:
1326 lock = repo.lock()
1328 lock = repo.lock()
1327 r = changegroup.addchangegroup(repo, cg, source, url)
1329 r = changegroup.addchangegroup(repo, cg, source, url)
1328 finally:
1330 finally:
1329 lockmod.release(tr, lock, wlock)
1331 lockmod.release(tr, lock, wlock)
1330 if recordout is not None:
1332 if recordout is not None:
1331 recordout(repo.ui.popbuffer())
1333 recordout(repo.ui.popbuffer())
1332 return r
1334 return r
@@ -1,1252 +1,1252 b''
1 # obsolete.py - obsolete markers handling
1 # obsolete.py - obsolete markers handling
2 #
2 #
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Logilab SA <contact@logilab.fr>
4 # Logilab SA <contact@logilab.fr>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """Obsolete marker handling
9 """Obsolete marker handling
10
10
11 An obsolete marker maps an old changeset to a list of new
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
14 "replaced" by the new changesets.
15
15
16 Obsolete markers can be used to record and distribute changeset graph
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewrite operations, and help
17 transformations performed by history rewrite operations, and help
18 building new tools to reconcile conflicting rewrite actions. To
18 building new tools to reconcile conflicting rewrite actions. To
19 facilitate conflict resolution, markers include various annotations
19 facilitate conflict resolution, markers include various annotations
20 besides old and new changeset identifiers, such as creation date or
20 besides old and new changeset identifiers, such as creation date or
21 author name.
21 author name.
22
22
23 The old obsoleted changeset is called a "precursor" and possible
23 The old obsoleted changeset is called a "precursor" and possible
24 replacements are called "successors". Markers that used changeset X as
24 replacements are called "successors". Markers that used changeset X as
25 a precursor are called "successor markers of X" because they hold
25 a precursor are called "successor markers of X" because they hold
26 information about the successors of X. Markers that use changeset Y as
26 information about the successors of X. Markers that use changeset Y as
27 a successor are called "precursor markers of Y" because they hold
27 a successor are called "precursor markers of Y" because they hold
28 information about the precursors of Y.
28 information about the precursors of Y.
29
29
30 Examples:
30 Examples:
31
31
32 - When changeset A is replaced by changeset A', one marker is stored:
32 - When changeset A is replaced by changeset A', one marker is stored:
33
33
34 (A, (A',))
34 (A, (A',))
35
35
36 - When changesets A and B are folded into a new changeset C, two markers are
36 - When changesets A and B are folded into a new changeset C, two markers are
37 stored:
37 stored:
38
38
39 (A, (C,)) and (B, (C,))
39 (A, (C,)) and (B, (C,))
40
40
41 - When changeset A is simply "pruned" from the graph, a marker is created:
41 - When changeset A is simply "pruned" from the graph, a marker is created:
42
42
43 (A, ())
43 (A, ())
44
44
45 - When changeset A is split into B and C, a single marker is used:
45 - When changeset A is split into B and C, a single marker is used:
46
46
47 (A, (B, C))
47 (A, (B, C))
48
48
49 We use a single marker to distinguish the "split" case from the "divergence"
49 We use a single marker to distinguish the "split" case from the "divergence"
50 case. If two independent operations rewrite the same changeset A into A' and
50 case. If two independent operations rewrite the same changeset A into A' and
51 A'', we have an error case: divergent rewriting. We can detect it because
51 A'', we have an error case: divergent rewriting. We can detect it because
52 two markers will be created independently:
52 two markers will be created independently:
53
53
54 (A, (B,)) and (A, (C,))
54 (A, (B,)) and (A, (C,))
55
55
56 Format
56 Format
57 ------
57 ------
58
58
59 Markers are stored in an append-only file stored in
59 Markers are stored in an append-only file stored in
60 '.hg/store/obsstore'.
60 '.hg/store/obsstore'.
61
61
62 The file starts with a version header:
62 The file starts with a version header:
63
63
64 - 1 unsigned byte: version number, starting at zero.
64 - 1 unsigned byte: version number, starting at zero.
65
65
66 The header is followed by the markers. Marker format depends on the version. See
66 The header is followed by the markers. Marker format depends on the version. See
67 the comment associated with each format for details.
67 the comment associated with each format for details.
68
68
69 """
69 """
70 import struct
70 import struct
71 import util, base85, node, parsers
71 import util, base85, node, parsers
72 import phases
72 import phases
73 from i18n import _
73 from i18n import _
74
74
75 _pack = struct.pack
75 _pack = struct.pack
76 _unpack = struct.unpack
76 _unpack = struct.unpack
77 _calcsize = struct.calcsize
77 _calcsize = struct.calcsize
78 propertycache = util.propertycache
78 propertycache = util.propertycache
79
79
80 # the obsolete feature is not mature enough to be enabled by default.
80 # the obsolete feature is not mature enough to be enabled by default.
81 # you have to rely on a third party extension to enable this.
81 # you have to rely on a third party extension to enable this.
82 _enabled = False
82 _enabled = False
83
83
84 # Options for obsolescence
84 # Options for obsolescence
85 createmarkersopt = 'createmarkers'
85 createmarkersopt = 'createmarkers'
86 allowunstableopt = 'allowunstable'
86 allowunstableopt = 'allowunstable'
87 exchangeopt = 'exchange'
87 exchangeopt = 'exchange'
88
88
89 ### obsolescence marker flag
89 ### obsolescence marker flag
90
90
91 ## bumpedfix flag
91 ## bumpedfix flag
92 #
92 #
93 # When a changeset A' succeeds a changeset A which became public, we call A'
93 # When a changeset A' succeeds a changeset A which became public, we call A'
94 # "bumped" because it's a successor of a public changeset
94 # "bumped" because it's a successor of a public changeset
95 #
95 #
96 # o A' (bumped)
96 # o A' (bumped)
97 # |`:
97 # |`:
98 # | o A
98 # | o A
99 # |/
99 # |/
100 # o Z
100 # o Z
101 #
101 #
102 # The way to solve this situation is to create a new changeset Ad as a child
102 # The way to solve this situation is to create a new changeset Ad as a child
103 # of A. This changeset has the same content as A'. So the diff from A to A'
103 # of A. This changeset has the same content as A'. So the diff from A to A'
104 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
104 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
105 #
105 #
106 # o Ad
106 # o Ad
107 # |`:
107 # |`:
108 # | x A'
108 # | x A'
109 # |'|
109 # |'|
110 # o | A
110 # o | A
111 # |/
111 # |/
112 # o Z
112 # o Z
113 #
113 #
114 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
114 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
115 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
115 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
116 # This flag means that the successors express the changes between the public and
116 # This flag means that the successors express the changes between the public and
117 # bumped version and fix the situation, breaking the transitivity of
117 # bumped version and fix the situation, breaking the transitivity of
118 # "bumped" here.
118 # "bumped" here.
119 bumpedfix = 1
119 bumpedfix = 1
120 usingsha256 = 2
120 usingsha256 = 2
121
121
122 ## Parsing and writing of version "0"
122 ## Parsing and writing of version "0"
123 #
123 #
124 # The header is followed by the markers. Each marker is made of:
124 # The header is followed by the markers. Each marker is made of:
125 #
125 #
126 # - 1 uint8 : number of new changesets "N", can be zero.
126 # - 1 uint8 : number of new changesets "N", can be zero.
127 #
127 #
128 # - 1 uint32: metadata size "M" in bytes.
128 # - 1 uint32: metadata size "M" in bytes.
129 #
129 #
130 # - 1 byte: a bit field. It is reserved for flags used in common
130 # - 1 byte: a bit field. It is reserved for flags used in common
131 # obsolete marker operations, to avoid repeated decoding of metadata
131 # obsolete marker operations, to avoid repeated decoding of metadata
132 # entries.
132 # entries.
133 #
133 #
134 # - 20 bytes: obsoleted changeset identifier.
134 # - 20 bytes: obsoleted changeset identifier.
135 #
135 #
136 # - N*20 bytes: new changesets identifiers.
136 # - N*20 bytes: new changesets identifiers.
137 #
137 #
138 # - M bytes: metadata as a sequence of nul-terminated strings. Each
138 # - M bytes: metadata as a sequence of nul-terminated strings. Each
139 # string contains a key and a value, separated by a colon ':', without
139 # string contains a key and a value, separated by a colon ':', without
140 # additional encoding. Keys cannot contain '\0' or ':' and values
140 # additional encoding. Keys cannot contain '\0' or ':' and values
141 # cannot contain '\0'.
141 # cannot contain '\0'.
142 _fm0version = 0
142 _fm0version = 0
143 _fm0fixed = '>BIB20s'
143 _fm0fixed = '>BIB20s'
144 _fm0node = '20s'
144 _fm0node = '20s'
145 _fm0fsize = _calcsize(_fm0fixed)
145 _fm0fsize = _calcsize(_fm0fixed)
146 _fm0fnodesize = _calcsize(_fm0node)
146 _fm0fnodesize = _calcsize(_fm0node)
147
147
148 def _fm0readmarkers(data, off):
148 def _fm0readmarkers(data, off):
149 # Loop on markers
149 # Loop on markers
150 l = len(data)
150 l = len(data)
151 while off + _fm0fsize <= l:
151 while off + _fm0fsize <= l:
152 # read fixed part
152 # read fixed part
153 cur = data[off:off + _fm0fsize]
153 cur = data[off:off + _fm0fsize]
154 off += _fm0fsize
154 off += _fm0fsize
155 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
155 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
156 # read replacement
156 # read replacement
157 sucs = ()
157 sucs = ()
158 if numsuc:
158 if numsuc:
159 s = (_fm0fnodesize * numsuc)
159 s = (_fm0fnodesize * numsuc)
160 cur = data[off:off + s]
160 cur = data[off:off + s]
161 sucs = _unpack(_fm0node * numsuc, cur)
161 sucs = _unpack(_fm0node * numsuc, cur)
162 off += s
162 off += s
163 # read metadata
163 # read metadata
164 # (metadata will be decoded on demand)
164 # (metadata will be decoded on demand)
165 metadata = data[off:off + mdsize]
165 metadata = data[off:off + mdsize]
166 if len(metadata) != mdsize:
166 if len(metadata) != mdsize:
167 raise util.Abort(_('parsing obsolete marker: metadata is too '
167 raise util.Abort(_('parsing obsolete marker: metadata is too '
168 'short, %d bytes expected, got %d')
168 'short, %d bytes expected, got %d')
169 % (mdsize, len(metadata)))
169 % (mdsize, len(metadata)))
170 off += mdsize
170 off += mdsize
171 metadata = _fm0decodemeta(metadata)
171 metadata = _fm0decodemeta(metadata)
172 try:
172 try:
173 when, offset = metadata.pop('date', '0 0').split(' ')
173 when, offset = metadata.pop('date', '0 0').split(' ')
174 date = float(when), int(offset)
174 date = float(when), int(offset)
175 except ValueError:
175 except ValueError:
176 date = (0., 0)
176 date = (0., 0)
177 parents = None
177 parents = None
178 if 'p2' in metadata:
178 if 'p2' in metadata:
179 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
179 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
180 elif 'p1' in metadata:
180 elif 'p1' in metadata:
181 parents = (metadata.pop('p1', None),)
181 parents = (metadata.pop('p1', None),)
182 elif 'p0' in metadata:
182 elif 'p0' in metadata:
183 parents = ()
183 parents = ()
184 if parents is not None:
184 if parents is not None:
185 try:
185 try:
186 parents = tuple(node.bin(p) for p in parents)
186 parents = tuple(node.bin(p) for p in parents)
187 # if parent content is not a nodeid, drop the data
187 # if parent content is not a nodeid, drop the data
188 for p in parents:
188 for p in parents:
189 if len(p) != 20:
189 if len(p) != 20:
190 parents = None
190 parents = None
191 break
191 break
192 except TypeError:
192 except TypeError:
193 # if content cannot be translated to nodeid drop the data.
193 # if content cannot be translated to nodeid drop the data.
194 parents = None
194 parents = None
195
195
196 metadata = tuple(sorted(metadata.iteritems()))
196 metadata = tuple(sorted(metadata.iteritems()))
197
197
198 yield (pre, sucs, flags, metadata, date, parents)
198 yield (pre, sucs, flags, metadata, date, parents)
199
199
200 def _fm0encodeonemarker(marker):
200 def _fm0encodeonemarker(marker):
201 pre, sucs, flags, metadata, date, parents = marker
201 pre, sucs, flags, metadata, date, parents = marker
202 if flags & usingsha256:
202 if flags & usingsha256:
203 raise util.Abort(_('cannot handle sha256 with old obsstore format'))
203 raise util.Abort(_('cannot handle sha256 with old obsstore format'))
204 metadata = dict(metadata)
204 metadata = dict(metadata)
205 time, tz = date
205 time, tz = date
206 metadata['date'] = '%r %i' % (time, tz)
206 metadata['date'] = '%r %i' % (time, tz)
207 if parents is not None:
207 if parents is not None:
208 if not parents:
208 if not parents:
209 # mark that we explicitly recorded no parents
209 # mark that we explicitly recorded no parents
210 metadata['p0'] = ''
210 metadata['p0'] = ''
211 for i, p in enumerate(parents):
211 for i, p in enumerate(parents):
212 metadata['p%i' % (i + 1)] = node.hex(p)
212 metadata['p%i' % (i + 1)] = node.hex(p)
213 metadata = _fm0encodemeta(metadata)
213 metadata = _fm0encodemeta(metadata)
214 numsuc = len(sucs)
214 numsuc = len(sucs)
215 format = _fm0fixed + (_fm0node * numsuc)
215 format = _fm0fixed + (_fm0node * numsuc)
216 data = [numsuc, len(metadata), flags, pre]
216 data = [numsuc, len(metadata), flags, pre]
217 data.extend(sucs)
217 data.extend(sucs)
218 return _pack(format, *data) + metadata
218 return _pack(format, *data) + metadata
219
219
220 def _fm0encodemeta(meta):
220 def _fm0encodemeta(meta):
221 """Return encoded metadata string to string mapping.
221 """Return encoded metadata string to string mapping.
222
222
223 Assume no ':' in keys and no '\0' in either keys or values."""
223 Assume no ':' in keys and no '\0' in either keys or values."""
224 for key, value in meta.iteritems():
224 for key, value in meta.iteritems():
225 if ':' in key or '\0' in key:
225 if ':' in key or '\0' in key:
226 raise ValueError("':' and '\0' are forbidden in metadata keys")
226 raise ValueError("':' and '\0' are forbidden in metadata keys")
227 if '\0' in value:
227 if '\0' in value:
228 raise ValueError("'\0' is forbidden in metadata values")
228 raise ValueError("'\0' is forbidden in metadata values")
229 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
229 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
230
230
231 def _fm0decodemeta(data):
231 def _fm0decodemeta(data):
232 """Return string to string dictionary from encoded version."""
232 """Return string to string dictionary from encoded version."""
233 d = {}
233 d = {}
234 for l in data.split('\0'):
234 for l in data.split('\0'):
235 if l:
235 if l:
236 key, value = l.split(':')
236 key, value = l.split(':')
237 d[key] = value
237 d[key] = value
238 return d
238 return d
239
239
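A round-trip sketch for the version-0 metadata helpers above: pairs are encoded as 'key:value' strings joined by NUL bytes, which is exactly why ':' is forbidden in keys and '\0' everywhere (values shown are illustrative):

    meta = {'user': 'alice', 'note': 'amended'}
    encoded = '\0'.join('%s:%s' % (k, meta[k]) for k in sorted(meta))
    # split on the first ':' only, since values may themselves contain colons
    decoded = dict(item.split(':', 1) for item in encoded.split('\0') if item)
    assert decoded == meta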
240 ## Parsing and writing of version "1"
240 ## Parsing and writing of version "1"
241 #
241 #
242 # The header is followed by the markers. Each marker is made of:
242 # The header is followed by the markers. Each marker is made of:
243 #
243 #
244 # - uint32: total size of the marker (including this field)
244 # - uint32: total size of the marker (including this field)
245 #
245 #
246 # - float64: date in seconds since epoch
246 # - float64: date in seconds since epoch
247 #
247 #
248 # - int16: timezone offset in minutes
248 # - int16: timezone offset in minutes
249 #
249 #
250 # - uint16: a bit field. It is reserved for flags used in common
250 # - uint16: a bit field. It is reserved for flags used in common
251 # obsolete marker operations, to avoid repeated decoding of metadata
251 # obsolete marker operations, to avoid repeated decoding of metadata
252 # entries.
252 # entries.
253 #
253 #
254 # - uint8: number of successors "N", can be zero.
254 # - uint8: number of successors "N", can be zero.
255 #
255 #
256 # - uint8: number of parents "P", can be zero.
256 # - uint8: number of parents "P", can be zero.
257 #
257 #
258 # 0: parents data stored but no parent,
258 # 0: parents data stored but no parent,
259 # 1: one parent stored,
259 # 1: one parent stored,
260 # 2: two parents stored,
260 # 2: two parents stored,
261 # 3: no parent data stored
261 # 3: no parent data stored
262 #
262 #
263 # - uint8: number of metadata entries M
263 # - uint8: number of metadata entries M
264 #
264 #
265 # - 20 or 32 bytes: precursor changeset identifier.
265 # - 20 or 32 bytes: precursor changeset identifier.
266 #
266 #
267 # - N*(20 or 32) bytes: successors changesets identifiers.
267 # - N*(20 or 32) bytes: successors changesets identifiers.
268 #
268 #
269 # - P*(20 or 32) bytes: parents of the precursors changesets.
269 # - P*(20 or 32) bytes: parents of the precursors changesets.
270 #
270 #
271 # - M*(uint8, uint8): size of all metadata entries (key and value)
271 # - M*(uint8, uint8): size of all metadata entries (key and value)
272 #
272 #
273 # - remaining bytes: the metadata, each (key, value) pair after the other.
273 # - remaining bytes: the metadata, each (key, value) pair after the other.
274 _fm1version = 1
274 _fm1version = 1
275 _fm1fixed = '>IdhHBBB20s'
275 _fm1fixed = '>IdhHBBB20s'
276 _fm1nodesha1 = '20s'
276 _fm1nodesha1 = '20s'
277 _fm1nodesha256 = '32s'
277 _fm1nodesha256 = '32s'
278 _fm1nodesha1size = _calcsize(_fm1nodesha1)
278 _fm1nodesha1size = _calcsize(_fm1nodesha1)
279 _fm1nodesha256size = _calcsize(_fm1nodesha256)
279 _fm1nodesha256size = _calcsize(_fm1nodesha256)
280 _fm1fsize = _calcsize(_fm1fixed)
280 _fm1fsize = _calcsize(_fm1fixed)
281 _fm1parentnone = 3
281 _fm1parentnone = 3
282 _fm1parentshift = 14
282 _fm1parentshift = 14
283 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
283 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
284 _fm1metapair = 'BB'
284 _fm1metapair = 'BB'
285 _fm1metapairsize = _calcsize('BB')
285 _fm1metapairsize = _calcsize('BB')

def _fm1purereadmarkers(data, off):
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = util.unpacker(_fm1fixed)

    while off <= stop:
        # read fixed part
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = _calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)
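
# A minimal round-trip sketch (illustrative only; the node values below are
# made up and the assertion assumes the pure-python reader):
#
#   m = ('\x01' * 20,           # precursor node
#        ('\x02' * 20,),        # a single successor
#        0,                     # flags
#        (('user', 'alice'),),  # metadata, sorted (key, value) pairs
#        (0.0, 0),              # date as (unixtime, offset)
#        None)                  # parents not recorded
#   raw = _fm1encodeonemarker(m)
#   assert next(_fm1purereadmarkers(raw, 0)) == m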

def _fm1readmarkers(data, off):
    native = getattr(parsers, 'fm1readmarkers', None)
    if not native:
        return _fm1purereadmarkers(data, off)
    stop = len(data) - _fm1fsize
    return native(data, off, stop)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion not in formats:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)
    return diskversion, formats[diskversion][0](data, off)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield _pack('>B', version)
    for marker in markers:
        yield encodeone(marker)
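
# For example, a fresh fm1 obsstore file written through encodemarkers()
# starts with the single version byte '\x01' (_pack('>B', 1)) followed by
# the encoded markers back to back (a sketch of the on-disk shape implied
# by the code above, not an exhaustive format description).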


class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in a later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changeset node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

@util.nogc
def _addsuccessors(successors, markers):
    for mark in markers:
        successors.setdefault(mark[0], set()).add(mark)

@util.nogc
def _addprecursors(precursors, markers):
    for mark in markers:
        for suc in mark[1]:
            precursors.setdefault(suc, set()).add(mark)

@util.nogc
def _addchildren(children, markers):
    for mark in markers:
        parents = mark[5]
        if parents is not None:
            for p in parents:
                children.setdefault(p, set()).add(mark)

def _checkinvalidmarkers(markers):
    """search for markers with invalid data and raise an error if needed

    Exists as a separate function to allow the evolve extension to implement
    more subtle handling.
    """
    for mark in markers:
        if node.nullid in mark[1]:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))

503 """Store obsolete markers
503 """Store obsolete markers
504
504
505 Markers can be accessed with two mappings:
505 Markers can be accessed with two mappings:
506 - precursors[x] -> set(markers on precursors edges of x)
506 - precursors[x] -> set(markers on precursors edges of x)
507 - successors[x] -> set(markers on successors edges of x)
507 - successors[x] -> set(markers on successors edges of x)
508 - children[x] -> set(markers on precursors edges of children(x)
508 - children[x] -> set(markers on precursors edges of children(x)
509 """
509 """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, precursor changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of the precursors
    #          None is used when no data has been recorded

    def __init__(self, sopener, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related sets
        self.caches = {}
        self._all = []
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        self._version = defaultformat
        self._readonly = readonly
        if data:
            self._version, markers = _readmarkers(data)
            self._addmarkers(markers)

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers, you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker has been added, False if the marker
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if self._readonly:
            raise util.Abort('creating obsolete markers is not enabled on '
                             'this repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._addmarkers(new)
            # new markers *may* have changed several sets. invalidate the cache.
            self.caches.clear()
        # record the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def successors(self):
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        return attr in self.__dict__

    def _addmarkers(self, markers):
        markers = list(markers) # to allow repeated iteration
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use any of these changesets as a successor
        - prune markers of direct children of these changesets
        - recursive application of the two rules on the precursors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for the highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None
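
# For instance, with only fm0 and fm1 registered in `formats`,
# commonversion([0, 1, 5]) returns 1: version 5 is unknown locally, so the
# highest version known on both sides wins.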

# arbitrarily picked to fit into 8K limit from HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys
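
# The resulting dict maps chunk names to base85 payloads, each chunk being a
# complete fm0 stream. A plausible shape (illustrative, not a real dump):
#
#   {'dump0': '<base85 of version byte + markers>',
#    'dump1': '<base85 of version byte + more markers>'}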

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(sorted(repo.obsstore))

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def getmarkers(repo, nodes=None):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)

def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for markerdata in repo.obsstore.relevantmarkers(node):
        yield marker(repo, markerdata)


def precursormarkers(ctx):
    """obsolete markers marking this changeset as a successor"""
    for data in ctx.repo().obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx.repo(), data)

def successormarkers(ctx):
    """obsolete markers making this changeset obsolete"""
    for data in ctx.repo().obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx.repo(), data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore markers flagged with the specified flags
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)
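
# For example (with hypothetical nodes), given markers A -> (B,) and
# B -> (C, D), allsuccessors(obsstore, [A]) yields A, B, C and D in
# arbitrary order, since it follows successor edges transitively from the
# initial nodes.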

def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore markers flagged with the specified flags
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using parent -> children
    or precursor -> successor relation. It is very similar to "descendant" but
    augmented with obsolescence information.

    Beware that obsolescence cycles may lead to unexpected results in complex
    situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)


def successorssets(repo, initialnode, cache=None):
    """Return all sets of successors of an initial node

    The successors set of a changeset A is a group of revisions that succeed
    A. It succeeds A as a consistent whole, each revision being only a partial
    replacement. The successors set contains non-obsolete changesets only.

    This function returns the full list of successor sets which is why it
    returns a list of tuples and not just a single tuple. Each tuple is a valid
    successors set. Note that (A,) may be a valid successors set for changeset
    A (see below).

    In most cases, a changeset A will have a single element (e.g. the changeset
    A is replaced by A') in its successors set. Though, it is also common for a
    changeset A to have no elements in its successor set (e.g. the changeset
    has been pruned). Therefore, the returned list of successors sets will be
    [(A',)] or [], respectively.

    When a changeset A is split into A' and B', however, it will result in a
    successors set containing more than a single element, i.e. [(A',B')].
    Divergent changesets will result in multiple successors sets, i.e. [(A',),
    (A'')].

    If a changeset A is not obsolete, then it will conceptually have no
    successors set. To distinguish this from a pruned changeset, the successor
    set will only contain itself, i.e. [(A,)].

    Finally, successors unknown locally are considered to be pruned (obsoleted
    without any successors).

    The optional `cache` parameter is a dictionary that may contain precomputed
    successors sets. It is meant to reuse the computation of a previous call to
    `successorssets` when multiple calls are made at the same time. The cache
    dictionary is updated in place. The caller is responsible for its life
    span. Code that makes multiple calls to `successorssets` *must* use this
    cache mechanism or suffer terrible performance.

    """

    succmarkers = repo.obsstore.successors

    # Stack of nodes we search successors sets for
    toproceed = [initialnode]
    # set version of above list for fast loop detection
    # element added to "toproceed" must be added here
    stackedset = set(toproceed)
    if cache is None:
        cache = {}

    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #    successors = directsuccessors(x)
    #    ss = [[]]
    #    for succ in directsuccessors(x):
    #        # product as in itertools cartesian product
    #        ss = product(ss, successorssets(succ))
    #    return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is the set version of this stack used to check if a
    # node is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors sets
    # for the node requested by the caller.
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors sets. Add it to the cache.
        # 3) We do not know successors set of direct successors of CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid last successors.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successors
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            #     For each direct successor of CURRENT, we check whether its
            #     successors sets are known. If they are not, we stack the
            #     unknown node and proceed to the next iteration of the while
            #     loop. (case 3)
            #
            #     During this step, we may detect obsolescence cycles: a node
            #     with unknown successors sets but already in the call stack.
            #     In such a situation, we arbitrarily set the successors sets
            #     of the node to nothing (node pruned) to break the cycle.
            #
            #     If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes successors sets of CURRENT (case 4); see details
            # in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means pruned node, multiple successors means split,
            #   single successors are standard replacement.
            #
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed successors sets
                            # of one of those successors we add it to the
                            # `toproceed` stack and stop all work for this
                            # iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # Successors set contributed by each marker depends on the
                # successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a Cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cardinal product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in successors
                                    # set, first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicated and subset
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors set first
                cache[current] = final
    return cache[initialnode]
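
# A worked example with hypothetical changesets: if A was split into (B, C)
# by one marker and independently rewritten into D by another, the two
# markers diverge and successorssets(repo, A) returns both (B, C) and (D,)
# as separate successors sets. If A's only marker has no successors at all
# (a prune), the result is [] instead.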

def _knownrevs(repo, nodes):
    """yield revision numbers of known nodes passed in parameters

    Unknown revisions are silently ignored."""
    torev = repo.changelog.nodemap.get
    for n in nodes:
        rev = torev(n)
        if rev is not None:
            yield rev

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return frozenset()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

# To keep things simple we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists on
    the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    for n in repo.obsstore.successors:
        rev = getrev(n)
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    revs = [(ctx.rev(), ctx) for ctx in
            repo.set('(not public()) and (not obsolete())')]
    revs.sort(key=lambda x: x[0])
    unstable = set()
    for rev, ctx in revs:
        # A rev is unstable if one of its parents is obsolete or unstable;
        # this works since we traverse in growing rev order
        if util.any((x.obsolete() or (x.rev() in unstable))
                    for x in ctx.parents()):
            unstable.add(rev)
    return unstable

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    bumped = set()
    # util function (avoid attribute lookup in the loop)
    phase = repo._phasecache.phase # would be faster to grab the full list
    public = phases.public
    cl = repo.changelog
    torev = cl.nodemap.get
    for ctx in repo.set('(not public()) and (not obsolete())'):
        rev = ctx.rev()
        # We only evaluate mutable, non-obsolete revisions
        node = ctx.node()
        # (future) A cache of precursors may be worth it if split is very
        # common
        for pnode in allprecursors(repo.obsstore, [node],
                                   ignoreflags=bumpedfix):
            prev = torev(pnode) # unfiltered! but so is phasecache
            if (prev is not None) and (phase(repo, prev) <= public):
                # we have a public precursor
                bumped.add(rev)
                break # Next draft!
    return bumped

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision."""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        seen = set()
        while toprocess:
            prec = toprocess.pop()[0]
            if prec in seen:
                continue # emergency cycle hanging prevention
            seen.add(prec)
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent
1184
1184
1185
1185
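# A usage sketch (editorial illustration, not part of the original module):
# callers read the cached sets above through getrevs(), as
# _computesuspendedset() does. For example, a hypothetical helper flagging
# troubled revisions:
#
#     def istroubled(repo, rev):
#         return any(rev in getrevs(repo, name)
#                    for name in ('unstable', 'bumped', 'divergent'))
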
def createmarkers(repo, relations, flag=0, date=None, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
    tuples. `old` and `news` are changectx objects. metadata is an optional
    dictionary containing metadata for this marker only. It is merged with
    the global metadata specified through the `metadata` argument of this
    function.

    Trying to obsolete a public changeset will raise an exception.

    Current user and date are used except if specified otherwise in the
    metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for rel in relations:
            prec = rel[0]
            sucs = rel[1]
            localmetadata = metadata.copy()
            if 2 < len(rel):
                localmetadata.update(rel[2])

            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            npare = None
            if not nsucs:
                npare = tuple(p.node() for p in prec.parents())
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
                                 date=date, metadata=localmetadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()

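# A minimal usage sketch (editorial illustration; the helper name and the
# 'note' metadata key are hypothetical, not part of this module): recording
# that changectx `new` supersedes changectx `old`:
#
#     def markreplaced(repo, old, new):
#         createmarkers(repo, [(old, (new,), {'note': 'rewritten'})])
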
def isenabled(repo, option):
    """Returns True if the given repository has the given obsolete option
    enabled.
    """
    result = set(repo.ui.configlist('experimental', 'evolution'))
    if 'all' in result:
        return True

    # For migration purposes, temporarily return true if the config hasn't
    # been set but _enabled is true.
    if len(result) == 0 and _enabled:
        return True

    # createmarkers must be enabled if other options are enabled
    if ((allowunstableopt in result or exchangeopt in result) and
        not createmarkersopt in result):
        raise util.Abort(_("'createmarkers' obsolete option must be enabled "
                           "if other obsolete options are enabled"))

    return option in result
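For context, a minimal sketch of how client code might guard a
history-rewriting operation on these options (an illustration assuming the
module is imported as mercurial.obsolete; the helper name is hypothetical):

    from mercurial import obsolete, util

    def checkcanrewrite(repo):
        # mirror the abort exercised by the test below when marker
        # creation is disabled
        if not obsolete.isenabled(repo, obsolete.createmarkersopt):
            raise util.Abort("creating obsolete markers is not enabled "
                             "on this repo")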
@@ -1,888 +1,888 @@
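The expected `hg debugobsolete` output in the changed hunks below differs
only in ordering: markers received during exchange now arrive sorted and,
judging from the new expectations, the order matches a plain sort of the
marker tuples by their leading node. A minimal illustration, with hash
prefixes abbreviated:

    >>> sorted(['245bde', 'cdbce2', 'ca8191', '1337', '5601'])
    ['1337', '245bde', '5601', 'ca8191', 'cdbce2']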
$ cat >> $HGRCPATH << EOF
> [phases]
> # public changesets are not obsolete
> publish=false
> [ui]
> logtemplate="{rev}:{node|short} ({phase}) [{tags} {bookmarks}] {desc|firstline}\n"
> EOF
$ mkcommit() {
> echo "$1" > "$1"
> hg add "$1"
> hg ci -m "add $1"
> }
$ getid() {
> hg log -T "{node}\n" --hidden -r "desc('$1')"
> }

$ cat > debugkeys.py <<EOF
> def reposetup(ui, repo):
>     class debugkeysrepo(repo.__class__):
>         def listkeys(self, namespace):
>             ui.write('listkeys %s\n' % (namespace,))
>             return super(debugkeysrepo, self).listkeys(namespace)
>
>     if repo.local():
>         repo.__class__ = debugkeysrepo
> EOF

$ hg init tmpa
$ cd tmpa
$ mkcommit kill_me

Checking that the feature is properly disabled

$ hg debugobsolete -d '0 0' `getid kill_me` -u babar
abort: creating obsolete markers is not enabled on this repo
[255]

Enabling it

$ cat >> $HGRCPATH << EOF
> [experimental]
> evolution=createmarkers,exchange
> EOF

Killing a single changeset without replacement

$ hg debugobsolete 0
abort: changeset references must be full hexadecimal node identifiers
[255]
$ hg debugobsolete '00'
abort: changeset references must be full hexadecimal node identifiers
[255]
$ hg debugobsolete -d '0 0' `getid kill_me` -u babar
$ hg debugobsolete
97b7c2d76b1845ed3eb988cd612611e72406cef0 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'babar'}

(test that mercurial is not confused)

$ hg up null --quiet # having 0 as parent prevents it from being hidden
$ hg tip
-1:000000000000 (public) [tip ]
$ hg up --hidden tip --quiet

Killing a single changeset with itself should fail
(simple local safeguard)

$ hg debugobsolete `getid kill_me` `getid kill_me`
abort: bad obsmarker input: in-marker cycle with 97b7c2d76b1845ed3eb988cd612611e72406cef0
[255]

$ cd ..

Killing a single changeset with replacement
(and testing the format option)

$ hg init tmpb
$ cd tmpb
$ mkcommit a
$ mkcommit b
$ mkcommit original_c
$ hg up "desc('b')"
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ mkcommit new_c
created new head
$ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
$ hg debugobsolete --config format.obsstore-version=0 --flag 12 `getid original_c` `getid new_c` -d '121 120'
$ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
2:245bde4270cd add original_c
$ hg debugrevlog -cd
# rev p1rev p2rev start end deltastart base p1 p2 rawsize totalsize compression heads chainlen
0 -1 -1 0 59 0 0 0 0 58 58 0 1 0
1 0 -1 59 118 59 59 0 0 58 116 0 1 0
2 1 -1 118 193 118 118 59 0 76 192 0 1 0
3 1 -1 193 260 193 193 59 0 66 258 0 2 0
$ hg debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}

(check for version number of the obsstore)

$ dd bs=1 count=1 if=.hg/store/obsstore 2>/dev/null
\x00 (no-eol) (esc)

do it again (it reads the obsstore before adding a new changeset)

$ hg up '.^'
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ mkcommit new_2_c
created new head
$ hg debugobsolete -d '1337 0' `getid new_c` `getid new_2_c`
$ hg debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}

Register two markers with a missing node

$ hg up '.^'
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ mkcommit new_3_c
created new head
$ hg debugobsolete -d '1338 0' `getid new_2_c` 1337133713371337133713371337133713371337
$ hg debugobsolete -d '1339 0' 1337133713371337133713371337133713371337 `getid new_3_c`
$ hg debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}

Refuse pathological nullid successors
$ hg debugobsolete -d '9001 0' 1337133713371337133713371337133713371337 0000000000000000000000000000000000000000
transaction abort!
rollback completed
abort: bad obsolescence marker detected: invalid successors nullid
[255]

Check that graphlog detects that a changeset is obsolete:

$ hg log -G
@ 5:5601fb93a350 (draft) [tip ] add new_3_c
|
o 1:7c3bad9141dc (draft) [ ] add b
|
o 0:1f0dee641bb7 (draft) [ ] add a


check that heads does not report them

$ hg heads
5:5601fb93a350 (draft) [tip ] add new_3_c
$ hg heads --hidden
5:5601fb93a350 (draft) [tip ] add new_3_c
4:ca819180edb9 (draft) [ ] add new_2_c
3:cdbce2fbb163 (draft) [ ] add new_c
2:245bde4270cd (draft) [ ] add original_c


check that summary does not report them

$ hg init ../sink
$ echo '[paths]' >> .hg/hgrc
$ echo 'default=../sink' >> .hg/hgrc
$ hg summary --remote
parent: 5:5601fb93a350 tip
add new_3_c
branch: default
commit: (clean)
update: (current)
phases: 3 draft (draft)
remote: 3 outgoing

$ hg summary --remote --hidden
parent: 5:5601fb93a350 tip
add new_3_c
branch: default
commit: (clean)
update: 3 new changesets, 4 branch heads (merge)
phases: 6 draft (draft)
remote: 3 outgoing

check that various commands work well with filtering

$ hg tip
5:5601fb93a350 (draft) [tip ] add new_3_c
$ hg log -r 6
abort: unknown revision '6'!
[255]
$ hg log -r 4
abort: hidden revision '4'!
(use --hidden to access hidden revisions)
[255]
$ hg debugrevspec 'rev(6)'
$ hg debugrevspec 'rev(4)'
$ hg debugrevspec 'null'
-1

Check that public changesets are not accounted as obsolete:

$ hg --hidden phase --public 2
$ hg log -G
@ 5:5601fb93a350 (draft) [tip ] add new_3_c
|
| o 2:245bde4270cd (public) [ ] add original_c
|/
o 1:7c3bad9141dc (public) [ ] add b
|
o 0:1f0dee641bb7 (public) [ ] add a


And that bumped changesets are detected
--------------------------------------

If we didn't filter obsolete changesets out, 3 and 4 would show up too. Also
note that the bumped changeset (5:5601fb93a350) is not a direct successor of
the public changeset

$ hg log --hidden -r 'bumped()'
5:5601fb93a350 (draft) [tip ] add new_3_c

And that we can't push bumped changesets

$ hg push ../tmpa -r 0 --force #(make repo related)
pushing to ../tmpa
searching for changes
warning: repository is unrelated
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files (+1 heads)
$ hg push ../tmpa
pushing to ../tmpa
searching for changes
abort: push includes bumped changeset: 5601fb93a350!
[255]

Fixing "bumped" situation
We need to create a clone of 5 and add a special marker with a flag

$ hg up '5^'
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg revert -ar 5
adding new_3_c
$ hg ci -m 'add n3w_3_c'
created new head
$ hg debugobsolete -d '1338 0' --flags 1 `getid new_3_c` `getid n3w_3_c`
$ hg log -r 'bumped()'
$ hg log -G
@ 6:6f9641995072 (draft) [tip ] add n3w_3_c
|
| o 2:245bde4270cd (public) [ ] add original_c
|/
o 1:7c3bad9141dc (public) [ ] add b
|
o 0:1f0dee641bb7 (public) [ ] add a


$ cd ..

Revision 0 is hidden
--------------------

$ hg init rev0hidden
$ cd rev0hidden

$ mkcommit kill0
$ hg up -q null
$ hg debugobsolete `getid kill0`
$ mkcommit a
$ mkcommit b

Should pick the first visible revision as "repo" node

$ hg archive ../archive-null
$ cat ../archive-null/.hg_archival.txt
repo: 1f0dee641bb7258c56bd60e93edfa2405381c41e
node: 7c3bad9141dcb46ff89abf5f61856facd56e476c
branch: default
latesttag: null
latesttagdistance: 2
changessincelatesttag: 2


$ cd ..

Exchange Test
============================

Destination repo does not have any data
---------------------------------------

Simple incoming test

$ hg init tmpc
$ cd tmpc
$ hg incoming ../tmpb
comparing with ../tmpb
0:1f0dee641bb7 (public) [ ] add a
1:7c3bad9141dc (public) [ ] add b
2:245bde4270cd (public) [ ] add original_c
6:6f9641995072 (draft) [tip ] add n3w_3_c

Try to pull markers
(extinct changesets are excluded but markers are pushed)

$ hg pull ../tmpb
pulling from ../tmpb
requesting all changes
adding changesets
adding manifests
adding file changes
added 4 changesets with 4 changes to 4 files (+1 heads)
(run 'hg heads' to see heads, 'hg merge' to merge)
$ hg debugobsolete
-245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
-cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
-ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
-1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
-5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
+1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
+245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
+5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
+ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
+cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}

Rollback/Transaction support

$ hg debugobsolete -d '1340 0' aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
$ hg debugobsolete
-245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
-cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
-ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
-1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
-5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
+1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
+245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
+5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
+ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
+cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 0 (Thu Jan 01 00:22:20 1970 +0000) {'user': 'test'}
$ hg rollback -n
repository tip rolled back to revision 3 (undo debugobsolete)
$ hg rollback
repository tip rolled back to revision 3 (undo debugobsolete)
$ hg debugobsolete
-245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
-cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
-ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
-1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
-5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
+1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
+245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
+5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
+ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
+cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}

$ cd ..

Try to push markers

$ hg init tmpd
$ hg -R tmpb push tmpd
pushing to tmpd
searching for changes
adding changesets
adding manifests
adding file changes
added 4 changesets with 4 changes to 4 files (+1 heads)
$ hg -R tmpd debugobsolete | sort
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}

Check obsolete keys are exchanged only if source has an obsolete store

$ hg init empty
$ hg --config extensions.debugkeys=debugkeys.py -R empty push tmpd
pushing to tmpd
listkeys phases
listkeys bookmarks
no changes found
listkeys phases
[1]

clone support
(markers are copied and extinct changesets are included to allow hardlinks)

$ hg clone tmpb clone-dest
updating to branch default
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg -R clone-dest log -G --hidden
@ 6:6f9641995072 (draft) [tip ] add n3w_3_c
|
| x 5:5601fb93a350 (draft) [ ] add new_3_c
|/
| x 4:ca819180edb9 (draft) [ ] add new_2_c
|/
| x 3:cdbce2fbb163 (draft) [ ] add new_c
|/
| o 2:245bde4270cd (public) [ ] add original_c
|/
o 1:7c3bad9141dc (public) [ ] add b
|
o 0:1f0dee641bb7 (public) [ ] add a

$ hg -R clone-dest debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}


Destination repo has existing data
---------------------------------------

On pull

$ hg init tmpe
$ cd tmpe
$ hg debugobsolete -d '1339 0' 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00
$ hg pull ../tmpb
pulling from ../tmpb
requesting all changes
adding changesets
adding manifests
adding file changes
added 4 changesets with 4 changes to 4 files (+1 heads)
(run 'hg heads' to see heads, 'hg merge' to merge)
$ hg debugobsolete
1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
-245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
-cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
-ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
-1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
-5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
+1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
+245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
+5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
+ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
+cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}


On push

$ hg push ../tmpc
pushing to ../tmpc
searching for changes
no changes found
[1]
$ hg -R ../tmpc debugobsolete
-245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
-cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
-ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
-1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
-5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
+1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
+245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
+5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
+ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
+cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}

detect outgoing obsolete and unstable
---------------------------------------


$ hg log -G
o 3:6f9641995072 (draft) [tip ] add n3w_3_c
|
| o 2:245bde4270cd (public) [ ] add original_c
|/
o 1:7c3bad9141dc (public) [ ] add b
|
o 0:1f0dee641bb7 (public) [ ] add a

$ hg up 'desc("n3w_3_c")'
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ mkcommit original_d
$ mkcommit original_e
$ hg debugobsolete --record-parents `getid original_d` -d '0 0'
$ hg debugobsolete | grep `getid original_d`
94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
$ hg log -r 'obsolete()'
4:94b33453f93b (draft) [ ] add original_d
$ hg log -G -r '::unstable()'
@ 5:cda648ca50f5 (draft) [tip ] add original_e
|
x 4:94b33453f93b (draft) [ ] add original_d
|
o 3:6f9641995072 (draft) [ ] add n3w_3_c
|
o 1:7c3bad9141dc (public) [ ] add b
|
o 0:1f0dee641bb7 (public) [ ] add a


refuse to push obsolete changeset

$ hg push ../tmpc/ -r 'desc("original_d")'
pushing to ../tmpc/
searching for changes
abort: push includes obsolete changeset: 94b33453f93b!
[255]

refuse to push unstable changeset

$ hg push ../tmpc/
pushing to ../tmpc/
searching for changes
abort: push includes unstable changeset: cda648ca50f5!
[255]

Test that extinct changesets are properly detected

$ hg log -r 'extinct()'

Don't try to push extinct changesets

$ hg init ../tmpf
$ hg out ../tmpf
comparing with ../tmpf
searching for changes
0:1f0dee641bb7 (public) [ ] add a
1:7c3bad9141dc (public) [ ] add b
2:245bde4270cd (public) [ ] add original_c
3:6f9641995072 (draft) [ ] add n3w_3_c
4:94b33453f93b (draft) [ ] add original_d
5:cda648ca50f5 (draft) [tip ] add original_e
$ hg push ../tmpf -f # -f because we push unstable too
pushing to ../tmpf
searching for changes
adding changesets
adding manifests
adding file changes
added 6 changesets with 6 changes to 6 files (+1 heads)

no warning displayed

$ hg push ../tmpf
pushing to ../tmpf
searching for changes
no changes found
[1]

Do not warn about new head when the new head is a successor of a remote one

$ hg log -G
@ 5:cda648ca50f5 (draft) [tip ] add original_e
|
x 4:94b33453f93b (draft) [ ] add original_d
|
o 3:6f9641995072 (draft) [ ] add n3w_3_c
|
| o 2:245bde4270cd (public) [ ] add original_c
|/
o 1:7c3bad9141dc (public) [ ] add b
|
o 0:1f0dee641bb7 (public) [ ] add a

$ hg up -q 'desc(n3w_3_c)'
$ mkcommit obsolete_e
created new head
$ hg debugobsolete `getid 'original_e'` `getid 'obsolete_e'`
$ hg outgoing ../tmpf # parasite hg outgoing testing
comparing with ../tmpf
searching for changes
6:3de5eca88c00 (draft) [tip ] add obsolete_e
$ hg push ../tmpf
pushing to ../tmpf
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files (+1 heads)

test relevance computation
---------------------------------------

Checking simple case of "marker relevance".


Reminder of the repo situation

$ hg log --hidden --graph
@ 6:3de5eca88c00 (draft) [tip ] add obsolete_e
|
| x 5:cda648ca50f5 (draft) [ ] add original_e
| |
| x 4:94b33453f93b (draft) [ ] add original_d
|/
o 3:6f9641995072 (draft) [ ] add n3w_3_c
|
| o 2:245bde4270cd (public) [ ] add original_c
|/
o 1:7c3bad9141dc (public) [ ] add b
|
o 0:1f0dee641bb7 (public) [ ] add a


List of all markers

$ hg debugobsolete
1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
-245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
-cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
-ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
-1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
-5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
+1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
+245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
+5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
+ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
+cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
584 94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
584 94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
585 cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)
585 cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)
586
586
587 List of changesets with no chain
587 List of changesets with no chain
588
588
589 $ hg debugobsolete --hidden --rev ::2
589 $ hg debugobsolete --hidden --rev ::2
590
590
591 List of changesets that are included on marker chain
591 List of changesets that are included on marker chain
592
592
593 $ hg debugobsolete --hidden --rev 6
593 $ hg debugobsolete --hidden --rev 6
594 cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)
594 cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)
595
595
596 List of changesets with a longer chain, (including a pruned children)
596 List of changesets with a longer chain, (including a pruned children)
597
597
598 $ hg debugobsolete --hidden --rev 3
598 $ hg debugobsolete --hidden --rev 3
599 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
599 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
600 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
600 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
601 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
601 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
602 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
602 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
603 94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
603 94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
604 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
604 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
605 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
605 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
606
606
607 List of both
607 List of both
608
608
609 $ hg debugobsolete --hidden --rev 3::6
609 $ hg debugobsolete --hidden --rev 3::6
610 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
610 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
611 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
611 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
612 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
612 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Thu Jan 01 00:00:01 1970 -0002) {'user': 'test'}
613 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
613 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
614 94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
614 94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
615 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
615 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
616 cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)
616 cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)
617 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
617 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
618
618
619 #if serve
619 #if serve
620
620
621 Test the debug output for exchange
621 Test the debug output for exchange
622 ----------------------------------
622 ----------------------------------
623
623
624 $ hg pull ../tmpb --config 'experimental.obsmarkers-exchange-debug=True' --config 'experimental.bundle2-exp=True'
624 $ hg pull ../tmpb --config 'experimental.obsmarkers-exchange-debug=True' --config 'experimental.bundle2-exp=True'
625 pulling from ../tmpb
625 pulling from ../tmpb
626 searching for changes
626 searching for changes
627 no changes found
627 no changes found
628 obsmarker-exchange: 346 bytes received
628 obsmarker-exchange: 346 bytes received
629
629
630 check hgweb does not explode
630 check hgweb does not explode
631 ====================================
631 ====================================
632
632
633 $ hg unbundle $TESTDIR/bundles/hgweb+obs.hg
633 $ hg unbundle $TESTDIR/bundles/hgweb+obs.hg
634 adding changesets
634 adding changesets
635 adding manifests
635 adding manifests
636 adding file changes
636 adding file changes
637 added 62 changesets with 63 changes to 9 files (+60 heads)
637 added 62 changesets with 63 changes to 9 files (+60 heads)
638 (run 'hg heads .' to see heads, 'hg merge' to merge)
638 (run 'hg heads .' to see heads, 'hg merge' to merge)
639 $ for node in `hg log -r 'desc(babar_)' --template '{node}\n'`;
639 $ for node in `hg log -r 'desc(babar_)' --template '{node}\n'`;
640 > do
640 > do
641 > hg debugobsolete $node
641 > hg debugobsolete $node
642 > done
642 > done
$ hg up tip
2 files updated, 0 files merged, 0 files removed, 0 files unresolved

$ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
$ cat hg.pid >> $DAEMON_PIDS

check changelog view

$ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'shortlog/'
200 Script output follows

check graph view

$ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'graph'
200 Script output follows

check filelog view

$ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'log/'`hg log -r . -T "{node}"`/'babar'
200 Script output follows

$ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'rev/68'
200 Script output follows
$ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'rev/67'
404 Not Found
[1]
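
(rev 68 is visible, but rev 67 is obsolete with no visible descendant and therefore hidden, so hgweb's filtered view answers 404 for it; the [1] is the non-zero exit status the test expects)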

check that the web.view config option is honored:

$ "$TESTDIR/killdaemons.py" hg.pid
$ cat >> .hg/hgrc << EOF
> [web]
> view=all
> EOF
$ wait
$ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
$ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'rev/67'
200 Script output follows
$ "$TESTDIR/killdaemons.py" hg.pid

Check the _enabled=False warning shown when obsolete markers exist but the feature is disabled

$ echo '[experimental]' >> $HGRCPATH
$ echo "evolution=" >> $HGRCPATH
$ hg log -r tip
obsolete feature not enabled but 68 markers found!
68:c15e9edfca13 (draft) [tip ] add celestine
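
(an empty experimental.evolution value turns the feature off, but the 68 markers already written to the store are kept, hence the warning)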

re-enable the feature for the later tests

$ echo '[experimental]' >> $HGRCPATH
$ echo "evolution=createmarkers,exchange" >> $HGRCPATH

#endif

Test incoming/outgoing with changesets obsoleted remotely, known locally
===============================================================================

This tests issue 3805

$ hg init repo-issue3805
$ cd repo-issue3805
$ echo "foo" > foo
$ hg ci -Am "A"
adding foo
$ hg clone . ../other-issue3805
updating to branch default
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ echo "bar" >> foo
$ hg ci --amend
$ cd ../other-issue3805
$ hg log -G
@ 0:193e9254ce7e (draft) [tip ] A

$ hg log -G -R ../repo-issue3805
@ 2:3816541e5485 (draft) [tip ] A

$ hg incoming
comparing with $TESTTMP/tmpe/repo-issue3805 (glob)
searching for changes
2:3816541e5485 (draft) [tip ] A
$ hg incoming --bundle ../issue3805.hg
comparing with $TESTTMP/tmpe/repo-issue3805 (glob)
searching for changes
2:3816541e5485 (draft) [tip ] A
$ hg outgoing
comparing with $TESTTMP/tmpe/repo-issue3805 (glob)
searching for changes
no changes found
[1]
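
(incoming reports the amended changeset 3816541e5485, which the clone does not have, while outgoing finds nothing: the clone's only changeset already exists in the other repository, merely obsoleted and hidden there)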

#if serve

$ hg serve -R ../repo-issue3805 -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
$ cat hg.pid >> $DAEMON_PIDS

$ hg incoming http://localhost:$HGPORT
comparing with http://localhost:$HGPORT/
searching for changes
1:3816541e5485 (draft) [tip ] A
$ hg outgoing http://localhost:$HGPORT
comparing with http://localhost:$HGPORT/
searching for changes
no changes found
[1]
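
(note the revision number, 1 instead of 2: over HTTP, incoming operates on a bundle carrying only the visible missing changesets, so the temporary amend commit is absent and the number is assigned locally on top of rev 0 rather than taken from the remote repository)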

$ "$TESTDIR/killdaemons.py" $DAEMON_PIDS

#endif

This tests issue 3814

(nothing to push but a locally hidden changeset)

$ cd ..
$ hg init repo-issue3814
$ cd repo-issue3805
$ hg push -r 3816541e5485 ../repo-issue3814
pushing to ../repo-issue3814
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
$ hg out ../repo-issue3814
comparing with ../repo-issue3814
searching for changes
no changes found
[1]
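
(outgoing right after the push correctly finds nothing: the only changeset missing from ../repo-issue3814 is the hidden predecessor, which must not be proposed for push; that is the point of issue 3814)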

Test that a local tag blocks a changeset from being hidden

$ hg tag -l visible -r 0 --hidden
$ hg log -G
@ 2:3816541e5485 (draft) [tip ] A

x 0:193e9254ce7e (draft) [visible ] A

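(without the local tag, the obsolete rev 0 would be hidden; the tag acts as a visibility blocker, so the changeset stays in the log, marked 'x' for obsolete)
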
Test that removing a local tag does not cause some commands to fail

$ hg tag -l -r tip tiptag
$ hg tags
tiptag 2:3816541e5485
tip 2:3816541e5485
visible 0:193e9254ce7e
$ hg --config extensions.strip= strip -r tip --no-backup
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg tags
visible 0:193e9254ce7e
tip 0:193e9254ce7e

Test bundle overlay onto hidden revision

$ cd ..
$ hg init repo-bundleoverlay
$ cd repo-bundleoverlay
$ echo "A" > foo
$ hg ci -Am "A"
adding foo
$ echo "B" >> foo
$ hg ci -m "B"
$ hg up 0
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ echo "C" >> foo
$ hg ci -m "C"
created new head
$ hg log -G
@ 2:c186d7714947 (draft) [tip ] C
|
| o 1:44526ebb0f98 (draft) [ ] B
|/
o 0:4b34ecfb0d56 (draft) [ ] A


$ hg clone -r1 . ../other-bundleoverlay
adding changesets
adding manifests
adding file changes
added 2 changesets with 2 changes to 1 files
updating to branch default
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ cd ../other-bundleoverlay
$ echo "B+" >> foo
$ hg ci --amend -m "B+"
$ hg log -G --hidden
@ 3:b7d587542d40 (draft) [tip ] B+
|
| x 2:eb95e9297e18 (draft) [ ] temporary amend commit for 44526ebb0f98
| |
| x 1:44526ebb0f98 (draft) [ ] B
|/
o 0:4b34ecfb0d56 (draft) [ ] A


$ hg incoming ../repo-bundleoverlay --bundle ../bundleoverlay.hg
comparing with ../repo-bundleoverlay
searching for changes
1:44526ebb0f98 (draft) [ ] B
2:c186d7714947 (draft) [tip ] C
$ hg log -G -R ../bundleoverlay.hg
o 4:c186d7714947 (draft) [tip ] C
|
| @ 3:b7d587542d40 (draft) [ ] B+
|/
o 0:4b34ecfb0d56 (draft) [ ] A

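(the overlay bundle repository combines both sides: the incoming C shows up, renumbered 4, on top of the common root A, while B and its temporary amend commit stay hidden because they are obsolete locally, even though the bundle also carries B)
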
#if serve

Test issue 4506

$ cd ..
$ hg init repo-issue4506
$ cd repo-issue4506
$ echo "0" > foo
$ hg add foo
$ hg ci -m "content-0"

$ hg up null
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ echo "1" > bar
$ hg add bar
$ hg ci -m "content-1"
created new head
$ hg up 0
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg graft 1
grafting 1:1c9eddb02162 "content-1" (tip)

$ hg debugobsolete `hg log -r1 -T'{node}'` `hg log -r2 -T'{node}'`

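(the marker above declares the graft, rev 2, to be the successor of rev 1, which therefore becomes hidden; the filelog entry for bar was created by rev 1, so its linkrev keeps pointing at the hidden revision)
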
$ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
$ cat hg.pid >> $DAEMON_PIDS

$ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'rev/1'
404 Not Found
[1]
$ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'file/tip/bar'
200 Script output follows
$ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'annotate/tip/bar'
200 Script output follows
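
(this is the heart of issue 4506: hgweb's file and annotate pages used to crash on a file whose linkrev points to a hidden revision; rev/1 itself is correctly a 404, while the pages for bar now answer 200)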

$ "$TESTDIR/killdaemons.py" $DAEMON_PIDS

#endif