obsolete: add readonly flag to obstore constructor...
Durham Goode
r22950:bb8278b2 default
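
This changeset passes readonly=not obsolete._enabled to the obsstore constructor, so a repository with the obsolescence feature disabled opens its marker store read-only: markers already on disk can still be read and counted (see the "markers found!" warning in the hunk below), but nothing new can be written. The following is a minimal, self-contained sketch of that gating pattern; the names SketchObsStore, add, and ReadOnlyStoreError are illustrative assumptions for this note only, not the actual mercurial/obsolete.py API, which is outside this diff.

# Minimal sketch of read-only gating (hypothetical names, not Mercurial's API)
class ReadOnlyStoreError(Exception):
    pass

class SketchObsStore(object):
    def __init__(self, data=None, readonly=False):
        # reading markers that already exist is allowed in both modes
        self._markers = list(data) if data else []
        self._readonly = readonly

    def __iter__(self):
        return iter(self._markers)

    def __len__(self):
        return len(self._markers)

    def add(self, marker):
        # write paths check the flag recorded by the constructor
        if self._readonly:
            raise ReadOnlyStoreError('obsstore opened in read-only mode')
        self._markers.append(marker)

# mirrors the call site in the hunk below: feature off -> store is read-only,
# yet existing markers can still be counted for the warning message
enabled = False
store = SketchObsStore(data=['m1', 'm2'], readonly=not enabled)
if len(store) and not enabled:
    print('obsolete feature not enabled but %i markers found!' % len(store))

Keeping the call site as the single place that computes the policy while the store enforces it on its own write paths means any future writer of the store inherits the check for free.
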
@@ -1,1790 +1,1791 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import urllib
9 import urllib
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock as lockmod
12 import lock as lockmod
13 import transaction, store, encoding, exchange, bundle2
13 import transaction, store, encoding, exchange, bundle2
14 import scmutil, util, extensions, hook, error, revset
14 import scmutil, util, extensions, hook, error, revset
15 import match as matchmod
15 import match as matchmod
16 import merge as mergemod
16 import merge as mergemod
17 import tags as tagsmod
17 import tags as tagsmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 import branchmap, pathutil
20 import branchmap, pathutil
21 propertycache = util.propertycache
21 propertycache = util.propertycache
22 filecache = scmutil.filecache
22 filecache = scmutil.filecache
23
23
24 class repofilecache(filecache):
24 class repofilecache(filecache):
25 """All filecache usage on repo are done for logic that should be unfiltered
25 """All filecache usage on repo are done for logic that should be unfiltered
26 """
26 """
27
27
28 def __get__(self, repo, type=None):
28 def __get__(self, repo, type=None):
29 return super(repofilecache, self).__get__(repo.unfiltered(), type)
29 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 def __set__(self, repo, value):
30 def __set__(self, repo, value):
31 return super(repofilecache, self).__set__(repo.unfiltered(), value)
31 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 def __delete__(self, repo):
32 def __delete__(self, repo):
33 return super(repofilecache, self).__delete__(repo.unfiltered())
33 return super(repofilecache, self).__delete__(repo.unfiltered())
34
34
35 class storecache(repofilecache):
35 class storecache(repofilecache):
36 """filecache for files in the store"""
36 """filecache for files in the store"""
37 def join(self, obj, fname):
37 def join(self, obj, fname):
38 return obj.sjoin(fname)
38 return obj.sjoin(fname)
39
39
40 class unfilteredpropertycache(propertycache):
40 class unfilteredpropertycache(propertycache):
41 """propertycache that apply to unfiltered repo only"""
41 """propertycache that apply to unfiltered repo only"""
42
42
43 def __get__(self, repo, type=None):
43 def __get__(self, repo, type=None):
44 unfi = repo.unfiltered()
44 unfi = repo.unfiltered()
45 if unfi is repo:
45 if unfi is repo:
46 return super(unfilteredpropertycache, self).__get__(unfi)
46 return super(unfilteredpropertycache, self).__get__(unfi)
47 return getattr(unfi, self.name)
47 return getattr(unfi, self.name)
48
48
49 class filteredpropertycache(propertycache):
49 class filteredpropertycache(propertycache):
50 """propertycache that must take filtering in account"""
50 """propertycache that must take filtering in account"""
51
51
52 def cachevalue(self, obj, value):
52 def cachevalue(self, obj, value):
53 object.__setattr__(obj, self.name, value)
53 object.__setattr__(obj, self.name, value)
54
54
55
55
56 def hasunfilteredcache(repo, name):
56 def hasunfilteredcache(repo, name):
57 """check if a repo has an unfilteredpropertycache value for <name>"""
57 """check if a repo has an unfilteredpropertycache value for <name>"""
58 return name in vars(repo.unfiltered())
58 return name in vars(repo.unfiltered())
59
59
60 def unfilteredmethod(orig):
60 def unfilteredmethod(orig):
61 """decorate method that always need to be run on unfiltered version"""
61 """decorate method that always need to be run on unfiltered version"""
62 def wrapper(repo, *args, **kwargs):
62 def wrapper(repo, *args, **kwargs):
63 return orig(repo.unfiltered(), *args, **kwargs)
63 return orig(repo.unfiltered(), *args, **kwargs)
64 return wrapper
64 return wrapper
65
65
66 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
66 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
67 'unbundle'))
67 'unbundle'))
68 legacycaps = moderncaps.union(set(['changegroupsubset']))
68 legacycaps = moderncaps.union(set(['changegroupsubset']))
69
69
70 class localpeer(peer.peerrepository):
70 class localpeer(peer.peerrepository):
71 '''peer for a local repo; reflects only the most recent API'''
71 '''peer for a local repo; reflects only the most recent API'''
72
72
73 def __init__(self, repo, caps=moderncaps):
73 def __init__(self, repo, caps=moderncaps):
74 peer.peerrepository.__init__(self)
74 peer.peerrepository.__init__(self)
75 self._repo = repo.filtered('served')
75 self._repo = repo.filtered('served')
76 self.ui = repo.ui
76 self.ui = repo.ui
77 self._caps = repo._restrictcapabilities(caps)
77 self._caps = repo._restrictcapabilities(caps)
78 self.requirements = repo.requirements
78 self.requirements = repo.requirements
79 self.supportedformats = repo.supportedformats
79 self.supportedformats = repo.supportedformats
80
80
81 def close(self):
81 def close(self):
82 self._repo.close()
82 self._repo.close()
83
83
84 def _capabilities(self):
84 def _capabilities(self):
85 return self._caps
85 return self._caps
86
86
87 def local(self):
87 def local(self):
88 return self._repo
88 return self._repo
89
89
90 def canpush(self):
90 def canpush(self):
91 return True
91 return True
92
92
93 def url(self):
93 def url(self):
94 return self._repo.url()
94 return self._repo.url()
95
95
96 def lookup(self, key):
96 def lookup(self, key):
97 return self._repo.lookup(key)
97 return self._repo.lookup(key)
98
98
99 def branchmap(self):
99 def branchmap(self):
100 return self._repo.branchmap()
100 return self._repo.branchmap()
101
101
102 def heads(self):
102 def heads(self):
103 return self._repo.heads()
103 return self._repo.heads()
104
104
105 def known(self, nodes):
105 def known(self, nodes):
106 return self._repo.known(nodes)
106 return self._repo.known(nodes)
107
107
108 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
108 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
109 format='HG10', **kwargs):
109 format='HG10', **kwargs):
110 cg = exchange.getbundle(self._repo, source, heads=heads,
110 cg = exchange.getbundle(self._repo, source, heads=heads,
111 common=common, bundlecaps=bundlecaps, **kwargs)
111 common=common, bundlecaps=bundlecaps, **kwargs)
112 if bundlecaps is not None and 'HG2X' in bundlecaps:
112 if bundlecaps is not None and 'HG2X' in bundlecaps:
113 # When requesting a bundle2, getbundle returns a stream to make the
113 # When requesting a bundle2, getbundle returns a stream to make the
114 # wire level function happier. We need to build a proper object
114 # wire level function happier. We need to build a proper object
115 # from it in local peer.
115 # from it in local peer.
116 cg = bundle2.unbundle20(self.ui, cg)
116 cg = bundle2.unbundle20(self.ui, cg)
117 return cg
117 return cg
118
118
119 # TODO We might want to move the next two calls into legacypeer and add
119 # TODO We might want to move the next two calls into legacypeer and add
120 # unbundle instead.
120 # unbundle instead.
121
121
122 def unbundle(self, cg, heads, url):
122 def unbundle(self, cg, heads, url):
123 """apply a bundle on a repo
123 """apply a bundle on a repo
124
124
125 This function handles the repo locking itself."""
125 This function handles the repo locking itself."""
126 try:
126 try:
127 cg = exchange.readbundle(self.ui, cg, None)
127 cg = exchange.readbundle(self.ui, cg, None)
128 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
128 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
129 if util.safehasattr(ret, 'getchunks'):
129 if util.safehasattr(ret, 'getchunks'):
130 # This is a bundle20 object, turn it into an unbundler.
130 # This is a bundle20 object, turn it into an unbundler.
131 # This little dance should be dropped eventually when the API
131 # This little dance should be dropped eventually when the API
132 # is finally improved.
132 # is finally improved.
133 stream = util.chunkbuffer(ret.getchunks())
133 stream = util.chunkbuffer(ret.getchunks())
134 ret = bundle2.unbundle20(self.ui, stream)
134 ret = bundle2.unbundle20(self.ui, stream)
135 return ret
135 return ret
136 except error.PushRaced, exc:
136 except error.PushRaced, exc:
137 raise error.ResponseError(_('push failed:'), str(exc))
137 raise error.ResponseError(_('push failed:'), str(exc))
138
138
139 def lock(self):
139 def lock(self):
140 return self._repo.lock()
140 return self._repo.lock()
141
141
142 def addchangegroup(self, cg, source, url):
142 def addchangegroup(self, cg, source, url):
143 return changegroup.addchangegroup(self._repo, cg, source, url)
143 return changegroup.addchangegroup(self._repo, cg, source, url)
144
144
145 def pushkey(self, namespace, key, old, new):
145 def pushkey(self, namespace, key, old, new):
146 return self._repo.pushkey(namespace, key, old, new)
146 return self._repo.pushkey(namespace, key, old, new)
147
147
148 def listkeys(self, namespace):
148 def listkeys(self, namespace):
149 return self._repo.listkeys(namespace)
149 return self._repo.listkeys(namespace)
150
150
151 def debugwireargs(self, one, two, three=None, four=None, five=None):
151 def debugwireargs(self, one, two, three=None, four=None, five=None):
152 '''used to test argument passing over the wire'''
152 '''used to test argument passing over the wire'''
153 return "%s %s %s %s %s" % (one, two, three, four, five)
153 return "%s %s %s %s %s" % (one, two, three, four, five)
154
154
155 class locallegacypeer(localpeer):
155 class locallegacypeer(localpeer):
156 '''peer extension which implements legacy methods too; used for tests with
156 '''peer extension which implements legacy methods too; used for tests with
157 restricted capabilities'''
157 restricted capabilities'''
158
158
159 def __init__(self, repo):
159 def __init__(self, repo):
160 localpeer.__init__(self, repo, caps=legacycaps)
160 localpeer.__init__(self, repo, caps=legacycaps)
161
161
162 def branches(self, nodes):
162 def branches(self, nodes):
163 return self._repo.branches(nodes)
163 return self._repo.branches(nodes)
164
164
165 def between(self, pairs):
165 def between(self, pairs):
166 return self._repo.between(pairs)
166 return self._repo.between(pairs)
167
167
168 def changegroup(self, basenodes, source):
168 def changegroup(self, basenodes, source):
169 return changegroup.changegroup(self._repo, basenodes, source)
169 return changegroup.changegroup(self._repo, basenodes, source)
170
170
171 def changegroupsubset(self, bases, heads, source):
171 def changegroupsubset(self, bases, heads, source):
172 return changegroup.changegroupsubset(self._repo, bases, heads, source)
172 return changegroup.changegroupsubset(self._repo, bases, heads, source)
173
173
174 class localrepository(object):
174 class localrepository(object):
175
175
176 supportedformats = set(('revlogv1', 'generaldelta'))
176 supportedformats = set(('revlogv1', 'generaldelta'))
177 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
177 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
178 'dotencode'))
178 'dotencode'))
179 openerreqs = set(('revlogv1', 'generaldelta'))
179 openerreqs = set(('revlogv1', 'generaldelta'))
180 requirements = ['revlogv1']
180 requirements = ['revlogv1']
181 filtername = None
181 filtername = None
182
182
183 # a list of (ui, featureset) functions.
183 # a list of (ui, featureset) functions.
184 # only functions defined in module of enabled extensions are invoked
184 # only functions defined in module of enabled extensions are invoked
185 featuresetupfuncs = set()
185 featuresetupfuncs = set()
186
186
187 def _baserequirements(self, create):
187 def _baserequirements(self, create):
188 return self.requirements[:]
188 return self.requirements[:]
189
189
190 def __init__(self, baseui, path=None, create=False):
190 def __init__(self, baseui, path=None, create=False):
191 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
191 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
192 self.wopener = self.wvfs
192 self.wopener = self.wvfs
193 self.root = self.wvfs.base
193 self.root = self.wvfs.base
194 self.path = self.wvfs.join(".hg")
194 self.path = self.wvfs.join(".hg")
195 self.origroot = path
195 self.origroot = path
196 self.auditor = pathutil.pathauditor(self.root, self._checknested)
196 self.auditor = pathutil.pathauditor(self.root, self._checknested)
197 self.vfs = scmutil.vfs(self.path)
197 self.vfs = scmutil.vfs(self.path)
198 self.opener = self.vfs
198 self.opener = self.vfs
199 self.baseui = baseui
199 self.baseui = baseui
200 self.ui = baseui.copy()
200 self.ui = baseui.copy()
201 self.ui.copy = baseui.copy # prevent copying repo configuration
201 self.ui.copy = baseui.copy # prevent copying repo configuration
202 # A list of callback to shape the phase if no data were found.
202 # A list of callback to shape the phase if no data were found.
203 # Callback are in the form: func(repo, roots) --> processed root.
203 # Callback are in the form: func(repo, roots) --> processed root.
204 # This list it to be filled by extension during repo setup
204 # This list it to be filled by extension during repo setup
205 self._phasedefaults = []
205 self._phasedefaults = []
206 try:
206 try:
207 self.ui.readconfig(self.join("hgrc"), self.root)
207 self.ui.readconfig(self.join("hgrc"), self.root)
208 extensions.loadall(self.ui)
208 extensions.loadall(self.ui)
209 except IOError:
209 except IOError:
210 pass
210 pass
211
211
212 if self.featuresetupfuncs:
212 if self.featuresetupfuncs:
213 self.supported = set(self._basesupported) # use private copy
213 self.supported = set(self._basesupported) # use private copy
214 extmods = set(m.__name__ for n, m
214 extmods = set(m.__name__ for n, m
215 in extensions.extensions(self.ui))
215 in extensions.extensions(self.ui))
216 for setupfunc in self.featuresetupfuncs:
216 for setupfunc in self.featuresetupfuncs:
217 if setupfunc.__module__ in extmods:
217 if setupfunc.__module__ in extmods:
218 setupfunc(self.ui, self.supported)
218 setupfunc(self.ui, self.supported)
219 else:
219 else:
220 self.supported = self._basesupported
220 self.supported = self._basesupported
221
221
222 if not self.vfs.isdir():
222 if not self.vfs.isdir():
223 if create:
223 if create:
224 if not self.wvfs.exists():
224 if not self.wvfs.exists():
225 self.wvfs.makedirs()
225 self.wvfs.makedirs()
226 self.vfs.makedir(notindexed=True)
226 self.vfs.makedir(notindexed=True)
227 requirements = self._baserequirements(create)
227 requirements = self._baserequirements(create)
228 if self.ui.configbool('format', 'usestore', True):
228 if self.ui.configbool('format', 'usestore', True):
229 self.vfs.mkdir("store")
229 self.vfs.mkdir("store")
230 requirements.append("store")
230 requirements.append("store")
231 if self.ui.configbool('format', 'usefncache', True):
231 if self.ui.configbool('format', 'usefncache', True):
232 requirements.append("fncache")
232 requirements.append("fncache")
233 if self.ui.configbool('format', 'dotencode', True):
233 if self.ui.configbool('format', 'dotencode', True):
234 requirements.append('dotencode')
234 requirements.append('dotencode')
235 # create an invalid changelog
235 # create an invalid changelog
236 self.vfs.append(
236 self.vfs.append(
237 "00changelog.i",
237 "00changelog.i",
238 '\0\0\0\2' # represents revlogv2
238 '\0\0\0\2' # represents revlogv2
239 ' dummy changelog to prevent using the old repo layout'
239 ' dummy changelog to prevent using the old repo layout'
240 )
240 )
241 if self.ui.configbool('format', 'generaldelta', False):
241 if self.ui.configbool('format', 'generaldelta', False):
242 requirements.append("generaldelta")
242 requirements.append("generaldelta")
243 requirements = set(requirements)
243 requirements = set(requirements)
244 else:
244 else:
245 raise error.RepoError(_("repository %s not found") % path)
245 raise error.RepoError(_("repository %s not found") % path)
246 elif create:
246 elif create:
247 raise error.RepoError(_("repository %s already exists") % path)
247 raise error.RepoError(_("repository %s already exists") % path)
248 else:
248 else:
249 try:
249 try:
250 requirements = scmutil.readrequires(self.vfs, self.supported)
250 requirements = scmutil.readrequires(self.vfs, self.supported)
251 except IOError, inst:
251 except IOError, inst:
252 if inst.errno != errno.ENOENT:
252 if inst.errno != errno.ENOENT:
253 raise
253 raise
254 requirements = set()
254 requirements = set()
255
255
256 self.sharedpath = self.path
256 self.sharedpath = self.path
257 try:
257 try:
258 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
258 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
259 realpath=True)
259 realpath=True)
260 s = vfs.base
260 s = vfs.base
261 if not vfs.exists():
261 if not vfs.exists():
262 raise error.RepoError(
262 raise error.RepoError(
263 _('.hg/sharedpath points to nonexistent directory %s') % s)
263 _('.hg/sharedpath points to nonexistent directory %s') % s)
264 self.sharedpath = s
264 self.sharedpath = s
265 except IOError, inst:
265 except IOError, inst:
266 if inst.errno != errno.ENOENT:
266 if inst.errno != errno.ENOENT:
267 raise
267 raise
268
268
269 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
269 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
270 self.spath = self.store.path
270 self.spath = self.store.path
271 self.svfs = self.store.vfs
271 self.svfs = self.store.vfs
272 self.sopener = self.svfs
272 self.sopener = self.svfs
273 self.sjoin = self.store.join
273 self.sjoin = self.store.join
274 self.vfs.createmode = self.store.createmode
274 self.vfs.createmode = self.store.createmode
275 self._applyrequirements(requirements)
275 self._applyrequirements(requirements)
276 if create:
276 if create:
277 self._writerequirements()
277 self._writerequirements()
278
278
279
279
280 self._branchcaches = {}
280 self._branchcaches = {}
281 self.filterpats = {}
281 self.filterpats = {}
282 self._datafilters = {}
282 self._datafilters = {}
283 self._transref = self._lockref = self._wlockref = None
283 self._transref = self._lockref = self._wlockref = None
284
284
285 # A cache for various files under .hg/ that tracks file changes,
285 # A cache for various files under .hg/ that tracks file changes,
286 # (used by the filecache decorator)
286 # (used by the filecache decorator)
287 #
287 #
288 # Maps a property name to its util.filecacheentry
288 # Maps a property name to its util.filecacheentry
289 self._filecache = {}
289 self._filecache = {}
290
290
291 # hold sets of revision to be filtered
291 # hold sets of revision to be filtered
292 # should be cleared when something might have changed the filter value:
292 # should be cleared when something might have changed the filter value:
293 # - new changesets,
293 # - new changesets,
294 # - phase change,
294 # - phase change,
295 # - new obsolescence marker,
295 # - new obsolescence marker,
296 # - working directory parent change,
296 # - working directory parent change,
297 # - bookmark changes
297 # - bookmark changes
298 self.filteredrevcache = {}
298 self.filteredrevcache = {}
299
299
300 def close(self):
300 def close(self):
301 pass
301 pass
302
302
303 def _restrictcapabilities(self, caps):
303 def _restrictcapabilities(self, caps):
304 # bundle2 is not ready for prime time, drop it unless explicitly
304 # bundle2 is not ready for prime time, drop it unless explicitly
305 # required by the tests (or some brave tester)
305 # required by the tests (or some brave tester)
306 if self.ui.configbool('experimental', 'bundle2-exp', False):
306 if self.ui.configbool('experimental', 'bundle2-exp', False):
307 caps = set(caps)
307 caps = set(caps)
308 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
308 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
309 caps.add('bundle2-exp=' + urllib.quote(capsblob))
309 caps.add('bundle2-exp=' + urllib.quote(capsblob))
310 return caps
310 return caps
311
311
312 def _applyrequirements(self, requirements):
312 def _applyrequirements(self, requirements):
313 self.requirements = requirements
313 self.requirements = requirements
314 self.sopener.options = dict((r, 1) for r in requirements
314 self.sopener.options = dict((r, 1) for r in requirements
315 if r in self.openerreqs)
315 if r in self.openerreqs)
316 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
316 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
317 if chunkcachesize is not None:
317 if chunkcachesize is not None:
318 self.sopener.options['chunkcachesize'] = chunkcachesize
318 self.sopener.options['chunkcachesize'] = chunkcachesize
319
319
320 def _writerequirements(self):
320 def _writerequirements(self):
321 reqfile = self.opener("requires", "w")
321 reqfile = self.opener("requires", "w")
322 for r in sorted(self.requirements):
322 for r in sorted(self.requirements):
323 reqfile.write("%s\n" % r)
323 reqfile.write("%s\n" % r)
324 reqfile.close()
324 reqfile.close()
325
325
326 def _checknested(self, path):
326 def _checknested(self, path):
327 """Determine if path is a legal nested repository."""
327 """Determine if path is a legal nested repository."""
328 if not path.startswith(self.root):
328 if not path.startswith(self.root):
329 return False
329 return False
330 subpath = path[len(self.root) + 1:]
330 subpath = path[len(self.root) + 1:]
331 normsubpath = util.pconvert(subpath)
331 normsubpath = util.pconvert(subpath)
332
332
333 # XXX: Checking against the current working copy is wrong in
333 # XXX: Checking against the current working copy is wrong in
334 # the sense that it can reject things like
334 # the sense that it can reject things like
335 #
335 #
336 # $ hg cat -r 10 sub/x.txt
336 # $ hg cat -r 10 sub/x.txt
337 #
337 #
338 # if sub/ is no longer a subrepository in the working copy
338 # if sub/ is no longer a subrepository in the working copy
339 # parent revision.
339 # parent revision.
340 #
340 #
341 # However, it can of course also allow things that would have
341 # However, it can of course also allow things that would have
342 # been rejected before, such as the above cat command if sub/
342 # been rejected before, such as the above cat command if sub/
343 # is a subrepository now, but was a normal directory before.
343 # is a subrepository now, but was a normal directory before.
344 # The old path auditor would have rejected by mistake since it
344 # The old path auditor would have rejected by mistake since it
345 # panics when it sees sub/.hg/.
345 # panics when it sees sub/.hg/.
346 #
346 #
347 # All in all, checking against the working copy seems sensible
347 # All in all, checking against the working copy seems sensible
348 # since we want to prevent access to nested repositories on
348 # since we want to prevent access to nested repositories on
349 # the filesystem *now*.
349 # the filesystem *now*.
350 ctx = self[None]
350 ctx = self[None]
351 parts = util.splitpath(subpath)
351 parts = util.splitpath(subpath)
352 while parts:
352 while parts:
353 prefix = '/'.join(parts)
353 prefix = '/'.join(parts)
354 if prefix in ctx.substate:
354 if prefix in ctx.substate:
355 if prefix == normsubpath:
355 if prefix == normsubpath:
356 return True
356 return True
357 else:
357 else:
358 sub = ctx.sub(prefix)
358 sub = ctx.sub(prefix)
359 return sub.checknested(subpath[len(prefix) + 1:])
359 return sub.checknested(subpath[len(prefix) + 1:])
360 else:
360 else:
361 parts.pop()
361 parts.pop()
362 return False
362 return False
363
363
364 def peer(self):
364 def peer(self):
365 return localpeer(self) # not cached to avoid reference cycle
365 return localpeer(self) # not cached to avoid reference cycle
366
366
367 def unfiltered(self):
367 def unfiltered(self):
368 """Return unfiltered version of the repository
368 """Return unfiltered version of the repository
369
369
370 Intended to be overwritten by filtered repo."""
370 Intended to be overwritten by filtered repo."""
371 return self
371 return self
372
372
373 def filtered(self, name):
373 def filtered(self, name):
374 """Return a filtered version of a repository"""
374 """Return a filtered version of a repository"""
375 # build a new class with the mixin and the current class
375 # build a new class with the mixin and the current class
376 # (possibly subclass of the repo)
376 # (possibly subclass of the repo)
377 class proxycls(repoview.repoview, self.unfiltered().__class__):
377 class proxycls(repoview.repoview, self.unfiltered().__class__):
378 pass
378 pass
379 return proxycls(self, name)
379 return proxycls(self, name)
380
380
381 @repofilecache('bookmarks')
381 @repofilecache('bookmarks')
382 def _bookmarks(self):
382 def _bookmarks(self):
383 return bookmarks.bmstore(self)
383 return bookmarks.bmstore(self)
384
384
385 @repofilecache('bookmarks.current')
385 @repofilecache('bookmarks.current')
386 def _bookmarkcurrent(self):
386 def _bookmarkcurrent(self):
387 return bookmarks.readcurrent(self)
387 return bookmarks.readcurrent(self)
388
388
389 def bookmarkheads(self, bookmark):
389 def bookmarkheads(self, bookmark):
390 name = bookmark.split('@', 1)[0]
390 name = bookmark.split('@', 1)[0]
391 heads = []
391 heads = []
392 for mark, n in self._bookmarks.iteritems():
392 for mark, n in self._bookmarks.iteritems():
393 if mark.split('@', 1)[0] == name:
393 if mark.split('@', 1)[0] == name:
394 heads.append(n)
394 heads.append(n)
395 return heads
395 return heads
396
396
397 @storecache('phaseroots')
397 @storecache('phaseroots')
398 def _phasecache(self):
398 def _phasecache(self):
399 return phases.phasecache(self, self._phasedefaults)
399 return phases.phasecache(self, self._phasedefaults)
400
400
401 @storecache('obsstore')
401 @storecache('obsstore')
402 def obsstore(self):
402 def obsstore(self):
403 # read default format for new obsstore.
403 # read default format for new obsstore.
404 defaultformat = self.ui.configint('format', 'obsstore-version', None)
404 defaultformat = self.ui.configint('format', 'obsstore-version', None)
405 # rely on obsstore class default when possible.
405 # rely on obsstore class default when possible.
406 kwargs = {}
406 kwargs = {}
407 if defaultformat is not None:
407 if defaultformat is not None:
408 kwargs['defaultformat'] = defaultformat
408 kwargs['defaultformat'] = defaultformat
409 store = obsolete.obsstore(self.sopener, **kwargs)
409 store = obsolete.obsstore(self.sopener, readonly=not obsolete._enabled,
410 **kwargs)
410 if store and not obsolete._enabled:
411 if store and not obsolete._enabled:
411 # message is rare enough to not be translated
412 # message is rare enough to not be translated
412 msg = 'obsolete feature not enabled but %i markers found!\n'
413 msg = 'obsolete feature not enabled but %i markers found!\n'
413 self.ui.warn(msg % len(list(store)))
414 self.ui.warn(msg % len(list(store)))
414 return store
415 return store
415
416
416 @storecache('00changelog.i')
417 @storecache('00changelog.i')
417 def changelog(self):
418 def changelog(self):
418 c = changelog.changelog(self.sopener)
419 c = changelog.changelog(self.sopener)
419 if 'HG_PENDING' in os.environ:
420 if 'HG_PENDING' in os.environ:
420 p = os.environ['HG_PENDING']
421 p = os.environ['HG_PENDING']
421 if p.startswith(self.root):
422 if p.startswith(self.root):
422 c.readpending('00changelog.i.a')
423 c.readpending('00changelog.i.a')
423 return c
424 return c
424
425
425 @storecache('00manifest.i')
426 @storecache('00manifest.i')
426 def manifest(self):
427 def manifest(self):
427 return manifest.manifest(self.sopener)
428 return manifest.manifest(self.sopener)
428
429
429 @repofilecache('dirstate')
430 @repofilecache('dirstate')
430 def dirstate(self):
431 def dirstate(self):
431 warned = [0]
432 warned = [0]
432 def validate(node):
433 def validate(node):
433 try:
434 try:
434 self.changelog.rev(node)
435 self.changelog.rev(node)
435 return node
436 return node
436 except error.LookupError:
437 except error.LookupError:
437 if not warned[0]:
438 if not warned[0]:
438 warned[0] = True
439 warned[0] = True
439 self.ui.warn(_("warning: ignoring unknown"
440 self.ui.warn(_("warning: ignoring unknown"
440 " working parent %s!\n") % short(node))
441 " working parent %s!\n") % short(node))
441 return nullid
442 return nullid
442
443
443 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
444 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
444
445
445 def __getitem__(self, changeid):
446 def __getitem__(self, changeid):
446 if changeid is None:
447 if changeid is None:
447 return context.workingctx(self)
448 return context.workingctx(self)
448 return context.changectx(self, changeid)
449 return context.changectx(self, changeid)
449
450
450 def __contains__(self, changeid):
451 def __contains__(self, changeid):
451 try:
452 try:
452 return bool(self.lookup(changeid))
453 return bool(self.lookup(changeid))
453 except error.RepoLookupError:
454 except error.RepoLookupError:
454 return False
455 return False
455
456
456 def __nonzero__(self):
457 def __nonzero__(self):
457 return True
458 return True
458
459
459 def __len__(self):
460 def __len__(self):
460 return len(self.changelog)
461 return len(self.changelog)
461
462
462 def __iter__(self):
463 def __iter__(self):
463 return iter(self.changelog)
464 return iter(self.changelog)
464
465
465 def revs(self, expr, *args):
466 def revs(self, expr, *args):
466 '''Return a list of revisions matching the given revset'''
467 '''Return a list of revisions matching the given revset'''
467 expr = revset.formatspec(expr, *args)
468 expr = revset.formatspec(expr, *args)
468 m = revset.match(None, expr)
469 m = revset.match(None, expr)
469 return m(self, revset.spanset(self))
470 return m(self, revset.spanset(self))
470
471
471 def set(self, expr, *args):
472 def set(self, expr, *args):
472 '''
473 '''
473 Yield a context for each matching revision, after doing arg
474 Yield a context for each matching revision, after doing arg
474 replacement via revset.formatspec
475 replacement via revset.formatspec
475 '''
476 '''
476 for r in self.revs(expr, *args):
477 for r in self.revs(expr, *args):
477 yield self[r]
478 yield self[r]
478
479
479 def url(self):
480 def url(self):
480 return 'file:' + self.root
481 return 'file:' + self.root
481
482
482 def hook(self, name, throw=False, **args):
483 def hook(self, name, throw=False, **args):
483 """Call a hook, passing this repo instance.
484 """Call a hook, passing this repo instance.
484
485
485 This a convenience method to aid invoking hooks. Extensions likely
486 This a convenience method to aid invoking hooks. Extensions likely
486 won't call this unless they have registered a custom hook or are
487 won't call this unless they have registered a custom hook or are
487 replacing code that is expected to call a hook.
488 replacing code that is expected to call a hook.
488 """
489 """
489 return hook.hook(self.ui, self, name, throw, **args)
490 return hook.hook(self.ui, self, name, throw, **args)
490
491
491 @unfilteredmethod
492 @unfilteredmethod
492 def _tag(self, names, node, message, local, user, date, extra={},
493 def _tag(self, names, node, message, local, user, date, extra={},
493 editor=False):
494 editor=False):
494 if isinstance(names, str):
495 if isinstance(names, str):
495 names = (names,)
496 names = (names,)
496
497
497 branches = self.branchmap()
498 branches = self.branchmap()
498 for name in names:
499 for name in names:
499 self.hook('pretag', throw=True, node=hex(node), tag=name,
500 self.hook('pretag', throw=True, node=hex(node), tag=name,
500 local=local)
501 local=local)
501 if name in branches:
502 if name in branches:
502 self.ui.warn(_("warning: tag %s conflicts with existing"
503 self.ui.warn(_("warning: tag %s conflicts with existing"
503 " branch name\n") % name)
504 " branch name\n") % name)
504
505
505 def writetags(fp, names, munge, prevtags):
506 def writetags(fp, names, munge, prevtags):
506 fp.seek(0, 2)
507 fp.seek(0, 2)
507 if prevtags and prevtags[-1] != '\n':
508 if prevtags and prevtags[-1] != '\n':
508 fp.write('\n')
509 fp.write('\n')
509 for name in names:
510 for name in names:
510 m = munge and munge(name) or name
511 m = munge and munge(name) or name
511 if (self._tagscache.tagtypes and
512 if (self._tagscache.tagtypes and
512 name in self._tagscache.tagtypes):
513 name in self._tagscache.tagtypes):
513 old = self.tags().get(name, nullid)
514 old = self.tags().get(name, nullid)
514 fp.write('%s %s\n' % (hex(old), m))
515 fp.write('%s %s\n' % (hex(old), m))
515 fp.write('%s %s\n' % (hex(node), m))
516 fp.write('%s %s\n' % (hex(node), m))
516 fp.close()
517 fp.close()
517
518
518 prevtags = ''
519 prevtags = ''
519 if local:
520 if local:
520 try:
521 try:
521 fp = self.opener('localtags', 'r+')
522 fp = self.opener('localtags', 'r+')
522 except IOError:
523 except IOError:
523 fp = self.opener('localtags', 'a')
524 fp = self.opener('localtags', 'a')
524 else:
525 else:
525 prevtags = fp.read()
526 prevtags = fp.read()
526
527
527 # local tags are stored in the current charset
528 # local tags are stored in the current charset
528 writetags(fp, names, None, prevtags)
529 writetags(fp, names, None, prevtags)
529 for name in names:
530 for name in names:
530 self.hook('tag', node=hex(node), tag=name, local=local)
531 self.hook('tag', node=hex(node), tag=name, local=local)
531 return
532 return
532
533
533 try:
534 try:
534 fp = self.wfile('.hgtags', 'rb+')
535 fp = self.wfile('.hgtags', 'rb+')
535 except IOError, e:
536 except IOError, e:
536 if e.errno != errno.ENOENT:
537 if e.errno != errno.ENOENT:
537 raise
538 raise
538 fp = self.wfile('.hgtags', 'ab')
539 fp = self.wfile('.hgtags', 'ab')
539 else:
540 else:
540 prevtags = fp.read()
541 prevtags = fp.read()
541
542
542 # committed tags are stored in UTF-8
543 # committed tags are stored in UTF-8
543 writetags(fp, names, encoding.fromlocal, prevtags)
544 writetags(fp, names, encoding.fromlocal, prevtags)
544
545
545 fp.close()
546 fp.close()
546
547
547 self.invalidatecaches()
548 self.invalidatecaches()
548
549
549 if '.hgtags' not in self.dirstate:
550 if '.hgtags' not in self.dirstate:
550 self[None].add(['.hgtags'])
551 self[None].add(['.hgtags'])
551
552
552 m = matchmod.exact(self.root, '', ['.hgtags'])
553 m = matchmod.exact(self.root, '', ['.hgtags'])
553 tagnode = self.commit(message, user, date, extra=extra, match=m,
554 tagnode = self.commit(message, user, date, extra=extra, match=m,
554 editor=editor)
555 editor=editor)
555
556
556 for name in names:
557 for name in names:
557 self.hook('tag', node=hex(node), tag=name, local=local)
558 self.hook('tag', node=hex(node), tag=name, local=local)
558
559
559 return tagnode
560 return tagnode
560
561
561 def tag(self, names, node, message, local, user, date, editor=False):
562 def tag(self, names, node, message, local, user, date, editor=False):
562 '''tag a revision with one or more symbolic names.
563 '''tag a revision with one or more symbolic names.
563
564
564 names is a list of strings or, when adding a single tag, names may be a
565 names is a list of strings or, when adding a single tag, names may be a
565 string.
566 string.
566
567
567 if local is True, the tags are stored in a per-repository file.
568 if local is True, the tags are stored in a per-repository file.
568 otherwise, they are stored in the .hgtags file, and a new
569 otherwise, they are stored in the .hgtags file, and a new
569 changeset is committed with the change.
570 changeset is committed with the change.
570
571
571 keyword arguments:
572 keyword arguments:
572
573
573 local: whether to store tags in non-version-controlled file
574 local: whether to store tags in non-version-controlled file
574 (default False)
575 (default False)
575
576
576 message: commit message to use if committing
577 message: commit message to use if committing
577
578
578 user: name of user to use if committing
579 user: name of user to use if committing
579
580
580 date: date tuple to use if committing'''
581 date: date tuple to use if committing'''
581
582
582 if not local:
583 if not local:
583 m = matchmod.exact(self.root, '', ['.hgtags'])
584 m = matchmod.exact(self.root, '', ['.hgtags'])
584 if util.any(self.status(match=m, unknown=True, ignored=True)):
585 if util.any(self.status(match=m, unknown=True, ignored=True)):
585 raise util.Abort(_('working copy of .hgtags is changed'),
586 raise util.Abort(_('working copy of .hgtags is changed'),
586 hint=_('please commit .hgtags manually'))
587 hint=_('please commit .hgtags manually'))
587
588
588 self.tags() # instantiate the cache
589 self.tags() # instantiate the cache
589 self._tag(names, node, message, local, user, date, editor=editor)
590 self._tag(names, node, message, local, user, date, editor=editor)
590
591
591 @filteredpropertycache
592 @filteredpropertycache
592 def _tagscache(self):
593 def _tagscache(self):
593 '''Returns a tagscache object that contains various tags related
594 '''Returns a tagscache object that contains various tags related
594 caches.'''
595 caches.'''
595
596
596 # This simplifies its cache management by having one decorated
597 # This simplifies its cache management by having one decorated
597 # function (this one) and the rest simply fetch things from it.
598 # function (this one) and the rest simply fetch things from it.
598 class tagscache(object):
599 class tagscache(object):
599 def __init__(self):
600 def __init__(self):
600 # These two define the set of tags for this repository. tags
601 # These two define the set of tags for this repository. tags
601 # maps tag name to node; tagtypes maps tag name to 'global' or
602 # maps tag name to node; tagtypes maps tag name to 'global' or
602 # 'local'. (Global tags are defined by .hgtags across all
603 # 'local'. (Global tags are defined by .hgtags across all
603 # heads, and local tags are defined in .hg/localtags.)
604 # heads, and local tags are defined in .hg/localtags.)
604 # They constitute the in-memory cache of tags.
605 # They constitute the in-memory cache of tags.
605 self.tags = self.tagtypes = None
606 self.tags = self.tagtypes = None
606
607
607 self.nodetagscache = self.tagslist = None
608 self.nodetagscache = self.tagslist = None
608
609
609 cache = tagscache()
610 cache = tagscache()
610 cache.tags, cache.tagtypes = self._findtags()
611 cache.tags, cache.tagtypes = self._findtags()
611
612
612 return cache
613 return cache
613
614
614 def tags(self):
615 def tags(self):
615 '''return a mapping of tag to node'''
616 '''return a mapping of tag to node'''
616 t = {}
617 t = {}
617 if self.changelog.filteredrevs:
618 if self.changelog.filteredrevs:
618 tags, tt = self._findtags()
619 tags, tt = self._findtags()
619 else:
620 else:
620 tags = self._tagscache.tags
621 tags = self._tagscache.tags
621 for k, v in tags.iteritems():
622 for k, v in tags.iteritems():
622 try:
623 try:
623 # ignore tags to unknown nodes
624 # ignore tags to unknown nodes
624 self.changelog.rev(v)
625 self.changelog.rev(v)
625 t[k] = v
626 t[k] = v
626 except (error.LookupError, ValueError):
627 except (error.LookupError, ValueError):
627 pass
628 pass
628 return t
629 return t
629
630
630 def _findtags(self):
631 def _findtags(self):
631 '''Do the hard work of finding tags. Return a pair of dicts
632 '''Do the hard work of finding tags. Return a pair of dicts
632 (tags, tagtypes) where tags maps tag name to node, and tagtypes
633 (tags, tagtypes) where tags maps tag name to node, and tagtypes
633 maps tag name to a string like \'global\' or \'local\'.
634 maps tag name to a string like \'global\' or \'local\'.
634 Subclasses or extensions are free to add their own tags, but
635 Subclasses or extensions are free to add their own tags, but
635 should be aware that the returned dicts will be retained for the
636 should be aware that the returned dicts will be retained for the
636 duration of the localrepo object.'''
637 duration of the localrepo object.'''
637
638
638 # XXX what tagtype should subclasses/extensions use? Currently
639 # XXX what tagtype should subclasses/extensions use? Currently
639 # mq and bookmarks add tags, but do not set the tagtype at all.
640 # mq and bookmarks add tags, but do not set the tagtype at all.
640 # Should each extension invent its own tag type? Should there
641 # Should each extension invent its own tag type? Should there
641 # be one tagtype for all such "virtual" tags? Or is the status
642 # be one tagtype for all such "virtual" tags? Or is the status
642 # quo fine?
643 # quo fine?
643
644
644 alltags = {} # map tag name to (node, hist)
645 alltags = {} # map tag name to (node, hist)
645 tagtypes = {}
646 tagtypes = {}
646
647
647 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
648 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
648 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
649 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
649
650
650 # Build the return dicts. Have to re-encode tag names because
651 # Build the return dicts. Have to re-encode tag names because
651 # the tags module always uses UTF-8 (in order not to lose info
652 # the tags module always uses UTF-8 (in order not to lose info
652 # writing to the cache), but the rest of Mercurial wants them in
653 # writing to the cache), but the rest of Mercurial wants them in
653 # local encoding.
654 # local encoding.
654 tags = {}
655 tags = {}
655 for (name, (node, hist)) in alltags.iteritems():
656 for (name, (node, hist)) in alltags.iteritems():
656 if node != nullid:
657 if node != nullid:
657 tags[encoding.tolocal(name)] = node
658 tags[encoding.tolocal(name)] = node
658 tags['tip'] = self.changelog.tip()
659 tags['tip'] = self.changelog.tip()
659 tagtypes = dict([(encoding.tolocal(name), value)
660 tagtypes = dict([(encoding.tolocal(name), value)
660 for (name, value) in tagtypes.iteritems()])
661 for (name, value) in tagtypes.iteritems()])
661 return (tags, tagtypes)
662 return (tags, tagtypes)
662
663
663 def tagtype(self, tagname):
664 def tagtype(self, tagname):
664 '''
665 '''
665 return the type of the given tag. result can be:
666 return the type of the given tag. result can be:
666
667
667 'local' : a local tag
668 'local' : a local tag
668 'global' : a global tag
669 'global' : a global tag
669 None : tag does not exist
670 None : tag does not exist
670 '''
671 '''
671
672
672 return self._tagscache.tagtypes.get(tagname)
673 return self._tagscache.tagtypes.get(tagname)
673
674
674 def tagslist(self):
675 def tagslist(self):
675 '''return a list of tags ordered by revision'''
676 '''return a list of tags ordered by revision'''
676 if not self._tagscache.tagslist:
677 if not self._tagscache.tagslist:
677 l = []
678 l = []
678 for t, n in self.tags().iteritems():
679 for t, n in self.tags().iteritems():
679 l.append((self.changelog.rev(n), t, n))
680 l.append((self.changelog.rev(n), t, n))
680 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
681 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
681
682
682 return self._tagscache.tagslist
683 return self._tagscache.tagslist
683
684
684 def nodetags(self, node):
685 def nodetags(self, node):
685 '''return the tags associated with a node'''
686 '''return the tags associated with a node'''
686 if not self._tagscache.nodetagscache:
687 if not self._tagscache.nodetagscache:
687 nodetagscache = {}
688 nodetagscache = {}
688 for t, n in self._tagscache.tags.iteritems():
689 for t, n in self._tagscache.tags.iteritems():
689 nodetagscache.setdefault(n, []).append(t)
690 nodetagscache.setdefault(n, []).append(t)
690 for tags in nodetagscache.itervalues():
691 for tags in nodetagscache.itervalues():
691 tags.sort()
692 tags.sort()
692 self._tagscache.nodetagscache = nodetagscache
693 self._tagscache.nodetagscache = nodetagscache
693 return self._tagscache.nodetagscache.get(node, [])
694 return self._tagscache.nodetagscache.get(node, [])
694
695
695 def nodebookmarks(self, node):
696 def nodebookmarks(self, node):
696 marks = []
697 marks = []
697 for bookmark, n in self._bookmarks.iteritems():
698 for bookmark, n in self._bookmarks.iteritems():
698 if n == node:
699 if n == node:
699 marks.append(bookmark)
700 marks.append(bookmark)
700 return sorted(marks)
701 return sorted(marks)
701
702
702 def branchmap(self):
703 def branchmap(self):
703 '''returns a dictionary {branch: [branchheads]} with branchheads
704 '''returns a dictionary {branch: [branchheads]} with branchheads
704 ordered by increasing revision number'''
705 ordered by increasing revision number'''
705 branchmap.updatecache(self)
706 branchmap.updatecache(self)
706 return self._branchcaches[self.filtername]
707 return self._branchcaches[self.filtername]
707
708
708 def branchtip(self, branch):
709 def branchtip(self, branch):
709 '''return the tip node for a given branch'''
710 '''return the tip node for a given branch'''
710 try:
711 try:
711 return self.branchmap().branchtip(branch)
712 return self.branchmap().branchtip(branch)
712 except KeyError:
713 except KeyError:
713 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
714 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
714
715
715 def lookup(self, key):
716 def lookup(self, key):
716 return self[key].node()
717 return self[key].node()
717
718
718 def lookupbranch(self, key, remote=None):
719 def lookupbranch(self, key, remote=None):
719 repo = remote or self
720 repo = remote or self
720 if key in repo.branchmap():
721 if key in repo.branchmap():
721 return key
722 return key
722
723
723 repo = (remote and remote.local()) and remote or self
724 repo = (remote and remote.local()) and remote or self
724 return repo[key].branch()
725 return repo[key].branch()
725
726
726 def known(self, nodes):
727 def known(self, nodes):
727 nm = self.changelog.nodemap
728 nm = self.changelog.nodemap
728 pc = self._phasecache
729 pc = self._phasecache
729 result = []
730 result = []
730 for n in nodes:
731 for n in nodes:
731 r = nm.get(n)
732 r = nm.get(n)
732 resp = not (r is None or pc.phase(self, r) >= phases.secret)
733 resp = not (r is None or pc.phase(self, r) >= phases.secret)
733 result.append(resp)
734 result.append(resp)
734 return result
735 return result
735
736
736 def local(self):
737 def local(self):
737 return self
738 return self
738
739
739 def cancopy(self):
740 def cancopy(self):
740 # so statichttprepo's override of local() works
741 # so statichttprepo's override of local() works
741 if not self.local():
742 if not self.local():
742 return False
743 return False
743 if not self.ui.configbool('phases', 'publish', True):
744 if not self.ui.configbool('phases', 'publish', True):
744 return True
745 return True
745 # if publishing we can't copy if there is filtered content
746 # if publishing we can't copy if there is filtered content
746 return not self.filtered('visible').changelog.filteredrevs
747 return not self.filtered('visible').changelog.filteredrevs
747
748
748 def join(self, f, *insidef):
749 def join(self, f, *insidef):
749 return os.path.join(self.path, f, *insidef)
750 return os.path.join(self.path, f, *insidef)
750
751
751 def wjoin(self, f, *insidef):
752 def wjoin(self, f, *insidef):
752 return os.path.join(self.root, f, *insidef)
753 return os.path.join(self.root, f, *insidef)
753
754
754 def file(self, f):
755 def file(self, f):
755 if f[0] == '/':
756 if f[0] == '/':
756 f = f[1:]
757 f = f[1:]
757 return filelog.filelog(self.sopener, f)
758 return filelog.filelog(self.sopener, f)
758
759
759 def changectx(self, changeid):
760 def changectx(self, changeid):
760 return self[changeid]
761 return self[changeid]
761
762
762 def parents(self, changeid=None):
763 def parents(self, changeid=None):
763 '''get list of changectxs for parents of changeid'''
764 '''get list of changectxs for parents of changeid'''
764 return self[changeid].parents()
765 return self[changeid].parents()
765
766
766 def setparents(self, p1, p2=nullid):
767 def setparents(self, p1, p2=nullid):
767 self.dirstate.beginparentchange()
768 self.dirstate.beginparentchange()
768 copies = self.dirstate.setparents(p1, p2)
769 copies = self.dirstate.setparents(p1, p2)
769 pctx = self[p1]
770 pctx = self[p1]
770 if copies:
771 if copies:
771 # Adjust copy records, the dirstate cannot do it, it
772 # Adjust copy records, the dirstate cannot do it, it
772 # requires access to parents manifests. Preserve them
773 # requires access to parents manifests. Preserve them
773 # only for entries added to first parent.
774 # only for entries added to first parent.
774 for f in copies:
775 for f in copies:
775 if f not in pctx and copies[f] in pctx:
776 if f not in pctx and copies[f] in pctx:
776 self.dirstate.copy(copies[f], f)
777 self.dirstate.copy(copies[f], f)
777 if p2 == nullid:
778 if p2 == nullid:
778 for f, s in sorted(self.dirstate.copies().items()):
779 for f, s in sorted(self.dirstate.copies().items()):
779 if f not in pctx and s not in pctx:
780 if f not in pctx and s not in pctx:
780 self.dirstate.copy(None, f)
781 self.dirstate.copy(None, f)
781 self.dirstate.endparentchange()
782 self.dirstate.endparentchange()
782
783
783 def filectx(self, path, changeid=None, fileid=None):
784 def filectx(self, path, changeid=None, fileid=None):
784 """changeid can be a changeset revision, node, or tag.
785 """changeid can be a changeset revision, node, or tag.
785 fileid can be a file revision or node."""
786 fileid can be a file revision or node."""
786 return context.filectx(self, path, changeid, fileid)
787 return context.filectx(self, path, changeid, fileid)
787
788
788 def getcwd(self):
789 def getcwd(self):
789 return self.dirstate.getcwd()
790 return self.dirstate.getcwd()
790
791
791 def pathto(self, f, cwd=None):
792 def pathto(self, f, cwd=None):
792 return self.dirstate.pathto(f, cwd)
793 return self.dirstate.pathto(f, cwd)
793
794
794 def wfile(self, f, mode='r'):
795 def wfile(self, f, mode='r'):
795 return self.wopener(f, mode)
796 return self.wopener(f, mode)
796
797
797 def _link(self, f):
798 def _link(self, f):
798 return self.wvfs.islink(f)
799 return self.wvfs.islink(f)
799
800
800 def _loadfilter(self, filter):
801 def _loadfilter(self, filter):
801 if filter not in self.filterpats:
802 if filter not in self.filterpats:
802 l = []
803 l = []
803 for pat, cmd in self.ui.configitems(filter):
804 for pat, cmd in self.ui.configitems(filter):
804 if cmd == '!':
805 if cmd == '!':
805 continue
806 continue
806 mf = matchmod.match(self.root, '', [pat])
807 mf = matchmod.match(self.root, '', [pat])
807 fn = None
808 fn = None
808 params = cmd
809 params = cmd
809 for name, filterfn in self._datafilters.iteritems():
810 for name, filterfn in self._datafilters.iteritems():
810 if cmd.startswith(name):
811 if cmd.startswith(name):
811 fn = filterfn
812 fn = filterfn
812 params = cmd[len(name):].lstrip()
813 params = cmd[len(name):].lstrip()
813 break
814 break
814 if not fn:
815 if not fn:
815 fn = lambda s, c, **kwargs: util.filter(s, c)
816 fn = lambda s, c, **kwargs: util.filter(s, c)
816 # Wrap old filters not supporting keyword arguments
817 # Wrap old filters not supporting keyword arguments
817 if not inspect.getargspec(fn)[2]:
818 if not inspect.getargspec(fn)[2]:
818 oldfn = fn
819 oldfn = fn
819 fn = lambda s, c, **kwargs: oldfn(s, c)
820 fn = lambda s, c, **kwargs: oldfn(s, c)
820 l.append((mf, fn, params))
821 l.append((mf, fn, params))
821 self.filterpats[filter] = l
822 self.filterpats[filter] = l
822 return self.filterpats[filter]
823 return self.filterpats[filter]
823
824
824 def _filter(self, filterpats, filename, data):
825 def _filter(self, filterpats, filename, data):
825 for mf, fn, cmd in filterpats:
826 for mf, fn, cmd in filterpats:
826 if mf(filename):
827 if mf(filename):
827 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
828 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
828 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
829 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
829 break
830 break
830
831
831 return data
832 return data
832
833
833 @unfilteredpropertycache
834 @unfilteredpropertycache
834 def _encodefilterpats(self):
835 def _encodefilterpats(self):
835 return self._loadfilter('encode')
836 return self._loadfilter('encode')
836
837
837 @unfilteredpropertycache
838 @unfilteredpropertycache
838 def _decodefilterpats(self):
839 def _decodefilterpats(self):
839 return self._loadfilter('decode')
840 return self._loadfilter('decode')
840
841
841 def adddatafilter(self, name, filter):
842 def adddatafilter(self, name, filter):
842 self._datafilters[name] = filter
843 self._datafilters[name] = filter
843
844
844 def wread(self, filename):
845 def wread(self, filename):
845 if self._link(filename):
846 if self._link(filename):
846 data = self.wvfs.readlink(filename)
847 data = self.wvfs.readlink(filename)
847 else:
848 else:
848 data = self.wopener.read(filename)
849 data = self.wopener.read(filename)
849 return self._filter(self._encodefilterpats, filename, data)
850 return self._filter(self._encodefilterpats, filename, data)
850
851
851 def wwrite(self, filename, data, flags):
852 def wwrite(self, filename, data, flags):
852 data = self._filter(self._decodefilterpats, filename, data)
853 data = self._filter(self._decodefilterpats, filename, data)
853 if 'l' in flags:
854 if 'l' in flags:
854 self.wopener.symlink(data, filename)
855 self.wopener.symlink(data, filename)
855 else:
856 else:
856 self.wopener.write(filename, data)
857 self.wopener.write(filename, data)
857 if 'x' in flags:
858 if 'x' in flags:
858 self.wvfs.setflags(filename, False, True)
859 self.wvfs.setflags(filename, False, True)
859
860
860 def wwritedata(self, filename, data):
861 def wwritedata(self, filename, data):
861 return self._filter(self._decodefilterpats, filename, data)
862 return self._filter(self._decodefilterpats, filename, data)
862
863
863 def transaction(self, desc, report=None):
864 def transaction(self, desc, report=None):
864 tr = self._transref and self._transref() or None
865 tr = self._transref and self._transref() or None
865 if tr and tr.running():
866 if tr and tr.running():
866 return tr.nest()
867 return tr.nest()
867
868
868 # abort here if the journal already exists
869 # abort here if the journal already exists
869 if self.svfs.exists("journal"):
870 if self.svfs.exists("journal"):
870 raise error.RepoError(
871 raise error.RepoError(
871 _("abandoned transaction found"),
872 _("abandoned transaction found"),
872 hint=_("run 'hg recover' to clean up transaction"))
873 hint=_("run 'hg recover' to clean up transaction"))
873
874
874 def onclose():
875 def onclose():
875 self.store.write(self._transref())
876 self.store.write(self._transref())
876
877
877 self._writejournal(desc)
878 self._writejournal(desc)
878 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
879 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
879 rp = report and report or self.ui.warn
880 rp = report and report or self.ui.warn
880 tr = transaction.transaction(rp, self.sopener,
881 tr = transaction.transaction(rp, self.sopener,
881 "journal",
882 "journal",
882 aftertrans(renames),
883 aftertrans(renames),
883 self.store.createmode,
884 self.store.createmode,
884 onclose)
885 onclose)
885 self._transref = weakref.ref(tr)
886 self._transref = weakref.ref(tr)
886 return tr
887 return tr
887
888
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

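    # Sketch (assuming ``repo`` is an open localrepository): rollback() is
    # the programmatic face of 'hg rollback'; a dry run only reports what
    # would be undone:
    #
    #     repo.rollback(dryrun=True)   # print the target, change nothing
    #     repo.rollback(force=True)    # skip the "may lose data" safety check
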
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

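    # Sketch (assuming ``repo`` is an open localrepository): after another
    # process may have modified .hg behind our back, drop every cached view
    # before reading again:
    #
    #     repo.invalidateall()
    #     tip = repo['tip']    # caches are repopulated lazily from disk
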
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

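    # Lock-ordering sketch (assuming ``repo`` is an open localrepository):
    # callers needing both locks take wlock() before lock(), the same order
    # rollback() uses above:
    #
    #     wlock = repo.wlock()
    #     try:
    #         lock = repo.lock()
    #         try:
    #             pass    # ... modify working directory and store ...
    #         finally:
    #             lock.release()
    #     finally:
    #         wlock.release()
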
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            #    0 --- 1 --- 3   rev1 changes file foo
            #      \       /     rev2 renames foo to bar and changes it
            #       \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            #    0 --- 1 --- 3   rev4 reverts the content change from rev2
            #      \       /     merging rev3 and rev4 should use bar@rev2
            #       \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

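    # Illustration of the copy metadata built above: if rev2 renames foo to
    # bar, the new filelog revision of bar is stored with
    #
    #     meta = {'copy': 'foo', 'copyrev': hex(crev)}
    #
    # and fparent1 set to nullid, which tells readers to look up the copy
    # source instead of a regular first parent.
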
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, status)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

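    # A minimal commit sketch, assuming ``repo`` is an open localrepository
    # with pending working-directory changes:
    #
    #     node = repo.commit(text='fix frobnication',
    #                        user='me <me@example.com>')
    #     if node is None:
    #         pass    # nothing changed; see the early return above
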
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets: if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase is 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

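    # Note on the ctx argument: commitctx() only relies on the context
    # interface exercised above -- p1()/p2(), user(), date(), files(),
    # modified()/added()/removed(), description(), extra(), and item access
    # (ctx[f] returning a filectx or None). commit() above feeds it a
    # workingctx; given any context ``cctx`` honouring that interface, the
    # call is simply (sketch):
    #
    #     node = repo.commitctx(cctx, error=False)
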
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

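    # Sketch (assuming ``repo`` is an open localrepository): list the heads
    # of the 'default' branch, newest first, including closed heads:
    #
    #     for node in repo.branchheads('default', closed=True):
    #         print hex(node)[:12]
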
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

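    # Worked example for the loop above: starting at ``top``, a node is
    # recorded whenever the step counter i equals f, and f doubles after
    # each sample, so l collects the ancestors at distances 1, 2, 4, 8, ...
    # from top. This exponential spacing keeps the answer small even for
    # very long chains between top and bottom.
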
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the inmemory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible
                    # filters above 'served' are unlikely to be fetched from
                    # a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)

        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret

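    # Sketch of a programmatic clone, assuming ``dest`` is a freshly created
    # localrepository and ``remote`` a peer obtained elsewhere:
    #
    #     ret = dest.clone(remote, stream=True)   # prefer a streaming clone
    #
    # With stream=False (the default), the method falls back to
    # exchange.pull() unless the server advertises 'stream-preferred'.
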
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

1758 def debugwireargs(self, one, two, three=None, four=None, five=None):
1759 def debugwireargs(self, one, two, three=None, four=None, five=None):
1759 '''used to test argument passing over the wire'''
1760 '''used to test argument passing over the wire'''
1760 return "%s %s %s %s %s" % (one, two, three, four, five)
1761 return "%s %s %s %s %s" % (one, two, three, four, five)
1761
1762
1762 def savecommitmessage(self, text):
1763 def savecommitmessage(self, text):
1763 fp = self.opener('last-message.txt', 'wb')
1764 fp = self.opener('last-message.txt', 'wb')
1764 try:
1765 try:
1765 fp.write(text)
1766 fp.write(text)
1766 finally:
1767 finally:
1767 fp.close()
1768 fp.close()
1768 return self.pathto(fp.name[len(self.root) + 1:])
1769 return self.pathto(fp.name[len(self.root) + 1:])
1769
1770
1770 # used to avoid circular references so destructors work
1771 # used to avoid circular references so destructors work
1771 def aftertrans(files):
1772 def aftertrans(files):
1772 renamefiles = [tuple(t) for t in files]
1773 renamefiles = [tuple(t) for t in files]
1773 def a():
1774 def a():
1774 for vfs, src, dest in renamefiles:
1775 for vfs, src, dest in renamefiles:
1775 try:
1776 try:
1776 vfs.rename(src, dest)
1777 vfs.rename(src, dest)
1777 except OSError: # journal file does not yet exist
1778 except OSError: # journal file does not yet exist
1778 pass
1779 pass
1779 return a
1780 return a
1780
1781
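aftertrans() deliberately returns a plain closure over (vfs, src, dest) tuples so the pending rename list never keeps the repository alive. A small usage sketch, assuming aftertrans() above is in scope and using a hypothetical minimal vfs:

# Hypothetical minimal vfs for illustration; Mercurial's real vfs does more.
import os

class fakevfs(object):
    def __init__(self, base):
        self.base = base
    def rename(self, src, dest):
        os.rename(os.path.join(self.base, src), os.path.join(self.base, dest))

after = aftertrans([(fakevfs('.'), 'journal', 'undo')])
# `after` captures only the tuples above, never the repo object, so no
# repo <-> transaction reference cycle is created.
after()   # a missing journal file raises OSError, which a() swallows
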
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
@@ -1,1161 +1,1163 b''
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete marker handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewrite operations, and help
building new tools to reconcile conflicting rewrite actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called a "precursor" and possible
replacements are called "successors". Markers that used changeset X as
a precursor are called "successor markers of X" because they hold
information about the successors of X. Markers that use changeset Y as
a successor are called "precursor markers of Y" because they hold
information about the precursors of Y.

Examples:

- When changeset A is replaced by changeset A', one marker is stored:

    (A, (A',))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

We use a single marker to distinguish the "split" case from the "divergence"
case. If two independent operations rewrite the same changeset A into A' and
A'', we have an error case: divergent rewriting. We can detect it because
two markers will be created independently:

    (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.

The header is followed by the markers. The marker format depends on the
version. See the comment associated with each format for details.

"""
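To make the split versus divergence distinction concrete, here is a small illustrative sketch; one-letter strings stand in for the real 20-byte node identifiers, and `isdivergent` is a hypothetical helper, not part of this module.

# Illustrative only: real markers carry binary node ids plus flags,
# metadata, date and parents; the (precursor, successors) shape is the same.
split = [('A', ('B', 'C'))]            # one marker: A was split into B and C
divergence = [('A', ('B',)),           # two independent markers rewriting A:
              ('A', ('C',))]           # the divergent error case

def isdivergent(markers):
    # several markers sharing one precursor signal divergent rewriting
    precursors = [m[0] for m in markers]
    return len(precursors) != len(set(precursors))

print isdivergent(split)       # False: a single marker, a clean split
print isdivergent(divergence)  # True: two markers for the same precursor
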
import struct
import util, base85, node
import phases
from i18n import _

_pack = struct.pack
_unpack = struct.unpack

_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on a third party extension to enable this.
_enabled = False

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it is a successor of a public changeset
#
#     o  A' (bumped)
#     |`:
#     | o  A
#     |/
#     o  Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
#     o  Ad
#     |`:
#     | x  A'
#     |'|
#     o |  A
#     |/
#     o  Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker, <A', (Ad,)>.
# This flag means that the successor expresses the changes between the public
# and bumped version and fixes the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1
usingsha256 = 2

## Parsing and writing of version "0"
#
# The header is followed by the markers. Each marker is made of:
#
# - 1 uint8 : number of new changesets "N", can be zero.
#
# - 1 uint32: metadata size "M" in bytes.
#
# - 1 byte: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - 20 bytes: obsoleted changeset identifier.
#
# - N*20 bytes: new changesets identifiers.
#
# - M bytes: metadata as a sequence of nul-terminated strings. Each
#   string contains a key and a value, separated by a colon ':', without
#   additional encoding. Keys cannot contain '\0' or ':' and values
#   cannot contain '\0'.
_fm0version = 0
_fm0fixed = '>BIB20s'
_fm0node = '20s'
_fm0fsize = struct.calcsize(_fm0fixed)
_fm0fnodesize = struct.calcsize(_fm0node)

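To make the version 0 layout concrete, here is a self-contained sketch packing one marker by hand and reading its fixed part back; the node ids and metadata are fake, and the format strings are repeated from above so the snippet runs on its own.

# Self-contained sketch of the version-0 marker layout described above.
import struct

fm0fixed = '>BIB20s'    # numsuc (uint8), mdsize (uint32), flags, precursor
fm0node = '20s'

prec = 'p' * 20                      # fake 20-byte precursor id
sucs = ('s' * 20, 't' * 20)          # two fake successor ids
meta = 'user:alice'                  # nul-separated key:value metadata

raw = struct.pack(fm0fixed + fm0node * len(sucs),
                  len(sucs), len(meta), 0, prec, *sucs) + meta

# reading the fixed part back mirrors _fm0readmarkers below
numsuc, mdsize, flags, pre = struct.unpack(fm0fixed,
                                           raw[:struct.calcsize(fm0fixed)])
print numsuc, mdsize, pre == prec    # 2 10 True
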
def _fm0readmarkers(data, off=0):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)

def _fm0encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    if flags & usingsha256:
        raise util.Abort(_('cannot handle sha256 with old obsstore format'))
    metadata = dict(metadata)
    metadata['date'] = '%d %i' % date
    if parents is not None:
        if not parents:
            # mark that we explicitly recorded no parents
            metadata['p0'] = ''
        for i, p in enumerate(parents):
            metadata['p%i' % (i + 1)] = node.hex(p)
    metadata = _fm0encodemeta(metadata)
    numsuc = len(sucs)
    format = _fm0fixed + (_fm0node * numsuc)
    data = [numsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

def _fm0encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in keys and no '\0' in either keys or values."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata keys")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata values")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def _fm0decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d

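A quick round trip through the two helpers above, assuming module context; the keys and values are arbitrary. Note the decoder's simple split(':'), so in practice values must not contain ':' either.

# Round-tripping metadata through _fm0encodemeta/_fm0decodemeta above.
meta = {'user': 'alice', 'note': 'amended'}
blob = _fm0encodemeta(meta)            # keys are sorted: deterministic output
print blob.replace('\0', '|')          # note:amended|user:alice
assert _fm0decodemeta(blob) == meta
# nb: a value containing ':' would break _fm0decodemeta's two-way split
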
## Parsing and writing of version "1"
#
# The header is followed by the markers. Each marker is made of:
#
# - uint32: total size of the marker (including this field)
#
# - float64: date in seconds since epoch
#
# - int16: timezone offset in minutes
#
# - uint16: a bit field. It is reserved for flags used in common
#   obsolete marker operations, to avoid repeated decoding of metadata
#   entries.
#
# - uint8: number of successors "N", can be zero.
#
# - uint8: number of parents "P", can be zero.
#
#     0: parents data stored but no parent,
#     1: one parent stored,
#     2: two parents stored,
#     3: no parent data stored
#
# - uint8: number of metadata entries M
#
# - 20 or 32 bytes: precursor changeset identifier.
#
# - N*(20 or 32) bytes: successors changesets identifiers.
#
# - P*(20 or 32) bytes: parents of the precursors changesets.
#
# - M*(uint8, uint8): size of all metadata entries (key and value)
#
# - remaining bytes: the metadata, each (key, value) pair after the other.
_fm1version = 1
_fm1fixed = '>IdhHBBB20s'
_fm1nodesha1 = '20s'
_fm1nodesha256 = '32s'
_fm1fsize = struct.calcsize(_fm1fixed)
_fm1parentnone = 3
_fm1parentshift = 14
_fm1parentmask = (_fm1parentnone << _fm1parentshift)
_fm1metapair = 'BB'
_fm1metapairsize = struct.calcsize('BB')

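A small self-contained check of the sizes implied by the version 1 format above, and of the sentinel parent count; the constants are repeated so the snippet runs on its own.

# Sketch: the v1 fixed part is 39 bytes for sha1 nodes, and a parent
# count of 3 (_fm1parentnone) means "no parent data stored".
import struct

fm1fixed = '>IdhHBBB20s'   # size, date, tz, flags, numsuc, numpar, nummeta
print struct.calcsize(fm1fixed)               # 39

fm1parentnone = 3
for numpar in (0, 1, 2, fm1parentnone):
    # decoding maps the sentinel to None, as _fm1readmarkers does below
    print numpar, (numpar if numpar != fm1parentnone else None)
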
def _fm1readmarkers(data, off=0):
    # Loop on markers
    l = len(data)
    while off + _fm1fsize <= l:
        # read fixed part
        cur = data[off:off + _fm1fsize]
        off += _fm1fsize
        fixeddata = _unpack(_fm1fixed, cur)
        ttsize, seconds, tz, flags, numsuc, numpar, nummeta, prec = fixeddata
        # extract the number of parents information
        if numpar == _fm1parentnone:
            numpar = None
        # build the date tuple (upgrade tz minutes to seconds)
        date = (seconds, tz * 60)
        _fm1node = _fm1nodesha1
        if flags & usingsha256:
            _fm1node = _fm1nodesha256
        fnodesize = struct.calcsize(_fm1node)
        # read replacement
        sucs = ()
        if numsuc:
            s = (fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm1node * numsuc, cur)
            off += s
        # read parents
        if numpar is None:
            parents = None
        elif numpar == 0:
            parents = ()
        elif numpar: # neither None nor zero
            s = (fnodesize * numpar)
            cur = data[off:off + s]
            parents = _unpack(_fm1node * numpar, cur)
            off += s
        # read metadata
        metaformat = '>' + (_fm1metapair * nummeta)
        s = _fm1metapairsize * nummeta
        metapairsize = _unpack(metaformat, data[off:off + s])
        off += s
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            sk = metapairsize[idx]
            sv = metapairsize[idx + 1]
            key = data[off:off + sk]
            value = data[off + sk:off + sk + sv]
            assert len(key) == sk
            assert len(value) == sv
            metadata.append((key, value))
            off += sk + sv
        metadata = tuple(metadata)

        yield (prec, sucs, flags, metadata, date, parents)

def _fm1encodeonemarker(marker):
    pre, sucs, flags, metadata, date, parents = marker
    # determine node size
    _fm1node = _fm1nodesha1
    if flags & usingsha256:
        _fm1node = _fm1nodesha256
    numsuc = len(sucs)
    numextranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        numextranodes += numpar
    formatnodes = _fm1node * numextranodes
    formatmeta = _fm1metapair * len(metadata)
    format = _fm1fixed + formatnodes + formatmeta
    # tz is stored in minutes so we divide by 60
    tz = date[1]//60
    data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
    data.extend(sucs)
    if parents is not None:
        data.extend(parents)
    totalsize = struct.calcsize(format)
    for key, value in metadata:
        lk = len(key)
        lv = len(value)
        data.append(lk)
        data.append(lv)
        totalsize += lk + lv
    data[0] = totalsize
    data = [_pack(format, *data)]
    for key, value in metadata:
        data.append(key)
        data.append(value)
    return ''.join(data)

# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}

def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion not in formats:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)
    return diskversion, formats[diskversion][0](data, off)

def encodemarkers(markers, addheader=False, version=_fm0version):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    encodeone = formats[version][1]
    if addheader:
        yield _pack('>B', version)
    for marker in markers:
        yield encodeone(marker)


class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x]   -> set(markers on precursors edges of children(x))
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec:    nodeid, precursor changesets
    # succs:   tuple of nodeid, successor changesets (0-N length)
    # flag:    integer, flag field carrying modifier for the markers (see doc)
    # meta:    binary blob, encoded metadata dictionary
    # date:    (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    #          None is used when no data has been recorded

-    def __init__(self, sopener, defaultformat=_fm1version):
+    def __init__(self, sopener, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related data
        self.caches = {}
        self._all = []
        self.precursors = {}
        self.successors = {}
        self.children = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        self._version = defaultformat
+        self._readonly = readonly
        if data:
            self._version, markers = _readmarkers(data)
            self._load(markers)

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None):
        """obsolete: add a new obsolete marker

        * ensure it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating markers you want to use the
        `createmarkers` function in this module instead.

        Return True if a new marker has been added, False if the marker
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            else:
                date = util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Takes care of filtering out duplicates.
        Returns the number of new markers."""
-        if not _enabled:
-            raise util.Abort('obsolete feature is not enabled on this repo')
+        if self._readonly:
+            raise util.Abort('creating obsolete markers is not enabled on this '
+                             'repo')
        known = set(self._all)
        new = []
        for m in markers:
            if m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the beginning or
                # at the end after opening a file for appending is
                # implementation defined. So we must seek to the end before
                # calling tell(), or we may get a zero offset for non-zero
                # sized files on some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new markers *may* have changed several sets. invalidate the
            # cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

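A hedged sketch of how a caller records one marker through create() above; `repo`, `old` and `new` are assumed to exist, and the lock/transaction pattern mirrors pushmarker() later in this file. On a store opened with readonly=True, add() aborts instead of writing.

# Usage sketch (assumes a `repo` and two 20-byte nodes `old` and `new`).
lock = repo.lock()
try:
    tr = repo.transaction('add-obsolescence-marker')
    try:
        repo.obsstore.create(tr, old, (new,), metadata={'user': 'alice'})
        tr.close()
    finally:
        tr.release()
finally:
    lock.release()
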
    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    def _load(self, markers):
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
            parents = mark[5]
            if parents is not None:
                for p in parents:
                    self.children.setdefault(p, set()).add(mark)
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes means:

        - markers that use these changesets as successors
        - prune markers of direct children of these changesets
        - recursive application of the two rules on the precursors of these
          markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers

def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    versions.sort(reverse=True)
    # search for the highest version known on both sides
    for v in versions:
        if v in formats:
            return v
    return None

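commonversion() is the format negotiation helper: given the versions a peer advertises, pick the newest one we also implement. A standalone sketch of the same rule, with the locally known versions passed in explicitly:

# Standalone sketch mirroring commonversion() above; `known` stands in
# for the keys of the local `formats` dict (versions 0 and 1 here).
def negotiate(versions, known=(0, 1)):
    versions.sort(reverse=True)
    for v in versions:
        if v in known:
            return v
    return None

print negotiate([0, 1, 2])   # 1: the newest version both sides know
print negotiate([5, 7])      # None: no common version
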
# arbitrarily picked to fit into the 8K limit from HTTP servers
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in markers:
        nextdata = _fm0encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys

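The chunking above is a greedy, size-driven packing. A standalone sketch with fixed-size fake records instead of encoded markers shows how parts fill up:

# Standalone sketch of the greedy chunking used by _pushkeyescape() above.
maxpayload = 5300

def chunk(blobs):
    parts = []
    currentlen = maxpayload * 2          # force a new part for the first blob
    for blob in blobs:
        if len(blob) + currentlen > maxpayload:
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(blob)
        currentlen += len(blob)
    return parts

# 100 records of 100 bytes: 53 fit in one 5300-byte part, 47 spill over
print [len(p) for p in chunk(['x' * 100] * 100)]   # [53, 47]
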
def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    return _pushkeyescape(repo.obsstore)

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def getmarkers(repo, nodes=None):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        rawmarkers = repo.obsstore
    else:
        rawmarkers = repo.obsstore.relevantmarkers(nodes)

    for markerdata in rawmarkers:
        yield marker(repo, markerdata)

def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for markerdata in repo.obsstore.relevantmarkers(node):
        yield marker(repo, markerdata)


def precursormarkers(ctx):
    """obsolete markers marking this changeset as a successor"""
    for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def successormarkers(ctx):
    """obsolete markers making this changeset obsolete"""
    for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets. It includes
    initial nodes too."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore markers flagged with the specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

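A minimal sketch of the transitive walk performed by allsuccessors() above, with a tiny stand-in for the obsstore and one-letter node ids; the walking logic is copied from the function above so the snippet is self-contained.

# Sketch: walking transitive successors against a fake obsstore.
class fakeobsstore(object):
    def __init__(self, successors):
        self.successors = successors

# markers are (precursor, successors, flags): A -> B, then B split into C, D
store = fakeobsstore({'A': [('A', ('B',), 0)],
                      'B': [('B', ('C', 'D'), 0)]})

def walk(obsstore, nodes, ignoreflags=0):   # same logic as allsuccessors()
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

print sorted(walk(store, ['A']))   # ['A', 'B', 'C', 'D']
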
def allprecursors(obsstore, nodes, ignoreflags=0):
    """Yield node for every precursor of <nodes>.

    Some precursors may be unknown locally.

    This is a linear yield unsuited to detecting folded changesets. It includes
    initial nodes too."""

    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.precursors.get(current, ()):
            # ignore markers flagged with the specified flag
            if mark[2] & ignoreflags:
                continue
            suc = mark[0]
            if suc not in seen:
                seen.add(suc)
                remaining.add(suc)

def foreground(repo, nodes):
    """return all nodes in the "foreground" of other nodes

    The foreground of a revision is anything reachable using the
    parent -> children or precursor -> successor relation. It is very similar
    to "descendant" but augmented with obsolescence information.

    Beware that obsolescence cycles may produce unexpected results in complex
    situations.
    """
    repo = repo.unfiltered()
    foreground = set(repo.set('%ln::', nodes))
    if repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.
        nm = repo.changelog.nodemap
        plen = -1
        # compute the whole set of successors or descendants
        while len(foreground) != plen:
            plen = len(foreground)
            succs = set(c.node() for c in foreground)
            mutable = [c.node() for c in foreground if c.mutable()]
            succs.update(allsuccessors(repo.obsstore, mutable))
            known = (n for n in succs if n in nm)
            foreground = set(repo.set('%ln::', known))
    return set(c.node() for c in foreground)


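Callers of successorssets() below are expected to share one cache dictionary across calls, as its docstring insists. A hedged usage sketch, assuming module context plus a `repo` and an iterable of `nodes`:

# Usage sketch for successorssets() below: one shared cache per batch.
cache = {}
for n in nodes:
    for ss in successorssets(repo, n, cache):
        # each `ss` is a tuple: () pruned, (n,) unchanged, (a, b) split, ...
        print [node.short(s) for s in ss]
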
765 def successorssets(repo, initialnode, cache=None):
767 def successorssets(repo, initialnode, cache=None):
766 """Return all set of successors of initial nodes
768 """Return all set of successors of initial nodes
767
769
768 The successors set of a changeset A are a group of revisions that succeed
770 The successors set of a changeset A are a group of revisions that succeed
769 A. It succeeds A as a consistent whole, each revision being only a partial
771 A. It succeeds A as a consistent whole, each revision being only a partial
770 replacement. The successors set contains non-obsolete changesets only.
772 replacement. The successors set contains non-obsolete changesets only.
771
773
772 This function returns the full list of successor sets which is why it
774 This function returns the full list of successor sets which is why it
773 returns a list of tuples and not just a single tuple. Each tuple is a valid
775 returns a list of tuples and not just a single tuple. Each tuple is a valid
774 successors set. Not that (A,) may be a valid successors set for changeset A
776 successors set. Not that (A,) may be a valid successors set for changeset A
775 (see below).
777 (see below).
776
778
777 In most cases, a changeset A will have a single element (e.g. the changeset
779 In most cases, a changeset A will have a single element (e.g. the changeset
778 A is replaced by A') in its successors set. Though, it is also common for a
780 A is replaced by A') in its successors set. Though, it is also common for a
779 changeset A to have no elements in its successor set (e.g. the changeset
781 changeset A to have no elements in its successor set (e.g. the changeset
780 has been pruned). Therefore, the returned list of successors sets will be
782 has been pruned). Therefore, the returned list of successors sets will be
781 [(A',)] or [], respectively.
783 [(A',)] or [], respectively.
782
784
783 When a changeset A is split into A' and B', however, it will result in a
785 When a changeset A is split into A' and B', however, it will result in a
784 successors set containing more than a single element, i.e. [(A',B')].
786 successors set containing more than a single element, i.e. [(A',B')].
785 Divergent changesets will result in multiple successors sets, i.e. [(A',),
787 Divergent changesets will result in multiple successors sets, i.e. [(A',),
786 (A'')].
788 (A'')].
787
789
788 If a changeset A is not obsolete, then it will conceptually have no
790 If a changeset A is not obsolete, then it will conceptually have no
789 successors set. To distinguish this from a pruned changeset, the successor
791 successors set. To distinguish this from a pruned changeset, the successor
790 set will only contain itself, i.e. [(A,)].
792 set will only contain itself, i.e. [(A,)].
791
793
792 Finally, successors unknown locally are considered to be pruned (obsoleted
794 Finally, successors unknown locally are considered to be pruned (obsoleted
793 without any successors).
795 without any successors).
794
796
795 The optional `cache` parameter is a dictionary that may contain precomputed
797 The optional `cache` parameter is a dictionary that may contain precomputed
796 successors sets. It is meant to reuse the computation of a previous call to
798 successors sets. It is meant to reuse the computation of a previous call to
797 `successorssets` when multiple calls are made at the same time. The cache
799 `successorssets` when multiple calls are made at the same time. The cache
798 dictionary is updated in place. The caller is responsible for its live
800 dictionary is updated in place. The caller is responsible for its live
799 spawn. Code that makes multiple calls to `successorssets` *must* use this
801 spawn. Code that makes multiple calls to `successorssets` *must* use this
800 cache mechanism or suffer terrible performances.
802 cache mechanism or suffer terrible performances.
801
803
802 """
804 """
803
805
804 succmarkers = repo.obsstore.successors
806 succmarkers = repo.obsstore.successors
805
807
806 # Stack of nodes we search successors sets for
808 # Stack of nodes we search successors sets for
807 toproceed = [initialnode]
809 toproceed = [initialnode]
808 # set version of above list for fast loop detection
810 # set version of above list for fast loop detection
809 # element added to "toproceed" must be added here
811 # element added to "toproceed" must be added here
810 stackedset = set(toproceed)
812 stackedset = set(toproceed)
811 if cache is None:
813 if cache is None:
812 cache = {}
814 cache = {}
813
815
814 # This while loop is the flattened version of a recursive search for
816 # This while loop is the flattened version of a recursive search for
815 # successors sets
817 # successors sets
816 #
818 #
817 # def successorssets(x):
819 # def successorssets(x):
818 # successors = directsuccessors(x)
820 # successors = directsuccessors(x)
819 # ss = [[]]
821 # ss = [[]]
820 # for succ in directsuccessors(x):
822 # for succ in directsuccessors(x):
821 # # product as in itertools cartesian product
823 # # product as in itertools cartesian product
822 # ss = product(ss, successorssets(succ))
824 # ss = product(ss, successorssets(succ))
823 # return ss
825 # return ss
824 #
826 #
825 # But we cannot use plain recursive calls here:
827 # But we cannot use plain recursive calls here:
826 # - that would blow the Python call stack
828 # - that would blow the Python call stack
827 # - obsolescence markers may have cycles, we need to handle them.
829 # - obsolescence markers may have cycles, we need to handle them.
828 #
830 #
829 # The `toproceed` list acts as our call stack. Every node we search
831 # The `toproceed` list acts as our call stack. Every node we search
830 # successors sets for is stacked there.
832 # successors sets for is stacked there.
831 #
833 #
832 # The `stackedset` is a set version of this stack used to check if a node is
834 # The `stackedset` is a set version of this stack used to check if a node is
833 # already stacked. This check is used to detect cycles and prevent infinite
835 # already stacked. This check is used to detect cycles and prevent infinite
834 # loops.
836 # loops.
835 #
837 #
836 # successors sets of all nodes are stored in the `cache` dictionary.
838 # successors sets of all nodes are stored in the `cache` dictionary.
837 #
839 #
838 # After this while loop ends we use the cache to return the successors sets
840 # After this while loop ends we use the cache to return the successors sets
839 # for the node requested by the caller.
841 # for the node requested by the caller.
840 while toproceed:
842 while toproceed:
841 # Every iteration tries to compute the successors sets of the topmost
843 # Every iteration tries to compute the successors sets of the topmost
842 # node of the stack: CURRENT.
844 # node of the stack: CURRENT.
843 #
845 #
844 # There are four possible outcomes:
846 # There are four possible outcomes:
845 #
847 #
846 # 1) We already know the successors sets of CURRENT:
848 # 1) We already know the successors sets of CURRENT:
847 # -> mission accomplished, pop it from the stack.
849 # -> mission accomplished, pop it from the stack.
848 # 2) Node is not obsolete:
850 # 2) Node is not obsolete:
849 # -> the node is its own successors set. Add it to the cache.
851 # -> the node is its own successors set. Add it to the cache.
850 # 3) We do not know the successors sets of direct successors of CURRENT:
852 # 3) We do not know the successors sets of direct successors of CURRENT:
851 # -> We add those successors to the stack.
853 # -> We add those successors to the stack.
852 # 4) We know successors sets of all direct successors of CURRENT:
854 # 4) We know successors sets of all direct successors of CURRENT:
853 # -> We can compute CURRENT successors set and add it to the
855 # -> We can compute CURRENT successors set and add it to the
854 # cache.
856 # cache.
855 #
857 #
856 current = toproceed[-1]
858 current = toproceed[-1]
857 if current in cache:
859 if current in cache:
858 # case (1): We already know the successors sets
860 # case (1): We already know the successors sets
859 stackedset.remove(toproceed.pop())
861 stackedset.remove(toproceed.pop())
860 elif current not in succmarkers:
862 elif current not in succmarkers:
861 # case (2): The node is not obsolete.
863 # case (2): The node is not obsolete.
862 if current in repo:
864 if current in repo:
863 # We have a valid last successor.
865 # We have a valid last successor.
864 cache[current] = [(current,)]
866 cache[current] = [(current,)]
865 else:
867 else:
866 # Final obsolete version is unknown locally.
868 # Final obsolete version is unknown locally.
867 # Do not count that as a valid successor.
869 # Do not count that as a valid successor.
868 cache[current] = []
870 cache[current] = []
869 else:
871 else:
870 # cases (3) and (4)
872 # cases (3) and (4)
871 #
873 #
872 # We proceed in two phases. Phase 1 aims to distinguish case (3)
874 # We proceed in two phases. Phase 1 aims to distinguish case (3)
873 # from case (4):
875 # from case (4):
874 #
876 #
875 # For each direct successor of CURRENT, we check whether its
877 # For each direct successor of CURRENT, we check whether its
876 # successors sets are known. If they are not, we stack the
878 # successors sets are known. If they are not, we stack the
877 # unknown node and proceed to the next iteration of the while
879 # unknown node and proceed to the next iteration of the while
878 # loop. (case 3)
880 # loop. (case 3)
879 #
881 #
880 # During this step, we may detect obsolescence cycles: a node
882 # During this step, we may detect obsolescence cycles: a node
881 # with unknown successors sets but already in the call stack.
883 # with unknown successors sets but already in the call stack.
882 # In such a situation, we arbitrarily set the successors sets of
884 # In such a situation, we arbitrarily set the successors sets of
883 # the node to nothing (node pruned) to break the cycle.
885 # the node to nothing (node pruned) to break the cycle.
884 #
886 #
885 # If no break was encountered we proceed to phase 2.
887 # If no break was encountered we proceed to phase 2.
886 #
888 #
887 # Phase 2 computes successors sets of CURRENT (case 4); see details
889 # Phase 2 computes successors sets of CURRENT (case 4); see details
888 # in phase 2 itself.
890 # in phase 2 itself.
889 #
891 #
890 # Note the two levels of iteration in each phase.
892 # Note the two levels of iteration in each phase.
891 # - The first one handles obsolescence markers using CURRENT as
893 # - The first one handles obsolescence markers using CURRENT as
892 # precursor (successors markers of CURRENT).
894 # precursor (successors markers of CURRENT).
893 #
895 #
894 # Having multiple entries here means divergence.
896 # Having multiple entries here means divergence.
895 #
897 #
896 # - The second one handles successors defined in each marker.
898 # - The second one handles successors defined in each marker.
897 #
899 #
898 # Having none means a pruned node, multiple successors mean a split,
900 # Having none means a pruned node, multiple successors mean a split,
899 # and a single successor is a standard replacement.
901 # and a single successor is a standard replacement.
900 #
902 #
901 for mark in sorted(succmarkers[current]):
903 for mark in sorted(succmarkers[current]):
902 for suc in mark[1]:
904 for suc in mark[1]:
903 if suc not in cache:
905 if suc not in cache:
904 if suc in stackedset:
906 if suc in stackedset:
905 # cycle breaking
907 # cycle breaking
906 cache[suc] = []
908 cache[suc] = []
907 else:
909 else:
908 # case (3) If we have not computed successors sets
910 # case (3) If we have not computed successors sets
909 # of one of those successors we add it to the
911 # of one of those successors we add it to the
910 # `toproceed` stack and stop all work for this
912 # `toproceed` stack and stop all work for this
911 # iteration.
913 # iteration.
912 toproceed.append(suc)
914 toproceed.append(suc)
913 stackedset.add(suc)
915 stackedset.add(suc)
914 break
916 break
915 else:
917 else:
916 continue
918 continue
917 break
919 break
918 else:
920 else:
919 # case (4): we know all successors sets of all direct
921 # case (4): we know all successors sets of all direct
920 # successors
922 # successors
921 #
923 #
922 # The successors set contributed by each marker depends on the
924 # The successors set contributed by each marker depends on the
923 # successors sets of all its "successors" nodes.
925 # successors sets of all its "successors" nodes.
924 #
926 #
925 # Each different marker is a divergence in the obsolescence
927 # Each different marker is a divergence in the obsolescence
926 # history. It contributes successors sets distinct from other
928 # history. It contributes successors sets distinct from other
927 # markers.
929 # markers.
928 #
930 #
929 # Within a marker, a successor may have divergent successors
931 # Within a marker, a successor may have divergent successors
930 # sets. In such a case, the marker will contribute multiple
932 # sets. In such a case, the marker will contribute multiple
931 # divergent successors sets. If multiple successors have
933 # divergent successors sets. If multiple successors have
932 # divergent successors sets, a Cartesian product is used.
934 # divergent successors sets, a Cartesian product is used.
933 #
935 #
934 # At the end we post-process successors sets to remove
936 # At the end we post-process successors sets to remove
935 # duplicated entries and successors sets that are strict subsets of
937 # duplicated entries and successors sets that are strict subsets of
936 # another one.
938 # another one.
937 succssets = []
939 succssets = []
938 for mark in sorted(succmarkers[current]):
940 for mark in sorted(succmarkers[current]):
939 # successors sets contributed by this marker
941 # successors sets contributed by this marker
940 markss = [[]]
942 markss = [[]]
941 for suc in mark[1]:
943 for suc in mark[1]:
942 # Cartesian product with previous successors
944 # Cartesian product with previous successors
943 productresult = []
945 productresult = []
944 for prefix in markss:
946 for prefix in markss:
945 for suffix in cache[suc]:
947 for suffix in cache[suc]:
946 newss = list(prefix)
948 newss = list(prefix)
947 for part in suffix:
949 for part in suffix:
948 # do not duplicate entries in the successors set;
950 # do not duplicate entries in the successors set;
949 # the first entry wins.
951 # the first entry wins.
950 if part not in newss:
952 if part not in newss:
951 newss.append(part)
953 newss.append(part)
952 productresult.append(newss)
954 productresult.append(newss)
953 markss = productresult
955 markss = productresult
954 succssets.extend(markss)
956 succssets.extend(markss)
955 # remove duplicates and subsets
957 # remove duplicates and subsets
956 seen = []
958 seen = []
957 final = []
959 final = []
958 candidate = sorted(((set(s), s) for s in succssets if s),
960 candidate = sorted(((set(s), s) for s in succssets if s),
959 key=lambda x: len(x[1]), reverse=True)
961 key=lambda x: len(x[1]), reverse=True)
960 for setversion, listversion in candidate:
962 for setversion, listversion in candidate:
961 for seenset in seen:
963 for seenset in seen:
962 if setversion.issubset(seenset):
964 if setversion.issubset(seenset):
963 break
965 break
964 else:
966 else:
965 final.append(listversion)
967 final.append(listversion)
966 seen.append(setversion)
968 seen.append(setversion)
967 final.reverse() # put small successors set first
969 final.reverse() # put small successors set first
968 cache[current] = final
970 cache[current] = final
969 return cache[initialnode]
971 return cache[initialnode]
970
972
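The flattened recursion above can be hard to follow in the abstract. The sketch below is a simplified, self-contained model of the same traversal pattern only: an explicit stack (`toproceed`), a set mirror for cycle detection (`stackedset`), and arbitrary cycle breaking. It is an illustration, not the Mercurial implementation; markers, splits, and the Cartesian product are deliberately left out.

    def finalsuccessors(graph, start):
        # graph: dict mapping node -> list of direct successors (may contain
        # cycles); nodes absent from the graph model "not obsolete" and
        # succeed themselves.
        cache = {}
        toproceed = [start]
        stackedset = set(toproceed)
        while toproceed:
            current = toproceed[-1]
            if current in cache:
                stackedset.remove(toproceed.pop())   # case (1): already known
            elif current not in graph:
                cache[current] = [current]           # case (2): not obsolete
            else:
                unknown = [s for s in graph[current] if s not in cache]
                for s in unknown:
                    if s in stackedset:
                        cache[s] = []                # cycle breaking, as above
                unknown = [s for s in unknown if s not in cache]
                if unknown:
                    toproceed.extend(unknown)        # case (3): stack them
                    stackedset.update(unknown)
                else:
                    # case (4): all direct successors known, flatten them
                    cache[current] = [x for s in graph[current]
                                      for x in cache[s]]
        return cache[start]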
971 def _knownrevs(repo, nodes):
973 def _knownrevs(repo, nodes):
972 """yield revision numbers of known nodes passed in parameters
974 """yield revision numbers of known nodes passed in parameters
973
975
974 Unknown revisions are silently ignored."""
976 Unknown revisions are silently ignored."""
975 torev = repo.changelog.nodemap.get
977 torev = repo.changelog.nodemap.get
976 for n in nodes:
978 for n in nodes:
977 rev = torev(n)
979 rev = torev(n)
978 if rev is not None:
980 if rev is not None:
979 yield rev
981 yield rev
980
982
981 # mapping of 'set-name' -> <function to compute this set>
983 # mapping of 'set-name' -> <function to compute this set>
982 cachefuncs = {}
984 cachefuncs = {}
983 def cachefor(name):
985 def cachefor(name):
984 """Decorator to register a function as computing the cache for a set"""
986 """Decorator to register a function as computing the cache for a set"""
985 def decorator(func):
987 def decorator(func):
986 assert name not in cachefuncs
988 assert name not in cachefuncs
987 cachefuncs[name] = func
989 cachefuncs[name] = func
988 return func
990 return func
989 return decorator
991 return decorator
990
992
991 def getrevs(repo, name):
993 def getrevs(repo, name):
992 """Return the set of revision that belong to the <name> set
994 """Return the set of revision that belong to the <name> set
993
995
994 Such access may compute the set and cache it for future use"""
996 Such access may compute the set and cache it for future use"""
995 repo = repo.unfiltered()
997 repo = repo.unfiltered()
996 if not repo.obsstore:
998 if not repo.obsstore:
997 return frozenset()
999 return frozenset()
998 if name not in repo.obsstore.caches:
1000 if name not in repo.obsstore.caches:
999 repo.obsstore.caches[name] = cachefuncs[name](repo)
1001 repo.obsstore.caches[name] = cachefuncs[name](repo)
1000 return repo.obsstore.caches[name]
1002 return repo.obsstore.caches[name]
1001
1003
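For illustration, registering and retrieving a hypothetical set through this registry might look like the sketch below (the set name `'mydummyset'` and the surrounding `repo` are assumptions, not part of the module):

    @cachefor('mydummyset')           # hypothetical set name
    def _computemydummyset(repo):
        # compute and return the revisions that belong to the set
        return set()

    # first access computes and memoizes on repo.obsstore.caches,
    # provided the repo actually has obsstore data
    revs = getrevs(repo, 'mydummyset')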
1002 # To keep things simple we need to invalidate the obsolescence caches when:
1004 # To keep things simple we need to invalidate the obsolescence caches when:
1003 #
1005 #
1004 # - a new changeset is added
1006 # - a new changeset is added
1005 # - a public phase is changed
1007 # - a public phase is changed
1006 # - obsolescence markers are added
1008 # - obsolescence markers are added
1007 # - strip is used on a repo
1009 # - strip is used on a repo
1008 def clearobscaches(repo):
1010 def clearobscaches(repo):
1009 """Remove all obsolescence related cache from a repo
1011 """Remove all obsolescence related cache from a repo
1010
1012
1011 This remove all cache in obsstore is the obsstore already exist on the
1013 This remove all cache in obsstore is the obsstore already exist on the
1012 repo.
1014 repo.
1013
1015
1014 (We could be smarter here given the exact event that trigger the cache
1016 (We could be smarter here given the exact event that trigger the cache
1015 clearing)"""
1017 clearing)"""
1016 # only clear cache is there is obsstore data in this repo
1018 # only clear cache is there is obsstore data in this repo
1017 if 'obsstore' in repo._filecache:
1019 if 'obsstore' in repo._filecache:
1018 repo.obsstore.caches.clear()
1020 repo.obsstore.caches.clear()
1019
1021
1020 @cachefor('obsolete')
1022 @cachefor('obsolete')
1021 def _computeobsoleteset(repo):
1023 def _computeobsoleteset(repo):
1022 """the set of obsolete revisions"""
1024 """the set of obsolete revisions"""
1023 obs = set()
1025 obs = set()
1024 getrev = repo.changelog.nodemap.get
1026 getrev = repo.changelog.nodemap.get
1025 getphase = repo._phasecache.phase
1027 getphase = repo._phasecache.phase
1026 for n in repo.obsstore.successors:
1028 for n in repo.obsstore.successors:
1027 rev = getrev(n)
1029 rev = getrev(n)
1028 if rev is not None and getphase(repo, rev):
1030 if rev is not None and getphase(repo, rev):
1029 obs.add(rev)
1031 obs.add(rev)
1030 return obs
1032 return obs
1031
1033
1032 @cachefor('unstable')
1034 @cachefor('unstable')
1033 def _computeunstableset(repo):
1035 def _computeunstableset(repo):
1034 """the set of non obsolete revisions with obsolete parents"""
1036 """the set of non obsolete revisions with obsolete parents"""
1035 # revset is not efficient enough here
1037 # revset is not efficient enough here
1036 # we do (obsolete()::) - obsolete() by hand
1038 # we do (obsolete()::) - obsolete() by hand
1037 obs = getrevs(repo, 'obsolete')
1039 obs = getrevs(repo, 'obsolete')
1038 if not obs:
1040 if not obs:
1039 return set()
1041 return set()
1040 cl = repo.changelog
1042 cl = repo.changelog
1041 return set(r for r in cl.descendants(obs) if r not in obs)
1043 return set(r for r in cl.descendants(obs) if r not in obs)
1042
1044
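Conceptually (and assuming an unfiltered repo), the hand-rolled computation above matches the revset spelled out in the comment:

    unstable = repo.revs('(obsolete()::) - obsolete()')

The manual version simply bypasses the revset machinery for speed.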
1043 @cachefor('suspended')
1045 @cachefor('suspended')
1044 def _computesuspendedset(repo):
1046 def _computesuspendedset(repo):
1045 """the set of obsolete parents with non obsolete descendants"""
1047 """the set of obsolete parents with non obsolete descendants"""
1046 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
1048 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
1047 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
1049 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
1048
1050
1049 @cachefor('extinct')
1051 @cachefor('extinct')
1050 def _computeextinctset(repo):
1052 def _computeextinctset(repo):
1051 """the set of obsolete parents without non obsolete descendants"""
1053 """the set of obsolete parents without non obsolete descendants"""
1052 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
1054 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
1053
1055
1054
1056
1055 @cachefor('bumped')
1057 @cachefor('bumped')
1056 def _computebumpedset(repo):
1058 def _computebumpedset(repo):
1057 """the set of revs trying to obsolete public revisions"""
1059 """the set of revs trying to obsolete public revisions"""
1058 bumped = set()
1060 bumped = set()
1059 # util function (avoid attribute lookup in the loop)
1061 # util function (avoid attribute lookup in the loop)
1060 phase = repo._phasecache.phase # would be faster to grab the full list
1062 phase = repo._phasecache.phase # would be faster to grab the full list
1061 public = phases.public
1063 public = phases.public
1062 cl = repo.changelog
1064 cl = repo.changelog
1063 torev = cl.nodemap.get
1065 torev = cl.nodemap.get
1064 obs = getrevs(repo, 'obsolete')
1066 obs = getrevs(repo, 'obsolete')
1065 for rev in repo:
1067 for rev in repo:
1066 # We only evaluate mutable, non-obsolete revisions
1068 # We only evaluate mutable, non-obsolete revisions
1067 if (public < phase(repo, rev)) and (rev not in obs):
1069 if (public < phase(repo, rev)) and (rev not in obs):
1068 node = cl.node(rev)
1070 node = cl.node(rev)
1069 # (future) A cache of precursors may be worthwhile if splits are very common
1071 # (future) A cache of precursors may be worthwhile if splits are very common
1070 for pnode in allprecursors(repo.obsstore, [node],
1072 for pnode in allprecursors(repo.obsstore, [node],
1071 ignoreflags=bumpedfix):
1073 ignoreflags=bumpedfix):
1072 prev = torev(pnode) # unfiltered! but so is phasecache
1074 prev = torev(pnode) # unfiltered! but so is phasecache
1073 if (prev is not None) and (phase(repo, prev) <= public):
1075 if (prev is not None) and (phase(repo, prev) <= public):
1074 # we have a public precursor
1076 # we have a public precursor
1075 bumped.add(rev)
1077 bumped.add(rev)
1076 break # Next draft!
1078 break # Next draft!
1077 return bumped
1079 return bumped
1078
1080
1079 @cachefor('divergent')
1081 @cachefor('divergent')
1080 def _computedivergentset(repo):
1082 def _computedivergentset(repo):
1081 """the set of rev that compete to be the final successors of some revision.
1083 """the set of rev that compete to be the final successors of some revision.
1082 """
1084 """
1083 divergent = set()
1085 divergent = set()
1084 obsstore = repo.obsstore
1086 obsstore = repo.obsstore
1085 newermap = {}
1087 newermap = {}
1086 for ctx in repo.set('(not public()) - obsolete()'):
1088 for ctx in repo.set('(not public()) - obsolete()'):
1087 mark = obsstore.precursors.get(ctx.node(), ())
1089 mark = obsstore.precursors.get(ctx.node(), ())
1088 toprocess = set(mark)
1090 toprocess = set(mark)
1089 while toprocess:
1091 while toprocess:
1090 prec = toprocess.pop()[0]
1092 prec = toprocess.pop()[0]
1091 if prec not in newermap:
1093 if prec not in newermap:
1092 successorssets(repo, prec, newermap)
1094 successorssets(repo, prec, newermap)
1093 newer = [n for n in newermap[prec] if n]
1095 newer = [n for n in newermap[prec] if n]
1094 if len(newer) > 1:
1096 if len(newer) > 1:
1095 divergent.add(ctx.rev())
1097 divergent.add(ctx.rev())
1096 break
1098 break
1097 toprocess.update(obsstore.precursors.get(prec, ()))
1099 toprocess.update(obsstore.precursors.get(prec, ()))
1098 return divergent
1100 return divergent
1099
1101
1100
1102
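Note how divergence detection leans directly on `successorssets` above: a precursor whose cached entry contains more than one non-empty successors set is divergent. The core test, isolated (with a hypothetical node `prec`):

    newermap = {}                          # shared successorssets() cache
    successorssets(repo, prec, newermap)
    newer = [n for n in newermap[prec] if n]
    isdivergent = len(newer) > 1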
1101 def createmarkers(repo, relations, flag=0, date=None, metadata=None):
1103 def createmarkers(repo, relations, flag=0, date=None, metadata=None):
1102 """Add obsolete markers between changesets in a repo
1104 """Add obsolete markers between changesets in a repo
1103
1105
1104 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
1106 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
1105 tuples. `old` and `news` are changectx objects. metadata is an optional
1107 tuples. `old` and `news` are changectx objects. metadata is an optional
1106 dictionary containing metadata for this marker only. It is merged with the
1108 dictionary containing metadata for this marker only. It is merged with the
1107 global metadata specified through the `metadata` argument of this function.
1109 global metadata specified through the `metadata` argument of this function.
1108
1110
1109 Trying to obsolete a public changeset will raise an exception.
1111 Trying to obsolete a public changeset will raise an exception.
1110
1112
1111 The current user and date are used unless specified otherwise in the
1113 The current user and date are used unless specified otherwise in the
1112 metadata argument.
1114 metadata argument.
1113
1115
1114 This function operates within a transaction of its own, but does
1116 This function operates within a transaction of its own, but does
1115 not take any lock on the repo.
1117 not take any lock on the repo.
1116 """
1118 """
1117 # prepare metadata
1119 # prepare metadata
1118 if metadata is None:
1120 if metadata is None:
1119 metadata = {}
1121 metadata = {}
1120 if 'user' not in metadata:
1122 if 'user' not in metadata:
1121 metadata['user'] = repo.ui.username()
1123 metadata['user'] = repo.ui.username()
1122 tr = repo.transaction('add-obsolescence-marker')
1124 tr = repo.transaction('add-obsolescence-marker')
1123 try:
1125 try:
1124 for rel in relations:
1126 for rel in relations:
1125 prec = rel[0]
1127 prec = rel[0]
1126 sucs = rel[1]
1128 sucs = rel[1]
1127 localmetadata = metadata.copy()
1129 localmetadata = metadata.copy()
1128 if 2 < len(rel):
1130 if 2 < len(rel):
1129 localmetadata.update(rel[2])
1131 localmetadata.update(rel[2])
1130
1132
1131 if not prec.mutable():
1133 if not prec.mutable():
1132 raise util.Abort("cannot obsolete immutable changeset: %s"
1134 raise util.Abort("cannot obsolete immutable changeset: %s"
1133 % prec)
1135 % prec)
1134 nprec = prec.node()
1136 nprec = prec.node()
1135 nsucs = tuple(s.node() for s in sucs)
1137 nsucs = tuple(s.node() for s in sucs)
1136 npare = None
1138 npare = None
1137 if not nsucs:
1139 if not nsucs:
1138 npare = tuple(p.node() for p in prec.parents())
1140 npare = tuple(p.node() for p in prec.parents())
1139 if nprec in nsucs:
1141 if nprec in nsucs:
1140 raise util.Abort("changeset %s cannot obsolete itself" % prec)
1142 raise util.Abort("changeset %s cannot obsolete itself" % prec)
1141 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1143 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1142 date=date, metadata=localmetadata)
1144 date=date, metadata=localmetadata)
1143 repo.filteredrevcache.clear()
1145 repo.filteredrevcache.clear()
1144 tr.close()
1146 tr.close()
1145 finally:
1147 finally:
1146 tr.release()
1148 tr.release()
1147
1149
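A hedged usage sketch (`old` and `new` are hypothetical changectx objects, and the `'note'` metadata key is purely illustrative): rewriting and pruning would be recorded as:

    # `old` was rewritten into `new`, with one per-marker metadata entry
    createmarkers(repo, [(old, (new,), {'note': 'amended'})])

    # pruning: an empty successors tuple records "obsoleted without any
    # successor", so the parents of `old` are stored on the marker instead
    createmarkers(repo, [(old, ())])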
1148 def isenabled(repo, option):
1150 def isenabled(repo, option):
1149 """Returns True if the given repository has the given obsolete option
1151 """Returns True if the given repository has the given obsolete option
1150 enabled.
1152 enabled.
1151 """
1153 """
1152 result = set(repo.ui.configlist('experimental', 'evolution'))
1154 result = set(repo.ui.configlist('experimental', 'evolution'))
1153 if 'all' in result:
1155 if 'all' in result:
1154 return True
1156 return True
1155
1157
1156 # For migration purposes, temporarily return true if the config hasn't been
1158 # For migration purposes, temporarily return true if the config hasn't been
1157 # set but _enabled is true.
1159 # set but _enabled is true.
1158 if len(result) == 0 and _enabled:
1160 if len(result) == 0 and _enabled:
1159 return True
1161 return True
1160
1162
1161 return option in result
1163 return option in result
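For example, a caller might guard marker creation on a named option (the option name `'createmarkers'` is an assumption here, shown for illustration):

    if isenabled(repo, 'createmarkers'):
        createmarkers(repo, [(old, (new,))])

while a user opts in through configuration, e.g.:

    [experimental]
    evolution = all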
@@ -1,754 +1,754 b''
1 $ cat >> $HGRCPATH << EOF
1 $ cat >> $HGRCPATH << EOF
2 > [phases]
2 > [phases]
3 > # public changeset are not obsolete
3 > # public changeset are not obsolete
4 > publish=false
4 > publish=false
5 > [ui]
5 > [ui]
6 > logtemplate="{rev}:{node|short} ({phase}) [{tags} {bookmarks}] {desc|firstline}\n"
6 > logtemplate="{rev}:{node|short} ({phase}) [{tags} {bookmarks}] {desc|firstline}\n"
7 > EOF
7 > EOF
8 $ mkcommit() {
8 $ mkcommit() {
9 > echo "$1" > "$1"
9 > echo "$1" > "$1"
10 > hg add "$1"
10 > hg add "$1"
11 > hg ci -m "add $1"
11 > hg ci -m "add $1"
12 > }
12 > }
13 $ getid() {
13 $ getid() {
14 > hg id --debug --hidden -ir "desc('$1')"
14 > hg id --debug --hidden -ir "desc('$1')"
15 > }
15 > }
16
16
17 $ cat > debugkeys.py <<EOF
17 $ cat > debugkeys.py <<EOF
18 > def reposetup(ui, repo):
18 > def reposetup(ui, repo):
19 > class debugkeysrepo(repo.__class__):
19 > class debugkeysrepo(repo.__class__):
20 > def listkeys(self, namespace):
20 > def listkeys(self, namespace):
21 > ui.write('listkeys %s\n' % (namespace,))
21 > ui.write('listkeys %s\n' % (namespace,))
22 > return super(debugkeysrepo, self).listkeys(namespace)
22 > return super(debugkeysrepo, self).listkeys(namespace)
23 >
23 >
24 > if repo.local():
24 > if repo.local():
25 > repo.__class__ = debugkeysrepo
25 > repo.__class__ = debugkeysrepo
26 > EOF
26 > EOF
27
27
28 $ hg init tmpa
28 $ hg init tmpa
29 $ cd tmpa
29 $ cd tmpa
30 $ mkcommit kill_me
30 $ mkcommit kill_me
31
31
32 Checking that the feature is properly disabled
32 Checking that the feature is properly disabled
33
33
34 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
34 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
35 abort: obsolete feature is not enabled on this repo
35 abort: creating obsolete markers is not enabled on this repo
36 [255]
36 [255]
37
37
38 Enabling it
38 Enabling it
39
39
40 $ cat > ../obs.py << EOF
40 $ cat > ../obs.py << EOF
41 > import mercurial.obsolete
41 > import mercurial.obsolete
42 > mercurial.obsolete._enabled = True
42 > mercurial.obsolete._enabled = True
43 > EOF
43 > EOF
44 $ echo '[extensions]' >> $HGRCPATH
44 $ echo '[extensions]' >> $HGRCPATH
45 $ echo "obs=${TESTTMP}/obs.py" >> $HGRCPATH
45 $ echo "obs=${TESTTMP}/obs.py" >> $HGRCPATH
46
46
47 Killing a single changeset without replacement
47 Killing a single changeset without replacement
48
48
49 $ hg debugobsolete 0
49 $ hg debugobsolete 0
50 abort: changeset references must be full hexadecimal node identifiers
50 abort: changeset references must be full hexadecimal node identifiers
51 [255]
51 [255]
52 $ hg debugobsolete '00'
52 $ hg debugobsolete '00'
53 abort: changeset references must be full hexadecimal node identifiers
53 abort: changeset references must be full hexadecimal node identifiers
54 [255]
54 [255]
55 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
55 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
56 $ hg debugobsolete
56 $ hg debugobsolete
57 97b7c2d76b1845ed3eb988cd612611e72406cef0 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'babar'}
57 97b7c2d76b1845ed3eb988cd612611e72406cef0 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'babar'}
58
58
59 (test that mercurial is not confused)
59 (test that mercurial is not confused)
60
60
61 $ hg up null --quiet # having 0 as parent prevents it from being hidden
61 $ hg up null --quiet # having 0 as parent prevents it from being hidden
62 $ hg tip
62 $ hg tip
63 -1:000000000000 (public) [tip ]
63 -1:000000000000 (public) [tip ]
64 $ hg up --hidden tip --quiet
64 $ hg up --hidden tip --quiet
65
65
66 Killing a single changeset with itself should fail
66 Killing a single changeset with itself should fail
67 (simple local safeguard)
67 (simple local safeguard)
68
68
69 $ hg debugobsolete `getid kill_me` `getid kill_me`
69 $ hg debugobsolete `getid kill_me` `getid kill_me`
70 abort: bad obsmarker input: in-marker cycle with 97b7c2d76b1845ed3eb988cd612611e72406cef0
70 abort: bad obsmarker input: in-marker cycle with 97b7c2d76b1845ed3eb988cd612611e72406cef0
71 [255]
71 [255]
72
72
73 $ cd ..
73 $ cd ..
74
74
75 Killing a single changeset with replacement
75 Killing a single changeset with replacement
76 (and testing the format option)
76 (and testing the format option)
77
77
78 $ hg init tmpb
78 $ hg init tmpb
79 $ cd tmpb
79 $ cd tmpb
80 $ mkcommit a
80 $ mkcommit a
81 $ mkcommit b
81 $ mkcommit b
82 $ mkcommit original_c
82 $ mkcommit original_c
83 $ hg up "desc('b')"
83 $ hg up "desc('b')"
84 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
84 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
85 $ mkcommit new_c
85 $ mkcommit new_c
86 created new head
86 created new head
87 $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
87 $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
88 $ hg debugobsolete --config format.obsstore-version=0 --flag 12 `getid original_c` `getid new_c` -d '56 120'
88 $ hg debugobsolete --config format.obsstore-version=0 --flag 12 `getid original_c` `getid new_c` -d '56 120'
89 $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
89 $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
90 2:245bde4270cd add original_c
90 2:245bde4270cd add original_c
91 $ hg debugrevlog -cd
91 $ hg debugrevlog -cd
92 # rev p1rev p2rev start end deltastart base p1 p2 rawsize totalsize compression heads chainlen
92 # rev p1rev p2rev start end deltastart base p1 p2 rawsize totalsize compression heads chainlen
93 0 -1 -1 0 59 0 0 0 0 58 58 0 1 0
93 0 -1 -1 0 59 0 0 0 0 58 58 0 1 0
94 1 0 -1 59 118 59 59 0 0 58 116 0 1 0
94 1 0 -1 59 118 59 59 0 0 58 116 0 1 0
95 2 1 -1 118 204 59 59 59 0 76 192 0 1 1
95 2 1 -1 118 204 59 59 59 0 76 192 0 1 1
96 3 1 -1 204 271 204 204 59 0 66 258 0 2 0
96 3 1 -1 204 271 204 204 59 0 66 258 0 2 0
97 $ hg debugobsolete
97 $ hg debugobsolete
98 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
98 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
99
99
100 (check for version number of the obsstore)
100 (check for version number of the obsstore)
101
101
102 $ dd bs=1 count=1 if=.hg/store/obsstore 2>/dev/null
102 $ dd bs=1 count=1 if=.hg/store/obsstore 2>/dev/null
103 \x00 (no-eol) (esc)
103 \x00 (no-eol) (esc)
104
104
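(The first byte of `.hg/store/obsstore` holds the format version; a hypothetical stand-alone check in Python, equivalent to the `dd` invocation above:)

    with open('.hg/store/obsstore', 'rb') as f:
        version = ord(f.read(1))  # 0 here, since format.obsstore-version=0 was forced
    print(version)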
105 do it again (it reads the obsstore before adding a new changeset)
105 do it again (it reads the obsstore before adding a new changeset)
106
106
107 $ hg up '.^'
107 $ hg up '.^'
108 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
108 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
109 $ mkcommit new_2_c
109 $ mkcommit new_2_c
110 created new head
110 created new head
111 $ hg debugobsolete -d '1337 0' `getid new_c` `getid new_2_c`
111 $ hg debugobsolete -d '1337 0' `getid new_c` `getid new_2_c`
112 $ hg debugobsolete
112 $ hg debugobsolete
113 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
113 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
114 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
114 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
115
115
116 Register two markers with a missing node
116 Register two markers with a missing node
117
117
118 $ hg up '.^'
118 $ hg up '.^'
119 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
119 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
120 $ mkcommit new_3_c
120 $ mkcommit new_3_c
121 created new head
121 created new head
122 $ hg debugobsolete -d '1338 0' `getid new_2_c` 1337133713371337133713371337133713371337
122 $ hg debugobsolete -d '1338 0' `getid new_2_c` 1337133713371337133713371337133713371337
123 $ hg debugobsolete -d '1339 0' 1337133713371337133713371337133713371337 `getid new_3_c`
123 $ hg debugobsolete -d '1339 0' 1337133713371337133713371337133713371337 `getid new_3_c`
124 $ hg debugobsolete
124 $ hg debugobsolete
125 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
125 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
126 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
126 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
127 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
127 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
128 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
128 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
129
129
130 Refuse pathological nullid successors
130 Refuse pathological nullid successors
131 $ hg debugobsolete -d '9001 0' 1337133713371337133713371337133713371337 0000000000000000000000000000000000000000
131 $ hg debugobsolete -d '9001 0' 1337133713371337133713371337133713371337 0000000000000000000000000000000000000000
132 transaction abort!
132 transaction abort!
133 rollback completed
133 rollback completed
134 abort: bad obsolescence marker detected: invalid successors nullid
134 abort: bad obsolescence marker detected: invalid successors nullid
135 [255]
135 [255]
136
136
137 Check that graphlog detects that a changeset is obsolete:
137 Check that graphlog detects that a changeset is obsolete:
138
138
139 $ hg log -G
139 $ hg log -G
140 @ 5:5601fb93a350 (draft) [tip ] add new_3_c
140 @ 5:5601fb93a350 (draft) [tip ] add new_3_c
141 |
141 |
142 o 1:7c3bad9141dc (draft) [ ] add b
142 o 1:7c3bad9141dc (draft) [ ] add b
143 |
143 |
144 o 0:1f0dee641bb7 (draft) [ ] add a
144 o 0:1f0dee641bb7 (draft) [ ] add a
145
145
146
146
147 check that heads does not report them
147 check that heads does not report them
148
148
149 $ hg heads
149 $ hg heads
150 5:5601fb93a350 (draft) [tip ] add new_3_c
150 5:5601fb93a350 (draft) [tip ] add new_3_c
151 $ hg heads --hidden
151 $ hg heads --hidden
152 5:5601fb93a350 (draft) [tip ] add new_3_c
152 5:5601fb93a350 (draft) [tip ] add new_3_c
153 4:ca819180edb9 (draft) [ ] add new_2_c
153 4:ca819180edb9 (draft) [ ] add new_2_c
154 3:cdbce2fbb163 (draft) [ ] add new_c
154 3:cdbce2fbb163 (draft) [ ] add new_c
155 2:245bde4270cd (draft) [ ] add original_c
155 2:245bde4270cd (draft) [ ] add original_c
156
156
157
157
158 check that summary does not report them
158 check that summary does not report them
159
159
160 $ hg init ../sink
160 $ hg init ../sink
161 $ echo '[paths]' >> .hg/hgrc
161 $ echo '[paths]' >> .hg/hgrc
162 $ echo 'default=../sink' >> .hg/hgrc
162 $ echo 'default=../sink' >> .hg/hgrc
163 $ hg summary --remote
163 $ hg summary --remote
164 parent: 5:5601fb93a350 tip
164 parent: 5:5601fb93a350 tip
165 add new_3_c
165 add new_3_c
166 branch: default
166 branch: default
167 commit: (clean)
167 commit: (clean)
168 update: (current)
168 update: (current)
169 remote: 3 outgoing
169 remote: 3 outgoing
170
170
171 $ hg summary --remote --hidden
171 $ hg summary --remote --hidden
172 parent: 5:5601fb93a350 tip
172 parent: 5:5601fb93a350 tip
173 add new_3_c
173 add new_3_c
174 branch: default
174 branch: default
175 commit: (clean)
175 commit: (clean)
176 update: 3 new changesets, 4 branch heads (merge)
176 update: 3 new changesets, 4 branch heads (merge)
177 remote: 3 outgoing
177 remote: 3 outgoing
178
178
179 check that various commands work well with filtering
179 check that various commands work well with filtering
180
180
181 $ hg tip
181 $ hg tip
182 5:5601fb93a350 (draft) [tip ] add new_3_c
182 5:5601fb93a350 (draft) [tip ] add new_3_c
183 $ hg log -r 6
183 $ hg log -r 6
184 abort: unknown revision '6'!
184 abort: unknown revision '6'!
185 [255]
185 [255]
186 $ hg log -r 4
186 $ hg log -r 4
187 abort: unknown revision '4'!
187 abort: unknown revision '4'!
188 [255]
188 [255]
189
189
190 Check that public changesets are not counted as obsolete:
190 Check that public changesets are not counted as obsolete:
191
191
192 $ hg --hidden phase --public 2
192 $ hg --hidden phase --public 2
193 $ hg log -G
193 $ hg log -G
194 @ 5:5601fb93a350 (draft) [tip ] add new_3_c
194 @ 5:5601fb93a350 (draft) [tip ] add new_3_c
195 |
195 |
196 | o 2:245bde4270cd (public) [ ] add original_c
196 | o 2:245bde4270cd (public) [ ] add original_c
197 |/
197 |/
198 o 1:7c3bad9141dc (public) [ ] add b
198 o 1:7c3bad9141dc (public) [ ] add b
199 |
199 |
200 o 0:1f0dee641bb7 (public) [ ] add a
200 o 0:1f0dee641bb7 (public) [ ] add a
201
201
202
202
203 And that bumped changesets are detected
203 And that bumped changesets are detected
204 --------------------------------------
204 --------------------------------------
205
205
206 If we didn't filter obsolete changesets out, 3 and 4 would show up too. Also
206 If we didn't filter obsolete changesets out, 3 and 4 would show up too. Also
207 note that the bumped changeset (5:5601fb93a350) is not a direct successor of
207 note that the bumped changeset (5:5601fb93a350) is not a direct successor of
208 the public changeset
208 the public changeset
209
209
210 $ hg log --hidden -r 'bumped()'
210 $ hg log --hidden -r 'bumped()'
211 5:5601fb93a350 (draft) [tip ] add new_3_c
211 5:5601fb93a350 (draft) [tip ] add new_3_c
212
212
213 And that we can't push bumped changesets
213 And that we can't push bumped changesets
214
214
215 $ hg push ../tmpa -r 0 --force #(make repo related)
215 $ hg push ../tmpa -r 0 --force #(make repo related)
216 pushing to ../tmpa
216 pushing to ../tmpa
217 searching for changes
217 searching for changes
218 warning: repository is unrelated
218 warning: repository is unrelated
219 adding changesets
219 adding changesets
220 adding manifests
220 adding manifests
221 adding file changes
221 adding file changes
222 added 1 changesets with 1 changes to 1 files (+1 heads)
222 added 1 changesets with 1 changes to 1 files (+1 heads)
223 $ hg push ../tmpa
223 $ hg push ../tmpa
224 pushing to ../tmpa
224 pushing to ../tmpa
225 searching for changes
225 searching for changes
226 abort: push includes bumped changeset: 5601fb93a350!
226 abort: push includes bumped changeset: 5601fb93a350!
227 [255]
227 [255]
228
228
229 Fixing the "bumped" situation
229 Fixing the "bumped" situation
230 We need to create a clone of 5 and add a special marker with a flag
230 We need to create a clone of 5 and add a special marker with a flag
231
231
232 $ hg up '5^'
232 $ hg up '5^'
233 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
233 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
234 $ hg revert -ar 5
234 $ hg revert -ar 5
235 adding new_3_c
235 adding new_3_c
236 $ hg ci -m 'add n3w_3_c'
236 $ hg ci -m 'add n3w_3_c'
237 created new head
237 created new head
238 $ hg debugobsolete -d '1338 0' --flags 1 `getid new_3_c` `getid n3w_3_c`
238 $ hg debugobsolete -d '1338 0' --flags 1 `getid new_3_c` `getid n3w_3_c`
239 $ hg log -r 'bumped()'
239 $ hg log -r 'bumped()'
240 $ hg log -G
240 $ hg log -G
241 @ 6:6f9641995072 (draft) [tip ] add n3w_3_c
241 @ 6:6f9641995072 (draft) [tip ] add n3w_3_c
242 |
242 |
243 | o 2:245bde4270cd (public) [ ] add original_c
243 | o 2:245bde4270cd (public) [ ] add original_c
244 |/
244 |/
245 o 1:7c3bad9141dc (public) [ ] add b
245 o 1:7c3bad9141dc (public) [ ] add b
246 |
246 |
247 o 0:1f0dee641bb7 (public) [ ] add a
247 o 0:1f0dee641bb7 (public) [ ] add a
248
248
249
249
250
250
251
251
252 $ cd ..
252 $ cd ..
253
253
254 Exchange Test
254 Exchange Test
255 ============================
255 ============================
256
256
257 Destination repo does not have any data
257 Destination repo does not have any data
258 ---------------------------------------
258 ---------------------------------------
259
259
260 Simple incoming test
260 Simple incoming test
261
261
262 $ hg init tmpc
262 $ hg init tmpc
263 $ cd tmpc
263 $ cd tmpc
264 $ hg incoming ../tmpb
264 $ hg incoming ../tmpb
265 comparing with ../tmpb
265 comparing with ../tmpb
266 0:1f0dee641bb7 (public) [ ] add a
266 0:1f0dee641bb7 (public) [ ] add a
267 1:7c3bad9141dc (public) [ ] add b
267 1:7c3bad9141dc (public) [ ] add b
268 2:245bde4270cd (public) [ ] add original_c
268 2:245bde4270cd (public) [ ] add original_c
269 6:6f9641995072 (draft) [tip ] add n3w_3_c
269 6:6f9641995072 (draft) [tip ] add n3w_3_c
270
270
271 Try to pull markers
271 Try to pull markers
272 (extinct changesets are excluded but markers are pushed)
272 (extinct changesets are excluded but markers are pushed)
273
273
274 $ hg pull ../tmpb
274 $ hg pull ../tmpb
275 pulling from ../tmpb
275 pulling from ../tmpb
276 requesting all changes
276 requesting all changes
277 adding changesets
277 adding changesets
278 adding manifests
278 adding manifests
279 adding file changes
279 adding file changes
280 added 4 changesets with 4 changes to 4 files (+1 heads)
280 added 4 changesets with 4 changes to 4 files (+1 heads)
281 (run 'hg heads' to see heads, 'hg merge' to merge)
281 (run 'hg heads' to see heads, 'hg merge' to merge)
282 $ hg debugobsolete
282 $ hg debugobsolete
283 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
283 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
284 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
284 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
285 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
285 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
286 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
286 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
287 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
287 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
288
288
289 Rollback/transaction support
289 Rollback/transaction support
290
290
291 $ hg debugobsolete -d '1340 0' aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
291 $ hg debugobsolete -d '1340 0' aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
292 $ hg debugobsolete
292 $ hg debugobsolete
293 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
293 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
294 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
294 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
295 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
295 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
296 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
296 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
297 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
297 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
298 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 0 (Thu Jan 01 00:22:20 1970 +0000) {'user': 'test'}
298 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 0 (Thu Jan 01 00:22:20 1970 +0000) {'user': 'test'}
299 $ hg rollback -n
299 $ hg rollback -n
300 repository tip rolled back to revision 3 (undo debugobsolete)
300 repository tip rolled back to revision 3 (undo debugobsolete)
301 $ hg rollback
301 $ hg rollback
302 repository tip rolled back to revision 3 (undo debugobsolete)
302 repository tip rolled back to revision 3 (undo debugobsolete)
303 $ hg debugobsolete
303 $ hg debugobsolete
304 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
304 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
305 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
305 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
306 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
306 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
307 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
307 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
308 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
308 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
309
309
310 $ cd ..
310 $ cd ..
311
311
312 Try to push markers
312 Try to push markers
313
313
314 $ hg init tmpd
314 $ hg init tmpd
315 $ hg -R tmpb push tmpd
315 $ hg -R tmpb push tmpd
316 pushing to tmpd
316 pushing to tmpd
317 searching for changes
317 searching for changes
318 adding changesets
318 adding changesets
319 adding manifests
319 adding manifests
320 adding file changes
320 adding file changes
321 added 4 changesets with 4 changes to 4 files (+1 heads)
321 added 4 changesets with 4 changes to 4 files (+1 heads)
322 $ hg -R tmpd debugobsolete | sort
322 $ hg -R tmpd debugobsolete | sort
323 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
323 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
324 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
324 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
325 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
325 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
326 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
326 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
327 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
327 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
328
328
329 Check obsolete keys are exchanged only if source has an obsolete store
329 Check obsolete keys are exchanged only if source has an obsolete store
330
330
331 $ hg init empty
331 $ hg init empty
332 $ hg --config extensions.debugkeys=debugkeys.py -R empty push tmpd
332 $ hg --config extensions.debugkeys=debugkeys.py -R empty push tmpd
333 pushing to tmpd
333 pushing to tmpd
334 listkeys phases
334 listkeys phases
335 listkeys bookmarks
335 listkeys bookmarks
336 no changes found
336 no changes found
337 listkeys phases
337 listkeys phases
338 [1]
338 [1]
339
339
340 clone support
340 clone support
341 (markers are copied and extinct changesets are included to allow hardlinks)
341 (markers are copied and extinct changesets are included to allow hardlinks)
342
342
343 $ hg clone tmpb clone-dest
343 $ hg clone tmpb clone-dest
344 updating to branch default
344 updating to branch default
345 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
345 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
346 $ hg -R clone-dest log -G --hidden
346 $ hg -R clone-dest log -G --hidden
347 @ 6:6f9641995072 (draft) [tip ] add n3w_3_c
347 @ 6:6f9641995072 (draft) [tip ] add n3w_3_c
348 |
348 |
349 | x 5:5601fb93a350 (draft) [ ] add new_3_c
349 | x 5:5601fb93a350 (draft) [ ] add new_3_c
350 |/
350 |/
351 | x 4:ca819180edb9 (draft) [ ] add new_2_c
351 | x 4:ca819180edb9 (draft) [ ] add new_2_c
352 |/
352 |/
353 | x 3:cdbce2fbb163 (draft) [ ] add new_c
353 | x 3:cdbce2fbb163 (draft) [ ] add new_c
354 |/
354 |/
355 | o 2:245bde4270cd (public) [ ] add original_c
355 | o 2:245bde4270cd (public) [ ] add original_c
356 |/
356 |/
357 o 1:7c3bad9141dc (public) [ ] add b
357 o 1:7c3bad9141dc (public) [ ] add b
358 |
358 |
359 o 0:1f0dee641bb7 (public) [ ] add a
359 o 0:1f0dee641bb7 (public) [ ] add a
360
360
361 $ hg -R clone-dest debugobsolete
361 $ hg -R clone-dest debugobsolete
362 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
362 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
363 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
363 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
364 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
364 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
365 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
365 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
366 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
366 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
367
367
368
368
369 Destination repo has existing data
369 Destination repo has existing data
370 ---------------------------------------
370 ---------------------------------------
371
371
372 On pull
372 On pull
373
373
374 $ hg init tmpe
374 $ hg init tmpe
375 $ cd tmpe
375 $ cd tmpe
376 $ hg debugobsolete -d '1339 0' 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00
376 $ hg debugobsolete -d '1339 0' 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00
377 $ hg pull ../tmpb
377 $ hg pull ../tmpb
378 pulling from ../tmpb
378 pulling from ../tmpb
379 requesting all changes
379 requesting all changes
380 adding changesets
380 adding changesets
381 adding manifests
381 adding manifests
382 adding file changes
382 adding file changes
383 added 4 changesets with 4 changes to 4 files (+1 heads)
383 added 4 changesets with 4 changes to 4 files (+1 heads)
384 (run 'hg heads' to see heads, 'hg merge' to merge)
384 (run 'hg heads' to see heads, 'hg merge' to merge)
385 $ hg debugobsolete
385 $ hg debugobsolete
386 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
386 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
387 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
387 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
388 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
388 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
389 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
389 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
390 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
390 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
391 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
391 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
392
392
393
393
394 On push
394 On push
395
395
396 $ hg push ../tmpc
396 $ hg push ../tmpc
397 pushing to ../tmpc
397 pushing to ../tmpc
398 searching for changes
398 searching for changes
399 no changes found
399 no changes found
400 [1]
400 [1]
401 $ hg -R ../tmpc debugobsolete
401 $ hg -R ../tmpc debugobsolete
402 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
402 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
403 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
403 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
404 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
404 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
405 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
405 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
406 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
406 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
407 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
407 1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}

detect outgoing obsolete and unstable
---------------------------------------


  $ hg log -G
  o 3:6f9641995072 (draft) [tip ] add n3w_3_c
  |
  | o 2:245bde4270cd (public) [ ] add original_c
  |/
  o 1:7c3bad9141dc (public) [ ] add b
  |
  o 0:1f0dee641bb7 (public) [ ] add a

  $ hg up 'desc("n3w_3_c")'
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ mkcommit original_d
  $ mkcommit original_e
  $ hg debugobsolete --record-parents `getid original_d` -d '0 0'
  $ hg debugobsolete | grep `getid original_d`
  94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
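
(the marker above is a "prune" marker: it names no successor, and the pruned
changeset's parent is recorded between braces so descendants can still be
located. An illustrative sketch of the same operation on some other commit;
`some_commit` is a placeholder, output omitted:)

  $ hg debugobsolete --record-parents `getid some_commit` -d '0 0'
  $ hg debugobsolete | grep `getid some_commit`   # no successor hash before the flags field
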
  $ hg log -r 'obsolete()'
  4:94b33453f93b (draft) [ ] add original_d
  $ hg log -G -r '::unstable()'
  @ 5:cda648ca50f5 (draft) [tip ] add original_e
  |
  x 4:94b33453f93b (draft) [ ] add original_d
  |
  o 3:6f9641995072 (draft) [ ] add n3w_3_c
  |
  o 1:7c3bad9141dc (public) [ ] add b
  |
  o 0:1f0dee641bb7 (public) [ ] add a


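obsolete() selects changesets that have been rewritten (used as a precursor in
some marker), while unstable() selects the non-obsolete changesets sitting on
top of an obsolete ancestor. Two related illustrative queries, output omitted:

  $ hg log -r 'unstable()'                # only the troubled descendants
  $ hg log -r 'obsolete() + unstable()'   # everything the next pushes will refuse
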
refuse to push obsolete changeset

  $ hg push ../tmpc/ -r 'desc("original_d")'
  pushing to ../tmpc/
  searching for changes
  abort: push includes obsolete changeset: 94b33453f93b!
  [255]

refuse to push unstable changeset

  $ hg push ../tmpc/
  pushing to ../tmpc/
  searching for changes
  abort: push includes unstable changeset: cda648ca50f5!
  [255]
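
(both aborts come from the same pre-push check on the outgoing set. An
illustrative way to preview which changesets would trip it, assuming the core
outgoing() revset; output omitted:)

  $ hg log -r 'outgoing("../tmpc/") and (obsolete() or unstable())'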

Test that extinct changesets are properly detected

  $ hg log -r 'extinct()'

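nothing is extinct yet: an extinct changeset is an obsolete one whose
descendants are all obsolete as well, so no live changeset depends on it. For
contrast, an illustrative query for obsolete changesets that still have live
descendants (output omitted):

  $ hg log -r 'obsolete() - extinct()'
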
Don't try to push extinct changesets

  $ hg init ../tmpf
  $ hg out ../tmpf
  comparing with ../tmpf
  searching for changes
  0:1f0dee641bb7 (public) [ ] add a
  1:7c3bad9141dc (public) [ ] add b
  2:245bde4270cd (public) [ ] add original_c
  3:6f9641995072 (draft) [ ] add n3w_3_c
  4:94b33453f93b (draft) [ ] add original_d
  5:cda648ca50f5 (draft) [tip ] add original_e
  $ hg push ../tmpf -f # -f because we push unstable changesets too
  pushing to ../tmpf
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 6 changesets with 6 changes to 6 files (+1 heads)

no warning displayed

  $ hg push ../tmpf
  pushing to ../tmpf
  searching for changes
  no changes found
  [1]

Do not warn about a new head when the new head is a successor of a remote one

  $ hg log -G
  @ 5:cda648ca50f5 (draft) [tip ] add original_e
  |
  x 4:94b33453f93b (draft) [ ] add original_d
  |
  o 3:6f9641995072 (draft) [ ] add n3w_3_c
  |
  | o 2:245bde4270cd (public) [ ] add original_c
  |/
  o 1:7c3bad9141dc (public) [ ] add b
  |
  o 0:1f0dee641bb7 (public) [ ] add a

  $ hg up -q 'desc(n3w_3_c)'
  $ mkcommit obsolete_e
  created new head
  $ hg debugobsolete `getid 'original_e'` `getid 'obsolete_e'`
  $ hg outgoing ../tmpf # incidental testing of hg outgoing
  comparing with ../tmpf
  searching for changes
  6:3de5eca88c00 (draft) [tip ] add obsolete_e
  $ hg push ../tmpf
  pushing to ../tmpf
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files (+1 heads)
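
(no "creates new head" warning was issued: the head being replaced on the
remote, cda648ca50f5, is recorded as obsoleted by the pushed head. This can be
double-checked against the marker store; the grep simply matches the old
head's hash:)

  $ hg debugobsolete | grep cda648ca50f5   # expect a marker pointing at 3de5eca88c00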

test relevance computation
---------------------------------------

Checking a simple case of "marker relevance".


Reminder of the repo situation

  $ hg log --hidden --graph
  @ 6:3de5eca88c00 (draft) [tip ] add obsolete_e
  |
  | x 5:cda648ca50f5 (draft) [ ] add original_e
  | |
  | x 4:94b33453f93b (draft) [ ] add original_d
  |/
  o 3:6f9641995072 (draft) [ ] add n3w_3_c
  |
  | o 2:245bde4270cd (public) [ ] add original_c
  |/
  o 1:7c3bad9141dc (public) [ ] add b
  |
  o 0:1f0dee641bb7 (public) [ ] add a


List of all markers

  $ hg debugobsolete
  1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
  245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
  cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
  ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
  1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
  5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
  94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)

List of changesets with no chain

  $ hg debugobsolete --hidden --rev ::2

List of changesets that are included in a marker chain

  $ hg debugobsolete --hidden --rev 6
  cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)

List of changesets with a longer chain (including a pruned child)

  $ hg debugobsolete --hidden --rev 3
  1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
  1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
  245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
  5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
  94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
  cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}

List of both

  $ hg debugobsolete --hidden --rev 3::6
  1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
  1339133913391339133913391339133913391339 ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:19 1970 +0000) {'user': 'test'}
  245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C (Wed Dec 31 23:58:56 1969 -0002) {'user': 'test'}
  5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
  94b33453f93bdb8d457ef9b770851a618bf413e1 0 {6f96419950729f3671185b847352890f074f7557} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 (Thu Jan 01 00:22:18 1970 +0000) {'user': 'test'}
  cda648ca50f50482b7055c0b0c4c117bba6733d9 3de5eca88c00aa039da7399a220f4a5221faa585 0 (*) {'user': 'test'} (glob)
  cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 (Thu Jan 01 00:22:17 1970 +0000) {'user': 'test'}
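
(relevance walks the marker graph transitively: starting from the requested
revisions it follows precursor markers backwards, and it also picks up prune
markers attached to pruned children, which is why rev 3 above drags in seven
markers. As an illustrative equivalent, the same revision can be selected by
revset instead of number; output omitted, it should list the same markers:)

  $ hg debugobsolete --hidden --rev 'desc(n3w_3_c)'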

#if serve

check hgweb does not explode
====================================

  $ hg unbundle $TESTDIR/bundles/hgweb+obs.hg
  adding changesets
  adding manifests
  adding file changes
  added 62 changesets with 63 changes to 9 files (+60 heads)
  (run 'hg heads .' to see heads, 'hg merge' to merge)
  $ for node in `hg log -r 'desc(babar_)' --template '{node}\n'`;
  > do
  > hg debugobsolete $node
  > done
  $ hg up tip
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved

  $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
  $ cat hg.pid >> $DAEMON_PIDS

check changelog view

  $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'shortlog/'
  200 Script output follows

check graph view

  $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'graph'
  200 Script output follows

check filelog view

  $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'log/'`hg id --debug --id`/'babar'
  200 Script output follows

  $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'rev/68'
  200 Script output follows
  $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'rev/67'
  404 Not Found
  [1]

check that the web.view config option is honored:

  $ "$TESTDIR/killdaemons.py" hg.pid
  $ cat >> .hg/hgrc << EOF
  > [web]
  > view=all
  > EOF
  $ wait
  $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
  $ "$TESTDIR/get-with-headers.py" --headeronly localhost:$HGPORT 'rev/67'
  200 Script output follows
  $ "$TESTDIR/killdaemons.py" hg.pid

Checking the _enable=False warning if obsolete markers exist

  $ echo '[extensions]' >> $HGRCPATH
  $ echo "obs=!" >> $HGRCPATH
  $ hg log -r tip
  obsolete feature not enabled but 68 markers found!
  68:c15e9edfca13 (draft) [tip ] add celestine

reenable for later tests

  $ echo '[extensions]' >> $HGRCPATH
  $ echo "obs=${TESTTMP}/obs.py" >> $HGRCPATH
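
(an illustrative cross-check of the warning above, not recorded in the test
output; the exact count depends on the markers created so far in this run:)

  $ hg debugobsolete | wc -l   # should match the "68 markers found" message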

#endif

Test incoming/outgoing with changesets obsoleted remotely, known locally
===============================================================================

This tests issue 3805

  $ hg init repo-issue3805
  $ cd repo-issue3805
  $ echo "foo" > foo
  $ hg ci -Am "A"
  adding foo
  $ hg clone . ../other-issue3805
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ echo "bar" >> foo
  $ hg ci --amend
  $ cd ../other-issue3805
  $ hg log -G
  @ 0:193e9254ce7e (draft) [tip ] A

  $ hg log -G -R ../repo-issue3805
  @ 2:3816541e5485 (draft) [tip ] A

  $ hg incoming
  comparing with $TESTTMP/tmpe/repo-issue3805 (glob)
  searching for changes
  2:3816541e5485 (draft) [tip ] A
  $ hg incoming --bundle ../issue3805.hg
  comparing with $TESTTMP/tmpe/repo-issue3805 (glob)
  searching for changes
  2:3816541e5485 (draft) [tip ] A
  $ hg outgoing
  comparing with $TESTTMP/tmpe/repo-issue3805 (glob)
  searching for changes
  no changes found
  [1]
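
(the amended changeset shows up as incoming even though its predecessor is
known and visible locally, and nothing is outgoing; the marker created by
'hg ci --amend' travels along on the next pull. An illustrative peek at the
rewrite history left behind in the source repo, output omitted:)

  $ hg log -G -R ../repo-issue3805 --hidden   # pre-amend changesets show up as 'x'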

#if serve

  $ hg serve -R ../repo-issue3805 -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
  $ cat hg.pid >> $DAEMON_PIDS

  $ hg incoming http://localhost:$HGPORT
  comparing with http://localhost:$HGPORT/
  searching for changes
  1:3816541e5485 (public) [tip ] A
  $ hg outgoing http://localhost:$HGPORT
  comparing with http://localhost:$HGPORT/
  searching for changes
  no changes found
  [1]

  $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS

#endif

This tests issue 3814

(nothing to push but a locally hidden changeset)

  $ cd ..
  $ hg init repo-issue3814
  $ cd repo-issue3805
  $ hg push -r 3816541e5485 ../repo-issue3814
  pushing to ../repo-issue3814
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  $ hg out ../repo-issue3814
  comparing with ../repo-issue3814
  searching for changes
  no changes found
  [1]

Test that a local tag blocks a changeset from being hidden

  $ hg tag -l visible -r 0 --hidden
  $ hg log -G
  @ 2:3816541e5485 (draft) [tip ] A

  x 0:193e9254ce7e (draft) [visible ] A

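hiddenness is computed from obsolescence but can be vetoed: changesets
reachable from a local tag, bookmark, or working-directory parent stay
visible, which is why rev 0 still shows up (as 'x') without --hidden.
Illustrative confirmation, output omitted:

  $ hg log -r 'obsolete()'    # rev 0 is still obsolete, merely not hidden
  $ hg log -r 193e9254ce7e    # resolvable without --hidden thanks to the local tag
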
Test that removing a local tag does not cause some commands to fail

  $ hg tag -l -r tip tiptag
  $ hg tags
  tiptag 2:3816541e5485
  tip 2:3816541e5485
  visible 0:193e9254ce7e
  $ hg --config extensions.strip= strip -r tip --no-backup
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ hg tags
  visible 0:193e9254ce7e
  tip 0:193e9254ce7e