##// END OF EJS Templates
track-tags: introduce first bits of tags tracking during transaction...
Pierre-Yves David -
r31994:b36318e6 default
parent child Browse files
Show More
@@ -1,1986 +1,2032 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
12 import inspect
13 import os
13 import os
14 import random
14 import random
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 wdirrev,
23 wdirrev,
24 )
24 )
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 branchmap,
27 branchmap,
28 bundle2,
28 bundle2,
29 changegroup,
29 changegroup,
30 changelog,
30 changelog,
31 color,
31 color,
32 context,
32 context,
33 dirstate,
33 dirstate,
34 dirstateguard,
34 dirstateguard,
35 encoding,
35 encoding,
36 error,
36 error,
37 exchange,
37 exchange,
38 extensions,
38 extensions,
39 filelog,
39 filelog,
40 hook,
40 hook,
41 lock as lockmod,
41 lock as lockmod,
42 manifest,
42 manifest,
43 match as matchmod,
43 match as matchmod,
44 merge as mergemod,
44 merge as mergemod,
45 mergeutil,
45 mergeutil,
46 namespaces,
46 namespaces,
47 obsolete,
47 obsolete,
48 pathutil,
48 pathutil,
49 peer,
49 peer,
50 phases,
50 phases,
51 pushkey,
51 pushkey,
52 pycompat,
52 pycompat,
53 repoview,
53 repoview,
54 revset,
54 revset,
55 revsetlang,
55 revsetlang,
56 scmutil,
56 scmutil,
57 store,
57 store,
58 subrepo,
58 subrepo,
59 tags as tagsmod,
59 tags as tagsmod,
60 transaction,
60 transaction,
61 txnutil,
61 txnutil,
62 util,
62 util,
63 vfs as vfsmod,
63 vfs as vfsmod,
64 )
64 )
65
65
66 release = lockmod.release
66 release = lockmod.release
67 urlerr = util.urlerr
67 urlerr = util.urlerr
68 urlreq = util.urlreq
68 urlreq = util.urlreq
69
69
class repofilecache(scmutil.filecache):
    """A filecache descriptor that always operates on the unfiltered repo.

    All filecache usage on a repository is for logic that should see the
    unfiltered view, so every descriptor hook redirects to
    repo.unfiltered() before delegating to scmutil.filecache.
    """

    def join(self, obj, fname):
        # cached files live directly under .hg/, resolved via repo.vfs
        return obj.vfs.join(fname)

    def __get__(self, repo, type=None):
        if repo is None:
            # class-level access returns the descriptor itself
            return self
        unfi = repo.unfiltered()
        return super(repofilecache, self).__get__(unfi, type)

    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)

    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
class storecache(repofilecache):
    """filecache variant for files that live in .hg/store."""

    def join(self, obj, fname):
        # store files resolve through repo.sjoin instead of repo.vfs
        return obj.sjoin(fname)
class unfilteredpropertycache(util.propertycache):
    """propertycache computed and stored on the unfiltered repo only.

    Accessing the property through a filtered view delegates to the
    unfiltered repository, so a single cached value is shared by every
    filter level.
    """

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # already unfiltered: let propertycache compute/store the value
            return super(unfilteredpropertycache, self).__get__(unfi)
        # filtered view: read (or trigger computation of) the cache on the
        # unfiltered repository instead
        return getattr(unfi, self.name)
class filteredpropertycache(util.propertycache):
    """propertycache whose value may legitimately differ per filter level."""

    def cachevalue(self, obj, value):
        # store straight into the instance dict, bypassing any descriptor
        # __set__ machinery on the class
        object.__setattr__(obj, self.name, value)
def hasunfilteredcache(repo, name):
    """Return True if the unfiltered repo caches a value for <name>.

    Checks the instance dict of the unfiltered view, which is where
    unfilteredpropertycache stores computed values.
    """
    unfi = repo.unfiltered()
    return name in vars(unfi)
def unfilteredmethod(orig):
    """Decorate a method so it always runs on the unfiltered repository."""
    def inner(repo, *args, **kwargs):
        # swap the (possibly filtered) receiver for its unfiltered view
        return orig(repo.unfiltered(), *args, **kwargs)
    return inner
# capabilities advertised by modern local peers
moderncaps = set(['lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'])
# legacy peers additionally support the pre-getbundle changegroupsubset call
legacycaps = moderncaps.union(['changegroupsubset'])
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        if caps is None:
            caps = moderncaps.copy()
        peer.peerrepository.__init__(self)
        # always expose the 'served' filtered view, never the raw repo
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common,
                                          bundlecaps=bundlecaps, **kwargs)
        cb = util.chunkbuffer(chunks)

        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make
            # the wire level function happier. We need to build a proper
            # object from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # same as localpeer but advertises the extended legacy capability set
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
248 class localrepository(object):
248 class localrepository(object):
249
249
250 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
250 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
251 'manifestv2'))
251 'manifestv2'))
252 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
252 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
253 'relshared', 'dotencode'))
253 'relshared', 'dotencode'))
254 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
254 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
255 filtername = None
255 filtername = None
256
256
257 # a list of (ui, featureset) functions.
257 # a list of (ui, featureset) functions.
258 # only functions defined in module of enabled extensions are invoked
258 # only functions defined in module of enabled extensions are invoked
259 featuresetupfuncs = set()
259 featuresetupfuncs = set()
260
260
261 def __init__(self, baseui, path, create=False):
261 def __init__(self, baseui, path, create=False):
262 self.requirements = set()
262 self.requirements = set()
263 # wvfs: rooted at the repository root, used to access the working copy
263 # wvfs: rooted at the repository root, used to access the working copy
264 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
264 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
265 # vfs: rooted at .hg, used to access repo files outside of .hg/store
265 # vfs: rooted at .hg, used to access repo files outside of .hg/store
266 self.vfs = None
266 self.vfs = None
267 # svfs: usually rooted at .hg/store, used to access repository history
267 # svfs: usually rooted at .hg/store, used to access repository history
268 # If this is a shared repository, this vfs may point to another
268 # If this is a shared repository, this vfs may point to another
269 # repository's .hg/store directory.
269 # repository's .hg/store directory.
270 self.svfs = None
270 self.svfs = None
271 self.root = self.wvfs.base
271 self.root = self.wvfs.base
272 self.path = self.wvfs.join(".hg")
272 self.path = self.wvfs.join(".hg")
273 self.origroot = path
273 self.origroot = path
274 self.auditor = pathutil.pathauditor(self.root, self._checknested)
274 self.auditor = pathutil.pathauditor(self.root, self._checknested)
275 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
275 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
276 realfs=False)
276 realfs=False)
277 self.vfs = vfsmod.vfs(self.path)
277 self.vfs = vfsmod.vfs(self.path)
278 self.baseui = baseui
278 self.baseui = baseui
279 self.ui = baseui.copy()
279 self.ui = baseui.copy()
280 self.ui.copy = baseui.copy # prevent copying repo configuration
280 self.ui.copy = baseui.copy # prevent copying repo configuration
281 # A list of callback to shape the phase if no data were found.
281 # A list of callback to shape the phase if no data were found.
282 # Callback are in the form: func(repo, roots) --> processed root.
282 # Callback are in the form: func(repo, roots) --> processed root.
283 # This list it to be filled by extension during repo setup
283 # This list it to be filled by extension during repo setup
284 self._phasedefaults = []
284 self._phasedefaults = []
285 try:
285 try:
286 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
286 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
287 self._loadextensions()
287 self._loadextensions()
288 except IOError:
288 except IOError:
289 pass
289 pass
290
290
291 if self.featuresetupfuncs:
291 if self.featuresetupfuncs:
292 self.supported = set(self._basesupported) # use private copy
292 self.supported = set(self._basesupported) # use private copy
293 extmods = set(m.__name__ for n, m
293 extmods = set(m.__name__ for n, m
294 in extensions.extensions(self.ui))
294 in extensions.extensions(self.ui))
295 for setupfunc in self.featuresetupfuncs:
295 for setupfunc in self.featuresetupfuncs:
296 if setupfunc.__module__ in extmods:
296 if setupfunc.__module__ in extmods:
297 setupfunc(self.ui, self.supported)
297 setupfunc(self.ui, self.supported)
298 else:
298 else:
299 self.supported = self._basesupported
299 self.supported = self._basesupported
300 color.setup(self.ui)
300 color.setup(self.ui)
301
301
302 # Add compression engines.
302 # Add compression engines.
303 for name in util.compengines:
303 for name in util.compengines:
304 engine = util.compengines[name]
304 engine = util.compengines[name]
305 if engine.revlogheader():
305 if engine.revlogheader():
306 self.supported.add('exp-compression-%s' % name)
306 self.supported.add('exp-compression-%s' % name)
307
307
308 if not self.vfs.isdir():
308 if not self.vfs.isdir():
309 if create:
309 if create:
310 self.requirements = newreporequirements(self)
310 self.requirements = newreporequirements(self)
311
311
312 if not self.wvfs.exists():
312 if not self.wvfs.exists():
313 self.wvfs.makedirs()
313 self.wvfs.makedirs()
314 self.vfs.makedir(notindexed=True)
314 self.vfs.makedir(notindexed=True)
315
315
316 if 'store' in self.requirements:
316 if 'store' in self.requirements:
317 self.vfs.mkdir("store")
317 self.vfs.mkdir("store")
318
318
319 # create an invalid changelog
319 # create an invalid changelog
320 self.vfs.append(
320 self.vfs.append(
321 "00changelog.i",
321 "00changelog.i",
322 '\0\0\0\2' # represents revlogv2
322 '\0\0\0\2' # represents revlogv2
323 ' dummy changelog to prevent using the old repo layout'
323 ' dummy changelog to prevent using the old repo layout'
324 )
324 )
325 else:
325 else:
326 raise error.RepoError(_("repository %s not found") % path)
326 raise error.RepoError(_("repository %s not found") % path)
327 elif create:
327 elif create:
328 raise error.RepoError(_("repository %s already exists") % path)
328 raise error.RepoError(_("repository %s already exists") % path)
329 else:
329 else:
330 try:
330 try:
331 self.requirements = scmutil.readrequires(
331 self.requirements = scmutil.readrequires(
332 self.vfs, self.supported)
332 self.vfs, self.supported)
333 except IOError as inst:
333 except IOError as inst:
334 if inst.errno != errno.ENOENT:
334 if inst.errno != errno.ENOENT:
335 raise
335 raise
336
336
337 self.sharedpath = self.path
337 self.sharedpath = self.path
338 try:
338 try:
339 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
339 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
340 if 'relshared' in self.requirements:
340 if 'relshared' in self.requirements:
341 sharedpath = self.vfs.join(sharedpath)
341 sharedpath = self.vfs.join(sharedpath)
342 vfs = vfsmod.vfs(sharedpath, realpath=True)
342 vfs = vfsmod.vfs(sharedpath, realpath=True)
343 s = vfs.base
343 s = vfs.base
344 if not vfs.exists():
344 if not vfs.exists():
345 raise error.RepoError(
345 raise error.RepoError(
346 _('.hg/sharedpath points to nonexistent directory %s') % s)
346 _('.hg/sharedpath points to nonexistent directory %s') % s)
347 self.sharedpath = s
347 self.sharedpath = s
348 except IOError as inst:
348 except IOError as inst:
349 if inst.errno != errno.ENOENT:
349 if inst.errno != errno.ENOENT:
350 raise
350 raise
351
351
352 self.store = store.store(
352 self.store = store.store(
353 self.requirements, self.sharedpath, vfsmod.vfs)
353 self.requirements, self.sharedpath, vfsmod.vfs)
354 self.spath = self.store.path
354 self.spath = self.store.path
355 self.svfs = self.store.vfs
355 self.svfs = self.store.vfs
356 self.sjoin = self.store.join
356 self.sjoin = self.store.join
357 self.vfs.createmode = self.store.createmode
357 self.vfs.createmode = self.store.createmode
358 self._applyopenerreqs()
358 self._applyopenerreqs()
359 if create:
359 if create:
360 self._writerequirements()
360 self._writerequirements()
361
361
362 self._dirstatevalidatewarned = False
362 self._dirstatevalidatewarned = False
363
363
364 self._branchcaches = {}
364 self._branchcaches = {}
365 self._revbranchcache = None
365 self._revbranchcache = None
366 self.filterpats = {}
366 self.filterpats = {}
367 self._datafilters = {}
367 self._datafilters = {}
368 self._transref = self._lockref = self._wlockref = None
368 self._transref = self._lockref = self._wlockref = None
369
369
370 # A cache for various files under .hg/ that tracks file changes,
370 # A cache for various files under .hg/ that tracks file changes,
371 # (used by the filecache decorator)
371 # (used by the filecache decorator)
372 #
372 #
373 # Maps a property name to its util.filecacheentry
373 # Maps a property name to its util.filecacheentry
374 self._filecache = {}
374 self._filecache = {}
375
375
376 # hold sets of revision to be filtered
376 # hold sets of revision to be filtered
377 # should be cleared when something might have changed the filter value:
377 # should be cleared when something might have changed the filter value:
378 # - new changesets,
378 # - new changesets,
379 # - phase change,
379 # - phase change,
380 # - new obsolescence marker,
380 # - new obsolescence marker,
381 # - working directory parent change,
381 # - working directory parent change,
382 # - bookmark changes
382 # - bookmark changes
383 self.filteredrevcache = {}
383 self.filteredrevcache = {}
384
384
385 # generic mapping between names and nodes
385 # generic mapping between names and nodes
386 self.names = namespaces.namespaces()
386 self.names = namespaces.namespaces()
387
387
388 @property
388 @property
389 def wopener(self):
389 def wopener(self):
390 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
390 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
391 return self.wvfs
391 return self.wvfs
392
392
393 @property
393 @property
394 def opener(self):
394 def opener(self):
395 self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
395 self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
396 return self.vfs
396 return self.vfs
397
397
398 def close(self):
398 def close(self):
399 self._writecaches()
399 self._writecaches()
400
400
401 def _loadextensions(self):
401 def _loadextensions(self):
402 extensions.loadall(self.ui)
402 extensions.loadall(self.ui)
403
403
404 def _writecaches(self):
404 def _writecaches(self):
405 if self._revbranchcache:
405 if self._revbranchcache:
406 self._revbranchcache.write()
406 self._revbranchcache.write()
407
407
408 def _restrictcapabilities(self, caps):
408 def _restrictcapabilities(self, caps):
409 if self.ui.configbool('experimental', 'bundle2-advertise', True):
409 if self.ui.configbool('experimental', 'bundle2-advertise', True):
410 caps = set(caps)
410 caps = set(caps)
411 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
411 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
412 caps.add('bundle2=' + urlreq.quote(capsblob))
412 caps.add('bundle2=' + urlreq.quote(capsblob))
413 return caps
413 return caps
414
414
415 def _applyopenerreqs(self):
415 def _applyopenerreqs(self):
416 self.svfs.options = dict((r, 1) for r in self.requirements
416 self.svfs.options = dict((r, 1) for r in self.requirements
417 if r in self.openerreqs)
417 if r in self.openerreqs)
418 # experimental config: format.chunkcachesize
418 # experimental config: format.chunkcachesize
419 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
419 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
420 if chunkcachesize is not None:
420 if chunkcachesize is not None:
421 self.svfs.options['chunkcachesize'] = chunkcachesize
421 self.svfs.options['chunkcachesize'] = chunkcachesize
422 # experimental config: format.maxchainlen
422 # experimental config: format.maxchainlen
423 maxchainlen = self.ui.configint('format', 'maxchainlen')
423 maxchainlen = self.ui.configint('format', 'maxchainlen')
424 if maxchainlen is not None:
424 if maxchainlen is not None:
425 self.svfs.options['maxchainlen'] = maxchainlen
425 self.svfs.options['maxchainlen'] = maxchainlen
426 # experimental config: format.manifestcachesize
426 # experimental config: format.manifestcachesize
427 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
427 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
428 if manifestcachesize is not None:
428 if manifestcachesize is not None:
429 self.svfs.options['manifestcachesize'] = manifestcachesize
429 self.svfs.options['manifestcachesize'] = manifestcachesize
430 # experimental config: format.aggressivemergedeltas
430 # experimental config: format.aggressivemergedeltas
431 aggressivemergedeltas = self.ui.configbool('format',
431 aggressivemergedeltas = self.ui.configbool('format',
432 'aggressivemergedeltas', False)
432 'aggressivemergedeltas', False)
433 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
433 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
434 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
434 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
435
435
436 for r in self.requirements:
436 for r in self.requirements:
437 if r.startswith('exp-compression-'):
437 if r.startswith('exp-compression-'):
438 self.svfs.options['compengine'] = r[len('exp-compression-'):]
438 self.svfs.options['compengine'] = r[len('exp-compression-'):]
439
439
440 def _writerequirements(self):
440 def _writerequirements(self):
441 scmutil.writerequires(self.vfs, self.requirements)
441 scmutil.writerequires(self.vfs, self.requirements)
442
442
443 def _checknested(self, path):
443 def _checknested(self, path):
444 """Determine if path is a legal nested repository."""
444 """Determine if path is a legal nested repository."""
445 if not path.startswith(self.root):
445 if not path.startswith(self.root):
446 return False
446 return False
447 subpath = path[len(self.root) + 1:]
447 subpath = path[len(self.root) + 1:]
448 normsubpath = util.pconvert(subpath)
448 normsubpath = util.pconvert(subpath)
449
449
450 # XXX: Checking against the current working copy is wrong in
450 # XXX: Checking against the current working copy is wrong in
451 # the sense that it can reject things like
451 # the sense that it can reject things like
452 #
452 #
453 # $ hg cat -r 10 sub/x.txt
453 # $ hg cat -r 10 sub/x.txt
454 #
454 #
455 # if sub/ is no longer a subrepository in the working copy
455 # if sub/ is no longer a subrepository in the working copy
456 # parent revision.
456 # parent revision.
457 #
457 #
458 # However, it can of course also allow things that would have
458 # However, it can of course also allow things that would have
459 # been rejected before, such as the above cat command if sub/
459 # been rejected before, such as the above cat command if sub/
460 # is a subrepository now, but was a normal directory before.
460 # is a subrepository now, but was a normal directory before.
461 # The old path auditor would have rejected by mistake since it
461 # The old path auditor would have rejected by mistake since it
462 # panics when it sees sub/.hg/.
462 # panics when it sees sub/.hg/.
463 #
463 #
464 # All in all, checking against the working copy seems sensible
464 # All in all, checking against the working copy seems sensible
465 # since we want to prevent access to nested repositories on
465 # since we want to prevent access to nested repositories on
466 # the filesystem *now*.
466 # the filesystem *now*.
467 ctx = self[None]
467 ctx = self[None]
468 parts = util.splitpath(subpath)
468 parts = util.splitpath(subpath)
469 while parts:
469 while parts:
470 prefix = '/'.join(parts)
470 prefix = '/'.join(parts)
471 if prefix in ctx.substate:
471 if prefix in ctx.substate:
472 if prefix == normsubpath:
472 if prefix == normsubpath:
473 return True
473 return True
474 else:
474 else:
475 sub = ctx.sub(prefix)
475 sub = ctx.sub(prefix)
476 return sub.checknested(subpath[len(prefix) + 1:])
476 return sub.checknested(subpath[len(prefix) + 1:])
477 else:
477 else:
478 parts.pop()
478 parts.pop()
479 return False
479 return False
480
480
481 def peer(self):
481 def peer(self):
482 return localpeer(self) # not cached to avoid reference cycle
482 return localpeer(self) # not cached to avoid reference cycle
483
483
484 def unfiltered(self):
484 def unfiltered(self):
485 """Return unfiltered version of the repository
485 """Return unfiltered version of the repository
486
486
487 Intended to be overwritten by filtered repo."""
487 Intended to be overwritten by filtered repo."""
488 return self
488 return self
489
489
490 def filtered(self, name):
490 def filtered(self, name):
491 """Return a filtered version of a repository"""
491 """Return a filtered version of a repository"""
492 # build a new class with the mixin and the current class
492 # build a new class with the mixin and the current class
493 # (possibly subclass of the repo)
493 # (possibly subclass of the repo)
494 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
494 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
495 pass
495 pass
496 return filteredrepo(self, name)
496 return filteredrepo(self, name)
497
497
498 @repofilecache('bookmarks', 'bookmarks.current')
498 @repofilecache('bookmarks', 'bookmarks.current')
499 def _bookmarks(self):
499 def _bookmarks(self):
500 return bookmarks.bmstore(self)
500 return bookmarks.bmstore(self)
501
501
502 @property
502 @property
503 def _activebookmark(self):
503 def _activebookmark(self):
504 return self._bookmarks.active
504 return self._bookmarks.active
505
505
506 def bookmarkheads(self, bookmark):
506 def bookmarkheads(self, bookmark):
507 name = bookmark.split('@', 1)[0]
507 name = bookmark.split('@', 1)[0]
508 heads = []
508 heads = []
509 for mark, n in self._bookmarks.iteritems():
509 for mark, n in self._bookmarks.iteritems():
510 if mark.split('@', 1)[0] == name:
510 if mark.split('@', 1)[0] == name:
511 heads.append(n)
511 heads.append(n)
512 return heads
512 return heads
513
513
514 # _phaserevs and _phasesets depend on changelog. what we need is to
514 # _phaserevs and _phasesets depend on changelog. what we need is to
515 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
515 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
516 # can't be easily expressed in filecache mechanism.
516 # can't be easily expressed in filecache mechanism.
517 @storecache('phaseroots', '00changelog.i')
517 @storecache('phaseroots', '00changelog.i')
518 def _phasecache(self):
518 def _phasecache(self):
519 return phases.phasecache(self, self._phasedefaults)
519 return phases.phasecache(self, self._phasedefaults)
520
520
521 @storecache('obsstore')
521 @storecache('obsstore')
522 def obsstore(self):
522 def obsstore(self):
523 # read default format for new obsstore.
523 # read default format for new obsstore.
524 # developer config: format.obsstore-version
524 # developer config: format.obsstore-version
525 defaultformat = self.ui.configint('format', 'obsstore-version', None)
525 defaultformat = self.ui.configint('format', 'obsstore-version', None)
526 # rely on obsstore class default when possible.
526 # rely on obsstore class default when possible.
527 kwargs = {}
527 kwargs = {}
528 if defaultformat is not None:
528 if defaultformat is not None:
529 kwargs['defaultformat'] = defaultformat
529 kwargs['defaultformat'] = defaultformat
530 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
530 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
531 store = obsolete.obsstore(self.svfs, readonly=readonly,
531 store = obsolete.obsstore(self.svfs, readonly=readonly,
532 **kwargs)
532 **kwargs)
533 if store and readonly:
533 if store and readonly:
534 self.ui.warn(
534 self.ui.warn(
535 _('obsolete feature not enabled but %i markers found!\n')
535 _('obsolete feature not enabled but %i markers found!\n')
536 % len(list(store)))
536 % len(list(store)))
537 return store
537 return store
538
538
539 @storecache('00changelog.i')
539 @storecache('00changelog.i')
540 def changelog(self):
540 def changelog(self):
541 c = changelog.changelog(self.svfs)
541 c = changelog.changelog(self.svfs)
542 if txnutil.mayhavepending(self.root):
542 if txnutil.mayhavepending(self.root):
543 c.readpending('00changelog.i.a')
543 c.readpending('00changelog.i.a')
544 return c
544 return c
545
545
546 def _constructmanifest(self):
546 def _constructmanifest(self):
547 # This is a temporary function while we migrate from manifest to
547 # This is a temporary function while we migrate from manifest to
548 # manifestlog. It allows bundlerepo and unionrepo to intercept the
548 # manifestlog. It allows bundlerepo and unionrepo to intercept the
549 # manifest creation.
549 # manifest creation.
550 return manifest.manifestrevlog(self.svfs)
550 return manifest.manifestrevlog(self.svfs)
551
551
552 @storecache('00manifest.i')
552 @storecache('00manifest.i')
553 def manifestlog(self):
553 def manifestlog(self):
554 return manifest.manifestlog(self.svfs, self)
554 return manifest.manifestlog(self.svfs, self)
555
555
556 @repofilecache('dirstate')
556 @repofilecache('dirstate')
557 def dirstate(self):
557 def dirstate(self):
558 return dirstate.dirstate(self.vfs, self.ui, self.root,
558 return dirstate.dirstate(self.vfs, self.ui, self.root,
559 self._dirstatevalidate)
559 self._dirstatevalidate)
560
560
561 def _dirstatevalidate(self, node):
561 def _dirstatevalidate(self, node):
562 try:
562 try:
563 self.changelog.rev(node)
563 self.changelog.rev(node)
564 return node
564 return node
565 except error.LookupError:
565 except error.LookupError:
566 if not self._dirstatevalidatewarned:
566 if not self._dirstatevalidatewarned:
567 self._dirstatevalidatewarned = True
567 self._dirstatevalidatewarned = True
568 self.ui.warn(_("warning: ignoring unknown"
568 self.ui.warn(_("warning: ignoring unknown"
569 " working parent %s!\n") % short(node))
569 " working parent %s!\n") % short(node))
570 return nullid
570 return nullid
571
571
572 def __getitem__(self, changeid):
572 def __getitem__(self, changeid):
573 if changeid is None or changeid == wdirrev:
573 if changeid is None or changeid == wdirrev:
574 return context.workingctx(self)
574 return context.workingctx(self)
575 if isinstance(changeid, slice):
575 if isinstance(changeid, slice):
576 return [context.changectx(self, i)
576 return [context.changectx(self, i)
577 for i in xrange(*changeid.indices(len(self)))
577 for i in xrange(*changeid.indices(len(self)))
578 if i not in self.changelog.filteredrevs]
578 if i not in self.changelog.filteredrevs]
579 return context.changectx(self, changeid)
579 return context.changectx(self, changeid)
580
580
581 def __contains__(self, changeid):
581 def __contains__(self, changeid):
582 try:
582 try:
583 self[changeid]
583 self[changeid]
584 return True
584 return True
585 except error.RepoLookupError:
585 except error.RepoLookupError:
586 return False
586 return False
587
587
588 def __nonzero__(self):
588 def __nonzero__(self):
589 return True
589 return True
590
590
591 __bool__ = __nonzero__
591 __bool__ = __nonzero__
592
592
593 def __len__(self):
593 def __len__(self):
594 return len(self.changelog)
594 return len(self.changelog)
595
595
596 def __iter__(self):
596 def __iter__(self):
597 return iter(self.changelog)
597 return iter(self.changelog)
598
598
599 def revs(self, expr, *args):
599 def revs(self, expr, *args):
600 '''Find revisions matching a revset.
600 '''Find revisions matching a revset.
601
601
602 The revset is specified as a string ``expr`` that may contain
602 The revset is specified as a string ``expr`` that may contain
603 %-formatting to escape certain types. See ``revsetlang.formatspec``.
603 %-formatting to escape certain types. See ``revsetlang.formatspec``.
604
604
605 Revset aliases from the configuration are not expanded. To expand
605 Revset aliases from the configuration are not expanded. To expand
606 user aliases, consider calling ``scmutil.revrange()`` or
606 user aliases, consider calling ``scmutil.revrange()`` or
607 ``repo.anyrevs([expr], user=True)``.
607 ``repo.anyrevs([expr], user=True)``.
608
608
609 Returns a revset.abstractsmartset, which is a list-like interface
609 Returns a revset.abstractsmartset, which is a list-like interface
610 that contains integer revisions.
610 that contains integer revisions.
611 '''
611 '''
612 expr = revsetlang.formatspec(expr, *args)
612 expr = revsetlang.formatspec(expr, *args)
613 m = revset.match(None, expr)
613 m = revset.match(None, expr)
614 return m(self)
614 return m(self)
615
615
616 def set(self, expr, *args):
616 def set(self, expr, *args):
617 '''Find revisions matching a revset and emit changectx instances.
617 '''Find revisions matching a revset and emit changectx instances.
618
618
619 This is a convenience wrapper around ``revs()`` that iterates the
619 This is a convenience wrapper around ``revs()`` that iterates the
620 result and is a generator of changectx instances.
620 result and is a generator of changectx instances.
621
621
622 Revset aliases from the configuration are not expanded. To expand
622 Revset aliases from the configuration are not expanded. To expand
623 user aliases, consider calling ``scmutil.revrange()``.
623 user aliases, consider calling ``scmutil.revrange()``.
624 '''
624 '''
625 for r in self.revs(expr, *args):
625 for r in self.revs(expr, *args):
626 yield self[r]
626 yield self[r]
627
627
628 def anyrevs(self, specs, user=False):
628 def anyrevs(self, specs, user=False):
629 '''Find revisions matching one of the given revsets.
629 '''Find revisions matching one of the given revsets.
630
630
631 Revset aliases from the configuration are not expanded by default. To
631 Revset aliases from the configuration are not expanded by default. To
632 expand user aliases, specify ``user=True``.
632 expand user aliases, specify ``user=True``.
633 '''
633 '''
634 if user:
634 if user:
635 m = revset.matchany(self.ui, specs, repo=self)
635 m = revset.matchany(self.ui, specs, repo=self)
636 else:
636 else:
637 m = revset.matchany(None, specs)
637 m = revset.matchany(None, specs)
638 return m(self)
638 return m(self)
639
639
640 def url(self):
640 def url(self):
641 return 'file:' + self.root
641 return 'file:' + self.root
642
642
643 def hook(self, name, throw=False, **args):
643 def hook(self, name, throw=False, **args):
644 """Call a hook, passing this repo instance.
644 """Call a hook, passing this repo instance.
645
645
646 This a convenience method to aid invoking hooks. Extensions likely
646 This a convenience method to aid invoking hooks. Extensions likely
647 won't call this unless they have registered a custom hook or are
647 won't call this unless they have registered a custom hook or are
648 replacing code that is expected to call a hook.
648 replacing code that is expected to call a hook.
649 """
649 """
650 return hook.hook(self.ui, self, name, throw, **args)
650 return hook.hook(self.ui, self, name, throw, **args)
651
651
652 def tag(self, names, node, message, local, user, date, editor=False):
652 def tag(self, names, node, message, local, user, date, editor=False):
653 self.ui.deprecwarn("use 'tagsmod.tag' instead of 'repo.tag'", '4.2')
653 self.ui.deprecwarn("use 'tagsmod.tag' instead of 'repo.tag'", '4.2')
654 tagsmod.tag(self, names, node, message, local, user, date,
654 tagsmod.tag(self, names, node, message, local, user, date,
655 editor=editor)
655 editor=editor)
656
656
657 @filteredpropertycache
657 @filteredpropertycache
658 def _tagscache(self):
658 def _tagscache(self):
659 '''Returns a tagscache object that contains various tags related
659 '''Returns a tagscache object that contains various tags related
660 caches.'''
660 caches.'''
661
661
662 # This simplifies its cache management by having one decorated
662 # This simplifies its cache management by having one decorated
663 # function (this one) and the rest simply fetch things from it.
663 # function (this one) and the rest simply fetch things from it.
664 class tagscache(object):
664 class tagscache(object):
665 def __init__(self):
665 def __init__(self):
666 # These two define the set of tags for this repository. tags
666 # These two define the set of tags for this repository. tags
667 # maps tag name to node; tagtypes maps tag name to 'global' or
667 # maps tag name to node; tagtypes maps tag name to 'global' or
668 # 'local'. (Global tags are defined by .hgtags across all
668 # 'local'. (Global tags are defined by .hgtags across all
669 # heads, and local tags are defined in .hg/localtags.)
669 # heads, and local tags are defined in .hg/localtags.)
670 # They constitute the in-memory cache of tags.
670 # They constitute the in-memory cache of tags.
671 self.tags = self.tagtypes = None
671 self.tags = self.tagtypes = None
672
672
673 self.nodetagscache = self.tagslist = None
673 self.nodetagscache = self.tagslist = None
674
674
675 cache = tagscache()
675 cache = tagscache()
676 cache.tags, cache.tagtypes = self._findtags()
676 cache.tags, cache.tagtypes = self._findtags()
677
677
678 return cache
678 return cache
679
679
680 def tags(self):
680 def tags(self):
681 '''return a mapping of tag to node'''
681 '''return a mapping of tag to node'''
682 t = {}
682 t = {}
683 if self.changelog.filteredrevs:
683 if self.changelog.filteredrevs:
684 tags, tt = self._findtags()
684 tags, tt = self._findtags()
685 else:
685 else:
686 tags = self._tagscache.tags
686 tags = self._tagscache.tags
687 for k, v in tags.iteritems():
687 for k, v in tags.iteritems():
688 try:
688 try:
689 # ignore tags to unknown nodes
689 # ignore tags to unknown nodes
690 self.changelog.rev(v)
690 self.changelog.rev(v)
691 t[k] = v
691 t[k] = v
692 except (error.LookupError, ValueError):
692 except (error.LookupError, ValueError):
693 pass
693 pass
694 return t
694 return t
695
695
696 def _findtags(self):
696 def _findtags(self):
697 '''Do the hard work of finding tags. Return a pair of dicts
697 '''Do the hard work of finding tags. Return a pair of dicts
698 (tags, tagtypes) where tags maps tag name to node, and tagtypes
698 (tags, tagtypes) where tags maps tag name to node, and tagtypes
699 maps tag name to a string like \'global\' or \'local\'.
699 maps tag name to a string like \'global\' or \'local\'.
700 Subclasses or extensions are free to add their own tags, but
700 Subclasses or extensions are free to add their own tags, but
701 should be aware that the returned dicts will be retained for the
701 should be aware that the returned dicts will be retained for the
702 duration of the localrepo object.'''
702 duration of the localrepo object.'''
703
703
704 # XXX what tagtype should subclasses/extensions use? Currently
704 # XXX what tagtype should subclasses/extensions use? Currently
705 # mq and bookmarks add tags, but do not set the tagtype at all.
705 # mq and bookmarks add tags, but do not set the tagtype at all.
706 # Should each extension invent its own tag type? Should there
706 # Should each extension invent its own tag type? Should there
707 # be one tagtype for all such "virtual" tags? Or is the status
707 # be one tagtype for all such "virtual" tags? Or is the status
708 # quo fine?
708 # quo fine?
709
709
710
710
711 # map tag name to (node, hist)
711 # map tag name to (node, hist)
712 alltags = tagsmod.findglobaltags(self.ui, self)
712 alltags = tagsmod.findglobaltags(self.ui, self)
713 # map tag name to tag type
713 # map tag name to tag type
714 tagtypes = dict((tag, 'global') for tag in alltags)
714 tagtypes = dict((tag, 'global') for tag in alltags)
715
715
716 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
716 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
717
717
718 # Build the return dicts. Have to re-encode tag names because
718 # Build the return dicts. Have to re-encode tag names because
719 # the tags module always uses UTF-8 (in order not to lose info
719 # the tags module always uses UTF-8 (in order not to lose info
720 # writing to the cache), but the rest of Mercurial wants them in
720 # writing to the cache), but the rest of Mercurial wants them in
721 # local encoding.
721 # local encoding.
722 tags = {}
722 tags = {}
723 for (name, (node, hist)) in alltags.iteritems():
723 for (name, (node, hist)) in alltags.iteritems():
724 if node != nullid:
724 if node != nullid:
725 tags[encoding.tolocal(name)] = node
725 tags[encoding.tolocal(name)] = node
726 tags['tip'] = self.changelog.tip()
726 tags['tip'] = self.changelog.tip()
727 tagtypes = dict([(encoding.tolocal(name), value)
727 tagtypes = dict([(encoding.tolocal(name), value)
728 for (name, value) in tagtypes.iteritems()])
728 for (name, value) in tagtypes.iteritems()])
729 return (tags, tagtypes)
729 return (tags, tagtypes)
730
730
731 def tagtype(self, tagname):
731 def tagtype(self, tagname):
732 '''
732 '''
733 return the type of the given tag. result can be:
733 return the type of the given tag. result can be:
734
734
735 'local' : a local tag
735 'local' : a local tag
736 'global' : a global tag
736 'global' : a global tag
737 None : tag does not exist
737 None : tag does not exist
738 '''
738 '''
739
739
740 return self._tagscache.tagtypes.get(tagname)
740 return self._tagscache.tagtypes.get(tagname)
741
741
742 def tagslist(self):
742 def tagslist(self):
743 '''return a list of tags ordered by revision'''
743 '''return a list of tags ordered by revision'''
744 if not self._tagscache.tagslist:
744 if not self._tagscache.tagslist:
745 l = []
745 l = []
746 for t, n in self.tags().iteritems():
746 for t, n in self.tags().iteritems():
747 l.append((self.changelog.rev(n), t, n))
747 l.append((self.changelog.rev(n), t, n))
748 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
748 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
749
749
750 return self._tagscache.tagslist
750 return self._tagscache.tagslist
751
751
752 def nodetags(self, node):
752 def nodetags(self, node):
753 '''return the tags associated with a node'''
753 '''return the tags associated with a node'''
754 if not self._tagscache.nodetagscache:
754 if not self._tagscache.nodetagscache:
755 nodetagscache = {}
755 nodetagscache = {}
756 for t, n in self._tagscache.tags.iteritems():
756 for t, n in self._tagscache.tags.iteritems():
757 nodetagscache.setdefault(n, []).append(t)
757 nodetagscache.setdefault(n, []).append(t)
758 for tags in nodetagscache.itervalues():
758 for tags in nodetagscache.itervalues():
759 tags.sort()
759 tags.sort()
760 self._tagscache.nodetagscache = nodetagscache
760 self._tagscache.nodetagscache = nodetagscache
761 return self._tagscache.nodetagscache.get(node, [])
761 return self._tagscache.nodetagscache.get(node, [])
762
762
763 def nodebookmarks(self, node):
763 def nodebookmarks(self, node):
764 """return the list of bookmarks pointing to the specified node"""
764 """return the list of bookmarks pointing to the specified node"""
765 marks = []
765 marks = []
766 for bookmark, n in self._bookmarks.iteritems():
766 for bookmark, n in self._bookmarks.iteritems():
767 if n == node:
767 if n == node:
768 marks.append(bookmark)
768 marks.append(bookmark)
769 return sorted(marks)
769 return sorted(marks)
770
770
771 def branchmap(self):
771 def branchmap(self):
772 '''returns a dictionary {branch: [branchheads]} with branchheads
772 '''returns a dictionary {branch: [branchheads]} with branchheads
773 ordered by increasing revision number'''
773 ordered by increasing revision number'''
774 branchmap.updatecache(self)
774 branchmap.updatecache(self)
775 return self._branchcaches[self.filtername]
775 return self._branchcaches[self.filtername]
776
776
777 @unfilteredmethod
777 @unfilteredmethod
778 def revbranchcache(self):
778 def revbranchcache(self):
779 if not self._revbranchcache:
779 if not self._revbranchcache:
780 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
780 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
781 return self._revbranchcache
781 return self._revbranchcache
782
782
783 def branchtip(self, branch, ignoremissing=False):
783 def branchtip(self, branch, ignoremissing=False):
784 '''return the tip node for a given branch
784 '''return the tip node for a given branch
785
785
786 If ignoremissing is True, then this method will not raise an error.
786 If ignoremissing is True, then this method will not raise an error.
787 This is helpful for callers that only expect None for a missing branch
787 This is helpful for callers that only expect None for a missing branch
788 (e.g. namespace).
788 (e.g. namespace).
789
789
790 '''
790 '''
791 try:
791 try:
792 return self.branchmap().branchtip(branch)
792 return self.branchmap().branchtip(branch)
793 except KeyError:
793 except KeyError:
794 if not ignoremissing:
794 if not ignoremissing:
795 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
795 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
796 else:
796 else:
797 pass
797 pass
798
798
799 def lookup(self, key):
799 def lookup(self, key):
800 return self[key].node()
800 return self[key].node()
801
801
802 def lookupbranch(self, key, remote=None):
802 def lookupbranch(self, key, remote=None):
803 repo = remote or self
803 repo = remote or self
804 if key in repo.branchmap():
804 if key in repo.branchmap():
805 return key
805 return key
806
806
807 repo = (remote and remote.local()) and remote or self
807 repo = (remote and remote.local()) and remote or self
808 return repo[key].branch()
808 return repo[key].branch()
809
809
810 def known(self, nodes):
810 def known(self, nodes):
811 cl = self.changelog
811 cl = self.changelog
812 nm = cl.nodemap
812 nm = cl.nodemap
813 filtered = cl.filteredrevs
813 filtered = cl.filteredrevs
814 result = []
814 result = []
815 for n in nodes:
815 for n in nodes:
816 r = nm.get(n)
816 r = nm.get(n)
817 resp = not (r is None or r in filtered)
817 resp = not (r is None or r in filtered)
818 result.append(resp)
818 result.append(resp)
819 return result
819 return result
820
820
821 def local(self):
821 def local(self):
822 return self
822 return self
823
823
824 def publishing(self):
824 def publishing(self):
825 # it's safe (and desirable) to trust the publish flag unconditionally
825 # it's safe (and desirable) to trust the publish flag unconditionally
826 # so that we don't finalize changes shared between users via ssh or nfs
826 # so that we don't finalize changes shared between users via ssh or nfs
827 return self.ui.configbool('phases', 'publish', True, untrusted=True)
827 return self.ui.configbool('phases', 'publish', True, untrusted=True)
828
828
829 def cancopy(self):
829 def cancopy(self):
830 # so statichttprepo's override of local() works
830 # so statichttprepo's override of local() works
831 if not self.local():
831 if not self.local():
832 return False
832 return False
833 if not self.publishing():
833 if not self.publishing():
834 return True
834 return True
835 # if publishing we can't copy if there is filtered content
835 # if publishing we can't copy if there is filtered content
836 return not self.filtered('visible').changelog.filteredrevs
836 return not self.filtered('visible').changelog.filteredrevs
837
837
838 def shared(self):
838 def shared(self):
839 '''the type of shared repository (None if not shared)'''
839 '''the type of shared repository (None if not shared)'''
840 if self.sharedpath != self.path:
840 if self.sharedpath != self.path:
841 return 'store'
841 return 'store'
842 return None
842 return None
843
843
844 def join(self, f, *insidef):
844 def join(self, f, *insidef):
845 self.ui.deprecwarn("use 'repo.vfs.join' instead of 'repo.join'", '4.2')
845 self.ui.deprecwarn("use 'repo.vfs.join' instead of 'repo.join'", '4.2')
846 return self.vfs.join(os.path.join(f, *insidef))
846 return self.vfs.join(os.path.join(f, *insidef))
847
847
848 def wjoin(self, f, *insidef):
848 def wjoin(self, f, *insidef):
849 return self.vfs.reljoin(self.root, f, *insidef)
849 return self.vfs.reljoin(self.root, f, *insidef)
850
850
851 def file(self, f):
851 def file(self, f):
852 if f[0] == '/':
852 if f[0] == '/':
853 f = f[1:]
853 f = f[1:]
854 return filelog.filelog(self.svfs, f)
854 return filelog.filelog(self.svfs, f)
855
855
856 def changectx(self, changeid):
856 def changectx(self, changeid):
857 return self[changeid]
857 return self[changeid]
858
858
859 def setparents(self, p1, p2=nullid):
859 def setparents(self, p1, p2=nullid):
860 self.dirstate.beginparentchange()
860 self.dirstate.beginparentchange()
861 copies = self.dirstate.setparents(p1, p2)
861 copies = self.dirstate.setparents(p1, p2)
862 pctx = self[p1]
862 pctx = self[p1]
863 if copies:
863 if copies:
864 # Adjust copy records, the dirstate cannot do it, it
864 # Adjust copy records, the dirstate cannot do it, it
865 # requires access to parents manifests. Preserve them
865 # requires access to parents manifests. Preserve them
866 # only for entries added to first parent.
866 # only for entries added to first parent.
867 for f in copies:
867 for f in copies:
868 if f not in pctx and copies[f] in pctx:
868 if f not in pctx and copies[f] in pctx:
869 self.dirstate.copy(copies[f], f)
869 self.dirstate.copy(copies[f], f)
870 if p2 == nullid:
870 if p2 == nullid:
871 for f, s in sorted(self.dirstate.copies().items()):
871 for f, s in sorted(self.dirstate.copies().items()):
872 if f not in pctx and s not in pctx:
872 if f not in pctx and s not in pctx:
873 self.dirstate.copy(None, f)
873 self.dirstate.copy(None, f)
874 self.dirstate.endparentchange()
874 self.dirstate.endparentchange()
875
875
876 def filectx(self, path, changeid=None, fileid=None):
876 def filectx(self, path, changeid=None, fileid=None):
877 """changeid can be a changeset revision, node, or tag.
877 """changeid can be a changeset revision, node, or tag.
878 fileid can be a file revision or node."""
878 fileid can be a file revision or node."""
879 return context.filectx(self, path, changeid, fileid)
879 return context.filectx(self, path, changeid, fileid)
880
880
881 def getcwd(self):
881 def getcwd(self):
882 return self.dirstate.getcwd()
882 return self.dirstate.getcwd()
883
883
884 def pathto(self, f, cwd=None):
884 def pathto(self, f, cwd=None):
885 return self.dirstate.pathto(f, cwd)
885 return self.dirstate.pathto(f, cwd)
886
886
887 def wfile(self, f, mode='r'):
887 def wfile(self, f, mode='r'):
888 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wfile'", '4.2')
888 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wfile'", '4.2')
889 return self.wvfs(f, mode)
889 return self.wvfs(f, mode)
890
890
891 def _link(self, f):
891 def _link(self, f):
892 self.ui.deprecwarn("use 'repo.wvfs.islink' instead of 'repo._link'",
892 self.ui.deprecwarn("use 'repo.wvfs.islink' instead of 'repo._link'",
893 '4.2')
893 '4.2')
894 return self.wvfs.islink(f)
894 return self.wvfs.islink(f)
895
895
896 def _loadfilter(self, filter):
896 def _loadfilter(self, filter):
897 if filter not in self.filterpats:
897 if filter not in self.filterpats:
898 l = []
898 l = []
899 for pat, cmd in self.ui.configitems(filter):
899 for pat, cmd in self.ui.configitems(filter):
900 if cmd == '!':
900 if cmd == '!':
901 continue
901 continue
902 mf = matchmod.match(self.root, '', [pat])
902 mf = matchmod.match(self.root, '', [pat])
903 fn = None
903 fn = None
904 params = cmd
904 params = cmd
905 for name, filterfn in self._datafilters.iteritems():
905 for name, filterfn in self._datafilters.iteritems():
906 if cmd.startswith(name):
906 if cmd.startswith(name):
907 fn = filterfn
907 fn = filterfn
908 params = cmd[len(name):].lstrip()
908 params = cmd[len(name):].lstrip()
909 break
909 break
910 if not fn:
910 if not fn:
911 fn = lambda s, c, **kwargs: util.filter(s, c)
911 fn = lambda s, c, **kwargs: util.filter(s, c)
912 # Wrap old filters not supporting keyword arguments
912 # Wrap old filters not supporting keyword arguments
913 if not inspect.getargspec(fn)[2]:
913 if not inspect.getargspec(fn)[2]:
914 oldfn = fn
914 oldfn = fn
915 fn = lambda s, c, **kwargs: oldfn(s, c)
915 fn = lambda s, c, **kwargs: oldfn(s, c)
916 l.append((mf, fn, params))
916 l.append((mf, fn, params))
917 self.filterpats[filter] = l
917 self.filterpats[filter] = l
918 return self.filterpats[filter]
918 return self.filterpats[filter]
919
919
920 def _filter(self, filterpats, filename, data):
920 def _filter(self, filterpats, filename, data):
921 for mf, fn, cmd in filterpats:
921 for mf, fn, cmd in filterpats:
922 if mf(filename):
922 if mf(filename):
923 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
923 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
924 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
924 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
925 break
925 break
926
926
927 return data
927 return data
928
928
929 @unfilteredpropertycache
929 @unfilteredpropertycache
930 def _encodefilterpats(self):
930 def _encodefilterpats(self):
931 return self._loadfilter('encode')
931 return self._loadfilter('encode')
932
932
933 @unfilteredpropertycache
933 @unfilteredpropertycache
934 def _decodefilterpats(self):
934 def _decodefilterpats(self):
935 return self._loadfilter('decode')
935 return self._loadfilter('decode')
936
936
937 def adddatafilter(self, name, filter):
937 def adddatafilter(self, name, filter):
938 self._datafilters[name] = filter
938 self._datafilters[name] = filter
939
939
940 def wread(self, filename):
940 def wread(self, filename):
941 if self.wvfs.islink(filename):
941 if self.wvfs.islink(filename):
942 data = self.wvfs.readlink(filename)
942 data = self.wvfs.readlink(filename)
943 else:
943 else:
944 data = self.wvfs.read(filename)
944 data = self.wvfs.read(filename)
945 return self._filter(self._encodefilterpats, filename, data)
945 return self._filter(self._encodefilterpats, filename, data)
946
946
947 def wwrite(self, filename, data, flags, backgroundclose=False):
947 def wwrite(self, filename, data, flags, backgroundclose=False):
948 """write ``data`` into ``filename`` in the working directory
948 """write ``data`` into ``filename`` in the working directory
949
949
950 This returns length of written (maybe decoded) data.
950 This returns length of written (maybe decoded) data.
951 """
951 """
952 data = self._filter(self._decodefilterpats, filename, data)
952 data = self._filter(self._decodefilterpats, filename, data)
953 if 'l' in flags:
953 if 'l' in flags:
954 self.wvfs.symlink(data, filename)
954 self.wvfs.symlink(data, filename)
955 else:
955 else:
956 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
956 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
957 if 'x' in flags:
957 if 'x' in flags:
958 self.wvfs.setflags(filename, False, True)
958 self.wvfs.setflags(filename, False, True)
959 return len(data)
959 return len(data)
960
960
961 def wwritedata(self, filename, data):
961 def wwritedata(self, filename, data):
962 return self._filter(self._decodefilterpats, filename, data)
962 return self._filter(self._decodefilterpats, filename, data)
963
963
964 def currenttransaction(self):
964 def currenttransaction(self):
965 """return the current transaction or None if non exists"""
965 """return the current transaction or None if non exists"""
966 if self._transref:
966 if self._transref:
967 tr = self._transref()
967 tr = self._transref()
968 else:
968 else:
969 tr = None
969 tr = None
970
970
971 if tr and tr.running():
971 if tr and tr.running():
972 return tr
972 return tr
973 return None
973 return None
974
974
975 def transaction(self, desc, report=None):
975 def transaction(self, desc, report=None):
976 if (self.ui.configbool('devel', 'all-warnings')
976 if (self.ui.configbool('devel', 'all-warnings')
977 or self.ui.configbool('devel', 'check-locks')):
977 or self.ui.configbool('devel', 'check-locks')):
978 if self._currentlock(self._lockref) is None:
978 if self._currentlock(self._lockref) is None:
979 raise error.ProgrammingError('transaction requires locking')
979 raise error.ProgrammingError('transaction requires locking')
980 tr = self.currenttransaction()
980 tr = self.currenttransaction()
981 if tr is not None:
981 if tr is not None:
982 return tr.nest()
982 return tr.nest()
983
983
984 # abort here if the journal already exists
984 # abort here if the journal already exists
985 if self.svfs.exists("journal"):
985 if self.svfs.exists("journal"):
986 raise error.RepoError(
986 raise error.RepoError(
987 _("abandoned transaction found"),
987 _("abandoned transaction found"),
988 hint=_("run 'hg recover' to clean up transaction"))
988 hint=_("run 'hg recover' to clean up transaction"))
989
989
990 idbase = "%.40f#%f" % (random.random(), time.time())
990 idbase = "%.40f#%f" % (random.random(), time.time())
991 ha = hex(hashlib.sha1(idbase).digest())
991 ha = hex(hashlib.sha1(idbase).digest())
992 txnid = 'TXN:' + ha
992 txnid = 'TXN:' + ha
993 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
993 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
994
994
995 self._writejournal(desc)
995 self._writejournal(desc)
996 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
996 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
997 if report:
997 if report:
998 rp = report
998 rp = report
999 else:
999 else:
1000 rp = self.ui.warn
1000 rp = self.ui.warn
1001 vfsmap = {'plain': self.vfs} # root of .hg/
1001 vfsmap = {'plain': self.vfs} # root of .hg/
1002 # we must avoid cyclic reference between repo and transaction.
1002 # we must avoid cyclic reference between repo and transaction.
1003 reporef = weakref.ref(self)
1003 reporef = weakref.ref(self)
1004 def validate(tr):
1004 # Code to track tag movement
1005 #
1006 # Since tags are all handled as file content, it is actually quite hard
1007 # to track these movement from a code perspective. So we fallback to a
1008 # tracking at the repository level. One could envision to track changes
1009 # to the '.hgtags' file through changegroup apply but that fails to
1010 # cope with case where transaction expose new heads without changegroup
1011 # being involved (eg: phase movement).
1012 #
1013 # For now, We gate the feature behind a flag since this likely comes
1014 # with performance impacts. The current code run more often than needed
1015 # and do not use caches as much as it could. The current focus is on
1016 # the behavior of the feature so we disable it by default. The flag
1017 # will be removed when we are happy with the performance impact.
1018 tracktags = lambda x: None
1019 # experimental config: experimental.hook-track-tags
1020 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
1021 False)
1022 if desc != 'strip' and shouldtracktags:
1023 oldheads = self.changelog.headrevs()
1024 def tracktags(tr2):
1025 repo = reporef()
1026 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1027 newheads = repo.changelog.headrevs()
1028 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1029 # notes: we compare lists here.
1030 # As we do it only once buiding set would not be cheaper
1031 if oldfnodes != newfnodes:
1032 tr2.hookargs['tag_moved'] = '1'
1033 def validate(tr2):
1005 """will run pre-closing hooks"""
1034 """will run pre-closing hooks"""
1035 # XXX the transaction API is a bit lacking here so we take a hacky
1036 # path for now
1037 #
1038 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1039 # dict is copied before these run. In addition we needs the data
1040 # available to in memory hooks too.
1041 #
1042 # Moreover, we also need to make sure this runs before txnclose
1043 # hooks and there is no "pending" mechanism that would execute
1044 # logic only if hooks are about to run.
1045 #
1046 # Fixing this limitation of the transaction is also needed to track
1047 # other families of changes (bookmarks, phases, obsolescence).
1048 #
1049 # This will have to be fixed before we remove the experimental
1050 # gating.
1051 tracktags(tr2)
1006 reporef().hook('pretxnclose', throw=True,
1052 reporef().hook('pretxnclose', throw=True,
1007 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1053 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1008 def releasefn(tr, success):
1054 def releasefn(tr, success):
1009 repo = reporef()
1055 repo = reporef()
1010 if success:
1056 if success:
1011 # this should be explicitly invoked here, because
1057 # this should be explicitly invoked here, because
1012 # in-memory changes aren't written out at closing
1058 # in-memory changes aren't written out at closing
1013 # transaction, if tr.addfilegenerator (via
1059 # transaction, if tr.addfilegenerator (via
1014 # dirstate.write or so) isn't invoked while
1060 # dirstate.write or so) isn't invoked while
1015 # transaction running
1061 # transaction running
1016 repo.dirstate.write(None)
1062 repo.dirstate.write(None)
1017 else:
1063 else:
1018 # discard all changes (including ones already written
1064 # discard all changes (including ones already written
1019 # out) in this transaction
1065 # out) in this transaction
1020 repo.dirstate.restorebackup(None, prefix='journal.')
1066 repo.dirstate.restorebackup(None, prefix='journal.')
1021
1067
1022 repo.invalidate(clearfilecache=True)
1068 repo.invalidate(clearfilecache=True)
1023
1069
1024 tr = transaction.transaction(rp, self.svfs, vfsmap,
1070 tr = transaction.transaction(rp, self.svfs, vfsmap,
1025 "journal",
1071 "journal",
1026 "undo",
1072 "undo",
1027 aftertrans(renames),
1073 aftertrans(renames),
1028 self.store.createmode,
1074 self.store.createmode,
1029 validator=validate,
1075 validator=validate,
1030 releasefn=releasefn)
1076 releasefn=releasefn)
1031
1077
1032 tr.hookargs['txnid'] = txnid
1078 tr.hookargs['txnid'] = txnid
1033 # note: writing the fncache only during finalize mean that the file is
1079 # note: writing the fncache only during finalize mean that the file is
1034 # outdated when running hooks. As fncache is used for streaming clone,
1080 # outdated when running hooks. As fncache is used for streaming clone,
1035 # this is not expected to break anything that happen during the hooks.
1081 # this is not expected to break anything that happen during the hooks.
1036 tr.addfinalize('flush-fncache', self.store.write)
1082 tr.addfinalize('flush-fncache', self.store.write)
1037 def txnclosehook(tr2):
1083 def txnclosehook(tr2):
1038 """To be run if transaction is successful, will schedule a hook run
1084 """To be run if transaction is successful, will schedule a hook run
1039 """
1085 """
1040 # Don't reference tr2 in hook() so we don't hold a reference.
1086 # Don't reference tr2 in hook() so we don't hold a reference.
1041 # This reduces memory consumption when there are multiple
1087 # This reduces memory consumption when there are multiple
1042 # transactions per lock. This can likely go away if issue5045
1088 # transactions per lock. This can likely go away if issue5045
1043 # fixes the function accumulation.
1089 # fixes the function accumulation.
1044 hookargs = tr2.hookargs
1090 hookargs = tr2.hookargs
1045
1091
1046 def hook():
1092 def hook():
1047 reporef().hook('txnclose', throw=False, txnname=desc,
1093 reporef().hook('txnclose', throw=False, txnname=desc,
1048 **pycompat.strkwargs(hookargs))
1094 **pycompat.strkwargs(hookargs))
1049 reporef()._afterlock(hook)
1095 reporef()._afterlock(hook)
1050 tr.addfinalize('txnclose-hook', txnclosehook)
1096 tr.addfinalize('txnclose-hook', txnclosehook)
1051 def txnaborthook(tr2):
1097 def txnaborthook(tr2):
1052 """To be run if transaction is aborted
1098 """To be run if transaction is aborted
1053 """
1099 """
1054 reporef().hook('txnabort', throw=False, txnname=desc,
1100 reporef().hook('txnabort', throw=False, txnname=desc,
1055 **tr2.hookargs)
1101 **tr2.hookargs)
1056 tr.addabort('txnabort-hook', txnaborthook)
1102 tr.addabort('txnabort-hook', txnaborthook)
1057 # avoid eager cache invalidation. in-memory data should be identical
1103 # avoid eager cache invalidation. in-memory data should be identical
1058 # to stored data if transaction has no error.
1104 # to stored data if transaction has no error.
1059 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1105 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1060 self._transref = weakref.ref(tr)
1106 self._transref = weakref.ref(tr)
1061 return tr
1107 return tr
1062
1108
1063 def _journalfiles(self):
1109 def _journalfiles(self):
1064 return ((self.svfs, 'journal'),
1110 return ((self.svfs, 'journal'),
1065 (self.vfs, 'journal.dirstate'),
1111 (self.vfs, 'journal.dirstate'),
1066 (self.vfs, 'journal.branch'),
1112 (self.vfs, 'journal.branch'),
1067 (self.vfs, 'journal.desc'),
1113 (self.vfs, 'journal.desc'),
1068 (self.vfs, 'journal.bookmarks'),
1114 (self.vfs, 'journal.bookmarks'),
1069 (self.svfs, 'journal.phaseroots'))
1115 (self.svfs, 'journal.phaseroots'))
1070
1116
1071 def undofiles(self):
1117 def undofiles(self):
1072 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1118 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1073
1119
1074 def _writejournal(self, desc):
1120 def _writejournal(self, desc):
1075 self.dirstate.savebackup(None, prefix='journal.')
1121 self.dirstate.savebackup(None, prefix='journal.')
1076 self.vfs.write("journal.branch",
1122 self.vfs.write("journal.branch",
1077 encoding.fromlocal(self.dirstate.branch()))
1123 encoding.fromlocal(self.dirstate.branch()))
1078 self.vfs.write("journal.desc",
1124 self.vfs.write("journal.desc",
1079 "%d\n%s\n" % (len(self), desc))
1125 "%d\n%s\n" % (len(self), desc))
1080 self.vfs.write("journal.bookmarks",
1126 self.vfs.write("journal.bookmarks",
1081 self.vfs.tryread("bookmarks"))
1127 self.vfs.tryread("bookmarks"))
1082 self.svfs.write("journal.phaseroots",
1128 self.svfs.write("journal.phaseroots",
1083 self.svfs.tryread("phaseroots"))
1129 self.svfs.tryread("phaseroots"))
1084
1130
1085 def recover(self):
1131 def recover(self):
1086 with self.lock():
1132 with self.lock():
1087 if self.svfs.exists("journal"):
1133 if self.svfs.exists("journal"):
1088 self.ui.status(_("rolling back interrupted transaction\n"))
1134 self.ui.status(_("rolling back interrupted transaction\n"))
1089 vfsmap = {'': self.svfs,
1135 vfsmap = {'': self.svfs,
1090 'plain': self.vfs,}
1136 'plain': self.vfs,}
1091 transaction.rollback(self.svfs, vfsmap, "journal",
1137 transaction.rollback(self.svfs, vfsmap, "journal",
1092 self.ui.warn)
1138 self.ui.warn)
1093 self.invalidate()
1139 self.invalidate()
1094 return True
1140 return True
1095 else:
1141 else:
1096 self.ui.warn(_("no interrupted transaction available\n"))
1142 self.ui.warn(_("no interrupted transaction available\n"))
1097 return False
1143 return False
1098
1144
1099 def rollback(self, dryrun=False, force=False):
1145 def rollback(self, dryrun=False, force=False):
1100 wlock = lock = dsguard = None
1146 wlock = lock = dsguard = None
1101 try:
1147 try:
1102 wlock = self.wlock()
1148 wlock = self.wlock()
1103 lock = self.lock()
1149 lock = self.lock()
1104 if self.svfs.exists("undo"):
1150 if self.svfs.exists("undo"):
1105 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1151 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1106
1152
1107 return self._rollback(dryrun, force, dsguard)
1153 return self._rollback(dryrun, force, dsguard)
1108 else:
1154 else:
1109 self.ui.warn(_("no rollback information available\n"))
1155 self.ui.warn(_("no rollback information available\n"))
1110 return 1
1156 return 1
1111 finally:
1157 finally:
1112 release(dsguard, lock, wlock)
1158 release(dsguard, lock, wlock)
1113
1159
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        """Implementation of rollback(): undo the last transaction.

        ``dryrun`` only reports what would be rolled back; ``force``
        skips the safety check for rolling back a commit that is not the
        working directory parent; ``dsguard`` is the dirstateguard
        protecting the dirstate backup. Returns 0 on success (including
        dry runs); raises error.Abort when the safety check fails.
        """
        ui = self.ui
        try:
            # 'undo.desc' holds "<old changelog length>\n<desc>[\n<detail>]"
            # (see _writejournal for the writer side)
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # no (readable) undo.desc: we do not know what we are undoing
            msg = _('rolling back unknown transaction\n')
            desc = None

        # rolling back a commit whose parent is not checked out could
        # silently discard work; require --force in that situation
        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        # if the rollback stripped a dirstate parent, also restore the
        # dirstate and branch from their 'undo.' backups
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            # drop any merge state now referring to stripped changesets
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1184
1230
1185 def invalidatecaches(self):
1231 def invalidatecaches(self):
1186
1232
1187 if '_tagscache' in vars(self):
1233 if '_tagscache' in vars(self):
1188 # can't use delattr on proxy
1234 # can't use delattr on proxy
1189 del self.__dict__['_tagscache']
1235 del self.__dict__['_tagscache']
1190
1236
1191 self.unfiltered()._branchcaches.clear()
1237 self.unfiltered()._branchcaches.clear()
1192 self.invalidatevolatilesets()
1238 self.invalidatevolatilesets()
1193
1239
1194 def invalidatevolatilesets(self):
1240 def invalidatevolatilesets(self):
1195 self.filteredrevcache.clear()
1241 self.filteredrevcache.clear()
1196 obsolete.clearobscaches(self)
1242 obsolete.clearobscaches(self)
1197
1243
1198 def invalidatedirstate(self):
1244 def invalidatedirstate(self):
1199 '''Invalidates the dirstate, causing the next call to dirstate
1245 '''Invalidates the dirstate, causing the next call to dirstate
1200 to check if it was modified since the last time it was read,
1246 to check if it was modified since the last time it was read,
1201 rereading it if it has.
1247 rereading it if it has.
1202
1248
1203 This is different to dirstate.invalidate() that it doesn't always
1249 This is different to dirstate.invalidate() that it doesn't always
1204 rereads the dirstate. Use dirstate.invalidate() if you want to
1250 rereads the dirstate. Use dirstate.invalidate() if you want to
1205 explicitly read the dirstate again (i.e. restoring it to a previous
1251 explicitly read the dirstate again (i.e. restoring it to a previous
1206 known good state).'''
1252 known good state).'''
1207 if hasunfilteredcache(self, 'dirstate'):
1253 if hasunfilteredcache(self, 'dirstate'):
1208 for k in self.dirstate._filecache:
1254 for k in self.dirstate._filecache:
1209 try:
1255 try:
1210 delattr(self.dirstate, k)
1256 delattr(self.dirstate, k)
1211 except AttributeError:
1257 except AttributeError:
1212 pass
1258 pass
1213 delattr(self.unfiltered(), 'dirstate')
1259 delattr(self.unfiltered(), 'dirstate')
1214
1260
1215 def invalidate(self, clearfilecache=False):
1261 def invalidate(self, clearfilecache=False):
1216 '''Invalidates both store and non-store parts other than dirstate
1262 '''Invalidates both store and non-store parts other than dirstate
1217
1263
1218 If a transaction is running, invalidation of store is omitted,
1264 If a transaction is running, invalidation of store is omitted,
1219 because discarding in-memory changes might cause inconsistency
1265 because discarding in-memory changes might cause inconsistency
1220 (e.g. incomplete fncache causes unintentional failure, but
1266 (e.g. incomplete fncache causes unintentional failure, but
1221 redundant one doesn't).
1267 redundant one doesn't).
1222 '''
1268 '''
1223 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1269 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1224 for k in list(self._filecache.keys()):
1270 for k in list(self._filecache.keys()):
1225 # dirstate is invalidated separately in invalidatedirstate()
1271 # dirstate is invalidated separately in invalidatedirstate()
1226 if k == 'dirstate':
1272 if k == 'dirstate':
1227 continue
1273 continue
1228
1274
1229 if clearfilecache:
1275 if clearfilecache:
1230 del self._filecache[k]
1276 del self._filecache[k]
1231 try:
1277 try:
1232 delattr(unfiltered, k)
1278 delattr(unfiltered, k)
1233 except AttributeError:
1279 except AttributeError:
1234 pass
1280 pass
1235 self.invalidatecaches()
1281 self.invalidatecaches()
1236 if not self.currenttransaction():
1282 if not self.currenttransaction():
1237 # TODO: Changing contents of store outside transaction
1283 # TODO: Changing contents of store outside transaction
1238 # causes inconsistency. We should make in-memory store
1284 # causes inconsistency. We should make in-memory store
1239 # changes detectable, and abort if changed.
1285 # changes detectable, and abort if changed.
1240 self.store.invalidatecaches()
1286 self.store.invalidatecaches()
1241
1287
1242 def invalidateall(self):
1288 def invalidateall(self):
1243 '''Fully invalidates both store and non-store parts, causing the
1289 '''Fully invalidates both store and non-store parts, causing the
1244 subsequent operation to reread any outside changes.'''
1290 subsequent operation to reread any outside changes.'''
1245 # extension should hook this to invalidate its caches
1291 # extension should hook this to invalidate its caches
1246 self.invalidate()
1292 self.invalidate()
1247 self.invalidatedirstate()
1293 self.invalidatedirstate()
1248
1294
1249 @unfilteredmethod
1295 @unfilteredmethod
1250 def _refreshfilecachestats(self, tr):
1296 def _refreshfilecachestats(self, tr):
1251 """Reload stats of cached files so that they are flagged as valid"""
1297 """Reload stats of cached files so that they are flagged as valid"""
1252 for k, ce in self._filecache.items():
1298 for k, ce in self._filecache.items():
1253 if k == 'dirstate' or k not in self.__dict__:
1299 if k == 'dirstate' or k not in self.__dict__:
1254 continue
1300 continue
1255 ce.refresh()
1301 ce.refresh()
1256
1302
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        """Acquire ``lockname`` in ``vfs`` and return the lock object.

        ``releasefn``/``acquirefn`` are forwarded to lockmod.lock;
        ``desc`` only feeds the user-facing wait messages. If the lock
        is held elsewhere: with ``wait`` False the error.LockHeld is
        re-raised, otherwise we warn and retry with a timeout taken
        from the 'ui.timeout' config (default 600 seconds).
        """
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            # first attempt is non-blocking (timeout 0)
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            # NOTE(review): the retry does not pass inheritchecker/
            # parentlock again -- confirm whether that is intentional
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l
1288
1334
1289 def _afterlock(self, callback):
1335 def _afterlock(self, callback):
1290 """add a callback to be run when the repository is fully unlocked
1336 """add a callback to be run when the repository is fully unlocked
1291
1337
1292 The callback will be executed when the outermost lock is released
1338 The callback will be executed when the outermost lock is released
1293 (with wlock being higher level than 'lock')."""
1339 (with wlock being higher level than 'lock')."""
1294 for ref in (self._wlockref, self._lockref):
1340 for ref in (self._wlockref, self._lockref):
1295 l = ref and ref()
1341 l = ref and ref()
1296 if l and l.held:
1342 if l and l.held:
1297 l.postrelease.append(callback)
1343 l.postrelease.append(callback)
1298 break
1344 break
1299 else: # no lock have been found.
1345 else: # no lock have been found.
1300 callback()
1346 callback()
1301
1347
1302 def lock(self, wait=True):
1348 def lock(self, wait=True):
1303 '''Lock the repository store (.hg/store) and return a weak reference
1349 '''Lock the repository store (.hg/store) and return a weak reference
1304 to the lock. Use this before modifying the store (e.g. committing or
1350 to the lock. Use this before modifying the store (e.g. committing or
1305 stripping). If you are opening a transaction, get a lock as well.)
1351 stripping). If you are opening a transaction, get a lock as well.)
1306
1352
1307 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1353 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1308 'wlock' first to avoid a dead-lock hazard.'''
1354 'wlock' first to avoid a dead-lock hazard.'''
1309 l = self._currentlock(self._lockref)
1355 l = self._currentlock(self._lockref)
1310 if l is not None:
1356 if l is not None:
1311 l.lock()
1357 l.lock()
1312 return l
1358 return l
1313
1359
1314 l = self._lock(self.svfs, "lock", wait, None,
1360 l = self._lock(self.svfs, "lock", wait, None,
1315 self.invalidate, _('repository %s') % self.origroot)
1361 self.invalidate, _('repository %s') % self.origroot)
1316 self._lockref = weakref.ref(l)
1362 self._lockref = weakref.ref(l)
1317 return l
1363 return l
1318
1364
1319 def _wlockchecktransaction(self):
1365 def _wlockchecktransaction(self):
1320 if self.currenttransaction() is not None:
1366 if self.currenttransaction() is not None:
1321 raise error.LockInheritanceContractViolation(
1367 raise error.LockInheritanceContractViolation(
1322 'wlock cannot be inherited in the middle of a transaction')
1368 'wlock cannot be inherited in the middle of a transaction')
1323
1369
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        # re-entrant case: reuse the wlock we already hold
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            # developer warning: taking wlock after lock violates the
            # acquisition order documented above
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            # on release, persist pending dirstate changes -- unless a
            # parent change is still pending, in which case discard them
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            # mark the freshly written dirstate's file stats as valid
            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
1359
1405
1360 def _currentlock(self, lockref):
1406 def _currentlock(self, lockref):
1361 """Returns the lock if it's held, or None if it's not."""
1407 """Returns the lock if it's held, or None if it's not."""
1362 if lockref is None:
1408 if lockref is None:
1363 return None
1409 return None
1364 l = lockref()
1410 l = lockref()
1365 if l is None or not l.held:
1411 if l is None or not l.held:
1366 return None
1412 return None
1367 return l
1413 return l
1368
1414
1369 def currentwlock(self):
1415 def currentwlock(self):
1370 """Returns the wlock if it's held, or None if it's not."""
1416 """Returns the wlock if it's held, or None if it's not."""
1371 return self._currentlock(self._wlockref)
1417 return self._currentlock(self._wlockref)
1372
1418
1373 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1419 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1374 """
1420 """
1375 commit an individual file as part of a larger transaction
1421 commit an individual file as part of a larger transaction
1376 """
1422 """
1377
1423
1378 fname = fctx.path()
1424 fname = fctx.path()
1379 fparent1 = manifest1.get(fname, nullid)
1425 fparent1 = manifest1.get(fname, nullid)
1380 fparent2 = manifest2.get(fname, nullid)
1426 fparent2 = manifest2.get(fname, nullid)
1381 if isinstance(fctx, context.filectx):
1427 if isinstance(fctx, context.filectx):
1382 node = fctx.filenode()
1428 node = fctx.filenode()
1383 if node in [fparent1, fparent2]:
1429 if node in [fparent1, fparent2]:
1384 self.ui.debug('reusing %s filelog entry\n' % fname)
1430 self.ui.debug('reusing %s filelog entry\n' % fname)
1385 if manifest1.flags(fname) != fctx.flags():
1431 if manifest1.flags(fname) != fctx.flags():
1386 changelist.append(fname)
1432 changelist.append(fname)
1387 return node
1433 return node
1388
1434
1389 flog = self.file(fname)
1435 flog = self.file(fname)
1390 meta = {}
1436 meta = {}
1391 copy = fctx.renamed()
1437 copy = fctx.renamed()
1392 if copy and copy[0] != fname:
1438 if copy and copy[0] != fname:
1393 # Mark the new revision of this file as a copy of another
1439 # Mark the new revision of this file as a copy of another
1394 # file. This copy data will effectively act as a parent
1440 # file. This copy data will effectively act as a parent
1395 # of this new revision. If this is a merge, the first
1441 # of this new revision. If this is a merge, the first
1396 # parent will be the nullid (meaning "look up the copy data")
1442 # parent will be the nullid (meaning "look up the copy data")
1397 # and the second one will be the other parent. For example:
1443 # and the second one will be the other parent. For example:
1398 #
1444 #
1399 # 0 --- 1 --- 3 rev1 changes file foo
1445 # 0 --- 1 --- 3 rev1 changes file foo
1400 # \ / rev2 renames foo to bar and changes it
1446 # \ / rev2 renames foo to bar and changes it
1401 # \- 2 -/ rev3 should have bar with all changes and
1447 # \- 2 -/ rev3 should have bar with all changes and
1402 # should record that bar descends from
1448 # should record that bar descends from
1403 # bar in rev2 and foo in rev1
1449 # bar in rev2 and foo in rev1
1404 #
1450 #
1405 # this allows this merge to succeed:
1451 # this allows this merge to succeed:
1406 #
1452 #
1407 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1453 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1408 # \ / merging rev3 and rev4 should use bar@rev2
1454 # \ / merging rev3 and rev4 should use bar@rev2
1409 # \- 2 --- 4 as the merge base
1455 # \- 2 --- 4 as the merge base
1410 #
1456 #
1411
1457
1412 cfname = copy[0]
1458 cfname = copy[0]
1413 crev = manifest1.get(cfname)
1459 crev = manifest1.get(cfname)
1414 newfparent = fparent2
1460 newfparent = fparent2
1415
1461
1416 if manifest2: # branch merge
1462 if manifest2: # branch merge
1417 if fparent2 == nullid or crev is None: # copied on remote side
1463 if fparent2 == nullid or crev is None: # copied on remote side
1418 if cfname in manifest2:
1464 if cfname in manifest2:
1419 crev = manifest2[cfname]
1465 crev = manifest2[cfname]
1420 newfparent = fparent1
1466 newfparent = fparent1
1421
1467
1422 # Here, we used to search backwards through history to try to find
1468 # Here, we used to search backwards through history to try to find
1423 # where the file copy came from if the source of a copy was not in
1469 # where the file copy came from if the source of a copy was not in
1424 # the parent directory. However, this doesn't actually make sense to
1470 # the parent directory. However, this doesn't actually make sense to
1425 # do (what does a copy from something not in your working copy even
1471 # do (what does a copy from something not in your working copy even
1426 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1472 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1427 # the user that copy information was dropped, so if they didn't
1473 # the user that copy information was dropped, so if they didn't
1428 # expect this outcome it can be fixed, but this is the correct
1474 # expect this outcome it can be fixed, but this is the correct
1429 # behavior in this circumstance.
1475 # behavior in this circumstance.
1430
1476
1431 if crev:
1477 if crev:
1432 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1478 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1433 meta["copy"] = cfname
1479 meta["copy"] = cfname
1434 meta["copyrev"] = hex(crev)
1480 meta["copyrev"] = hex(crev)
1435 fparent1, fparent2 = nullid, newfparent
1481 fparent1, fparent2 = nullid, newfparent
1436 else:
1482 else:
1437 self.ui.warn(_("warning: can't find ancestor for '%s' "
1483 self.ui.warn(_("warning: can't find ancestor for '%s' "
1438 "copied from '%s'!\n") % (fname, cfname))
1484 "copied from '%s'!\n") % (fname, cfname))
1439
1485
1440 elif fparent1 == nullid:
1486 elif fparent1 == nullid:
1441 fparent1, fparent2 = fparent2, nullid
1487 fparent1, fparent2 = fparent2, nullid
1442 elif fparent2 != nullid:
1488 elif fparent2 != nullid:
1443 # is one parent an ancestor of the other?
1489 # is one parent an ancestor of the other?
1444 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1490 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1445 if fparent1 in fparentancestors:
1491 if fparent1 in fparentancestors:
1446 fparent1, fparent2 = fparent2, nullid
1492 fparent1, fparent2 = fparent2, nullid
1447 elif fparent2 in fparentancestors:
1493 elif fparent2 in fparentancestors:
1448 fparent2 = nullid
1494 fparent2 = nullid
1449
1495
1450 # is the file changed?
1496 # is the file changed?
1451 text = fctx.data()
1497 text = fctx.data()
1452 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1498 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1453 changelist.append(fname)
1499 changelist.append(fname)
1454 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1500 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1455 # are just the flags changed during merge?
1501 # are just the flags changed during merge?
1456 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1502 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1457 changelist.append(fname)
1503 changelist.append(fname)
1458
1504
1459 return fparent1
1505 return fparent1
1460
1506
1461 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1507 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1462 """check for commit arguments that aren't committable"""
1508 """check for commit arguments that aren't committable"""
1463 if match.isexact() or match.prefix():
1509 if match.isexact() or match.prefix():
1464 matched = set(status.modified + status.added + status.removed)
1510 matched = set(status.modified + status.added + status.removed)
1465
1511
1466 for f in match.files():
1512 for f in match.files():
1467 f = self.dirstate.normalize(f)
1513 f = self.dirstate.normalize(f)
1468 if f == '.' or f in matched or f in wctx.substate:
1514 if f == '.' or f in matched or f in wctx.substate:
1469 continue
1515 continue
1470 if f in status.deleted:
1516 if f in status.deleted:
1471 fail(f, _('file not found!'))
1517 fail(f, _('file not found!'))
1472 if f in vdirs: # visited directory
1518 if f in vdirs: # visited directory
1473 d = f + '/'
1519 d = f + '/'
1474 for mf in matched:
1520 for mf in matched:
1475 if mf.startswith(d):
1521 if mf.startswith(d):
1476 break
1522 break
1477 else:
1523 else:
1478 fail(f, _("no match under directory!"))
1524 fail(f, _("no match under directory!"))
1479 elif f not in self.dirstate:
1525 elif f not in self.dirstate:
1480 fail(f, _("file not tracked!"))
1526 fail(f, _("file not tracked!"))
1481
1527
1482 @unfilteredmethod
1528 @unfilteredmethod
1483 def commit(self, text="", user=None, date=None, match=None, force=False,
1529 def commit(self, text="", user=None, date=None, match=None, force=False,
1484 editor=False, extra=None):
1530 editor=False, extra=None):
1485 """Add a new revision to current repository.
1531 """Add a new revision to current repository.
1486
1532
1487 Revision information is gathered from the working directory,
1533 Revision information is gathered from the working directory,
1488 match can be used to filter the committed files. If editor is
1534 match can be used to filter the committed files. If editor is
1489 supplied, it is called to get a commit message.
1535 supplied, it is called to get a commit message.
1490 """
1536 """
1491 if extra is None:
1537 if extra is None:
1492 extra = {}
1538 extra = {}
1493
1539
1494 def fail(f, msg):
1540 def fail(f, msg):
1495 raise error.Abort('%s: %s' % (f, msg))
1541 raise error.Abort('%s: %s' % (f, msg))
1496
1542
1497 if not match:
1543 if not match:
1498 match = matchmod.always(self.root, '')
1544 match = matchmod.always(self.root, '')
1499
1545
1500 if not force:
1546 if not force:
1501 vdirs = []
1547 vdirs = []
1502 match.explicitdir = vdirs.append
1548 match.explicitdir = vdirs.append
1503 match.bad = fail
1549 match.bad = fail
1504
1550
1505 wlock = lock = tr = None
1551 wlock = lock = tr = None
1506 try:
1552 try:
1507 wlock = self.wlock()
1553 wlock = self.wlock()
1508 lock = self.lock() # for recent changelog (see issue4368)
1554 lock = self.lock() # for recent changelog (see issue4368)
1509
1555
1510 wctx = self[None]
1556 wctx = self[None]
1511 merge = len(wctx.parents()) > 1
1557 merge = len(wctx.parents()) > 1
1512
1558
1513 if not force and merge and match.ispartial():
1559 if not force and merge and match.ispartial():
1514 raise error.Abort(_('cannot partially commit a merge '
1560 raise error.Abort(_('cannot partially commit a merge '
1515 '(do not specify files or patterns)'))
1561 '(do not specify files or patterns)'))
1516
1562
1517 status = self.status(match=match, clean=force)
1563 status = self.status(match=match, clean=force)
1518 if force:
1564 if force:
1519 status.modified.extend(status.clean) # mq may commit clean files
1565 status.modified.extend(status.clean) # mq may commit clean files
1520
1566
1521 # check subrepos
1567 # check subrepos
1522 subs = []
1568 subs = []
1523 commitsubs = set()
1569 commitsubs = set()
1524 newstate = wctx.substate.copy()
1570 newstate = wctx.substate.copy()
1525 # only manage subrepos and .hgsubstate if .hgsub is present
1571 # only manage subrepos and .hgsubstate if .hgsub is present
1526 if '.hgsub' in wctx:
1572 if '.hgsub' in wctx:
1527 # we'll decide whether to track this ourselves, thanks
1573 # we'll decide whether to track this ourselves, thanks
1528 for c in status.modified, status.added, status.removed:
1574 for c in status.modified, status.added, status.removed:
1529 if '.hgsubstate' in c:
1575 if '.hgsubstate' in c:
1530 c.remove('.hgsubstate')
1576 c.remove('.hgsubstate')
1531
1577
1532 # compare current state to last committed state
1578 # compare current state to last committed state
1533 # build new substate based on last committed state
1579 # build new substate based on last committed state
1534 oldstate = wctx.p1().substate
1580 oldstate = wctx.p1().substate
1535 for s in sorted(newstate.keys()):
1581 for s in sorted(newstate.keys()):
1536 if not match(s):
1582 if not match(s):
1537 # ignore working copy, use old state if present
1583 # ignore working copy, use old state if present
1538 if s in oldstate:
1584 if s in oldstate:
1539 newstate[s] = oldstate[s]
1585 newstate[s] = oldstate[s]
1540 continue
1586 continue
1541 if not force:
1587 if not force:
1542 raise error.Abort(
1588 raise error.Abort(
1543 _("commit with new subrepo %s excluded") % s)
1589 _("commit with new subrepo %s excluded") % s)
1544 dirtyreason = wctx.sub(s).dirtyreason(True)
1590 dirtyreason = wctx.sub(s).dirtyreason(True)
1545 if dirtyreason:
1591 if dirtyreason:
1546 if not self.ui.configbool('ui', 'commitsubrepos'):
1592 if not self.ui.configbool('ui', 'commitsubrepos'):
1547 raise error.Abort(dirtyreason,
1593 raise error.Abort(dirtyreason,
1548 hint=_("use --subrepos for recursive commit"))
1594 hint=_("use --subrepos for recursive commit"))
1549 subs.append(s)
1595 subs.append(s)
1550 commitsubs.add(s)
1596 commitsubs.add(s)
1551 else:
1597 else:
1552 bs = wctx.sub(s).basestate()
1598 bs = wctx.sub(s).basestate()
1553 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1599 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1554 if oldstate.get(s, (None, None, None))[1] != bs:
1600 if oldstate.get(s, (None, None, None))[1] != bs:
1555 subs.append(s)
1601 subs.append(s)
1556
1602
1557 # check for removed subrepos
1603 # check for removed subrepos
1558 for p in wctx.parents():
1604 for p in wctx.parents():
1559 r = [s for s in p.substate if s not in newstate]
1605 r = [s for s in p.substate if s not in newstate]
1560 subs += [s for s in r if match(s)]
1606 subs += [s for s in r if match(s)]
1561 if subs:
1607 if subs:
1562 if (not match('.hgsub') and
1608 if (not match('.hgsub') and
1563 '.hgsub' in (wctx.modified() + wctx.added())):
1609 '.hgsub' in (wctx.modified() + wctx.added())):
1564 raise error.Abort(
1610 raise error.Abort(
1565 _("can't commit subrepos without .hgsub"))
1611 _("can't commit subrepos without .hgsub"))
1566 status.modified.insert(0, '.hgsubstate')
1612 status.modified.insert(0, '.hgsubstate')
1567
1613
1568 elif '.hgsub' in status.removed:
1614 elif '.hgsub' in status.removed:
1569 # clean up .hgsubstate when .hgsub is removed
1615 # clean up .hgsubstate when .hgsub is removed
1570 if ('.hgsubstate' in wctx and
1616 if ('.hgsubstate' in wctx and
1571 '.hgsubstate' not in (status.modified + status.added +
1617 '.hgsubstate' not in (status.modified + status.added +
1572 status.removed)):
1618 status.removed)):
1573 status.removed.insert(0, '.hgsubstate')
1619 status.removed.insert(0, '.hgsubstate')
1574
1620
1575 # make sure all explicit patterns are matched
1621 # make sure all explicit patterns are matched
1576 if not force:
1622 if not force:
1577 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1623 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1578
1624
1579 cctx = context.workingcommitctx(self, status,
1625 cctx = context.workingcommitctx(self, status,
1580 text, user, date, extra)
1626 text, user, date, extra)
1581
1627
1582 # internal config: ui.allowemptycommit
1628 # internal config: ui.allowemptycommit
1583 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1629 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1584 or extra.get('close') or merge or cctx.files()
1630 or extra.get('close') or merge or cctx.files()
1585 or self.ui.configbool('ui', 'allowemptycommit'))
1631 or self.ui.configbool('ui', 'allowemptycommit'))
1586 if not allowemptycommit:
1632 if not allowemptycommit:
1587 return None
1633 return None
1588
1634
1589 if merge and cctx.deleted():
1635 if merge and cctx.deleted():
1590 raise error.Abort(_("cannot commit merge with missing files"))
1636 raise error.Abort(_("cannot commit merge with missing files"))
1591
1637
1592 ms = mergemod.mergestate.read(self)
1638 ms = mergemod.mergestate.read(self)
1593 mergeutil.checkunresolved(ms)
1639 mergeutil.checkunresolved(ms)
1594
1640
1595 if editor:
1641 if editor:
1596 cctx._text = editor(self, cctx, subs)
1642 cctx._text = editor(self, cctx, subs)
1597 edited = (text != cctx._text)
1643 edited = (text != cctx._text)
1598
1644
1599 # Save commit message in case this transaction gets rolled back
1645 # Save commit message in case this transaction gets rolled back
1600 # (e.g. by a pretxncommit hook). Leave the content alone on
1646 # (e.g. by a pretxncommit hook). Leave the content alone on
1601 # the assumption that the user will use the same editor again.
1647 # the assumption that the user will use the same editor again.
1602 msgfn = self.savecommitmessage(cctx._text)
1648 msgfn = self.savecommitmessage(cctx._text)
1603
1649
1604 # commit subs and write new state
1650 # commit subs and write new state
1605 if subs:
1651 if subs:
1606 for s in sorted(commitsubs):
1652 for s in sorted(commitsubs):
1607 sub = wctx.sub(s)
1653 sub = wctx.sub(s)
1608 self.ui.status(_('committing subrepository %s\n') %
1654 self.ui.status(_('committing subrepository %s\n') %
1609 subrepo.subrelpath(sub))
1655 subrepo.subrelpath(sub))
1610 sr = sub.commit(cctx._text, user, date)
1656 sr = sub.commit(cctx._text, user, date)
1611 newstate[s] = (newstate[s][0], sr)
1657 newstate[s] = (newstate[s][0], sr)
1612 subrepo.writestate(self, newstate)
1658 subrepo.writestate(self, newstate)
1613
1659
1614 p1, p2 = self.dirstate.parents()
1660 p1, p2 = self.dirstate.parents()
1615 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1661 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1616 try:
1662 try:
1617 self.hook("precommit", throw=True, parent1=hookp1,
1663 self.hook("precommit", throw=True, parent1=hookp1,
1618 parent2=hookp2)
1664 parent2=hookp2)
1619 tr = self.transaction('commit')
1665 tr = self.transaction('commit')
1620 ret = self.commitctx(cctx, True)
1666 ret = self.commitctx(cctx, True)
1621 except: # re-raises
1667 except: # re-raises
1622 if edited:
1668 if edited:
1623 self.ui.write(
1669 self.ui.write(
1624 _('note: commit message saved in %s\n') % msgfn)
1670 _('note: commit message saved in %s\n') % msgfn)
1625 raise
1671 raise
1626 # update bookmarks, dirstate and mergestate
1672 # update bookmarks, dirstate and mergestate
1627 bookmarks.update(self, [p1, p2], ret)
1673 bookmarks.update(self, [p1, p2], ret)
1628 cctx.markcommitted(ret)
1674 cctx.markcommitted(ret)
1629 ms.reset()
1675 ms.reset()
1630 tr.close()
1676 tr.close()
1631
1677
1632 finally:
1678 finally:
1633 lockmod.release(tr, lock, wlock)
1679 lockmod.release(tr, lock, wlock)
1634
1680
1635 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1681 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1636 # hack for command that use a temporary commit (eg: histedit)
1682 # hack for command that use a temporary commit (eg: histedit)
1637 # temporary commit got stripped before hook release
1683 # temporary commit got stripped before hook release
1638 if self.changelog.hasnode(ret):
1684 if self.changelog.hasnode(ret):
1639 self.hook("commit", node=node, parent1=parent1,
1685 self.hook("commit", node=node, parent1=parent1,
1640 parent2=parent2)
1686 parent2=parent2)
1641 self._afterlock(commithook)
1687 self._afterlock(commithook)
1642 return ret
1688 return ret
1643
1689
1644 @unfilteredmethod
1690 @unfilteredmethod
1645 def commitctx(self, ctx, error=False):
1691 def commitctx(self, ctx, error=False):
1646 """Add a new revision to current repository.
1692 """Add a new revision to current repository.
1647 Revision information is passed via the context argument.
1693 Revision information is passed via the context argument.
1648 """
1694 """
1649
1695
1650 tr = None
1696 tr = None
1651 p1, p2 = ctx.p1(), ctx.p2()
1697 p1, p2 = ctx.p1(), ctx.p2()
1652 user = ctx.user()
1698 user = ctx.user()
1653
1699
1654 lock = self.lock()
1700 lock = self.lock()
1655 try:
1701 try:
1656 tr = self.transaction("commit")
1702 tr = self.transaction("commit")
1657 trp = weakref.proxy(tr)
1703 trp = weakref.proxy(tr)
1658
1704
1659 if ctx.manifestnode():
1705 if ctx.manifestnode():
1660 # reuse an existing manifest revision
1706 # reuse an existing manifest revision
1661 mn = ctx.manifestnode()
1707 mn = ctx.manifestnode()
1662 files = ctx.files()
1708 files = ctx.files()
1663 elif ctx.files():
1709 elif ctx.files():
1664 m1ctx = p1.manifestctx()
1710 m1ctx = p1.manifestctx()
1665 m2ctx = p2.manifestctx()
1711 m2ctx = p2.manifestctx()
1666 mctx = m1ctx.copy()
1712 mctx = m1ctx.copy()
1667
1713
1668 m = mctx.read()
1714 m = mctx.read()
1669 m1 = m1ctx.read()
1715 m1 = m1ctx.read()
1670 m2 = m2ctx.read()
1716 m2 = m2ctx.read()
1671
1717
1672 # check in files
1718 # check in files
1673 added = []
1719 added = []
1674 changed = []
1720 changed = []
1675 removed = list(ctx.removed())
1721 removed = list(ctx.removed())
1676 linkrev = len(self)
1722 linkrev = len(self)
1677 self.ui.note(_("committing files:\n"))
1723 self.ui.note(_("committing files:\n"))
1678 for f in sorted(ctx.modified() + ctx.added()):
1724 for f in sorted(ctx.modified() + ctx.added()):
1679 self.ui.note(f + "\n")
1725 self.ui.note(f + "\n")
1680 try:
1726 try:
1681 fctx = ctx[f]
1727 fctx = ctx[f]
1682 if fctx is None:
1728 if fctx is None:
1683 removed.append(f)
1729 removed.append(f)
1684 else:
1730 else:
1685 added.append(f)
1731 added.append(f)
1686 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1732 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1687 trp, changed)
1733 trp, changed)
1688 m.setflag(f, fctx.flags())
1734 m.setflag(f, fctx.flags())
1689 except OSError as inst:
1735 except OSError as inst:
1690 self.ui.warn(_("trouble committing %s!\n") % f)
1736 self.ui.warn(_("trouble committing %s!\n") % f)
1691 raise
1737 raise
1692 except IOError as inst:
1738 except IOError as inst:
1693 errcode = getattr(inst, 'errno', errno.ENOENT)
1739 errcode = getattr(inst, 'errno', errno.ENOENT)
1694 if error or errcode and errcode != errno.ENOENT:
1740 if error or errcode and errcode != errno.ENOENT:
1695 self.ui.warn(_("trouble committing %s!\n") % f)
1741 self.ui.warn(_("trouble committing %s!\n") % f)
1696 raise
1742 raise
1697
1743
1698 # update manifest
1744 # update manifest
1699 self.ui.note(_("committing manifest\n"))
1745 self.ui.note(_("committing manifest\n"))
1700 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1746 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1701 drop = [f for f in removed if f in m]
1747 drop = [f for f in removed if f in m]
1702 for f in drop:
1748 for f in drop:
1703 del m[f]
1749 del m[f]
1704 mn = mctx.write(trp, linkrev,
1750 mn = mctx.write(trp, linkrev,
1705 p1.manifestnode(), p2.manifestnode(),
1751 p1.manifestnode(), p2.manifestnode(),
1706 added, drop)
1752 added, drop)
1707 files = changed + removed
1753 files = changed + removed
1708 else:
1754 else:
1709 mn = p1.manifestnode()
1755 mn = p1.manifestnode()
1710 files = []
1756 files = []
1711
1757
1712 # update changelog
1758 # update changelog
1713 self.ui.note(_("committing changelog\n"))
1759 self.ui.note(_("committing changelog\n"))
1714 self.changelog.delayupdate(tr)
1760 self.changelog.delayupdate(tr)
1715 n = self.changelog.add(mn, files, ctx.description(),
1761 n = self.changelog.add(mn, files, ctx.description(),
1716 trp, p1.node(), p2.node(),
1762 trp, p1.node(), p2.node(),
1717 user, ctx.date(), ctx.extra().copy())
1763 user, ctx.date(), ctx.extra().copy())
1718 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1764 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1719 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1765 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1720 parent2=xp2)
1766 parent2=xp2)
1721 # set the new commit is proper phase
1767 # set the new commit is proper phase
1722 targetphase = subrepo.newcommitphase(self.ui, ctx)
1768 targetphase = subrepo.newcommitphase(self.ui, ctx)
1723 if targetphase:
1769 if targetphase:
1724 # retract boundary do not alter parent changeset.
1770 # retract boundary do not alter parent changeset.
1725 # if a parent have higher the resulting phase will
1771 # if a parent have higher the resulting phase will
1726 # be compliant anyway
1772 # be compliant anyway
1727 #
1773 #
1728 # if minimal phase was 0 we don't need to retract anything
1774 # if minimal phase was 0 we don't need to retract anything
1729 phases.retractboundary(self, tr, targetphase, [n])
1775 phases.retractboundary(self, tr, targetphase, [n])
1730 tr.close()
1776 tr.close()
1731 branchmap.updatecache(self.filtered('served'))
1777 branchmap.updatecache(self.filtered('served'))
1732 return n
1778 return n
1733 finally:
1779 finally:
1734 if tr:
1780 if tr:
1735 tr.release()
1781 tr.release()
1736 lock.release()
1782 lock.release()
1737
1783
1738 @unfilteredmethod
1784 @unfilteredmethod
1739 def destroying(self):
1785 def destroying(self):
1740 '''Inform the repository that nodes are about to be destroyed.
1786 '''Inform the repository that nodes are about to be destroyed.
1741 Intended for use by strip and rollback, so there's a common
1787 Intended for use by strip and rollback, so there's a common
1742 place for anything that has to be done before destroying history.
1788 place for anything that has to be done before destroying history.
1743
1789
1744 This is mostly useful for saving state that is in memory and waiting
1790 This is mostly useful for saving state that is in memory and waiting
1745 to be flushed when the current lock is released. Because a call to
1791 to be flushed when the current lock is released. Because a call to
1746 destroyed is imminent, the repo will be invalidated causing those
1792 destroyed is imminent, the repo will be invalidated causing those
1747 changes to stay in memory (waiting for the next unlock), or vanish
1793 changes to stay in memory (waiting for the next unlock), or vanish
1748 completely.
1794 completely.
1749 '''
1795 '''
1750 # When using the same lock to commit and strip, the phasecache is left
1796 # When using the same lock to commit and strip, the phasecache is left
1751 # dirty after committing. Then when we strip, the repo is invalidated,
1797 # dirty after committing. Then when we strip, the repo is invalidated,
1752 # causing those changes to disappear.
1798 # causing those changes to disappear.
1753 if '_phasecache' in vars(self):
1799 if '_phasecache' in vars(self):
1754 self._phasecache.write()
1800 self._phasecache.write()
1755
1801
1756 @unfilteredmethod
1802 @unfilteredmethod
1757 def destroyed(self):
1803 def destroyed(self):
1758 '''Inform the repository that nodes have been destroyed.
1804 '''Inform the repository that nodes have been destroyed.
1759 Intended for use by strip and rollback, so there's a common
1805 Intended for use by strip and rollback, so there's a common
1760 place for anything that has to be done after destroying history.
1806 place for anything that has to be done after destroying history.
1761 '''
1807 '''
1762 # When one tries to:
1808 # When one tries to:
1763 # 1) destroy nodes thus calling this method (e.g. strip)
1809 # 1) destroy nodes thus calling this method (e.g. strip)
1764 # 2) use phasecache somewhere (e.g. commit)
1810 # 2) use phasecache somewhere (e.g. commit)
1765 #
1811 #
1766 # then 2) will fail because the phasecache contains nodes that were
1812 # then 2) will fail because the phasecache contains nodes that were
1767 # removed. We can either remove phasecache from the filecache,
1813 # removed. We can either remove phasecache from the filecache,
1768 # causing it to reload next time it is accessed, or simply filter
1814 # causing it to reload next time it is accessed, or simply filter
1769 # the removed nodes now and write the updated cache.
1815 # the removed nodes now and write the updated cache.
1770 self._phasecache.filterunknown(self)
1816 self._phasecache.filterunknown(self)
1771 self._phasecache.write()
1817 self._phasecache.write()
1772
1818
1773 # update the 'served' branch cache to help read only server process
1819 # update the 'served' branch cache to help read only server process
1774 # Thanks to branchcache collaboration this is done from the nearest
1820 # Thanks to branchcache collaboration this is done from the nearest
1775 # filtered subset and it is expected to be fast.
1821 # filtered subset and it is expected to be fast.
1776 branchmap.updatecache(self.filtered('served'))
1822 branchmap.updatecache(self.filtered('served'))
1777
1823
1778 # Ensure the persistent tag cache is updated. Doing it now
1824 # Ensure the persistent tag cache is updated. Doing it now
1779 # means that the tag cache only has to worry about destroyed
1825 # means that the tag cache only has to worry about destroyed
1780 # heads immediately after a strip/rollback. That in turn
1826 # heads immediately after a strip/rollback. That in turn
1781 # guarantees that "cachetip == currenttip" (comparing both rev
1827 # guarantees that "cachetip == currenttip" (comparing both rev
1782 # and node) always means no nodes have been added or destroyed.
1828 # and node) always means no nodes have been added or destroyed.
1783
1829
1784 # XXX this is suboptimal when qrefresh'ing: we strip the current
1830 # XXX this is suboptimal when qrefresh'ing: we strip the current
1785 # head, refresh the tag cache, then immediately add a new head.
1831 # head, refresh the tag cache, then immediately add a new head.
1786 # But I think doing it this way is necessary for the "instant
1832 # But I think doing it this way is necessary for the "instant
1787 # tag cache retrieval" case to work.
1833 # tag cache retrieval" case to work.
1788 self.invalidate()
1834 self.invalidate()
1789
1835
1790 def walk(self, match, node=None):
1836 def walk(self, match, node=None):
1791 '''
1837 '''
1792 walk recursively through the directory tree or a given
1838 walk recursively through the directory tree or a given
1793 changeset, finding all files matched by the match
1839 changeset, finding all files matched by the match
1794 function
1840 function
1795 '''
1841 '''
1796 return self[node].walk(match)
1842 return self[node].walk(match)
1797
1843
1798 def status(self, node1='.', node2=None, match=None,
1844 def status(self, node1='.', node2=None, match=None,
1799 ignored=False, clean=False, unknown=False,
1845 ignored=False, clean=False, unknown=False,
1800 listsubrepos=False):
1846 listsubrepos=False):
1801 '''a convenience method that calls node1.status(node2)'''
1847 '''a convenience method that calls node1.status(node2)'''
1802 return self[node1].status(node2, match, ignored, clean, unknown,
1848 return self[node1].status(node2, match, ignored, clean, unknown,
1803 listsubrepos)
1849 listsubrepos)
1804
1850
1805 def heads(self, start=None):
1851 def heads(self, start=None):
1806 if start is None:
1852 if start is None:
1807 cl = self.changelog
1853 cl = self.changelog
1808 headrevs = reversed(cl.headrevs())
1854 headrevs = reversed(cl.headrevs())
1809 return [cl.node(rev) for rev in headrevs]
1855 return [cl.node(rev) for rev in headrevs]
1810
1856
1811 heads = self.changelog.heads(start)
1857 heads = self.changelog.heads(start)
1812 # sort the output in rev descending order
1858 # sort the output in rev descending order
1813 return sorted(heads, key=self.changelog.rev, reverse=True)
1859 return sorted(heads, key=self.changelog.rev, reverse=True)
1814
1860
1815 def branchheads(self, branch=None, start=None, closed=False):
1861 def branchheads(self, branch=None, start=None, closed=False):
1816 '''return a (possibly filtered) list of heads for the given branch
1862 '''return a (possibly filtered) list of heads for the given branch
1817
1863
1818 Heads are returned in topological order, from newest to oldest.
1864 Heads are returned in topological order, from newest to oldest.
1819 If branch is None, use the dirstate branch.
1865 If branch is None, use the dirstate branch.
1820 If start is not None, return only heads reachable from start.
1866 If start is not None, return only heads reachable from start.
1821 If closed is True, return heads that are marked as closed as well.
1867 If closed is True, return heads that are marked as closed as well.
1822 '''
1868 '''
1823 if branch is None:
1869 if branch is None:
1824 branch = self[None].branch()
1870 branch = self[None].branch()
1825 branches = self.branchmap()
1871 branches = self.branchmap()
1826 if branch not in branches:
1872 if branch not in branches:
1827 return []
1873 return []
1828 # the cache returns heads ordered lowest to highest
1874 # the cache returns heads ordered lowest to highest
1829 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1875 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1830 if start is not None:
1876 if start is not None:
1831 # filter out the heads that cannot be reached from startrev
1877 # filter out the heads that cannot be reached from startrev
1832 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1878 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1833 bheads = [h for h in bheads if h in fbheads]
1879 bheads = [h for h in bheads if h in fbheads]
1834 return bheads
1880 return bheads
1835
1881
1836 def branches(self, nodes):
1882 def branches(self, nodes):
1837 if not nodes:
1883 if not nodes:
1838 nodes = [self.changelog.tip()]
1884 nodes = [self.changelog.tip()]
1839 b = []
1885 b = []
1840 for n in nodes:
1886 for n in nodes:
1841 t = n
1887 t = n
1842 while True:
1888 while True:
1843 p = self.changelog.parents(n)
1889 p = self.changelog.parents(n)
1844 if p[1] != nullid or p[0] == nullid:
1890 if p[1] != nullid or p[0] == nullid:
1845 b.append((t, n, p[0], p[1]))
1891 b.append((t, n, p[0], p[1]))
1846 break
1892 break
1847 n = p[0]
1893 n = p[0]
1848 return b
1894 return b
1849
1895
1850 def between(self, pairs):
1896 def between(self, pairs):
1851 r = []
1897 r = []
1852
1898
1853 for top, bottom in pairs:
1899 for top, bottom in pairs:
1854 n, l, i = top, [], 0
1900 n, l, i = top, [], 0
1855 f = 1
1901 f = 1
1856
1902
1857 while n != bottom and n != nullid:
1903 while n != bottom and n != nullid:
1858 p = self.changelog.parents(n)[0]
1904 p = self.changelog.parents(n)[0]
1859 if i == f:
1905 if i == f:
1860 l.append(n)
1906 l.append(n)
1861 f = f * 2
1907 f = f * 2
1862 n = p
1908 n = p
1863 i += 1
1909 i += 1
1864
1910
1865 r.append(l)
1911 r.append(l)
1866
1912
1867 return r
1913 return r
1868
1914
1869 def checkpush(self, pushop):
1915 def checkpush(self, pushop):
1870 """Extensions can override this function if additional checks have
1916 """Extensions can override this function if additional checks have
1871 to be performed before pushing, or call it if they override push
1917 to be performed before pushing, or call it if they override push
1872 command.
1918 command.
1873 """
1919 """
1874 pass
1920 pass
1875
1921
1876 @unfilteredpropertycache
1922 @unfilteredpropertycache
1877 def prepushoutgoinghooks(self):
1923 def prepushoutgoinghooks(self):
1878 """Return util.hooks consists of a pushop with repo, remote, outgoing
1924 """Return util.hooks consists of a pushop with repo, remote, outgoing
1879 methods, which are called before pushing changesets.
1925 methods, which are called before pushing changesets.
1880 """
1926 """
1881 return util.hooks()
1927 return util.hooks()
1882
1928
1883 def pushkey(self, namespace, key, old, new):
1929 def pushkey(self, namespace, key, old, new):
1884 try:
1930 try:
1885 tr = self.currenttransaction()
1931 tr = self.currenttransaction()
1886 hookargs = {}
1932 hookargs = {}
1887 if tr is not None:
1933 if tr is not None:
1888 hookargs.update(tr.hookargs)
1934 hookargs.update(tr.hookargs)
1889 hookargs['namespace'] = namespace
1935 hookargs['namespace'] = namespace
1890 hookargs['key'] = key
1936 hookargs['key'] = key
1891 hookargs['old'] = old
1937 hookargs['old'] = old
1892 hookargs['new'] = new
1938 hookargs['new'] = new
1893 self.hook('prepushkey', throw=True, **hookargs)
1939 self.hook('prepushkey', throw=True, **hookargs)
1894 except error.HookAbort as exc:
1940 except error.HookAbort as exc:
1895 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1941 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1896 if exc.hint:
1942 if exc.hint:
1897 self.ui.write_err(_("(%s)\n") % exc.hint)
1943 self.ui.write_err(_("(%s)\n") % exc.hint)
1898 return False
1944 return False
1899 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1945 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1900 ret = pushkey.push(self, namespace, key, old, new)
1946 ret = pushkey.push(self, namespace, key, old, new)
1901 def runhook():
1947 def runhook():
1902 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1948 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1903 ret=ret)
1949 ret=ret)
1904 self._afterlock(runhook)
1950 self._afterlock(runhook)
1905 return ret
1951 return ret
1906
1952
1907 def listkeys(self, namespace):
1953 def listkeys(self, namespace):
1908 self.hook('prelistkeys', throw=True, namespace=namespace)
1954 self.hook('prelistkeys', throw=True, namespace=namespace)
1909 self.ui.debug('listing keys for "%s"\n' % namespace)
1955 self.ui.debug('listing keys for "%s"\n' % namespace)
1910 values = pushkey.list(self, namespace)
1956 values = pushkey.list(self, namespace)
1911 self.hook('listkeys', namespace=namespace, values=values)
1957 self.hook('listkeys', namespace=namespace, values=values)
1912 return values
1958 return values
1913
1959
1914 def debugwireargs(self, one, two, three=None, four=None, five=None):
1960 def debugwireargs(self, one, two, three=None, four=None, five=None):
1915 '''used to test argument passing over the wire'''
1961 '''used to test argument passing over the wire'''
1916 return "%s %s %s %s %s" % (one, two, three, four, five)
1962 return "%s %s %s %s %s" % (one, two, three, four, five)
1917
1963
1918 def savecommitmessage(self, text):
1964 def savecommitmessage(self, text):
1919 fp = self.vfs('last-message.txt', 'wb')
1965 fp = self.vfs('last-message.txt', 'wb')
1920 try:
1966 try:
1921 fp.write(text)
1967 fp.write(text)
1922 finally:
1968 finally:
1923 fp.close()
1969 fp.close()
1924 return self.pathto(fp.name[len(self.root) + 1:])
1970 return self.pathto(fp.name[len(self.root) + 1:])
1925
1971
1926 # used to avoid circular references so destructors work
1972 # used to avoid circular references so destructors work
1927 def aftertrans(files):
1973 def aftertrans(files):
1928 renamefiles = [tuple(t) for t in files]
1974 renamefiles = [tuple(t) for t in files]
1929 def a():
1975 def a():
1930 for vfs, src, dest in renamefiles:
1976 for vfs, src, dest in renamefiles:
1931 # if src and dest refer to a same file, vfs.rename is a no-op,
1977 # if src and dest refer to a same file, vfs.rename is a no-op,
1932 # leaving both src and dest on disk. delete dest to make sure
1978 # leaving both src and dest on disk. delete dest to make sure
1933 # the rename couldn't be such a no-op.
1979 # the rename couldn't be such a no-op.
1934 vfs.tryunlink(dest)
1980 vfs.tryunlink(dest)
1935 try:
1981 try:
1936 vfs.rename(src, dest)
1982 vfs.rename(src, dest)
1937 except OSError: # journal file does not yet exist
1983 except OSError: # journal file does not yet exist
1938 pass
1984 pass
1939 return a
1985 return a
1940
1986
1941 def undoname(fn):
1987 def undoname(fn):
1942 base, name = os.path.split(fn)
1988 base, name = os.path.split(fn)
1943 assert name.startswith('journal')
1989 assert name.startswith('journal')
1944 return os.path.join(base, name.replace('journal', 'undo', 1))
1990 return os.path.join(base, name.replace('journal', 'undo', 1))
1945
1991
1946 def instance(ui, path, create):
1992 def instance(ui, path, create):
1947 return localrepository(ui, util.urllocalpath(path), create)
1993 return localrepository(ui, util.urllocalpath(path), create)
1948
1994
1949 def islocal(path):
1995 def islocal(path):
1950 return True
1996 return True
1951
1997
1952 def newreporequirements(repo):
1998 def newreporequirements(repo):
1953 """Determine the set of requirements for a new local repository.
1999 """Determine the set of requirements for a new local repository.
1954
2000
1955 Extensions can wrap this function to specify custom requirements for
2001 Extensions can wrap this function to specify custom requirements for
1956 new repositories.
2002 new repositories.
1957 """
2003 """
1958 ui = repo.ui
2004 ui = repo.ui
1959 requirements = set(['revlogv1'])
2005 requirements = set(['revlogv1'])
1960 if ui.configbool('format', 'usestore', True):
2006 if ui.configbool('format', 'usestore', True):
1961 requirements.add('store')
2007 requirements.add('store')
1962 if ui.configbool('format', 'usefncache', True):
2008 if ui.configbool('format', 'usefncache', True):
1963 requirements.add('fncache')
2009 requirements.add('fncache')
1964 if ui.configbool('format', 'dotencode', True):
2010 if ui.configbool('format', 'dotencode', True):
1965 requirements.add('dotencode')
2011 requirements.add('dotencode')
1966
2012
1967 compengine = ui.config('experimental', 'format.compression', 'zlib')
2013 compengine = ui.config('experimental', 'format.compression', 'zlib')
1968 if compengine not in util.compengines:
2014 if compengine not in util.compengines:
1969 raise error.Abort(_('compression engine %s defined by '
2015 raise error.Abort(_('compression engine %s defined by '
1970 'experimental.format.compression not available') %
2016 'experimental.format.compression not available') %
1971 compengine,
2017 compengine,
1972 hint=_('run "hg debuginstall" to list available '
2018 hint=_('run "hg debuginstall" to list available '
1973 'compression engines'))
2019 'compression engines'))
1974
2020
1975 # zlib is the historical default and doesn't need an explicit requirement.
2021 # zlib is the historical default and doesn't need an explicit requirement.
1976 if compengine != 'zlib':
2022 if compengine != 'zlib':
1977 requirements.add('exp-compression-%s' % compengine)
2023 requirements.add('exp-compression-%s' % compengine)
1978
2024
1979 if scmutil.gdinitconfig(ui):
2025 if scmutil.gdinitconfig(ui):
1980 requirements.add('generaldelta')
2026 requirements.add('generaldelta')
1981 if ui.configbool('experimental', 'treemanifest', False):
2027 if ui.configbool('experimental', 'treemanifest', False):
1982 requirements.add('treemanifest')
2028 requirements.add('treemanifest')
1983 if ui.configbool('experimental', 'manifestv2', False):
2029 if ui.configbool('experimental', 'manifestv2', False):
1984 requirements.add('manifestv2')
2030 requirements.add('manifestv2')
1985
2031
1986 return requirements
2032 return requirements
@@ -1,646 +1,696 b''
1 $ cat >> $HGRCPATH << EOF
2 > [experimental]
3 > hook-track-tags=1
4 > [hooks]
5 > txnclose.track-tag=${TESTTMP}/taghook.sh
6 > EOF
7
8 $ cat << EOF > taghook.sh
9 > #!/bin/sh
10 > # escape the "$" otherwise the test runner interpret it when writting the
11 > # file...
12 > if [ -n "\$HG_TAG_MOVED" ]; then
13 > echo 'hook: tag changes detected'
14 > fi
15 > EOF
16 $ chmod +x taghook.sh
1 $ hg init test
17 $ hg init test
2 $ cd test
18 $ cd test
3
19
4 $ echo a > a
20 $ echo a > a
5 $ hg add a
21 $ hg add a
6 $ hg commit -m "test"
22 $ hg commit -m "test"
7 $ hg history
23 $ hg history
8 changeset: 0:acb14030fe0a
24 changeset: 0:acb14030fe0a
9 tag: tip
25 tag: tip
10 user: test
26 user: test
11 date: Thu Jan 01 00:00:00 1970 +0000
27 date: Thu Jan 01 00:00:00 1970 +0000
12 summary: test
28 summary: test
13
29
14
30
15 $ hg tag ' '
31 $ hg tag ' '
16 abort: tag names cannot consist entirely of whitespace
32 abort: tag names cannot consist entirely of whitespace
17 [255]
33 [255]
18
34
19 (this tests also that editor is not invoked, if '--edit' is not
35 (this tests also that editor is not invoked, if '--edit' is not
20 specified)
36 specified)
21
37
22 $ HGEDITOR=cat hg tag "bleah"
38 $ HGEDITOR=cat hg tag "bleah"
39 hook: tag changes detected
23 $ hg history
40 $ hg history
24 changeset: 1:d4f0d2909abc
41 changeset: 1:d4f0d2909abc
25 tag: tip
42 tag: tip
26 user: test
43 user: test
27 date: Thu Jan 01 00:00:00 1970 +0000
44 date: Thu Jan 01 00:00:00 1970 +0000
28 summary: Added tag bleah for changeset acb14030fe0a
45 summary: Added tag bleah for changeset acb14030fe0a
29
46
30 changeset: 0:acb14030fe0a
47 changeset: 0:acb14030fe0a
31 tag: bleah
48 tag: bleah
32 user: test
49 user: test
33 date: Thu Jan 01 00:00:00 1970 +0000
50 date: Thu Jan 01 00:00:00 1970 +0000
34 summary: test
51 summary: test
35
52
36
53
37 $ echo foo >> .hgtags
54 $ echo foo >> .hgtags
38 $ hg tag "bleah2"
55 $ hg tag "bleah2"
39 abort: working copy of .hgtags is changed
56 abort: working copy of .hgtags is changed
40 (please commit .hgtags manually)
57 (please commit .hgtags manually)
41 [255]
58 [255]
42
59
43 $ hg revert .hgtags
60 $ hg revert .hgtags
44 $ hg tag -r 0 x y z y y z
61 $ hg tag -r 0 x y z y y z
45 abort: tag names must be unique
62 abort: tag names must be unique
46 [255]
63 [255]
47 $ hg tag tap nada dot tip
64 $ hg tag tap nada dot tip
48 abort: the name 'tip' is reserved
65 abort: the name 'tip' is reserved
49 [255]
66 [255]
50 $ hg tag .
67 $ hg tag .
51 abort: the name '.' is reserved
68 abort: the name '.' is reserved
52 [255]
69 [255]
53 $ hg tag null
70 $ hg tag null
54 abort: the name 'null' is reserved
71 abort: the name 'null' is reserved
55 [255]
72 [255]
56 $ hg tag "bleah"
73 $ hg tag "bleah"
57 abort: tag 'bleah' already exists (use -f to force)
74 abort: tag 'bleah' already exists (use -f to force)
58 [255]
75 [255]
59 $ hg tag "blecch" "bleah"
76 $ hg tag "blecch" "bleah"
60 abort: tag 'bleah' already exists (use -f to force)
77 abort: tag 'bleah' already exists (use -f to force)
61 [255]
78 [255]
62
79
63 $ hg tag --remove "blecch"
80 $ hg tag --remove "blecch"
64 abort: tag 'blecch' does not exist
81 abort: tag 'blecch' does not exist
65 [255]
82 [255]
66 $ hg tag --remove "bleah" "blecch" "blough"
83 $ hg tag --remove "bleah" "blecch" "blough"
67 abort: tag 'blecch' does not exist
84 abort: tag 'blecch' does not exist
68 [255]
85 [255]
69
86
70 $ hg tag -r 0 "bleah0"
87 $ hg tag -r 0 "bleah0"
88 hook: tag changes detected
71 $ hg tag -l -r 1 "bleah1"
89 $ hg tag -l -r 1 "bleah1"
72 $ hg tag gack gawk gorp
90 $ hg tag gack gawk gorp
91 hook: tag changes detected
73 $ hg tag -f gack
92 $ hg tag -f gack
93 hook: tag changes detected
74 $ hg tag --remove gack gorp
94 $ hg tag --remove gack gorp
95 hook: tag changes detected
75
96
76 $ hg tag "bleah "
97 $ hg tag "bleah "
77 abort: tag 'bleah' already exists (use -f to force)
98 abort: tag 'bleah' already exists (use -f to force)
78 [255]
99 [255]
79 $ hg tag " bleah"
100 $ hg tag " bleah"
80 abort: tag 'bleah' already exists (use -f to force)
101 abort: tag 'bleah' already exists (use -f to force)
81 [255]
102 [255]
82 $ hg tag " bleah"
103 $ hg tag " bleah"
83 abort: tag 'bleah' already exists (use -f to force)
104 abort: tag 'bleah' already exists (use -f to force)
84 [255]
105 [255]
85 $ hg tag -r 0 " bleahbleah "
106 $ hg tag -r 0 " bleahbleah "
107 hook: tag changes detected
86 $ hg tag -r 0 " bleah bleah "
108 $ hg tag -r 0 " bleah bleah "
109 hook: tag changes detected
87
110
88 $ cat .hgtags
111 $ cat .hgtags
89 acb14030fe0a21b60322c440ad2d20cf7685a376 bleah
112 acb14030fe0a21b60322c440ad2d20cf7685a376 bleah
90 acb14030fe0a21b60322c440ad2d20cf7685a376 bleah0
113 acb14030fe0a21b60322c440ad2d20cf7685a376 bleah0
91 336fccc858a4eb69609a291105009e484a6b6b8d gack
114 336fccc858a4eb69609a291105009e484a6b6b8d gack
92 336fccc858a4eb69609a291105009e484a6b6b8d gawk
115 336fccc858a4eb69609a291105009e484a6b6b8d gawk
93 336fccc858a4eb69609a291105009e484a6b6b8d gorp
116 336fccc858a4eb69609a291105009e484a6b6b8d gorp
94 336fccc858a4eb69609a291105009e484a6b6b8d gack
117 336fccc858a4eb69609a291105009e484a6b6b8d gack
95 799667b6f2d9b957f73fa644a918c2df22bab58f gack
118 799667b6f2d9b957f73fa644a918c2df22bab58f gack
96 799667b6f2d9b957f73fa644a918c2df22bab58f gack
119 799667b6f2d9b957f73fa644a918c2df22bab58f gack
97 0000000000000000000000000000000000000000 gack
120 0000000000000000000000000000000000000000 gack
98 336fccc858a4eb69609a291105009e484a6b6b8d gorp
121 336fccc858a4eb69609a291105009e484a6b6b8d gorp
99 0000000000000000000000000000000000000000 gorp
122 0000000000000000000000000000000000000000 gorp
100 acb14030fe0a21b60322c440ad2d20cf7685a376 bleahbleah
123 acb14030fe0a21b60322c440ad2d20cf7685a376 bleahbleah
101 acb14030fe0a21b60322c440ad2d20cf7685a376 bleah bleah
124 acb14030fe0a21b60322c440ad2d20cf7685a376 bleah bleah
102
125
103 $ cat .hg/localtags
126 $ cat .hg/localtags
104 d4f0d2909abc9290e2773c08837d70c1794e3f5a bleah1
127 d4f0d2909abc9290e2773c08837d70c1794e3f5a bleah1
105
128
106 tagging on a non-head revision
129 tagging on a non-head revision
107
130
108 $ hg update 0
131 $ hg update 0
109 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
132 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
110 $ hg tag -l localblah
133 $ hg tag -l localblah
111 $ hg tag "foobar"
134 $ hg tag "foobar"
112 abort: working directory is not at a branch head (use -f to force)
135 abort: working directory is not at a branch head (use -f to force)
113 [255]
136 [255]
114 $ hg tag -f "foobar"
137 $ hg tag -f "foobar"
138 hook: tag changes detected
115 $ cat .hgtags
139 $ cat .hgtags
116 acb14030fe0a21b60322c440ad2d20cf7685a376 foobar
140 acb14030fe0a21b60322c440ad2d20cf7685a376 foobar
117 $ cat .hg/localtags
141 $ cat .hg/localtags
118 d4f0d2909abc9290e2773c08837d70c1794e3f5a bleah1
142 d4f0d2909abc9290e2773c08837d70c1794e3f5a bleah1
119 acb14030fe0a21b60322c440ad2d20cf7685a376 localblah
143 acb14030fe0a21b60322c440ad2d20cf7685a376 localblah
120
144
121 $ hg tag -l 'xx
145 $ hg tag -l 'xx
122 > newline'
146 > newline'
123 abort: '\n' cannot be used in a name
147 abort: '\n' cannot be used in a name
124 [255]
148 [255]
125 $ hg tag -l 'xx:xx'
149 $ hg tag -l 'xx:xx'
126 abort: ':' cannot be used in a name
150 abort: ':' cannot be used in a name
127 [255]
151 [255]
128
152
129 cloning local tags
153 cloning local tags
130
154
131 $ cd ..
155 $ cd ..
132 $ hg -R test log -r0:5
156 $ hg -R test log -r0:5
133 changeset: 0:acb14030fe0a
157 changeset: 0:acb14030fe0a
134 tag: bleah
158 tag: bleah
135 tag: bleah bleah
159 tag: bleah bleah
136 tag: bleah0
160 tag: bleah0
137 tag: bleahbleah
161 tag: bleahbleah
138 tag: foobar
162 tag: foobar
139 tag: localblah
163 tag: localblah
140 user: test
164 user: test
141 date: Thu Jan 01 00:00:00 1970 +0000
165 date: Thu Jan 01 00:00:00 1970 +0000
142 summary: test
166 summary: test
143
167
144 changeset: 1:d4f0d2909abc
168 changeset: 1:d4f0d2909abc
145 tag: bleah1
169 tag: bleah1
146 user: test
170 user: test
147 date: Thu Jan 01 00:00:00 1970 +0000
171 date: Thu Jan 01 00:00:00 1970 +0000
148 summary: Added tag bleah for changeset acb14030fe0a
172 summary: Added tag bleah for changeset acb14030fe0a
149
173
150 changeset: 2:336fccc858a4
174 changeset: 2:336fccc858a4
151 tag: gawk
175 tag: gawk
152 user: test
176 user: test
153 date: Thu Jan 01 00:00:00 1970 +0000
177 date: Thu Jan 01 00:00:00 1970 +0000
154 summary: Added tag bleah0 for changeset acb14030fe0a
178 summary: Added tag bleah0 for changeset acb14030fe0a
155
179
156 changeset: 3:799667b6f2d9
180 changeset: 3:799667b6f2d9
157 user: test
181 user: test
158 date: Thu Jan 01 00:00:00 1970 +0000
182 date: Thu Jan 01 00:00:00 1970 +0000
159 summary: Added tag gack, gawk, gorp for changeset 336fccc858a4
183 summary: Added tag gack, gawk, gorp for changeset 336fccc858a4
160
184
161 changeset: 4:154eeb7c0138
185 changeset: 4:154eeb7c0138
162 user: test
186 user: test
163 date: Thu Jan 01 00:00:00 1970 +0000
187 date: Thu Jan 01 00:00:00 1970 +0000
164 summary: Added tag gack for changeset 799667b6f2d9
188 summary: Added tag gack for changeset 799667b6f2d9
165
189
166 changeset: 5:b4bb47aaff09
190 changeset: 5:b4bb47aaff09
167 user: test
191 user: test
168 date: Thu Jan 01 00:00:00 1970 +0000
192 date: Thu Jan 01 00:00:00 1970 +0000
169 summary: Removed tag gack, gorp
193 summary: Removed tag gack, gorp
170
194
171 $ hg clone -q -rbleah1 test test1
195 $ hg clone -q -rbleah1 test test1
196 hook: tag changes detected
172 $ hg -R test1 parents --style=compact
197 $ hg -R test1 parents --style=compact
173 1[tip] d4f0d2909abc 1970-01-01 00:00 +0000 test
198 1[tip] d4f0d2909abc 1970-01-01 00:00 +0000 test
174 Added tag bleah for changeset acb14030fe0a
199 Added tag bleah for changeset acb14030fe0a
175
200
176 $ hg clone -q -r5 test#bleah1 test2
201 $ hg clone -q -r5 test#bleah1 test2
202 hook: tag changes detected
177 $ hg -R test2 parents --style=compact
203 $ hg -R test2 parents --style=compact
178 5[tip] b4bb47aaff09 1970-01-01 00:00 +0000 test
204 5[tip] b4bb47aaff09 1970-01-01 00:00 +0000 test
179 Removed tag gack, gorp
205 Removed tag gack, gorp
180
206
181 $ hg clone -q -U test#bleah1 test3
207 $ hg clone -q -U test#bleah1 test3
208 hook: tag changes detected
182 $ hg -R test3 parents --style=compact
209 $ hg -R test3 parents --style=compact
183
210
184 $ cd test
211 $ cd test
185
212
186 Issue601: hg tag doesn't do the right thing if .hgtags or localtags
213 Issue601: hg tag doesn't do the right thing if .hgtags or localtags
187 doesn't end with EOL
214 doesn't end with EOL
188
215
189 $ python << EOF
216 $ python << EOF
190 > f = file('.hg/localtags'); last = f.readlines()[-1][:-1]; f.close()
217 > f = file('.hg/localtags'); last = f.readlines()[-1][:-1]; f.close()
191 > f = file('.hg/localtags', 'w'); f.write(last); f.close()
218 > f = file('.hg/localtags', 'w'); f.write(last); f.close()
192 > EOF
219 > EOF
193 $ cat .hg/localtags; echo
220 $ cat .hg/localtags; echo
194 acb14030fe0a21b60322c440ad2d20cf7685a376 localblah
221 acb14030fe0a21b60322c440ad2d20cf7685a376 localblah
195 $ hg tag -l localnewline
222 $ hg tag -l localnewline
196 $ cat .hg/localtags; echo
223 $ cat .hg/localtags; echo
197 acb14030fe0a21b60322c440ad2d20cf7685a376 localblah
224 acb14030fe0a21b60322c440ad2d20cf7685a376 localblah
198 c2899151f4e76890c602a2597a650a72666681bf localnewline
225 c2899151f4e76890c602a2597a650a72666681bf localnewline
199
226
200
227
201 $ python << EOF
228 $ python << EOF
202 > f = file('.hgtags'); last = f.readlines()[-1][:-1]; f.close()
229 > f = file('.hgtags'); last = f.readlines()[-1][:-1]; f.close()
203 > f = file('.hgtags', 'w'); f.write(last); f.close()
230 > f = file('.hgtags', 'w'); f.write(last); f.close()
204 > EOF
231 > EOF
205 $ hg ci -m'broken manual edit of .hgtags'
232 $ hg ci -m'broken manual edit of .hgtags'
233 hook: tag changes detected
206 $ cat .hgtags; echo
234 $ cat .hgtags; echo
207 acb14030fe0a21b60322c440ad2d20cf7685a376 foobar
235 acb14030fe0a21b60322c440ad2d20cf7685a376 foobar
208 $ hg tag newline
236 $ hg tag newline
237 hook: tag changes detected
209 $ cat .hgtags; echo
238 $ cat .hgtags; echo
210 acb14030fe0a21b60322c440ad2d20cf7685a376 foobar
239 acb14030fe0a21b60322c440ad2d20cf7685a376 foobar
211 a0eea09de1eeec777b46f2085260a373b2fbc293 newline
240 a0eea09de1eeec777b46f2085260a373b2fbc293 newline
212
241
213
242
214 tag and branch using same name
243 tag and branch using same name
215
244
216 $ hg branch tag-and-branch-same-name
245 $ hg branch tag-and-branch-same-name
217 marked working directory as branch tag-and-branch-same-name
246 marked working directory as branch tag-and-branch-same-name
218 (branches are permanent and global, did you want a bookmark?)
247 (branches are permanent and global, did you want a bookmark?)
219 $ hg ci -m"discouraged"
248 $ hg ci -m"discouraged"
220 $ hg tag tag-and-branch-same-name
249 $ hg tag tag-and-branch-same-name
221 warning: tag tag-and-branch-same-name conflicts with existing branch name
250 warning: tag tag-and-branch-same-name conflicts with existing branch name
251 hook: tag changes detected
222
252
223 test custom commit messages
253 test custom commit messages
224
254
225 $ cat > editor.sh << '__EOF__'
255 $ cat > editor.sh << '__EOF__'
226 > echo "==== before editing"
256 > echo "==== before editing"
227 > cat "$1"
257 > cat "$1"
228 > echo "===="
258 > echo "===="
229 > echo "custom tag message" > "$1"
259 > echo "custom tag message" > "$1"
230 > echo "second line" >> "$1"
260 > echo "second line" >> "$1"
231 > __EOF__
261 > __EOF__
232
262
233 at first, test saving last-message.txt
263 at first, test saving last-message.txt
234
264
235 (test that editor is not invoked before transaction starting)
265 (test that editor is not invoked before transaction starting)
236
266
237 $ cat > .hg/hgrc << '__EOF__'
267 $ cat > .hg/hgrc << '__EOF__'
238 > [hooks]
268 > [hooks]
239 > # this failure occurs before editor invocation
269 > # this failure occurs before editor invocation
240 > pretag.test-saving-lastmessage = false
270 > pretag.test-saving-lastmessage = false
241 > __EOF__
271 > __EOF__
242 $ rm -f .hg/last-message.txt
272 $ rm -f .hg/last-message.txt
243 $ HGEDITOR="\"sh\" \"`pwd`/editor.sh\"" hg tag custom-tag -e
273 $ HGEDITOR="\"sh\" \"`pwd`/editor.sh\"" hg tag custom-tag -e
244 abort: pretag.test-saving-lastmessage hook exited with status 1
274 abort: pretag.test-saving-lastmessage hook exited with status 1
245 [255]
275 [255]
246 $ test -f .hg/last-message.txt
276 $ test -f .hg/last-message.txt
247 [1]
277 [1]
248
278
249 (test that editor is invoked and commit message is saved into
279 (test that editor is invoked and commit message is saved into
250 "last-message.txt")
280 "last-message.txt")
251
281
252 $ cat >> .hg/hgrc << '__EOF__'
282 $ cat >> .hg/hgrc << '__EOF__'
253 > [hooks]
283 > [hooks]
254 > pretag.test-saving-lastmessage =
284 > pretag.test-saving-lastmessage =
255 > # this failure occurs after editor invocation
285 > # this failure occurs after editor invocation
256 > pretxncommit.unexpectedabort = false
286 > pretxncommit.unexpectedabort = false
257 > __EOF__
287 > __EOF__
258
288
259 (this tests also that editor is invoked, if '--edit' is specified,
289 (this tests also that editor is invoked, if '--edit' is specified,
260 regardless of '--message')
290 regardless of '--message')
261
291
262 $ rm -f .hg/last-message.txt
292 $ rm -f .hg/last-message.txt
263 $ HGEDITOR="\"sh\" \"`pwd`/editor.sh\"" hg tag custom-tag -e -m "foo bar"
293 $ HGEDITOR="\"sh\" \"`pwd`/editor.sh\"" hg tag custom-tag -e -m "foo bar"
264 ==== before editing
294 ==== before editing
265 foo bar
295 foo bar
266
296
267
297
268 HG: Enter commit message. Lines beginning with 'HG:' are removed.
298 HG: Enter commit message. Lines beginning with 'HG:' are removed.
269 HG: Leave message empty to abort commit.
299 HG: Leave message empty to abort commit.
270 HG: --
300 HG: --
271 HG: user: test
301 HG: user: test
272 HG: branch 'tag-and-branch-same-name'
302 HG: branch 'tag-and-branch-same-name'
273 HG: changed .hgtags
303 HG: changed .hgtags
274 ====
304 ====
275 note: commit message saved in .hg/last-message.txt
305 note: commit message saved in .hg/last-message.txt
276 transaction abort!
306 transaction abort!
277 rollback completed
307 rollback completed
278 abort: pretxncommit.unexpectedabort hook exited with status 1
308 abort: pretxncommit.unexpectedabort hook exited with status 1
279 [255]
309 [255]
280 $ cat .hg/last-message.txt
310 $ cat .hg/last-message.txt
281 custom tag message
311 custom tag message
282 second line
312 second line
283
313
284 $ cat >> .hg/hgrc << '__EOF__'
314 $ cat >> .hg/hgrc << '__EOF__'
285 > [hooks]
315 > [hooks]
286 > pretxncommit.unexpectedabort =
316 > pretxncommit.unexpectedabort =
287 > __EOF__
317 > __EOF__
288 $ hg status .hgtags
318 $ hg status .hgtags
289 M .hgtags
319 M .hgtags
290 $ hg revert --no-backup -q .hgtags
320 $ hg revert --no-backup -q .hgtags
291
321
292 then, test custom commit message itself
322 then, test custom commit message itself
293
323
294 $ HGEDITOR="\"sh\" \"`pwd`/editor.sh\"" hg tag custom-tag -e
324 $ HGEDITOR="\"sh\" \"`pwd`/editor.sh\"" hg tag custom-tag -e
295 ==== before editing
325 ==== before editing
296 Added tag custom-tag for changeset 75a534207be6
326 Added tag custom-tag for changeset 75a534207be6
297
327
298
328
299 HG: Enter commit message. Lines beginning with 'HG:' are removed.
329 HG: Enter commit message. Lines beginning with 'HG:' are removed.
300 HG: Leave message empty to abort commit.
330 HG: Leave message empty to abort commit.
301 HG: --
331 HG: --
302 HG: user: test
332 HG: user: test
303 HG: branch 'tag-and-branch-same-name'
333 HG: branch 'tag-and-branch-same-name'
304 HG: changed .hgtags
334 HG: changed .hgtags
305 ====
335 ====
336 hook: tag changes detected
306 $ hg log -l1 --template "{desc}\n"
337 $ hg log -l1 --template "{desc}\n"
307 custom tag message
338 custom tag message
308 second line
339 second line
309
340
310
341
311 local tag with .hgtags modified
342 local tag with .hgtags modified
312
343
313 $ hg tag hgtags-modified
344 $ hg tag hgtags-modified
345 hook: tag changes detected
314 $ hg rollback
346 $ hg rollback
315 repository tip rolled back to revision 13 (undo commit)
347 repository tip rolled back to revision 13 (undo commit)
316 working directory now based on revision 13
348 working directory now based on revision 13
317 $ hg st
349 $ hg st
318 M .hgtags
350 M .hgtags
319 ? .hgtags.orig
351 ? .hgtags.orig
320 ? editor.sh
352 ? editor.sh
321 $ hg tag --local baz
353 $ hg tag --local baz
322 $ hg revert --no-backup .hgtags
354 $ hg revert --no-backup .hgtags
323
355
324
356
325 tagging when at named-branch-head that's not a topo-head
357 tagging when at named-branch-head that's not a topo-head
326
358
327 $ hg up default
359 $ hg up default
328 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
360 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
329 $ hg merge -t internal:local
361 $ hg merge -t internal:local
330 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
362 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
331 (branch merge, don't forget to commit)
363 (branch merge, don't forget to commit)
332 $ hg ci -m 'merge named branch'
364 $ hg ci -m 'merge named branch'
365 hook: tag changes detected
333 $ hg up 13
366 $ hg up 13
334 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
367 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
335 $ hg tag new-topo-head
368 $ hg tag new-topo-head
369 hook: tag changes detected
336
370
337 tagging on null rev
371 tagging on null rev
338
372
339 $ hg up null
373 $ hg up null
340 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
374 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
341 $ hg tag nullrev
375 $ hg tag nullrev
342 abort: working directory is not at a branch head (use -f to force)
376 abort: working directory is not at a branch head (use -f to force)
343 [255]
377 [255]
344
378
345 $ hg init empty
379 $ hg init empty
346 $ hg tag -R empty nullrev
380 $ hg tag -R empty nullrev
347 abort: cannot tag null revision
381 abort: cannot tag null revision
348 [255]
382 [255]
349
383
350 $ hg tag -R empty -r 00000000000 -f nulltag
384 $ hg tag -R empty -r 00000000000 -f nulltag
351 abort: cannot tag null revision
385 abort: cannot tag null revision
352 [255]
386 [255]
353
387
354 $ cd ..
388 $ cd ..
355
389
356 tagging on an uncommitted merge (issue2542)
390 tagging on an uncommitted merge (issue2542)
357
391
358 $ hg init repo-tag-uncommitted-merge
392 $ hg init repo-tag-uncommitted-merge
359 $ cd repo-tag-uncommitted-merge
393 $ cd repo-tag-uncommitted-merge
360 $ echo c1 > f1
394 $ echo c1 > f1
361 $ hg ci -Am0
395 $ hg ci -Am0
362 adding f1
396 adding f1
363 $ echo c2 > f2
397 $ echo c2 > f2
364 $ hg ci -Am1
398 $ hg ci -Am1
365 adding f2
399 adding f2
366 $ hg co -q 0
400 $ hg co -q 0
367 $ hg branch b1
401 $ hg branch b1
368 marked working directory as branch b1
402 marked working directory as branch b1
369 (branches are permanent and global, did you want a bookmark?)
403 (branches are permanent and global, did you want a bookmark?)
370 $ hg ci -m2
404 $ hg ci -m2
371 $ hg up default
405 $ hg up default
372 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
406 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
373 $ hg merge b1
407 $ hg merge b1
374 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
408 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
375 (branch merge, don't forget to commit)
409 (branch merge, don't forget to commit)
376
410
377 $ hg tag t1
411 $ hg tag t1
378 abort: uncommitted merge
412 abort: uncommitted merge
379 [255]
413 [255]
380 $ hg status
414 $ hg status
381 $ hg tag --rev 1 t2
415 $ hg tag --rev 1 t2
382 abort: uncommitted merge
416 abort: uncommitted merge
383 [255]
417 [255]
384 $ hg tag --rev 1 --local t3
418 $ hg tag --rev 1 --local t3
385 $ hg tags -v
419 $ hg tags -v
386 tip 2:2a156e8887cc
420 tip 2:2a156e8887cc
387 t3 1:c3adabd1a5f4 local
421 t3 1:c3adabd1a5f4 local
388
422
389 $ cd ..
423 $ cd ..
390
424
391 commit hook on tag used to be run without write lock - issue3344
425 commit hook on tag used to be run without write lock - issue3344
392
426
393 $ hg init repo-tag
427 $ hg init repo-tag
394 $ touch repo-tag/test
428 $ touch repo-tag/test
395 $ hg -R repo-tag commit -A -m "test"
429 $ hg -R repo-tag commit -A -m "test"
396 adding test
430 adding test
397 $ hg init repo-tag-target
431 $ hg init repo-tag-target
398 $ cat > "$TESTTMP/issue3344.sh" <<EOF
432 $ cat > "$TESTTMP/issue3344.sh" <<EOF
399 > hg push "$TESTTMP/repo-tag-target"
433 > hg push "$TESTTMP/repo-tag-target"
400 > EOF
434 > EOF
401 $ hg -R repo-tag --config hooks.commit="sh ../issue3344.sh" tag tag
435 $ hg -R repo-tag --config hooks.commit="sh ../issue3344.sh" tag tag
436 hook: tag changes detected
402 pushing to $TESTTMP/repo-tag-target (glob)
437 pushing to $TESTTMP/repo-tag-target (glob)
403 searching for changes
438 searching for changes
404 adding changesets
439 adding changesets
405 adding manifests
440 adding manifests
406 adding file changes
441 adding file changes
407 added 2 changesets with 2 changes to 2 files
442 added 2 changesets with 2 changes to 2 files
443 hook: tag changes detected
408
444
409 automatically merge resolvable tag conflicts (i.e. tags that differ in rank)
445 automatically merge resolvable tag conflicts (i.e. tags that differ in rank)
410 create two clones with some different tags as well as some common tags
446 create two clones with some different tags as well as some common tags
411 check that we can merge tags that differ in rank
447 check that we can merge tags that differ in rank
412
448
413 $ hg init repo-automatic-tag-merge
449 $ hg init repo-automatic-tag-merge
414 $ cd repo-automatic-tag-merge
450 $ cd repo-automatic-tag-merge
415 $ echo c0 > f0
451 $ echo c0 > f0
416 $ hg ci -A -m0
452 $ hg ci -A -m0
417 adding f0
453 adding f0
418 $ hg tag tbase
454 $ hg tag tbase
455 hook: tag changes detected
419 $ hg up -qr '.^'
456 $ hg up -qr '.^'
420 $ hg log -r 'wdir()' -T "{latesttagdistance}\n"
457 $ hg log -r 'wdir()' -T "{latesttagdistance}\n"
421 1
458 1
422 $ hg up -q
459 $ hg up -q
423 $ hg log -r 'wdir()' -T "{latesttagdistance}\n"
460 $ hg log -r 'wdir()' -T "{latesttagdistance}\n"
424 2
461 2
425 $ cd ..
462 $ cd ..
426 $ hg clone repo-automatic-tag-merge repo-automatic-tag-merge-clone
463 $ hg clone repo-automatic-tag-merge repo-automatic-tag-merge-clone
427 updating to branch default
464 updating to branch default
428 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
465 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
429 $ cd repo-automatic-tag-merge-clone
466 $ cd repo-automatic-tag-merge-clone
430 $ echo c1 > f1
467 $ echo c1 > f1
431 $ hg ci -A -m1
468 $ hg ci -A -m1
432 adding f1
469 adding f1
433 $ hg tag t1 t2 t3
470 $ hg tag t1 t2 t3
471 hook: tag changes detected
434 $ hg tag --remove t2
472 $ hg tag --remove t2
473 hook: tag changes detected
435 $ hg tag t5
474 $ hg tag t5
475 hook: tag changes detected
436 $ echo c2 > f2
476 $ echo c2 > f2
437 $ hg ci -A -m2
477 $ hg ci -A -m2
438 adding f2
478 adding f2
439 $ hg tag -f t3
479 $ hg tag -f t3
480 hook: tag changes detected
440
481
441 $ cd ../repo-automatic-tag-merge
482 $ cd ../repo-automatic-tag-merge
442 $ echo c3 > f3
483 $ echo c3 > f3
443 $ hg ci -A -m3
484 $ hg ci -A -m3
444 adding f3
485 adding f3
445 $ hg tag -f t4 t5 t6
486 $ hg tag -f t4 t5 t6
487 hook: tag changes detected
446
488
447 $ hg up -q '.^'
489 $ hg up -q '.^'
448 $ hg log -r 'wdir()' -T "{changessincelatesttag} changes since {latesttag}\n"
490 $ hg log -r 'wdir()' -T "{changessincelatesttag} changes since {latesttag}\n"
449 1 changes since t4:t5:t6
491 1 changes since t4:t5:t6
450 $ hg log -r '.' -T "{changessincelatesttag} changes since {latesttag}\n"
492 $ hg log -r '.' -T "{changessincelatesttag} changes since {latesttag}\n"
451 0 changes since t4:t5:t6
493 0 changes since t4:t5:t6
452 $ echo c5 > f3
494 $ echo c5 > f3
453 $ hg log -r 'wdir()' -T "{changessincelatesttag} changes since {latesttag}\n"
495 $ hg log -r 'wdir()' -T "{changessincelatesttag} changes since {latesttag}\n"
454 1 changes since t4:t5:t6
496 1 changes since t4:t5:t6
455 $ hg up -qC
497 $ hg up -qC
456
498
457 $ hg tag --remove t5
499 $ hg tag --remove t5
500 hook: tag changes detected
458 $ echo c4 > f4
501 $ echo c4 > f4
459 $ hg log -r '.' -T "{changessincelatesttag} changes since {latesttag}\n"
502 $ hg log -r '.' -T "{changessincelatesttag} changes since {latesttag}\n"
460 2 changes since t4:t6
503 2 changes since t4:t6
461 $ hg log -r '.' -T "{latesttag % '{latesttag}\n'}"
504 $ hg log -r '.' -T "{latesttag % '{latesttag}\n'}"
462 t4
505 t4
463 t6
506 t6
464 $ hg log -r '.' -T "{latesttag('t4') % 'T: {tag}, C: {changes}, D: {distance}\n'}"
507 $ hg log -r '.' -T "{latesttag('t4') % 'T: {tag}, C: {changes}, D: {distance}\n'}"
465 T: t4, C: 2, D: 2
508 T: t4, C: 2, D: 2
466 $ hg log -r '.' -T "{latesttag('re:\d') % 'T: {tag}, C: {changes}, D: {distance}\n'}"
509 $ hg log -r '.' -T "{latesttag('re:\d') % 'T: {tag}, C: {changes}, D: {distance}\n'}"
467 T: t4, C: 2, D: 2
510 T: t4, C: 2, D: 2
468 T: t6, C: 2, D: 2
511 T: t6, C: 2, D: 2
469 $ hg log -r . -T '{join(latesttag(), "*")}\n'
512 $ hg log -r . -T '{join(latesttag(), "*")}\n'
470 t4*t6
513 t4*t6
471 $ hg ci -A -m4
514 $ hg ci -A -m4
472 adding f4
515 adding f4
473 $ hg log -r 'wdir()' -T "{changessincelatesttag} changes since {latesttag}\n"
516 $ hg log -r 'wdir()' -T "{changessincelatesttag} changes since {latesttag}\n"
474 4 changes since t4:t6
517 4 changes since t4:t6
475 $ hg tag t2
518 $ hg tag t2
519 hook: tag changes detected
476 $ hg tag -f t6
520 $ hg tag -f t6
521 hook: tag changes detected
477
522
478 $ cd ../repo-automatic-tag-merge-clone
523 $ cd ../repo-automatic-tag-merge-clone
479 $ hg pull
524 $ hg pull
480 pulling from $TESTTMP/repo-automatic-tag-merge (glob)
525 pulling from $TESTTMP/repo-automatic-tag-merge (glob)
481 searching for changes
526 searching for changes
482 adding changesets
527 adding changesets
483 adding manifests
528 adding manifests
484 adding file changes
529 adding file changes
485 added 6 changesets with 6 changes to 3 files (+1 heads)
530 added 6 changesets with 6 changes to 3 files (+1 heads)
531 hook: tag changes detected
486 (run 'hg heads' to see heads, 'hg merge' to merge)
532 (run 'hg heads' to see heads, 'hg merge' to merge)
487 $ hg merge --tool internal:tagmerge
533 $ hg merge --tool internal:tagmerge
488 merging .hgtags
534 merging .hgtags
489 2 files updated, 1 files merged, 0 files removed, 0 files unresolved
535 2 files updated, 1 files merged, 0 files removed, 0 files unresolved
490 (branch merge, don't forget to commit)
536 (branch merge, don't forget to commit)
491 $ hg status
537 $ hg status
492 M .hgtags
538 M .hgtags
493 M f3
539 M f3
494 M f4
540 M f4
495 $ hg resolve -l
541 $ hg resolve -l
496 R .hgtags
542 R .hgtags
497 $ cat .hgtags
543 $ cat .hgtags
498 9aa4e1292a27a248f8d07339bed9931d54907be7 t4
544 9aa4e1292a27a248f8d07339bed9931d54907be7 t4
499 9aa4e1292a27a248f8d07339bed9931d54907be7 t6
545 9aa4e1292a27a248f8d07339bed9931d54907be7 t6
500 9aa4e1292a27a248f8d07339bed9931d54907be7 t6
546 9aa4e1292a27a248f8d07339bed9931d54907be7 t6
501 09af2ce14077a94effef208b49a718f4836d4338 t6
547 09af2ce14077a94effef208b49a718f4836d4338 t6
502 6cee5c8f3e5b4ae1a3996d2f6489c3e08eb5aea7 tbase
548 6cee5c8f3e5b4ae1a3996d2f6489c3e08eb5aea7 tbase
503 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t1
549 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t1
504 929bca7b18d067cbf3844c3896319a940059d748 t2
550 929bca7b18d067cbf3844c3896319a940059d748 t2
505 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
551 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
506 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
552 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
507 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
553 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
508 0000000000000000000000000000000000000000 t2
554 0000000000000000000000000000000000000000 t2
509 875517b4806a848f942811a315a5bce30804ae85 t5
555 875517b4806a848f942811a315a5bce30804ae85 t5
510 9aa4e1292a27a248f8d07339bed9931d54907be7 t5
556 9aa4e1292a27a248f8d07339bed9931d54907be7 t5
511 9aa4e1292a27a248f8d07339bed9931d54907be7 t5
557 9aa4e1292a27a248f8d07339bed9931d54907be7 t5
512 0000000000000000000000000000000000000000 t5
558 0000000000000000000000000000000000000000 t5
513 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
559 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
514 79505d5360b07e3e79d1052e347e73c02b8afa5b t3
560 79505d5360b07e3e79d1052e347e73c02b8afa5b t3
515
561
516 check that the merge tried to minimize the diff with the first merge parent
562 check that the merge tried to minimize the diff with the first merge parent
517
563
518 $ hg diff --git -r 'p1()' .hgtags
564 $ hg diff --git -r 'p1()' .hgtags
519 diff --git a/.hgtags b/.hgtags
565 diff --git a/.hgtags b/.hgtags
520 --- a/.hgtags
566 --- a/.hgtags
521 +++ b/.hgtags
567 +++ b/.hgtags
522 @@ -1,9 +1,17 @@
568 @@ -1,9 +1,17 @@
523 +9aa4e1292a27a248f8d07339bed9931d54907be7 t4
569 +9aa4e1292a27a248f8d07339bed9931d54907be7 t4
524 +9aa4e1292a27a248f8d07339bed9931d54907be7 t6
570 +9aa4e1292a27a248f8d07339bed9931d54907be7 t6
525 +9aa4e1292a27a248f8d07339bed9931d54907be7 t6
571 +9aa4e1292a27a248f8d07339bed9931d54907be7 t6
526 +09af2ce14077a94effef208b49a718f4836d4338 t6
572 +09af2ce14077a94effef208b49a718f4836d4338 t6
527 6cee5c8f3e5b4ae1a3996d2f6489c3e08eb5aea7 tbase
573 6cee5c8f3e5b4ae1a3996d2f6489c3e08eb5aea7 tbase
528 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t1
574 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t1
529 +929bca7b18d067cbf3844c3896319a940059d748 t2
575 +929bca7b18d067cbf3844c3896319a940059d748 t2
530 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
576 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
531 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
577 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
532 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
578 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
533 0000000000000000000000000000000000000000 t2
579 0000000000000000000000000000000000000000 t2
534 875517b4806a848f942811a315a5bce30804ae85 t5
580 875517b4806a848f942811a315a5bce30804ae85 t5
535 +9aa4e1292a27a248f8d07339bed9931d54907be7 t5
581 +9aa4e1292a27a248f8d07339bed9931d54907be7 t5
536 +9aa4e1292a27a248f8d07339bed9931d54907be7 t5
582 +9aa4e1292a27a248f8d07339bed9931d54907be7 t5
537 +0000000000000000000000000000000000000000 t5
583 +0000000000000000000000000000000000000000 t5
538 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
584 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
539 79505d5360b07e3e79d1052e347e73c02b8afa5b t3
585 79505d5360b07e3e79d1052e347e73c02b8afa5b t3
540
586
541 detect merge tag conflicts
587 detect merge tag conflicts
542
588
543 $ hg update -C -r tip
589 $ hg update -C -r tip
544 3 files updated, 0 files merged, 2 files removed, 0 files unresolved
590 3 files updated, 0 files merged, 2 files removed, 0 files unresolved
545 $ hg tag t7
591 $ hg tag t7
592 hook: tag changes detected
546 $ hg update -C -r 'first(sort(head()))'
593 $ hg update -C -r 'first(sort(head()))'
547 3 files updated, 0 files merged, 2 files removed, 0 files unresolved
594 3 files updated, 0 files merged, 2 files removed, 0 files unresolved
548 $ printf "%s %s\n" `hg log -r . --template "{node} t7"` >> .hgtags
595 $ printf "%s %s\n" `hg log -r . --template "{node} t7"` >> .hgtags
549 $ hg commit -m "manually add conflicting t7 tag"
596 $ hg commit -m "manually add conflicting t7 tag"
597 hook: tag changes detected
550 $ hg merge --tool internal:tagmerge
598 $ hg merge --tool internal:tagmerge
551 merging .hgtags
599 merging .hgtags
552 automatic .hgtags merge failed
600 automatic .hgtags merge failed
553 the following 1 tags are in conflict: t7
601 the following 1 tags are in conflict: t7
554 automatic tag merging of .hgtags failed! (use 'hg resolve --tool :merge' or another merge tool of your choice)
602 automatic tag merging of .hgtags failed! (use 'hg resolve --tool :merge' or another merge tool of your choice)
555 2 files updated, 0 files merged, 0 files removed, 1 files unresolved
603 2 files updated, 0 files merged, 0 files removed, 1 files unresolved
556 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
604 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
557 [1]
605 [1]
558 $ hg resolve -l
606 $ hg resolve -l
559 U .hgtags
607 U .hgtags
560 $ cat .hgtags
608 $ cat .hgtags
561 6cee5c8f3e5b4ae1a3996d2f6489c3e08eb5aea7 tbase
609 6cee5c8f3e5b4ae1a3996d2f6489c3e08eb5aea7 tbase
562 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t1
610 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t1
563 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
611 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
564 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
612 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
565 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
613 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
566 0000000000000000000000000000000000000000 t2
614 0000000000000000000000000000000000000000 t2
567 875517b4806a848f942811a315a5bce30804ae85 t5
615 875517b4806a848f942811a315a5bce30804ae85 t5
568 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
616 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
569 79505d5360b07e3e79d1052e347e73c02b8afa5b t3
617 79505d5360b07e3e79d1052e347e73c02b8afa5b t3
570 ea918d56be86a4afc5a95312e8b6750e1428d9d2 t7
618 ea918d56be86a4afc5a95312e8b6750e1428d9d2 t7
571
619
572 $ cd ..
620 $ cd ..
573
621
574 handle the loss of tags
622 handle the loss of tags
575
623
576 $ hg clone repo-automatic-tag-merge-clone repo-merge-lost-tags
624 $ hg clone repo-automatic-tag-merge-clone repo-merge-lost-tags
577 updating to branch default
625 updating to branch default
578 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
626 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
579 $ cd repo-merge-lost-tags
627 $ cd repo-merge-lost-tags
580 $ echo c5 > f5
628 $ echo c5 > f5
581 $ hg ci -A -m5
629 $ hg ci -A -m5
582 adding f5
630 adding f5
583 $ hg tag -f t7
631 $ hg tag -f t7
632 hook: tag changes detected
584 $ hg update -r 'p1(t7)'
633 $ hg update -r 'p1(t7)'
585 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
634 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
586 $ printf '' > .hgtags
635 $ printf '' > .hgtags
587 $ hg commit -m 'delete all tags'
636 $ hg commit -m 'delete all tags'
588 created new head
637 created new head
638 hook: tag changes detected
589 $ hg log -r 'max(t7::)'
639 $ hg log -r 'max(t7::)'
590 changeset: 17:ffe462b50880
640 changeset: 17:ffe462b50880
591 user: test
641 user: test
592 date: Thu Jan 01 00:00:00 1970 +0000
642 date: Thu Jan 01 00:00:00 1970 +0000
593 summary: Added tag t7 for changeset fd3a9e394ce3
643 summary: Added tag t7 for changeset fd3a9e394ce3
594
644
595 $ hg update -r 'max(t7::)'
645 $ hg update -r 'max(t7::)'
596 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
646 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
597 $ hg merge -r tip --tool internal:tagmerge
647 $ hg merge -r tip --tool internal:tagmerge
598 merging .hgtags
648 merging .hgtags
599 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
649 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
600 (branch merge, don't forget to commit)
650 (branch merge, don't forget to commit)
601 $ hg resolve -l
651 $ hg resolve -l
602 R .hgtags
652 R .hgtags
603 $ cat .hgtags
653 $ cat .hgtags
604 6cee5c8f3e5b4ae1a3996d2f6489c3e08eb5aea7 tbase
654 6cee5c8f3e5b4ae1a3996d2f6489c3e08eb5aea7 tbase
605 0000000000000000000000000000000000000000 tbase
655 0000000000000000000000000000000000000000 tbase
606 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t1
656 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t1
607 0000000000000000000000000000000000000000 t1
657 0000000000000000000000000000000000000000 t1
608 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
658 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
609 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
659 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
610 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
660 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
611 0000000000000000000000000000000000000000 t2
661 0000000000000000000000000000000000000000 t2
612 875517b4806a848f942811a315a5bce30804ae85 t5
662 875517b4806a848f942811a315a5bce30804ae85 t5
613 0000000000000000000000000000000000000000 t5
663 0000000000000000000000000000000000000000 t5
614 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
664 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
615 79505d5360b07e3e79d1052e347e73c02b8afa5b t3
665 79505d5360b07e3e79d1052e347e73c02b8afa5b t3
616 0000000000000000000000000000000000000000 t3
666 0000000000000000000000000000000000000000 t3
617 ea918d56be86a4afc5a95312e8b6750e1428d9d2 t7
667 ea918d56be86a4afc5a95312e8b6750e1428d9d2 t7
618 0000000000000000000000000000000000000000 t7
668 0000000000000000000000000000000000000000 t7
619 ea918d56be86a4afc5a95312e8b6750e1428d9d2 t7
669 ea918d56be86a4afc5a95312e8b6750e1428d9d2 t7
620 fd3a9e394ce3afb354a496323bf68ac1755a30de t7
670 fd3a9e394ce3afb354a496323bf68ac1755a30de t7
621
671
622 also check that we minimize the diff with the 1st merge parent
672 also check that we minimize the diff with the 1st merge parent
623
673
624 $ hg diff --git -r 'p1()' .hgtags
674 $ hg diff --git -r 'p1()' .hgtags
625 diff --git a/.hgtags b/.hgtags
675 diff --git a/.hgtags b/.hgtags
626 --- a/.hgtags
676 --- a/.hgtags
627 +++ b/.hgtags
677 +++ b/.hgtags
628 @@ -1,12 +1,17 @@
678 @@ -1,12 +1,17 @@
629 6cee5c8f3e5b4ae1a3996d2f6489c3e08eb5aea7 tbase
679 6cee5c8f3e5b4ae1a3996d2f6489c3e08eb5aea7 tbase
630 +0000000000000000000000000000000000000000 tbase
680 +0000000000000000000000000000000000000000 tbase
631 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t1
681 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t1
632 +0000000000000000000000000000000000000000 t1
682 +0000000000000000000000000000000000000000 t1
633 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
683 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
634 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
684 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
635 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
685 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t2
636 0000000000000000000000000000000000000000 t2
686 0000000000000000000000000000000000000000 t2
637 875517b4806a848f942811a315a5bce30804ae85 t5
687 875517b4806a848f942811a315a5bce30804ae85 t5
638 +0000000000000000000000000000000000000000 t5
688 +0000000000000000000000000000000000000000 t5
639 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
689 4f3e9b90005b68b4d8a3f4355cedc302a8364f5c t3
640 79505d5360b07e3e79d1052e347e73c02b8afa5b t3
690 79505d5360b07e3e79d1052e347e73c02b8afa5b t3
641 +0000000000000000000000000000000000000000 t3
691 +0000000000000000000000000000000000000000 t3
642 ea918d56be86a4afc5a95312e8b6750e1428d9d2 t7
692 ea918d56be86a4afc5a95312e8b6750e1428d9d2 t7
643 +0000000000000000000000000000000000000000 t7
693 +0000000000000000000000000000000000000000 t7
644 ea918d56be86a4afc5a95312e8b6750e1428d9d2 t7
694 ea918d56be86a4afc5a95312e8b6750e1428d9d2 t7
645 fd3a9e394ce3afb354a496323bf68ac1755a30de t7
695 fd3a9e394ce3afb354a496323bf68ac1755a30de t7
646
696
General Comments 0
You need to be logged in to leave comments. Login now