commit: make commit acquire store lock before processing for consistency...
FUJIWARA Katsunori
r27291:a18328aa default
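
The change summarized above reorders lock acquisition in
localrepository.commit(). commit() itself sits past the end of this
930-line excerpt, but the intended pattern follows the standard Mercurial
idiom (a hedged sketch using names from this file, not the literal patch):

    def commit(self, *args, **kwargs):
        wlock = self.wlock()  # working-directory lock first
        lock = self.lock()    # then the store lock, before any commit
                              # processing, for on-disk consistency
        try:
            pass              # ... build and write the changeset ...
        finally:
            release(lock, wlock)  # 'release' comes from the lock module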
@@ -1,1920 +1,1921 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, wdirrev, short
from i18n import _
import urllib
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset, cmdutil
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect, random
import branchmap, pathutil
import namespaces
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)
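
# Illustrative note (not part of the original file): a property decorated with
# storecache is recomputed only when the named store file changes, e.g.
#
#     @storecache('00changelog.i')
#     def changelog(self):
#         return changelog.changelog(self.svfs)
#
# which mirrors the real decorator usage on localrepository further down.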

class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
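
# Usage sketch (illustrative assumption): a decorated method always runs
# against the unfiltered repository, so hidden changesets remain visible
# to it:
#
#     @unfilteredmethod
#     def _tag(self, names, node, ...):
#         ...
#
# localrepository._tag and revbranchcache below are real examples.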

moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg
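
    # Sketch (illustrative assumption): callers opt into a bundle2 reply by
    # advertising it in bundlecaps, roughly
    #
    #     peer.getbundle('pull', bundlecaps=set(['HG20']))
    #
    # in which case the raw stream above is wrapped into an unbundler object.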

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return ['revlogv1']

    def __init__(self, baseui, path=None, create=False):
        self.requirements = set()
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                self.requirements.update(self._baserequirements(create))
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    self.requirements.add("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        self.requirements.add("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            self.requirements.add('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if scmutil.gdinitconfig(self.ui):
                    self.requirements.add("generaldelta")
                if self.ui.configbool('experimental', 'treemanifest', False):
                    self.requirements.add("treemanifest")
                if self.ui.configbool('experimental', 'manifestv2', False):
                    self.requirements.add("manifestv2")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urllib.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
            'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _activebookmark(self):
        return bookmarks.readactive(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.svfs)

    def dirlog(self, dir):
        return self.manifest.dirlog(dir)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)
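
    # Illustrative sketch (assumption): indexing accepts revisions, nodes,
    # tags, None and slices, e.g.
    #
    #     repo[0]       # changectx for revision 0
    #     repo[None]    # workingctx for the working directory
    #     repo[0:3]     # changectxs, with filtered revisions skipped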

    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revset.formatspec``.

        Return a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.
        '''
        for r in self.revs(expr, *args):
            yield self[r]
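
    # Usage sketch (assumption, mirroring the docstrings above):
    #
    #     repo.revs('branch(%s) and draft()', 'default')  # smartset of ints
    #     for ctx in repo.set('draft()'):                 # changectx objects
    #         ui.write(ctx.hex() + '\n')
    #
    # where the %-escaping is handled by revset.formatspec.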

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise error.Abort(_('working copy of .hgtags is changed'),
                                  hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
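
    # Illustrative call (assumption): tagging the current tip with a global
    # tag would look roughly like
    #
    #     repo.tag(['v1.0'], repo['tip'].node(), 'Added tag v1.0',
    #              local=False, user=None, date=None)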
658
658
659 @filteredpropertycache
659 @filteredpropertycache
660 def _tagscache(self):
660 def _tagscache(self):
661 '''Returns a tagscache object that contains various tags related
661 '''Returns a tagscache object that contains various tags related
662 caches.'''
662 caches.'''
663
663
664 # This simplifies its cache management by having one decorated
664 # This simplifies its cache management by having one decorated
665 # function (this one) and the rest simply fetch things from it.
665 # function (this one) and the rest simply fetch things from it.
666 class tagscache(object):
666 class tagscache(object):
667 def __init__(self):
667 def __init__(self):
668 # These two define the set of tags for this repository. tags
668 # These two define the set of tags for this repository. tags
669 # maps tag name to node; tagtypes maps tag name to 'global' or
669 # maps tag name to node; tagtypes maps tag name to 'global' or
670 # 'local'. (Global tags are defined by .hgtags across all
670 # 'local'. (Global tags are defined by .hgtags across all
671 # heads, and local tags are defined in .hg/localtags.)
671 # heads, and local tags are defined in .hg/localtags.)
672 # They constitute the in-memory cache of tags.
672 # They constitute the in-memory cache of tags.
673 self.tags = self.tagtypes = None
673 self.tags = self.tagtypes = None
674
674
675 self.nodetagscache = self.tagslist = None
675 self.nodetagscache = self.tagslist = None
676
676
677 cache = tagscache()
677 cache = tagscache()
678 cache.tags, cache.tagtypes = self._findtags()
678 cache.tags, cache.tagtypes = self._findtags()
679
679
680 return cache
680 return cache
681
681
682 def tags(self):
682 def tags(self):
683 '''return a mapping of tag to node'''
683 '''return a mapping of tag to node'''
684 t = {}
684 t = {}
685 if self.changelog.filteredrevs:
685 if self.changelog.filteredrevs:
686 tags, tt = self._findtags()
686 tags, tt = self._findtags()
687 else:
687 else:
688 tags = self._tagscache.tags
688 tags = self._tagscache.tags
689 for k, v in tags.iteritems():
689 for k, v in tags.iteritems():
690 try:
690 try:
691 # ignore tags to unknown nodes
691 # ignore tags to unknown nodes
692 self.changelog.rev(v)
692 self.changelog.rev(v)
693 t[k] = v
693 t[k] = v
694 except (error.LookupError, ValueError):
694 except (error.LookupError, ValueError):
695 pass
695 pass
696 return t
696 return t
697
697
698 def _findtags(self):
698 def _findtags(self):
699 '''Do the hard work of finding tags. Return a pair of dicts
699 '''Do the hard work of finding tags. Return a pair of dicts
700 (tags, tagtypes) where tags maps tag name to node, and tagtypes
700 (tags, tagtypes) where tags maps tag name to node, and tagtypes
701 maps tag name to a string like \'global\' or \'local\'.
701 maps tag name to a string like \'global\' or \'local\'.
702 Subclasses or extensions are free to add their own tags, but
702 Subclasses or extensions are free to add their own tags, but
703 should be aware that the returned dicts will be retained for the
703 should be aware that the returned dicts will be retained for the
704 duration of the localrepo object.'''
704 duration of the localrepo object.'''
705
705
706 # XXX what tagtype should subclasses/extensions use? Currently
706 # XXX what tagtype should subclasses/extensions use? Currently
707 # mq and bookmarks add tags, but do not set the tagtype at all.
707 # mq and bookmarks add tags, but do not set the tagtype at all.
708 # Should each extension invent its own tag type? Should there
708 # Should each extension invent its own tag type? Should there
709 # be one tagtype for all such "virtual" tags? Or is the status
709 # be one tagtype for all such "virtual" tags? Or is the status
710 # quo fine?
710 # quo fine?
711
711
712 alltags = {} # map tag name to (node, hist)
712 alltags = {} # map tag name to (node, hist)
713 tagtypes = {}
713 tagtypes = {}
714
714
715 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
715 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
716 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
716 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
717
717
718 # Build the return dicts. Have to re-encode tag names because
718 # Build the return dicts. Have to re-encode tag names because
719 # the tags module always uses UTF-8 (in order not to lose info
719 # the tags module always uses UTF-8 (in order not to lose info
720 # writing to the cache), but the rest of Mercurial wants them in
720 # writing to the cache), but the rest of Mercurial wants them in
721 # local encoding.
721 # local encoding.
722 tags = {}
722 tags = {}
723 for (name, (node, hist)) in alltags.iteritems():
723 for (name, (node, hist)) in alltags.iteritems():
724 if node != nullid:
724 if node != nullid:
725 tags[encoding.tolocal(name)] = node
725 tags[encoding.tolocal(name)] = node
726 tags['tip'] = self.changelog.tip()
726 tags['tip'] = self.changelog.tip()
727 tagtypes = dict([(encoding.tolocal(name), value)
727 tagtypes = dict([(encoding.tolocal(name), value)
728 for (name, value) in tagtypes.iteritems()])
728 for (name, value) in tagtypes.iteritems()])
729 return (tags, tagtypes)
729 return (tags, tagtypes)
730
730
731 def tagtype(self, tagname):
731 def tagtype(self, tagname):
732 '''
732 '''
733 return the type of the given tag. result can be:
733 return the type of the given tag. result can be:
734
734
735 'local' : a local tag
735 'local' : a local tag
736 'global' : a global tag
736 'global' : a global tag
737 None : tag does not exist
737 None : tag does not exist
738 '''
738 '''
739
739
740 return self._tagscache.tagtypes.get(tagname)
740 return self._tagscache.tagtypes.get(tagname)
741
741
742 def tagslist(self):
742 def tagslist(self):
743 '''return a list of tags ordered by revision'''
743 '''return a list of tags ordered by revision'''
744 if not self._tagscache.tagslist:
744 if not self._tagscache.tagslist:
745 l = []
745 l = []
746 for t, n in self.tags().iteritems():
746 for t, n in self.tags().iteritems():
747 l.append((self.changelog.rev(n), t, n))
747 l.append((self.changelog.rev(n), t, n))
748 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
748 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
749
749
750 return self._tagscache.tagslist
750 return self._tagscache.tagslist
751
751
752 def nodetags(self, node):
752 def nodetags(self, node):
753 '''return the tags associated with a node'''
753 '''return the tags associated with a node'''
754 if not self._tagscache.nodetagscache:
754 if not self._tagscache.nodetagscache:
755 nodetagscache = {}
755 nodetagscache = {}
756 for t, n in self._tagscache.tags.iteritems():
756 for t, n in self._tagscache.tags.iteritems():
757 nodetagscache.setdefault(n, []).append(t)
757 nodetagscache.setdefault(n, []).append(t)
758 for tags in nodetagscache.itervalues():
758 for tags in nodetagscache.itervalues():
759 tags.sort()
759 tags.sort()
760 self._tagscache.nodetagscache = nodetagscache
760 self._tagscache.nodetagscache = nodetagscache
761 return self._tagscache.nodetagscache.get(node, [])
761 return self._tagscache.nodetagscache.get(node, [])
762
762
763 def nodebookmarks(self, node):
763 def nodebookmarks(self, node):
764 """return the list of bookmarks pointing to the specified node"""
764 """return the list of bookmarks pointing to the specified node"""
765 marks = []
765 marks = []
766 for bookmark, n in self._bookmarks.iteritems():
766 for bookmark, n in self._bookmarks.iteritems():
767 if n == node:
767 if n == node:
768 marks.append(bookmark)
768 marks.append(bookmark)
769 return sorted(marks)
769 return sorted(marks)
770
770
771 def branchmap(self):
771 def branchmap(self):
772 '''returns a dictionary {branch: [branchheads]} with branchheads
772 '''returns a dictionary {branch: [branchheads]} with branchheads
773 ordered by increasing revision number'''
773 ordered by increasing revision number'''
774 branchmap.updatecache(self)
774 branchmap.updatecache(self)
775 return self._branchcaches[self.filtername]
775 return self._branchcaches[self.filtername]
776
776
777 @unfilteredmethod
777 @unfilteredmethod
778 def revbranchcache(self):
778 def revbranchcache(self):
779 if not self._revbranchcache:
779 if not self._revbranchcache:
780 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
780 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
781 return self._revbranchcache
781 return self._revbranchcache
782
782
783 def branchtip(self, branch, ignoremissing=False):
783 def branchtip(self, branch, ignoremissing=False):
784 '''return the tip node for a given branch
784 '''return the tip node for a given branch
785
785
786 If ignoremissing is True, then this method will not raise an error.
786 If ignoremissing is True, then this method will not raise an error.
787 This is helpful for callers that only expect None for a missing branch
787 This is helpful for callers that only expect None for a missing branch
788 (e.g. namespace).
788 (e.g. namespace).
789
789
790 '''
790 '''
791 try:
791 try:
792 return self.branchmap().branchtip(branch)
792 return self.branchmap().branchtip(branch)
793 except KeyError:
793 except KeyError:
794 if not ignoremissing:
794 if not ignoremissing:
795 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
795 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
796 else:
796 else:
797 pass
797 pass
798
798
799 def lookup(self, key):
799 def lookup(self, key):
800 return self[key].node()
800 return self[key].node()
801
801
802 def lookupbranch(self, key, remote=None):
802 def lookupbranch(self, key, remote=None):
803 repo = remote or self
803 repo = remote or self
804 if key in repo.branchmap():
804 if key in repo.branchmap():
805 return key
805 return key
806
806
807 repo = (remote and remote.local()) and remote or self
807 repo = (remote and remote.local()) and remote or self
808 return repo[key].branch()
808 return repo[key].branch()
809
809
810 def known(self, nodes):
810 def known(self, nodes):
811 nm = self.changelog.nodemap
811 nm = self.changelog.nodemap
812 pc = self._phasecache
812 pc = self._phasecache
813 result = []
813 result = []
814 for n in nodes:
814 for n in nodes:
815 r = nm.get(n)
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def join(self, f, *insidef):
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        msg = 'repo.parents() is deprecated, use repo[%r].parents()' % changeid
        self.ui.deprecwarn(msg, '3.7')
        return self[changeid].parents()

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records; the dirstate cannot do it, as it
            # requires access to parent manifests. Preserve them
            # only for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)
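
The filter machinery above is extension-facing: adddatafilter() registers a named filter, and an [encode] or [decode] hgrc rule whose command starts with that name routes file contents through it on wread()/wwrite(). A minimal sketch of hypothetical extension code (the filter name and behavior are illustrative, not part of localrepo.py):

# hypothetical extension: uppercase matching files on checkin
def upper(s, cmd, **kwargs):
    # 's' is the file data; 'cmd' carries the rest of the config value
    return s.upper()

def reposetup(ui, repo):
    if repo.local():
        repo.adddatafilter('upper:', upper)

# with, in hgrc:
#   [encode]
#   **.txt = upper: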

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None
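
Callers that must not run in the middle of a transaction can use this accessor as a guard, exactly as _wlockchecktransaction() does further down. An illustrative sketch (the helper name is hypothetical; `error` is this module's import):

def checknotransaction(repo):
    # refuse to proceed while a transaction is running
    if repo.currenttransaction() is not None:
        raise error.Abort('cannot run while a transaction is active')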

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                self.ui.develwarn('transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # make journal.dirstate contain in-memory changes at this point
        self.dirstate.write(None)

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + util.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # prevent in-memory changes from being written out at
                # the end of outer wlock scope or so
                repo.dirstate.invalidate()

                # discard all changes (including ones already written
                # out) in this transaction
                repo.vfs.rename('journal.dirstate', 'dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful; will schedule a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr
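
The contract implied above: a caller takes the store lock, opens a transaction (which nests if one is already running), calls tr.close() on success, and always calls tr.release() so an unclosed transaction is rolled back. A sketch of the canonical caller pattern (the surrounding function is hypothetical):

def storechange(repo):
    lock = repo.lock()
    try:
        tr = repo.transaction('my-operation')
        try:
            # ... write to revlogs, phases, bookmarks ...
            tr.close()    # commit the journal
        finally:
            tr.release()  # no-op if closed, rollback otherwise
    finally:
        lock.release()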

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
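
Each journal file has an undo counterpart: on success, aftertrans(renames) in transaction() moves 'journal.*' to 'undo.*', which is what rollback() below consumes. A throwaway sketch to inspect that layout (the helper is hypothetical):

def showundofiles(repo):
    # print where each undo artifact lives (store vfs vs. .hg vfs)
    for vfs, name in repo.undofiles():
        repo.ui.write('%s\n' % vfs.join(name))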

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = cmdutil.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)
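
A caller-side sketch: dryrun=True reports what a rollback would undo without touching the store, and the return value follows command conventions (0 on success, 1 when there is nothing to roll back). The helper is hypothetical:

def tryrollback(ui, repo):
    if repo.rollback(dryrun=True) == 0:
        ui.status('rollback is possible; rerun without dryrun to apply\n')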

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly reread the dirstate (i.e. restore it to a previously
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()
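
_afterlock() is how the commit path below defers its 'commit' hook until every lock is released; if nothing is locked, the callback fires immediately. An illustrative sketch (hypothetical helper):

def notifyunlocked(repo):
    def callback():
        repo.ui.status('all repository locks released\n')
    repo._afterlock(callback)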

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # an acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
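
Putting the two docstrings together, the ordering discipline looks like this in a caller (a sketch; release() tolerates None and releases in the order given, so pass the last-acquired lock first):

def lockedoperation(repo):
    wlock = lock = None
    try:
        wlock = repo.wlock()  # working-copy lock first
        lock = repo.lock()    # store lock second
        # ... modify working copy and store ...
    finally:
        release(lock, wlock)  # reverse acquisition order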

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
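
The copy branch above reduces to: if the copy source's revision can be found in a parent manifest, record it in the filelog metadata and demote the real parent to fparent2; otherwise drop the copy information with a warning. A condensed sketch (a hypothetical free function over the same names, using this module's hex/nullid imports):

def copymeta(cfname, crev, newfparent):
    # crev: node of the copy source in a parent manifest, or None
    if crev:
        meta = {'copy': cfname, 'copyrev': hex(crev)}
        return meta, nullid, newfparent  # fparent1 becomes nullid
    return {}, None, None                # copy information dropped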

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and (match.isexact() or match.prefix()):
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)

            if list(ms.unresolved()):
                raise error.Abort(_('unresolved merge conflicts '
                                    '(see "hg help resolve")'))
            if ms.mdstate() != 's' or list(ms.driverresolved()):
                raise error.Abort(_('driver-resolved merge conflicts'),
                                  hint=_('run "hg resolve --all" to resolve'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit);
            # the temporary commit may already have been stripped when the
            # hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
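
This method is the programmatic core of 'hg commit'. A minimal caller sketch (the helper is hypothetical): it returns the new changeset node, or None when the commit would be empty and empty commits are not allowed.

def quickcommit(repo, message, username):
    node = repo.commit(text=message, user=username)
    if node is None:
        repo.ui.status('nothing changed\n')
    return node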

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
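
commitctx() also accepts in-memory contexts, which is how tools create commits without touching the working copy. A hedged sketch using context.memctx (argument order as I understand it for this era of Mercurial; treat the exact signatures as an assumption and check your version):

def commitgenerated(repo, data):
    # build a one-file commit entirely in memory
    def filectxfn(repo, memctx, path):
        return context.memfilectx(repo, path, data)
    p1 = repo['.'].node()
    ctx = context.memctx(repo, (p1, nullid), 'generated commit',
                         ['generated.txt'], filectxfn,
                         user='example@example.com')
    return repo.commitctx(ctx)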
1716
1717
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and
        waiting to be flushed when the current lock is released. Because
        a call to destroyed is imminent, the repo will be invalidated,
        causing those changes either to stay in memory (waiting for the
        next unlock) or to vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is
        # left dirty after committing. Then when we strip, the repo is
        # invalidated, causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes, thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # out the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration, this is done from the nearest
        # filtered subset and is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

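    # Illustrative sketch (not part of the original file), assuming a
    # hypothetical match object built with the match module imported above:
    #
    #   m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    #   for f in repo.walk(m):               # working directory (node=None)
    #       repo.ui.write('%s\n' % f)        # f is a repo-relative path
    #   for f in repo.walk(m, node='tip'):   # a specific changeset
    #       ...
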
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

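    # Illustrative sketch (not part of the original file): the result is a
    # scmutil.status-like object with named fields, so a hypothetical caller
    # might write:
    #
    #   st = repo.status(ignored=True)
    #   for f in st.modified:
    #       repo.ui.write('M %s\n' % f)
    #   for f in st.ignored:     # populated only because ignored=True
    #       repo.ui.write('I %s\n' % f)
    #
    # ignored/clean/unknown files are computed only when the corresponding
    # flags are set, which keeps the common case fast.
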
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

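    # Illustrative sketch (not part of the original file): printing the open
    # heads of the 'default' branch, newest first:
    #
    #   for node in repo.branchheads('default'):
    #       repo.ui.write('%s\n' % short(node))
    #
    # short() comes from the node module imported at the top of this file;
    # passing closed=True would include heads of closed branch heads too.
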
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            # follow first parents until we reach a merge or the root,
            # then record (tip, branch point, p1, p2)
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            # walk first parents from top towards bottom, recording a node
            # whenever the step count reaches the next power of two
            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

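    # Illustrative note (not part of the original file): for each
    # (top, bottom) pair the loop above records the nodes 1, 2, 4, 8, ...
    # first-parent steps below top, so the returned list holds roughly
    # log2(distance) entries. The legacy wire protocol uses these
    # exponentially spaced samples to narrow down a common ancestor in a
    # logarithmic number of round trips.
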
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote,
        outgoing)" functions, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

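    # Illustrative sketch (not part of the original file): bookmarks and
    # phases are exchanged through pushkey. Moving a hypothetical bookmark
    # 'feature' might look like:
    #
    #   ok = repo.pushkey('bookmarks', 'feature', hex(oldnode), hex(newnode))
    #
    # 'prepushkey' may veto the update (this method then returns False);
    # the 'pushkey' hook itself only fires after the lock is released,
    # via _afterlock above.
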
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

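    # Illustrative sketch (not part of the original file):
    #
    #   marks = repo.listkeys('bookmarks')   # {bookmark name: hex node}
    #
    # The set of valid namespaces lives in the pushkey module; 'bookmarks'
    # and 'phases' are the common ones in this era of the code.
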
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

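    # Illustrative note (not part of the original file): the saved message
    # survives an aborted commit, so a user can retry with:
    #
    #   hg commit --logfile .hg/last-message.txt
    #
    # The method returns the file's path relative to the current directory.
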
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

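# Illustrative note (not part of the original file): undoname maps a
# transaction journal file to the corresponding undo file, e.g.
#
#   undoname('.hg/store/journal')            -> '.hg/store/undo'
#   undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'
#
# Only the first 'journal' in the basename is replaced, and the assert
# guards against being handed anything that is not a journal file.
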
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True