journal: set Abort hint when failing due to an abandoned transaction
Johan Bjork
r21274:3b4c7569 default
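
Before this change, the remedy was baked into the error string itself ("abandoned transaction found - run hg recover"). The patch splits it out into a hint, so the command-line layer prints the message and the suggestion on separate lines, matching how other hinted aborts are displayed. A minimal sketch of the intended user-visible effect, using a stand-in exception class rather than mercurial.error (the real rendering is done by Mercurial's dispatch code):

    # Stand-in for mercurial.error.RepoError; illustrative only.
    class RepoError(Exception):
        def __init__(self, message, hint=None):
            Exception.__init__(self, message)
            self.hint = hint

    try:
        raise RepoError("abandoned transaction found",
                        hint="run 'hg recover' to clean up transaction")
    except RepoError, err:
        # dispatch prints roughly: abort: abandoned transaction found!
        print "abort: %s!" % err
        if err.hint:
            # followed by: (run 'hg recover' to clean up transaction)
            print "(%s)" % err.hint
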
@@ -1,1912 +1,1913
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import urllib
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange, bundle2
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap, pathutil
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

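The unfiltered/filtered split above is subtle: repoview filtering wraps the repository in a proxy class, while caches and certain methods must always operate on the single underlying unfiltered object. A small sketch of the @unfilteredmethod pattern with a hypothetical fakerepo class (invented here, not part of Mercurial) shows the mechanics:

    # Hypothetical demo of the decorator defined above.
    class fakerepo(object):
        def unfiltered(self):
            # a real repoview proxy would return the base repository
            return self

        @unfilteredmethod
        def count(self):
            # always executes against the unfiltered repository
            return 42
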
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2X' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the API
                # is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except error.PushRaced, exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    bundle2caps = {'HG2X': ()}

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        # bundle2 is not ready for prime time, drop it unless explicitly
        # required by the tests (or some brave tester)
        if self.ui.configbool('experimental', 'bundle2-exp', False):
            caps = set(caps)
            capsblob = bundle2.encodecaps(self.bundle2caps)
            caps.add('bundle2-exp=' + urllib.quote(capsblob))
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.sopener.options['chunkcachesize'] = chunkcachesize

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self, revset.spanset(self))

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

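Both revs() and set() run a revset expression; the %s placeholders are expanded by revset.formatspec, which quotes the arguments, so callers should not interpolate strings themselves. A hedged usage sketch, assuming an already-open localrepository object named repo (this fragment is not runnable outside a repository):

    # Illustrative only; requires a real repository object 'repo'.
    def branch_head_hashes(repo, branch):
        # revs() returns revision numbers; set() yields change contexts
        return [ctx.hex() for ctx in repo.set('heads(branch(%s))', branch)]
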
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={},
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

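branchmap() is the usual entry point for branch queries: the returned object behaves like a dictionary from branch name to a list of head nodes, and branchtip() below builds on it. A sketch, again assuming an open repository object repo:

    # Illustrative only; requires a real repository object 'repo'.
    def describe_branch(repo, name):
        heads = repo.branchmap()[name]   # binary node ids, by revision
        return '%s: %d head(s), tip %s' % (name, len(heads),
                                           hex(repo.branchtip(name)))
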
    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc, report=None):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
-                _("abandoned transaction found - run hg recover"))
+                _("abandoned transaction found"),
+                hint=_("run 'hg recover' to clean up transaction"))

        def onclose():
            self.store.write(tr)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        tr = transaction.transaction(rp, self.sopener,
                                     "journal",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     onclose)
        self._transref = weakref.ref(tr)
        return tr

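The journal files written by _writejournal() below serve double duty: while the transaction runs they are the recovery data that hg recover replays, and on successful close aftertrans() renames each one to its undo.* counterpart for later rollback(). aftertrans() and undoname() are defined elsewhere in this module; the following is only an approximation of that rename step, not a copy of the real code:

    # Approximate sketch of the post-transaction rename performed by
    # aftertrans(); 'renames' holds (vfs, src, dest) tuples as built
    # in transaction() above.
    def aftertrans_sketch(renames):
        def a():
            for vfs, src, dest in renames:
                # e.g. 'journal.dirstate' -> 'undo.dirstate'
                vfs.rename(src, dest)
        return a
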
876 def _journalfiles(self):
877 def _journalfiles(self):
877 return ((self.svfs, 'journal'),
878 return ((self.svfs, 'journal'),
878 (self.vfs, 'journal.dirstate'),
879 (self.vfs, 'journal.dirstate'),
879 (self.vfs, 'journal.branch'),
880 (self.vfs, 'journal.branch'),
880 (self.vfs, 'journal.desc'),
881 (self.vfs, 'journal.desc'),
881 (self.vfs, 'journal.bookmarks'),
882 (self.vfs, 'journal.bookmarks'),
882 (self.svfs, 'journal.phaseroots'))
883 (self.svfs, 'journal.phaseroots'))
883
884
884 def undofiles(self):
885 def undofiles(self):
885 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
886 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
886
887
887 def _writejournal(self, desc):
888 def _writejournal(self, desc):
888 self.opener.write("journal.dirstate",
889 self.opener.write("journal.dirstate",
889 self.opener.tryread("dirstate"))
890 self.opener.tryread("dirstate"))
890 self.opener.write("journal.branch",
891 self.opener.write("journal.branch",
891 encoding.fromlocal(self.dirstate.branch()))
892 encoding.fromlocal(self.dirstate.branch()))
892 self.opener.write("journal.desc",
893 self.opener.write("journal.desc",
893 "%d\n%s\n" % (len(self), desc))
894 "%d\n%s\n" % (len(self), desc))
894 self.opener.write("journal.bookmarks",
895 self.opener.write("journal.bookmarks",
895 self.opener.tryread("bookmarks"))
896 self.opener.tryread("bookmarks"))
896 self.sopener.write("journal.phaseroots",
897 self.sopener.write("journal.phaseroots",
897 self.sopener.tryread("phaseroots"))
898 self.sopener.tryread("phaseroots"))
898
899
    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

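    # With the hint added in transaction() above, a later operation that
    # finds a stale journal aborts with (approximate user-facing output):
    #   abort: abandoned transaction found
    #   (run 'hg recover' to clean up transaction)
    # and recover() performs that cleanup by rolling the journal back.
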
    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

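    # For example, commit() below registers its 'commit' hook via
    # _afterlock(), so the hook fires only after the repository lock is
    # released, or immediately when no lock is currently held.
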
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

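    # Lock-ordering sketch: callers needing both locks acquire wlock()
    # before lock(), as rollback() above does:
    #
    #   wlock = repo.wlock()
    #   lock = repo.lock()
    #   try:
    #       ...  # modify both .hg and .hg/store
    #   finally:
    #       release(lock, wlock)
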
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

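    # A rename recorded by _filecommit() ends up as filelog metadata of
    # the form (illustrative values):
    #   meta = {'copy': 'foo', 'copyrev': '<40-digit hex filenode>'}
    # with fparent1 set to nullid so readers look up the copy source.
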
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in changes[:3]:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

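    # Minimal illustrative use of commit() (a sketch; it assumes
    # scmutil.matchfiles for building the matcher and an existing tracked
    # file 'a.txt'):
    #
    #   match = scmutil.matchfiles(repo, ['a.txt'])
    #   node = repo.commit(text="tweak a.txt", user="alice", match=match)
    #   # node is None when nothing changed, else the new changeset id
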
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter parent
                # changesets; if a parent has a higher phase, the
                # resulting phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract
                # anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        ctx1 = self[node1]
        ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = sorted(ctx2.substate)
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn, mf2node in mf2.iteritems():
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2node and
                          (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

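    # status() returns the 7-tuple built above, in this order:
    #   (modified, added, removed, deleted, unknown, ignored, clean)
    # e.g. repo.status()[0] is the list of files modified in the working
    # directory relative to '.'.
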
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

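    # between() samples the first-parent chain from each top at
    # exponentially growing distances (the i == f, f *= 2 steps above),
    # so for a pair (top, bottom) it returns roughly the ancestors 1, 2,
    # 4, 8, ... steps below top, stopping at bottom or nullid.
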
    def pull(self, remote, heads=None, force=False):
        return exchange.pull(self, remote, heads, force)

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

    def push(self, remote, force=False, revs=None, newbranch=False):
        return exchange.push(self, remote, force, revs, newbranch)

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save the remote branchmap. We will use it later
            # to speed up branchcache creation.
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the in-memory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related
            #                    requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible; filters above
                    # 'served' are unlikely to be fetched from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

1832
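Taken together, the reads above define the full 'stream_out' wire format: a one-line status code (0 ok, 1 operation forbidden, 2 remote lock failed), a 'total_files total_bytes' line, then per file a '<name>\0<size>' header followed by exactly <size> raw bytes. As a rough standalone sketch of a reader for that format (illustrative only, not part of this module; 'fp' is any file-like object holding such a response):

def read_stream_entries(fp):
    # status line: 0 = ok, 1 = operation forbidden, 2 = remote lock failed
    if int(fp.readline()) != 0:
        raise IOError('server refused to stream')
    # totals line; total_bytes is only useful for progress reporting
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    for _unused in xrange(total_files):
        # each entry: "<name>\0<size>\n" followed by <size> raw bytes
        name, size = fp.readline().split('\0', 1)
        yield name, fp.read(int(size))
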
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

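The negotiation order matters: a plain 'stream' capability implies the remote uses only revlogv1, while 'streamreqs' carries a comma-separated list of the remote's format requirements, and streaming is only safe when that list is a subset of self.supportedformats. The subset test is the set subtraction above; for example (the capability value and local format set here are made up):

remote_reqs = set('revlogv1,generaldelta'.split(','))  # parsed 'streamreqs' value
local_formats = set(['revlogv1', 'generaldelta'])      # stand-in for supportedformats
can_stream = not (remote_reqs - local_formats)         # True: remote needs nothing we lack
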
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

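Both wrappers bracket the generic pushkey protocol with hooks; namespaces such as 'bookmarks' and 'phases' are implemented on top of it. A rough usage sketch against a local repository (the repository path and key values are illustrative):

from mercurial import hg, ui

repo = hg.repository(ui.ui(), '/tmp/repo')   # hypothetical path
print repo.listkeys('namespaces')            # available pushkey namespaces
print repo.listkeys('bookmarks')             # {bookmark name: hex node}
# move bookmark 'foo' (an empty old value would create it):
# repo.pushkey('bookmarks', 'foo', old_hex, new_hex)
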
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

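savecommitmessage() stashes a draft message in .hg/last-message.txt so it can be recovered after a failed commit; the return value is the file's path relative to the current directory. Reusing the repo object from the sketch above:

relpath = repo.savecommitmessage('WIP: draft commit message\n')
print relpath   # typically '.hg/last-message.txt' when run from the repo root
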
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

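aftertrans() lives at module level so that the closure it returns holds no reference back to the repository object; the transaction machinery runs it after a successful close, renaming each journal file to its undo counterpart so a later 'hg rollback' can find them. A toy illustration (the vfs path and file names are placeholders):

from mercurial import scmutil

v = scmutil.vfs('/tmp/repo/.hg/store')          # hypothetical store vfs
onclose = aftertrans([(v, 'journal', 'undo')])
onclose()   # renames journal -> undo; a missing journal is silently skipped
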
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

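undoname() maps a journal file to its undo counterpart by replacing only the first 'journal' in the basename, so companion files keep their suffixes:

print undoname('/repo/.hg/store/journal')            # /repo/.hg/store/undo
print undoname('/repo/.hg/store/journal.bookmarks')  # /repo/.hg/store/undo.bookmarks
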
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
@@ -1,36 +1,37 @@
   $ hg init
   $ echo a > a
   $ hg ci -Am0
   adding a

   $ hg -q clone . foo

   $ touch .hg/store/journal

   $ echo foo > a
   $ hg ci -Am0
-  abort: abandoned transaction found - run hg recover!
+  abort: abandoned transaction found!
+  (run 'hg recover' to clean up transaction)
   [255]

   $ hg recover
   rolling back interrupted transaction
   checking changesets
   checking manifests
   crosschecking files in changesets and manifests
   checking files
   1 files, 1 changesets, 1 total revisions

 Check that zero-size journals are correctly aborted:

 #if unix-permissions no-root
   $ hg bundle -qa repo.hg
   $ chmod -w foo/.hg/store/00changelog.i

   $ hg -R foo unbundle repo.hg
   adding changesets
   abort: Permission denied: $TESTTMP/foo/.hg/store/.00changelog.i-* (glob)
   [255]

   $ if test -f foo/.hg/store/journal; then echo 'journal exists :-('; fi
 #endif
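The new two-line output reflects this changeset's point: the advice moves out of the abort message and into an Abort hint, which the command-line client prints parenthesized on its own line. The raising side presumably looks something like the following sketch (the helper name and the journal check are schematic, not the exact call site in this commit):

import os
from mercurial import util
from mercurial.i18n import _

def checkjournal(repo):
    # schematic: abort when a previous transaction left a journal behind
    if os.path.exists(repo.sjoin("journal")):
        raise util.Abort(_("abandoned transaction found!"),
                         hint=_("run 'hg recover' to clean up transaction"))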