localrepo: make "undofiles()" return list of tuples "(vfs, relative filename)"...
FUJIWARA Katsunori
r20975:37cdf1fc default
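
Summary of the change: undofiles() previously joined each undo name onto its
vfs and returned flat path strings; as of this revision it returns
(vfs, relative filename) pairs, presumably so callers can operate on the
files through the vfs layer instead of carrying joined paths around. The
before and after, taken from the hunk below:

    # parent revision: names flattened into joined paths
    def undofiles(self):
        return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]

    # this revision: callers receive the vfs plus the vfs-relative name
    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
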
@@ -1,1885 +1,1885 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import peer, changegroup, subrepo, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock as lockmod
import transaction, store, encoding, exchange
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap, pathutil
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

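The cache wrappers above all reroute storage to repo.unfiltered(), so every
filtered view of a repository shares a single set of caches. A standalone toy
sketch of that pattern (the names toycache and toyrepo are invented for
illustration, not Mercurial API):

    class toycache(object):
        """Minimal propertycache-alike that stores on the unfiltered repo."""
        def __init__(self, func):
            self.func = func
            self.name = func.__name__
        def __get__(self, obj, type=None):
            base = obj.unfiltered()        # storage always on the base object
            if self.name not in vars(base):
                setattr(base, self.name, self.func(base))
            return getattr(base, self.name)

    class toyrepo(object):
        def unfiltered(self):
            return self
        @toycache
        def expensive(self):
            print 'computed once'          # printed on first access only
            return 42
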
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'bundle2', 'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10'):
        return exchange.getbundle(self._repo, source, heads=heads,
                                  common=common, bundlecaps=bundlecaps)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            return exchange.unbundle(self._repo, cg, heads, 'push', url)
        except exchange.PushRaced, exc:
            raise error.ResponseError(_('push failed:'), exc.message)

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        # bundle2 is not ready for prime time, drop it unless explicitly
        # required by the tests (or some brave tester)
        if not self.ui.configbool('server', 'bundle2', False):
            caps = set(caps)
            caps.discard('bundle2')
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.sopener.options['chunkcachesize'] = chunkcachesize

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self, revset.spanset(self))

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

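A usage sketch for the two revset helpers above, assuming an existing repo
object: revs() returns revision numbers, set() wraps each one in a change
context, and formatspec() quotes the %s argument safely.

    for ctx in repo.set('heads(branch(%s))', 'default'):
        print ctx.hex()    # hypothetical caller, for illustration only
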
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

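A hypothetical extension-style use of the filter machinery above:
adddatafilter() registers a named in-process filter, and _loadfilter() later
matches that name against the commands configured in the [encode]/[decode]
hgrc sections. The 'upper:' name and the functions below are invented for
illustration:

    def upper(s, cmd, **kwargs):
        return s.upper()

    def reposetup(ui, repo):
        repo.adddatafilter('upper:', upper)

With this in place, an hgrc rule such as '**.txt = upper:' under [encode]
would route matching files through the Python function rather than an
external shell command.
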
    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc, report=None):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        def onclose():
            self.store.write(tr)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        tr = transaction.transaction(rp, self.sopener,
                                     "journal",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     onclose)
        self._transref = weakref.ref(tr)
        return tr

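transaction() snapshots repository state into journal.* files (via
_writejournal below) and hands aftertrans(renames) the job of renaming each
one to its undo.* counterpart once the transaction closes. undoname() itself
is defined further down this file, outside the captured hunk; a rough sketch
of the mapping it performs, under that assumption:

    import os

    def undoname_sketch(fn):       # approximation, for illustration only
        base, name = os.path.split(fn)
        assert name.startswith('journal')
        return os.path.join(base, name.replace('journal', 'undo', 1))

    print undoname_sketch('journal.dirstate')   # -> undo.dirstate
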
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
-        return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
+        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

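This is the hunk the commit message describes. A minimal sketch of consuming
the new shape (repo is an assumed localrepository instance): each entry now
carries the vfs alongside the vfs-relative name, so cleanup code can act
through the vfs instead of joining paths itself:

    import errno

    for undovfs, undofile in repo.undofiles():
        try:
            undovfs.unlink(undofile)   # remove a stale undo file via its vfs
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
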
867 def _writejournal(self, desc):
867 def _writejournal(self, desc):
868 self.opener.write("journal.dirstate",
868 self.opener.write("journal.dirstate",
869 self.opener.tryread("dirstate"))
869 self.opener.tryread("dirstate"))
870 self.opener.write("journal.branch",
870 self.opener.write("journal.branch",
871 encoding.fromlocal(self.dirstate.branch()))
871 encoding.fromlocal(self.dirstate.branch()))
872 self.opener.write("journal.desc",
872 self.opener.write("journal.desc",
873 "%d\n%s\n" % (len(self), desc))
873 "%d\n%s\n" % (len(self), desc))
874 self.opener.write("journal.bookmarks",
874 self.opener.write("journal.bookmarks",
875 self.opener.tryread("bookmarks"))
875 self.opener.tryread("bookmarks"))
876 self.sopener.write("journal.phaseroots",
876 self.sopener.write("journal.phaseroots",
877 self.sopener.tryread("phaseroots"))
877 self.sopener.tryread("phaseroots"))
878
878
879 def recover(self):
879 def recover(self):
880 lock = self.lock()
880 lock = self.lock()
881 try:
881 try:
882 if self.svfs.exists("journal"):
882 if self.svfs.exists("journal"):
883 self.ui.status(_("rolling back interrupted transaction\n"))
883 self.ui.status(_("rolling back interrupted transaction\n"))
884 transaction.rollback(self.sopener, "journal",
884 transaction.rollback(self.sopener, "journal",
885 self.ui.warn)
885 self.ui.warn)
886 self.invalidate()
886 self.invalidate()
887 return True
887 return True
888 else:
888 else:
889 self.ui.warn(_("no interrupted transaction available\n"))
889 self.ui.warn(_("no interrupted transaction available\n"))
890 return False
890 return False
891 finally:
891 finally:
892 lock.release()
892 lock.release()
893
893
894 def rollback(self, dryrun=False, force=False):
894 def rollback(self, dryrun=False, force=False):
895 wlock = lock = None
895 wlock = lock = None
896 try:
896 try:
897 wlock = self.wlock()
897 wlock = self.wlock()
898 lock = self.lock()
898 lock = self.lock()
899 if self.svfs.exists("undo"):
899 if self.svfs.exists("undo"):
900 return self._rollback(dryrun, force)
900 return self._rollback(dryrun, force)
901 else:
901 else:
902 self.ui.warn(_("no rollback information available\n"))
902 self.ui.warn(_("no rollback information available\n"))
903 return 1
903 return 1
904 finally:
904 finally:
905 release(lock, wlock)
905 release(lock, wlock)
906
906
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

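    # Format note with a minimal parsing sketch (illustrative): 'undo.desc'
    # is written by _writejournal() as "%d\n%s\n" -- the old changelog
    # length, then the transaction description, optionally followed by a
    # detail line. This hypothetical helper mirrors the parsing above.
    def _readundodesc(self):
        args = self.opener.read('undo.desc').splitlines()
        oldlen, desc, detail = int(args[0]), args[1], None
        if len(args) >= 3:
            detail = args[2]
        return oldlen, desc, detail
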
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

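    # Usage sketch (illustrative, not part of Mercurial): defer work until
    # the store lock is released, mirroring how commit() schedules its
    # 'commit' hook below.
    def _notifyafterlock(self):
        def notify():
            self.ui.status(_('lock released\n'))
        self._afterlock(notify)
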
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

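    # Lock-ordering sketch (illustrative, not part of Mercurial): when both
    # locks are needed, take wlock before lock (as commit() and rollback()
    # do) so that concurrent processes cannot deadlock on inverted
    # acquisition order. 'fn' is a hypothetical caller-supplied callable.
    def _withbothlocks(self, fn):
        wlock = self.wlock()
        try:
            lock = self.lock()
            try:
                return fn()
            finally:
                lock.release()
        finally:
            wlock.release()
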
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

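    # Metadata note (illustrative): for a file recorded as a copy, the
    # filelog entry written above carries roughly
    #   meta = {'copy': <source path>, 'copyrev': hex(<source filenode>)}
    # with fparent1 forced to nullid, so readers know to follow the copy
    # source rather than the linear parent.
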
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in changes[:3]:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

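    # Usage sketch (illustrative, not part of Mercurial): a minimal
    # programmatic commit of explicitly named files; 'files' is a
    # hypothetical list of paths relative to the repository root, and the
    # exact-matcher signature is assumed from match.py of this era.
    def _commitfiles(self, files, text, user=None):
        m = matchmod.exact(self.root, '', files)
        return self.commit(text=text, user=user, match=m)
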
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes.
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

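    # Contract sketch (illustrative): code that removes changesets is
    # expected to bracket the destructive step with these two calls, e.g.
    #   lock = repo.lock()
    #   try:
    #       repo.destroying()
    #       ... actually remove the revisions ...
    #       repo.destroyed()
    #   finally:
    #       lock.release()
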
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        ctx1 = self[node1]
        ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = sorted(ctx2.substate)
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn, mf2node in mf2.iteritems():
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2node and
                          (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

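    # Result-shape note (illustrative): status() returns seven sorted lists,
    # conventionally unpacked as
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(ignored=True, clean=True, unknown=True)
    # the last three lists stay empty unless the matching flag is passed.
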
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

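    # Usage note (illustrative): open heads of the current branch, newest
    # first, versus all heads including closed ones:
    #   heads = repo.branchheads()
    #   allheads = repo.branchheads(closed=True)
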
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

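    # Behavior note (illustrative): for each (top, bottom) pair, between()
    # walks first parents from top and records the nodes at exponentially
    # growing distances 1, 2, 4, 8, ... so the legacy discovery protocol
    # gets a logarithmic-size sample of the chain between the two nodes.
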
1689 def pull(self, remote, heads=None, force=False):
1689 def pull(self, remote, heads=None, force=False):
1690 return exchange.pull (self, remote, heads, force)
1690 return exchange.pull (self, remote, heads, force)
1691
1691
1692 def checkpush(self, pushop):
1692 def checkpush(self, pushop):
1693 """Extensions can override this function if additional checks have
1693 """Extensions can override this function if additional checks have
1694 to be performed before pushing, or call it if they override push
1694 to be performed before pushing, or call it if they override push
1695 command.
1695 command.
1696 """
1696 """
1697 pass
1697 pass
1698
1698
1699 def push(self, remote, force=False, revs=None, newbranch=False):
1699 def push(self, remote, force=False, revs=None, newbranch=False):
1700 return exchange.push(self, remote, force, revs, newbranch)
1700 return exchange.push(self, remote, force, revs, newbranch)
1701
1701
1702 def stream_in(self, remote, requirements):
1702 def stream_in(self, remote, requirements):
1703 lock = self.lock()
1703 lock = self.lock()
1704 try:
1704 try:
1705 # Save remote branchmap. We will use it later
1705 # Save remote branchmap. We will use it later
1706 # to speed up branchcache creation
1706 # to speed up branchcache creation
1707 rbranchmap = None
1707 rbranchmap = None
1708 if remote.capable("branchmap"):
1708 if remote.capable("branchmap"):
1709 rbranchmap = remote.branchmap()
1709 rbranchmap = remote.branchmap()
1710
1710
1711 fp = remote.stream_out()
1711 fp = remote.stream_out()
1712 l = fp.readline()
1712 l = fp.readline()
1713 try:
1713 try:
1714 resp = int(l)
1714 resp = int(l)
1715 except ValueError:
1715 except ValueError:
1716 raise error.ResponseError(
1716 raise error.ResponseError(
1717 _('unexpected response from remote server:'), l)
1717 _('unexpected response from remote server:'), l)
1718 if resp == 1:
1718 if resp == 1:
1719 raise util.Abort(_('operation forbidden by server'))
1719 raise util.Abort(_('operation forbidden by server'))
1720 elif resp == 2:
1720 elif resp == 2:
1721 raise util.Abort(_('locking the remote repository failed'))
1721 raise util.Abort(_('locking the remote repository failed'))
1722 elif resp != 0:
1722 elif resp != 0:
1723 raise util.Abort(_('the server sent an unknown error code'))
1723 raise util.Abort(_('the server sent an unknown error code'))
1724 self.ui.status(_('streaming all changes\n'))
1724 self.ui.status(_('streaming all changes\n'))
1725 l = fp.readline()
1725 l = fp.readline()
1726 try:
1726 try:
1727 total_files, total_bytes = map(int, l.split(' ', 1))
1727 total_files, total_bytes = map(int, l.split(' ', 1))
1728 except (ValueError, TypeError):
1728 except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the in-memory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible
                    # filters above served are unlikely to be fetched from
                    # a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

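The receive loop in stream_in() above consumes a simple per-file framing: one
header line of the form 'name\0size\n' per file, followed by exactly size raw
bytes of revlog data. A minimal sketch of the sending side of that framing
(encode_stream_entry is a hypothetical helper, not a Mercurial API):

def encode_stream_entry(name, data):
    # one header line per file ('\n' or '\r' in names is unsupported,
    # see the XXX in the loop above), then the raw bytes themselves
    return '%s\0%d\n%s' % (name, len(data), data)
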
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

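The 'streamreqs' branch above is a plain subset test on requirement names. A
hedged illustration (the capability string and the supported set are made-up
example values, not what any particular server advertises):

# the capability value is a comma-separated requirement list
streamreqs = set('revlogv1,generaldelta'.split(','))
supportedformats = set(['revlogv1', 'generaldelta'])
# streaming is only safe when the remote needs nothing we lack
if not streamreqs - supportedformats:
    pass  # ok to stream_in(remote, streamreqs)
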
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
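
A hedged usage sketch of the pushkey/listkeys pair above, using the
'bookmarks' namespace (the bookmark name is illustrative; keys and values are
plain strings, and the old value acts as a compare-and-set guard):

from mercurial.node import hex

marks = repo.listkeys('bookmarks')        # e.g. {'stable': '0a1b2c...'}
newnode = hex(repo['tip'].node())
ok = repo.pushkey('bookmarks', 'stable', marks.get('stable', ''), newnode)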

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
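
Worked examples of undoname() above, which maps a transaction journal file to
its undo counterpart (the paths are illustrative):

undoname('.hg/store/journal')        # -> '.hg/store/undo'
undoname('.hg/journal.dirstate')     # -> '.hg/undo.dirstate'
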
@@ -1,179 +1,180 b''
# repair.py - functions for repository repair for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
# Copyright 2007 Matt Mackall
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from mercurial import changegroup
from mercurial.node import short
from mercurial.i18n import _
import os
import errno

def _bundle(repo, bases, heads, node, suffix, compress=True):
    """create a bundle with the specified revisions as a backup"""
    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip')
    backupdir = repo.join("strip-backup")
    if not os.path.isdir(backupdir):
        os.mkdir(backupdir)
    name = os.path.join(backupdir, "%s-%s.hg" % (short(node), suffix))
    if compress:
        bundletype = "HG10BZ"
    else:
        bundletype = "HG10UN"
    return changegroup.writebundle(cg, name, bundletype)

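For reference, the backup path computed by _bundle() above lands under the
repository's .hg directory; with an illustrative short node it looks like:

#   .hg/strip-backup/1e64e1246790-backup.hg   (full backup, topic='backup')
#   .hg/strip-backup/1e64e1246790-temp.hg     (partial bundle kept during strip)
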
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    files = set()

    for x in xrange(striprev, len(repo)):
        files.update(repo[x].files())

    return sorted(files)

def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    s = set()
    def collectone(revlog):
        _, brokenset = revlog.getstrippoint(striprev)
        s.update([revlog.linkrev(r) for r in brokenset])

    collectone(repo.manifest)
    for fname in files:
        collectone(repo.file(fname))

    return s

def strip(ui, repo, nodelist, backup="all", topic='backup'):
    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    keeppartialbundle = backup == 'strip'

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    for rev in striplist:
        for desc in cl.descendants([rev]):
            tostrip.add(desc)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # compute heads
    saveheads = set(saverevs)
    for r in xrange(striprev + 1, len(cl)):
        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
    # but is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget[0]].node()
    else:
        newbmtarget = '.'

    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    # (in Python 2 the striplist comprehension above leaks its loop
    # variable, so 'node' here is nodelist[-1]; it only names the bundle)
    backupfile = None
    if backup == "all":
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") % backupfile)
        repo.ui.log("backupbundle", "saved backup bundle to %s\n", backupfile)
    if saveheads or savebases:
        # do not compress partial bundle if we remove it from disk later
        chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                            compress=keeppartialbundle)

    mfst = repo.manifest

    tr = repo.transaction("strip")
    offset = len(tr.entries)

    try:
        tr.startgroup()
        cl.strip(striprev, tr)
        mfst.strip(striprev, tr)
        for fn in files:
            repo.file(fn).strip(striprev, tr)
        tr.endgroup()

        try:
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                repo.sopener(file, 'a').truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)
            tr.close()
        except: # re-raises
            tr.abort()
            raise

        if saveheads or savebases:
            ui.note(_("adding branch\n"))
            f = open(chgrpfile, "rb")
            gen = changegroup.readbundle(f, chgrpfile)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            changegroup.addchangegroup(repo, gen, 'strip',
                                       'bundle:' + chgrpfile, True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
            if not keeppartialbundle:
                os.unlink(chgrpfile)

        # remove undo files
-        for undofile in repo.undofiles():
+        for undovfs, undofile in repo.undofiles():
            try:
-                os.unlink(undofile)
+                undovfs.unlink(undofile)
            except OSError, e:
                if e.errno != errno.ENOENT:
-                    ui.warn(_('error removing %s: %s\n') % (undofile, str(e)))
+                    ui.warn(_('error removing %s: %s\n') %
+                            (undovfs.join(undofile), str(e)))

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        bm.write()
    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, full bundle stored in '%s'\n")
                    % backupfile)
        elif saveheads:
            ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                    % chgrpfile)
        raise

    repo.destroyed()
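
A hedged sketch of the undofiles() contract this changeset introduces: each
entry is now a (vfs, relative filename) pair, so callers like the strip()
cleanup above resolve and remove paths through the vfs instead of joining
them by hand ('repo' stands for any local repository object):

for undovfs, undofile in repo.undofiles():
    print undovfs.join(undofile)    # absolute path of each undo file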