treemanifest: cache directory logs and manifests...
Martin von Zweigbergk
r25185:bf6b476f default
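The one-line change in the hunk below replaces `manifest.manifest(self.svfs, dir)` — which built a fresh revlog object on every call — with `self.manifest.dirlog(dir)`, so per-directory manifest logs are obtained through the repo's already-cached `manifest` property and can be memoized there. A minimal sketch of that delegation pattern, assuming a `_dirlogcache` dict on the manifest class (the cache name and constructor signature here are illustrative, not the verbatim upstream code):

# Illustrative sketch only: `_dirlogcache` and this constructor signature
# are assumptions for demonstration, not the actual upstream implementation.
class manifest(object):
    def __init__(self, opener, dir=''):
        self.opener = opener       # revlog opener (svfs in the real code)
        self.dir = dir             # '' for the root manifest
        self._dirlogcache = {}     # directory path -> manifest instance

    def dirlog(self, dir):
        # Create the per-directory manifest log once, then reuse it.
        if dir not in self._dirlogcache:
            self._dirlogcache[dir] = manifest(self.opener, dir)
        return self._dirlogcache[dir]

if __name__ == '__main__':
    root = manifest(opener=None)
    assert root.dirlog('foo/') is root.dirlog('foo/')  # cached, same object

Because `localrepository.manifest` is a `@storecache('00manifest.i')` property, repeated `repo.dirlog('foo/')` calls now land on the same manifest object and can hit its cache instead of re-opening the directory revlog each time.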
@@ -1,1973 +1,1973 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import urllib
9 import urllib
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock as lockmod
12 import lock as lockmod
13 import transaction, store, encoding, exchange, bundle2
13 import transaction, store, encoding, exchange, bundle2
14 import scmutil, util, extensions, hook, error, revset
14 import scmutil, util, extensions, hook, error, revset
15 import match as matchmod
15 import match as matchmod
16 import merge as mergemod
16 import merge as mergemod
17 import tags as tagsmod
17 import tags as tagsmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 import branchmap, pathutil
20 import branchmap, pathutil
21 import namespaces
21 import namespaces
22 propertycache = util.propertycache
22 propertycache = util.propertycache
23 filecache = scmutil.filecache
23 filecache = scmutil.filecache
24
24
25 class repofilecache(filecache):
25 class repofilecache(filecache):
26 """All filecache usage on repo are done for logic that should be unfiltered
26 """All filecache usage on repo are done for logic that should be unfiltered
27 """
27 """
28
28
29 def __get__(self, repo, type=None):
29 def __get__(self, repo, type=None):
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 def __set__(self, repo, value):
31 def __set__(self, repo, value):
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 def __delete__(self, repo):
33 def __delete__(self, repo):
34 return super(repofilecache, self).__delete__(repo.unfiltered())
34 return super(repofilecache, self).__delete__(repo.unfiltered())
35
35
36 class storecache(repofilecache):
36 class storecache(repofilecache):
37 """filecache for files in the store"""
37 """filecache for files in the store"""
38 def join(self, obj, fname):
38 def join(self, obj, fname):
39 return obj.sjoin(fname)
39 return obj.sjoin(fname)
40
40
41 class unfilteredpropertycache(propertycache):
41 class unfilteredpropertycache(propertycache):
42 """propertycache that apply to unfiltered repo only"""
42 """propertycache that apply to unfiltered repo only"""
43
43
44 def __get__(self, repo, type=None):
44 def __get__(self, repo, type=None):
45 unfi = repo.unfiltered()
45 unfi = repo.unfiltered()
46 if unfi is repo:
46 if unfi is repo:
47 return super(unfilteredpropertycache, self).__get__(unfi)
47 return super(unfilteredpropertycache, self).__get__(unfi)
48 return getattr(unfi, self.name)
48 return getattr(unfi, self.name)
49
49
50 class filteredpropertycache(propertycache):
50 class filteredpropertycache(propertycache):
51 """propertycache that must take filtering in account"""
51 """propertycache that must take filtering in account"""
52
52
53 def cachevalue(self, obj, value):
53 def cachevalue(self, obj, value):
54 object.__setattr__(obj, self.name, value)
54 object.__setattr__(obj, self.name, value)
55
55
56
56
57 def hasunfilteredcache(repo, name):
57 def hasunfilteredcache(repo, name):
58 """check if a repo has an unfilteredpropertycache value for <name>"""
58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 return name in vars(repo.unfiltered())
59 return name in vars(repo.unfiltered())
60
60
61 def unfilteredmethod(orig):
61 def unfilteredmethod(orig):
62 """decorate method that always need to be run on unfiltered version"""
62 """decorate method that always need to be run on unfiltered version"""
63 def wrapper(repo, *args, **kwargs):
63 def wrapper(repo, *args, **kwargs):
64 return orig(repo.unfiltered(), *args, **kwargs)
64 return orig(repo.unfiltered(), *args, **kwargs)
65 return wrapper
65 return wrapper
66
66
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 'unbundle'))
68 'unbundle'))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70
70
71 class localpeer(peer.peerrepository):
71 class localpeer(peer.peerrepository):
72 '''peer for a local repo; reflects only the most recent API'''
72 '''peer for a local repo; reflects only the most recent API'''
73
73
74 def __init__(self, repo, caps=moderncaps):
74 def __init__(self, repo, caps=moderncaps):
75 peer.peerrepository.__init__(self)
75 peer.peerrepository.__init__(self)
76 self._repo = repo.filtered('served')
76 self._repo = repo.filtered('served')
77 self.ui = repo.ui
77 self.ui = repo.ui
78 self._caps = repo._restrictcapabilities(caps)
78 self._caps = repo._restrictcapabilities(caps)
79 self.requirements = repo.requirements
79 self.requirements = repo.requirements
80 self.supportedformats = repo.supportedformats
80 self.supportedformats = repo.supportedformats
81
81
82 def close(self):
82 def close(self):
83 self._repo.close()
83 self._repo.close()
84
84
85 def _capabilities(self):
85 def _capabilities(self):
86 return self._caps
86 return self._caps
87
87
88 def local(self):
88 def local(self):
89 return self._repo
89 return self._repo
90
90
91 def canpush(self):
91 def canpush(self):
92 return True
92 return True
93
93
94 def url(self):
94 def url(self):
95 return self._repo.url()
95 return self._repo.url()
96
96
97 def lookup(self, key):
97 def lookup(self, key):
98 return self._repo.lookup(key)
98 return self._repo.lookup(key)
99
99
100 def branchmap(self):
100 def branchmap(self):
101 return self._repo.branchmap()
101 return self._repo.branchmap()
102
102
103 def heads(self):
103 def heads(self):
104 return self._repo.heads()
104 return self._repo.heads()
105
105
106 def known(self, nodes):
106 def known(self, nodes):
107 return self._repo.known(nodes)
107 return self._repo.known(nodes)
108
108
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 **kwargs):
110 **kwargs):
111 cg = exchange.getbundle(self._repo, source, heads=heads,
111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 common=common, bundlecaps=bundlecaps, **kwargs)
112 common=common, bundlecaps=bundlecaps, **kwargs)
113 if bundlecaps is not None and 'HG20' in bundlecaps:
113 if bundlecaps is not None and 'HG20' in bundlecaps:
114 # When requesting a bundle2, getbundle returns a stream to make the
114 # When requesting a bundle2, getbundle returns a stream to make the
115 # wire level function happier. We need to build a proper object
115 # wire level function happier. We need to build a proper object
116 # from it in local peer.
116 # from it in local peer.
117 cg = bundle2.getunbundler(self.ui, cg)
117 cg = bundle2.getunbundler(self.ui, cg)
118 return cg
118 return cg
119
119
120 # TODO We might want to move the next two calls into legacypeer and add
120 # TODO We might want to move the next two calls into legacypeer and add
121 # unbundle instead.
121 # unbundle instead.
122
122
123 def unbundle(self, cg, heads, url):
123 def unbundle(self, cg, heads, url):
124 """apply a bundle on a repo
124 """apply a bundle on a repo
125
125
126 This function handles the repo locking itself."""
126 This function handles the repo locking itself."""
127 try:
127 try:
128 try:
128 try:
129 cg = exchange.readbundle(self.ui, cg, None)
129 cg = exchange.readbundle(self.ui, cg, None)
130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
131 if util.safehasattr(ret, 'getchunks'):
131 if util.safehasattr(ret, 'getchunks'):
132 # This is a bundle20 object, turn it into an unbundler.
132 # This is a bundle20 object, turn it into an unbundler.
133 # This little dance should be dropped eventually when the
133 # This little dance should be dropped eventually when the
134 # API is finally improved.
134 # API is finally improved.
135 stream = util.chunkbuffer(ret.getchunks())
135 stream = util.chunkbuffer(ret.getchunks())
136 ret = bundle2.getunbundler(self.ui, stream)
136 ret = bundle2.getunbundler(self.ui, stream)
137 return ret
137 return ret
138 except Exception, exc:
138 except Exception, exc:
139 # If the exception contains output salvaged from a bundle2
139 # If the exception contains output salvaged from a bundle2
140 # reply, we need to make sure it is printed before continuing
140 # reply, we need to make sure it is printed before continuing
141 # to fail. So we build a bundle2 with such output and consume
141 # to fail. So we build a bundle2 with such output and consume
142 # it directly.
142 # it directly.
143 #
143 #
144 # This is not very elegant but allows a "simple" solution for
144 # This is not very elegant but allows a "simple" solution for
145 # issue4594
145 # issue4594
146 output = getattr(exc, '_bundle2salvagedoutput', ())
146 output = getattr(exc, '_bundle2salvagedoutput', ())
147 if output:
147 if output:
148 bundler = bundle2.bundle20(self._repo.ui)
148 bundler = bundle2.bundle20(self._repo.ui)
149 for out in output:
149 for out in output:
150 bundler.addpart(out)
150 bundler.addpart(out)
151 stream = util.chunkbuffer(bundler.getchunks())
151 stream = util.chunkbuffer(bundler.getchunks())
152 b = bundle2.getunbundler(self.ui, stream)
152 b = bundle2.getunbundler(self.ui, stream)
153 bundle2.processbundle(self._repo, b)
153 bundle2.processbundle(self._repo, b)
154 raise
154 raise
155 except error.PushRaced, exc:
155 except error.PushRaced, exc:
156 raise error.ResponseError(_('push failed:'), str(exc))
156 raise error.ResponseError(_('push failed:'), str(exc))
157
157
158 def lock(self):
158 def lock(self):
159 return self._repo.lock()
159 return self._repo.lock()
160
160
161 def addchangegroup(self, cg, source, url):
161 def addchangegroup(self, cg, source, url):
162 return changegroup.addchangegroup(self._repo, cg, source, url)
162 return changegroup.addchangegroup(self._repo, cg, source, url)
163
163
164 def pushkey(self, namespace, key, old, new):
164 def pushkey(self, namespace, key, old, new):
165 return self._repo.pushkey(namespace, key, old, new)
165 return self._repo.pushkey(namespace, key, old, new)
166
166
167 def listkeys(self, namespace):
167 def listkeys(self, namespace):
168 return self._repo.listkeys(namespace)
168 return self._repo.listkeys(namespace)
169
169
170 def debugwireargs(self, one, two, three=None, four=None, five=None):
170 def debugwireargs(self, one, two, three=None, four=None, five=None):
171 '''used to test argument passing over the wire'''
171 '''used to test argument passing over the wire'''
172 return "%s %s %s %s %s" % (one, two, three, four, five)
172 return "%s %s %s %s %s" % (one, two, three, four, five)
173
173
174 class locallegacypeer(localpeer):
174 class locallegacypeer(localpeer):
175 '''peer extension which implements legacy methods too; used for tests with
175 '''peer extension which implements legacy methods too; used for tests with
176 restricted capabilities'''
176 restricted capabilities'''
177
177
178 def __init__(self, repo):
178 def __init__(self, repo):
179 localpeer.__init__(self, repo, caps=legacycaps)
179 localpeer.__init__(self, repo, caps=legacycaps)
180
180
181 def branches(self, nodes):
181 def branches(self, nodes):
182 return self._repo.branches(nodes)
182 return self._repo.branches(nodes)
183
183
184 def between(self, pairs):
184 def between(self, pairs):
185 return self._repo.between(pairs)
185 return self._repo.between(pairs)
186
186
187 def changegroup(self, basenodes, source):
187 def changegroup(self, basenodes, source):
188 return changegroup.changegroup(self._repo, basenodes, source)
188 return changegroup.changegroup(self._repo, basenodes, source)
189
189
190 def changegroupsubset(self, bases, heads, source):
190 def changegroupsubset(self, bases, heads, source):
191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
192
192
193 class localrepository(object):
193 class localrepository(object):
194
194
195 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
195 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
196 'manifestv2'))
196 'manifestv2'))
197 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
197 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
198 'dotencode'))
198 'dotencode'))
199 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
199 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
200 filtername = None
200 filtername = None
201
201
202 # a list of (ui, featureset) functions.
202 # a list of (ui, featureset) functions.
203 # only functions defined in module of enabled extensions are invoked
203 # only functions defined in module of enabled extensions are invoked
204 featuresetupfuncs = set()
204 featuresetupfuncs = set()
205
205
206 def _baserequirements(self, create):
206 def _baserequirements(self, create):
207 return ['revlogv1']
207 return ['revlogv1']
208
208
209 def __init__(self, baseui, path=None, create=False):
209 def __init__(self, baseui, path=None, create=False):
210 self.requirements = set()
210 self.requirements = set()
211 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
211 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
212 self.wopener = self.wvfs
212 self.wopener = self.wvfs
213 self.root = self.wvfs.base
213 self.root = self.wvfs.base
214 self.path = self.wvfs.join(".hg")
214 self.path = self.wvfs.join(".hg")
215 self.origroot = path
215 self.origroot = path
216 self.auditor = pathutil.pathauditor(self.root, self._checknested)
216 self.auditor = pathutil.pathauditor(self.root, self._checknested)
217 self.vfs = scmutil.vfs(self.path)
217 self.vfs = scmutil.vfs(self.path)
218 self.opener = self.vfs
218 self.opener = self.vfs
219 self.baseui = baseui
219 self.baseui = baseui
220 self.ui = baseui.copy()
220 self.ui = baseui.copy()
221 self.ui.copy = baseui.copy # prevent copying repo configuration
221 self.ui.copy = baseui.copy # prevent copying repo configuration
222 # A list of callback to shape the phase if no data were found.
222 # A list of callback to shape the phase if no data were found.
223 # Callback are in the form: func(repo, roots) --> processed root.
223 # Callback are in the form: func(repo, roots) --> processed root.
224 # This list it to be filled by extension during repo setup
224 # This list it to be filled by extension during repo setup
225 self._phasedefaults = []
225 self._phasedefaults = []
226 try:
226 try:
227 self.ui.readconfig(self.join("hgrc"), self.root)
227 self.ui.readconfig(self.join("hgrc"), self.root)
228 extensions.loadall(self.ui)
228 extensions.loadall(self.ui)
229 except IOError:
229 except IOError:
230 pass
230 pass
231
231
232 if self.featuresetupfuncs:
232 if self.featuresetupfuncs:
233 self.supported = set(self._basesupported) # use private copy
233 self.supported = set(self._basesupported) # use private copy
234 extmods = set(m.__name__ for n, m
234 extmods = set(m.__name__ for n, m
235 in extensions.extensions(self.ui))
235 in extensions.extensions(self.ui))
236 for setupfunc in self.featuresetupfuncs:
236 for setupfunc in self.featuresetupfuncs:
237 if setupfunc.__module__ in extmods:
237 if setupfunc.__module__ in extmods:
238 setupfunc(self.ui, self.supported)
238 setupfunc(self.ui, self.supported)
239 else:
239 else:
240 self.supported = self._basesupported
240 self.supported = self._basesupported
241
241
242 if not self.vfs.isdir():
242 if not self.vfs.isdir():
243 if create:
243 if create:
244 if not self.wvfs.exists():
244 if not self.wvfs.exists():
245 self.wvfs.makedirs()
245 self.wvfs.makedirs()
246 self.vfs.makedir(notindexed=True)
246 self.vfs.makedir(notindexed=True)
247 self.requirements.update(self._baserequirements(create))
247 self.requirements.update(self._baserequirements(create))
248 if self.ui.configbool('format', 'usestore', True):
248 if self.ui.configbool('format', 'usestore', True):
249 self.vfs.mkdir("store")
249 self.vfs.mkdir("store")
250 self.requirements.add("store")
250 self.requirements.add("store")
251 if self.ui.configbool('format', 'usefncache', True):
251 if self.ui.configbool('format', 'usefncache', True):
252 self.requirements.add("fncache")
252 self.requirements.add("fncache")
253 if self.ui.configbool('format', 'dotencode', True):
253 if self.ui.configbool('format', 'dotencode', True):
254 self.requirements.add('dotencode')
254 self.requirements.add('dotencode')
255 # create an invalid changelog
255 # create an invalid changelog
256 self.vfs.append(
256 self.vfs.append(
257 "00changelog.i",
257 "00changelog.i",
258 '\0\0\0\2' # represents revlogv2
258 '\0\0\0\2' # represents revlogv2
259 ' dummy changelog to prevent using the old repo layout'
259 ' dummy changelog to prevent using the old repo layout'
260 )
260 )
261 if self.ui.configbool('format', 'generaldelta', False):
261 if self.ui.configbool('format', 'generaldelta', False):
262 self.requirements.add("generaldelta")
262 self.requirements.add("generaldelta")
263 if self.ui.configbool('experimental', 'treemanifest', False):
263 if self.ui.configbool('experimental', 'treemanifest', False):
264 self.requirements.add("treemanifest")
264 self.requirements.add("treemanifest")
265 if self.ui.configbool('experimental', 'manifestv2', False):
265 if self.ui.configbool('experimental', 'manifestv2', False):
266 self.requirements.add("manifestv2")
266 self.requirements.add("manifestv2")
267 else:
267 else:
268 raise error.RepoError(_("repository %s not found") % path)
268 raise error.RepoError(_("repository %s not found") % path)
269 elif create:
269 elif create:
270 raise error.RepoError(_("repository %s already exists") % path)
270 raise error.RepoError(_("repository %s already exists") % path)
271 else:
271 else:
272 try:
272 try:
273 self.requirements = scmutil.readrequires(
273 self.requirements = scmutil.readrequires(
274 self.vfs, self.supported)
274 self.vfs, self.supported)
275 except IOError, inst:
275 except IOError, inst:
276 if inst.errno != errno.ENOENT:
276 if inst.errno != errno.ENOENT:
277 raise
277 raise
278
278
279 self.sharedpath = self.path
279 self.sharedpath = self.path
280 try:
280 try:
281 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
281 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
282 realpath=True)
282 realpath=True)
283 s = vfs.base
283 s = vfs.base
284 if not vfs.exists():
284 if not vfs.exists():
285 raise error.RepoError(
285 raise error.RepoError(
286 _('.hg/sharedpath points to nonexistent directory %s') % s)
286 _('.hg/sharedpath points to nonexistent directory %s') % s)
287 self.sharedpath = s
287 self.sharedpath = s
288 except IOError, inst:
288 except IOError, inst:
289 if inst.errno != errno.ENOENT:
289 if inst.errno != errno.ENOENT:
290 raise
290 raise
291
291
292 self.store = store.store(
292 self.store = store.store(
293 self.requirements, self.sharedpath, scmutil.vfs)
293 self.requirements, self.sharedpath, scmutil.vfs)
294 self.spath = self.store.path
294 self.spath = self.store.path
295 self.svfs = self.store.vfs
295 self.svfs = self.store.vfs
296 self.sopener = self.svfs
296 self.sopener = self.svfs
297 self.sjoin = self.store.join
297 self.sjoin = self.store.join
298 self.vfs.createmode = self.store.createmode
298 self.vfs.createmode = self.store.createmode
299 self._applyopenerreqs()
299 self._applyopenerreqs()
300 if create:
300 if create:
301 self._writerequirements()
301 self._writerequirements()
302
302
303
303
304 self._branchcaches = {}
304 self._branchcaches = {}
305 self._revbranchcache = None
305 self._revbranchcache = None
306 self.filterpats = {}
306 self.filterpats = {}
307 self._datafilters = {}
307 self._datafilters = {}
308 self._transref = self._lockref = self._wlockref = None
308 self._transref = self._lockref = self._wlockref = None
309
309
310 # A cache for various files under .hg/ that tracks file changes,
310 # A cache for various files under .hg/ that tracks file changes,
311 # (used by the filecache decorator)
311 # (used by the filecache decorator)
312 #
312 #
313 # Maps a property name to its util.filecacheentry
313 # Maps a property name to its util.filecacheentry
314 self._filecache = {}
314 self._filecache = {}
315
315
316 # hold sets of revision to be filtered
316 # hold sets of revision to be filtered
317 # should be cleared when something might have changed the filter value:
317 # should be cleared when something might have changed the filter value:
318 # - new changesets,
318 # - new changesets,
319 # - phase change,
319 # - phase change,
320 # - new obsolescence marker,
320 # - new obsolescence marker,
321 # - working directory parent change,
321 # - working directory parent change,
322 # - bookmark changes
322 # - bookmark changes
323 self.filteredrevcache = {}
323 self.filteredrevcache = {}
324
324
325 # generic mapping between names and nodes
325 # generic mapping between names and nodes
326 self.names = namespaces.namespaces()
326 self.names = namespaces.namespaces()
327
327
328 def close(self):
328 def close(self):
329 self._writecaches()
329 self._writecaches()
330
330
331 def _writecaches(self):
331 def _writecaches(self):
332 if self._revbranchcache:
332 if self._revbranchcache:
333 self._revbranchcache.write()
333 self._revbranchcache.write()
334
334
335 def _restrictcapabilities(self, caps):
335 def _restrictcapabilities(self, caps):
336 if self.ui.configbool('experimental', 'bundle2-advertise', True):
336 if self.ui.configbool('experimental', 'bundle2-advertise', True):
337 caps = set(caps)
337 caps = set(caps)
338 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
338 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
339 caps.add('bundle2=' + urllib.quote(capsblob))
339 caps.add('bundle2=' + urllib.quote(capsblob))
340 return caps
340 return caps
341
341
342 def _applyopenerreqs(self):
342 def _applyopenerreqs(self):
343 self.svfs.options = dict((r, 1) for r in self.requirements
343 self.svfs.options = dict((r, 1) for r in self.requirements
344 if r in self.openerreqs)
344 if r in self.openerreqs)
345 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
345 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
346 if chunkcachesize is not None:
346 if chunkcachesize is not None:
347 self.svfs.options['chunkcachesize'] = chunkcachesize
347 self.svfs.options['chunkcachesize'] = chunkcachesize
348 maxchainlen = self.ui.configint('format', 'maxchainlen')
348 maxchainlen = self.ui.configint('format', 'maxchainlen')
349 if maxchainlen is not None:
349 if maxchainlen is not None:
350 self.svfs.options['maxchainlen'] = maxchainlen
350 self.svfs.options['maxchainlen'] = maxchainlen
351 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
351 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
352 if manifestcachesize is not None:
352 if manifestcachesize is not None:
353 self.svfs.options['manifestcachesize'] = manifestcachesize
353 self.svfs.options['manifestcachesize'] = manifestcachesize
354
354
355 def _writerequirements(self):
355 def _writerequirements(self):
356 scmutil.writerequires(self.vfs, self.requirements)
356 scmutil.writerequires(self.vfs, self.requirements)
357
357
358 def _checknested(self, path):
358 def _checknested(self, path):
359 """Determine if path is a legal nested repository."""
359 """Determine if path is a legal nested repository."""
360 if not path.startswith(self.root):
360 if not path.startswith(self.root):
361 return False
361 return False
362 subpath = path[len(self.root) + 1:]
362 subpath = path[len(self.root) + 1:]
363 normsubpath = util.pconvert(subpath)
363 normsubpath = util.pconvert(subpath)
364
364
365 # XXX: Checking against the current working copy is wrong in
365 # XXX: Checking against the current working copy is wrong in
366 # the sense that it can reject things like
366 # the sense that it can reject things like
367 #
367 #
368 # $ hg cat -r 10 sub/x.txt
368 # $ hg cat -r 10 sub/x.txt
369 #
369 #
370 # if sub/ is no longer a subrepository in the working copy
370 # if sub/ is no longer a subrepository in the working copy
371 # parent revision.
371 # parent revision.
372 #
372 #
373 # However, it can of course also allow things that would have
373 # However, it can of course also allow things that would have
374 # been rejected before, such as the above cat command if sub/
374 # been rejected before, such as the above cat command if sub/
375 # is a subrepository now, but was a normal directory before.
375 # is a subrepository now, but was a normal directory before.
376 # The old path auditor would have rejected by mistake since it
376 # The old path auditor would have rejected by mistake since it
377 # panics when it sees sub/.hg/.
377 # panics when it sees sub/.hg/.
378 #
378 #
379 # All in all, checking against the working copy seems sensible
379 # All in all, checking against the working copy seems sensible
380 # since we want to prevent access to nested repositories on
380 # since we want to prevent access to nested repositories on
381 # the filesystem *now*.
381 # the filesystem *now*.
382 ctx = self[None]
382 ctx = self[None]
383 parts = util.splitpath(subpath)
383 parts = util.splitpath(subpath)
384 while parts:
384 while parts:
385 prefix = '/'.join(parts)
385 prefix = '/'.join(parts)
386 if prefix in ctx.substate:
386 if prefix in ctx.substate:
387 if prefix == normsubpath:
387 if prefix == normsubpath:
388 return True
388 return True
389 else:
389 else:
390 sub = ctx.sub(prefix)
390 sub = ctx.sub(prefix)
391 return sub.checknested(subpath[len(prefix) + 1:])
391 return sub.checknested(subpath[len(prefix) + 1:])
392 else:
392 else:
393 parts.pop()
393 parts.pop()
394 return False
394 return False
395
395
396 def peer(self):
396 def peer(self):
397 return localpeer(self) # not cached to avoid reference cycle
397 return localpeer(self) # not cached to avoid reference cycle
398
398
399 def unfiltered(self):
399 def unfiltered(self):
400 """Return unfiltered version of the repository
400 """Return unfiltered version of the repository
401
401
402 Intended to be overwritten by filtered repo."""
402 Intended to be overwritten by filtered repo."""
403 return self
403 return self
404
404
405 def filtered(self, name):
405 def filtered(self, name):
406 """Return a filtered version of a repository"""
406 """Return a filtered version of a repository"""
407 # build a new class with the mixin and the current class
407 # build a new class with the mixin and the current class
408 # (possibly subclass of the repo)
408 # (possibly subclass of the repo)
409 class proxycls(repoview.repoview, self.unfiltered().__class__):
409 class proxycls(repoview.repoview, self.unfiltered().__class__):
410 pass
410 pass
411 return proxycls(self, name)
411 return proxycls(self, name)
412
412
413 @repofilecache('bookmarks')
413 @repofilecache('bookmarks')
414 def _bookmarks(self):
414 def _bookmarks(self):
415 return bookmarks.bmstore(self)
415 return bookmarks.bmstore(self)
416
416
417 @repofilecache('bookmarks.current')
417 @repofilecache('bookmarks.current')
418 def _activebookmark(self):
418 def _activebookmark(self):
419 return bookmarks.readactive(self)
419 return bookmarks.readactive(self)
420
420
421 def bookmarkheads(self, bookmark):
421 def bookmarkheads(self, bookmark):
422 name = bookmark.split('@', 1)[0]
422 name = bookmark.split('@', 1)[0]
423 heads = []
423 heads = []
424 for mark, n in self._bookmarks.iteritems():
424 for mark, n in self._bookmarks.iteritems():
425 if mark.split('@', 1)[0] == name:
425 if mark.split('@', 1)[0] == name:
426 heads.append(n)
426 heads.append(n)
427 return heads
427 return heads
428
428
429 @storecache('phaseroots')
429 @storecache('phaseroots')
430 def _phasecache(self):
430 def _phasecache(self):
431 return phases.phasecache(self, self._phasedefaults)
431 return phases.phasecache(self, self._phasedefaults)
432
432
433 @storecache('obsstore')
433 @storecache('obsstore')
434 def obsstore(self):
434 def obsstore(self):
435 # read default format for new obsstore.
435 # read default format for new obsstore.
436 defaultformat = self.ui.configint('format', 'obsstore-version', None)
436 defaultformat = self.ui.configint('format', 'obsstore-version', None)
437 # rely on obsstore class default when possible.
437 # rely on obsstore class default when possible.
438 kwargs = {}
438 kwargs = {}
439 if defaultformat is not None:
439 if defaultformat is not None:
440 kwargs['defaultformat'] = defaultformat
440 kwargs['defaultformat'] = defaultformat
441 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
441 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
442 store = obsolete.obsstore(self.svfs, readonly=readonly,
442 store = obsolete.obsstore(self.svfs, readonly=readonly,
443 **kwargs)
443 **kwargs)
444 if store and readonly:
444 if store and readonly:
445 self.ui.warn(
445 self.ui.warn(
446 _('obsolete feature not enabled but %i markers found!\n')
446 _('obsolete feature not enabled but %i markers found!\n')
447 % len(list(store)))
447 % len(list(store)))
448 return store
448 return store
449
449
450 @storecache('00changelog.i')
450 @storecache('00changelog.i')
451 def changelog(self):
451 def changelog(self):
452 c = changelog.changelog(self.svfs)
452 c = changelog.changelog(self.svfs)
453 if 'HG_PENDING' in os.environ:
453 if 'HG_PENDING' in os.environ:
454 p = os.environ['HG_PENDING']
454 p = os.environ['HG_PENDING']
455 if p.startswith(self.root):
455 if p.startswith(self.root):
456 c.readpending('00changelog.i.a')
456 c.readpending('00changelog.i.a')
457 return c
457 return c
458
458
459 @storecache('00manifest.i')
459 @storecache('00manifest.i')
460 def manifest(self):
460 def manifest(self):
461 return manifest.manifest(self.svfs)
461 return manifest.manifest(self.svfs)
462
462
463 def dirlog(self, dir):
463 def dirlog(self, dir):
464 return manifest.manifest(self.svfs, dir)
464 return self.manifest.dirlog(dir)
465
465
466 @repofilecache('dirstate')
466 @repofilecache('dirstate')
467 def dirstate(self):
467 def dirstate(self):
468 warned = [0]
468 warned = [0]
469 def validate(node):
469 def validate(node):
470 try:
470 try:
471 self.changelog.rev(node)
471 self.changelog.rev(node)
472 return node
472 return node
473 except error.LookupError:
473 except error.LookupError:
474 if not warned[0]:
474 if not warned[0]:
475 warned[0] = True
475 warned[0] = True
476 self.ui.warn(_("warning: ignoring unknown"
476 self.ui.warn(_("warning: ignoring unknown"
477 " working parent %s!\n") % short(node))
477 " working parent %s!\n") % short(node))
478 return nullid
478 return nullid
479
479
480 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
480 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
481
481
482 def __getitem__(self, changeid):
482 def __getitem__(self, changeid):
483 if changeid is None:
483 if changeid is None:
484 return context.workingctx(self)
484 return context.workingctx(self)
485 if isinstance(changeid, slice):
485 if isinstance(changeid, slice):
486 return [context.changectx(self, i)
486 return [context.changectx(self, i)
487 for i in xrange(*changeid.indices(len(self)))
487 for i in xrange(*changeid.indices(len(self)))
488 if i not in self.changelog.filteredrevs]
488 if i not in self.changelog.filteredrevs]
489 return context.changectx(self, changeid)
489 return context.changectx(self, changeid)
490
490
491 def __contains__(self, changeid):
491 def __contains__(self, changeid):
492 try:
492 try:
493 self[changeid]
493 self[changeid]
494 return True
494 return True
495 except error.RepoLookupError:
495 except error.RepoLookupError:
496 return False
496 return False
497
497
498 def __nonzero__(self):
498 def __nonzero__(self):
499 return True
499 return True
500
500
501 def __len__(self):
501 def __len__(self):
502 return len(self.changelog)
502 return len(self.changelog)
503
503
504 def __iter__(self):
504 def __iter__(self):
505 return iter(self.changelog)
505 return iter(self.changelog)
506
506
507 def revs(self, expr, *args):
507 def revs(self, expr, *args):
508 '''Return a list of revisions matching the given revset'''
508 '''Return a list of revisions matching the given revset'''
509 expr = revset.formatspec(expr, *args)
509 expr = revset.formatspec(expr, *args)
510 m = revset.match(None, expr)
510 m = revset.match(None, expr)
511 return m(self)
511 return m(self)
512
512
513 def set(self, expr, *args):
513 def set(self, expr, *args):
514 '''
514 '''
515 Yield a context for each matching revision, after doing arg
515 Yield a context for each matching revision, after doing arg
516 replacement via revset.formatspec
516 replacement via revset.formatspec
517 '''
517 '''
518 for r in self.revs(expr, *args):
518 for r in self.revs(expr, *args):
519 yield self[r]
519 yield self[r]
520
520
521 def url(self):
521 def url(self):
522 return 'file:' + self.root
522 return 'file:' + self.root
523
523
524 def hook(self, name, throw=False, **args):
524 def hook(self, name, throw=False, **args):
525 """Call a hook, passing this repo instance.
525 """Call a hook, passing this repo instance.
526
526
527 This a convenience method to aid invoking hooks. Extensions likely
527 This a convenience method to aid invoking hooks. Extensions likely
528 won't call this unless they have registered a custom hook or are
528 won't call this unless they have registered a custom hook or are
529 replacing code that is expected to call a hook.
529 replacing code that is expected to call a hook.
530 """
530 """
531 return hook.hook(self.ui, self, name, throw, **args)
531 return hook.hook(self.ui, self, name, throw, **args)
532
532
533 @unfilteredmethod
533 @unfilteredmethod
534 def _tag(self, names, node, message, local, user, date, extra={},
534 def _tag(self, names, node, message, local, user, date, extra={},
535 editor=False):
535 editor=False):
536 if isinstance(names, str):
536 if isinstance(names, str):
537 names = (names,)
537 names = (names,)
538
538
539 branches = self.branchmap()
539 branches = self.branchmap()
540 for name in names:
540 for name in names:
541 self.hook('pretag', throw=True, node=hex(node), tag=name,
541 self.hook('pretag', throw=True, node=hex(node), tag=name,
542 local=local)
542 local=local)
543 if name in branches:
543 if name in branches:
544 self.ui.warn(_("warning: tag %s conflicts with existing"
544 self.ui.warn(_("warning: tag %s conflicts with existing"
545 " branch name\n") % name)
545 " branch name\n") % name)
546
546
547 def writetags(fp, names, munge, prevtags):
547 def writetags(fp, names, munge, prevtags):
548 fp.seek(0, 2)
548 fp.seek(0, 2)
549 if prevtags and prevtags[-1] != '\n':
549 if prevtags and prevtags[-1] != '\n':
550 fp.write('\n')
550 fp.write('\n')
551 for name in names:
551 for name in names:
552 if munge:
552 if munge:
553 m = munge(name)
553 m = munge(name)
554 else:
554 else:
555 m = name
555 m = name
556
556
557 if (self._tagscache.tagtypes and
557 if (self._tagscache.tagtypes and
558 name in self._tagscache.tagtypes):
558 name in self._tagscache.tagtypes):
559 old = self.tags().get(name, nullid)
559 old = self.tags().get(name, nullid)
560 fp.write('%s %s\n' % (hex(old), m))
560 fp.write('%s %s\n' % (hex(old), m))
561 fp.write('%s %s\n' % (hex(node), m))
561 fp.write('%s %s\n' % (hex(node), m))
562 fp.close()
562 fp.close()
563
563
564 prevtags = ''
564 prevtags = ''
565 if local:
565 if local:
566 try:
566 try:
567 fp = self.vfs('localtags', 'r+')
567 fp = self.vfs('localtags', 'r+')
568 except IOError:
568 except IOError:
569 fp = self.vfs('localtags', 'a')
569 fp = self.vfs('localtags', 'a')
570 else:
570 else:
571 prevtags = fp.read()
571 prevtags = fp.read()
572
572
573 # local tags are stored in the current charset
573 # local tags are stored in the current charset
574 writetags(fp, names, None, prevtags)
574 writetags(fp, names, None, prevtags)
575 for name in names:
575 for name in names:
576 self.hook('tag', node=hex(node), tag=name, local=local)
576 self.hook('tag', node=hex(node), tag=name, local=local)
577 return
577 return
578
578
579 try:
579 try:
580 fp = self.wfile('.hgtags', 'rb+')
580 fp = self.wfile('.hgtags', 'rb+')
581 except IOError, e:
581 except IOError, e:
582 if e.errno != errno.ENOENT:
582 if e.errno != errno.ENOENT:
583 raise
583 raise
584 fp = self.wfile('.hgtags', 'ab')
584 fp = self.wfile('.hgtags', 'ab')
585 else:
585 else:
586 prevtags = fp.read()
586 prevtags = fp.read()
587
587
588 # committed tags are stored in UTF-8
588 # committed tags are stored in UTF-8
589 writetags(fp, names, encoding.fromlocal, prevtags)
589 writetags(fp, names, encoding.fromlocal, prevtags)
590
590
591 fp.close()
591 fp.close()
592
592
593 self.invalidatecaches()
593 self.invalidatecaches()
594
594
595 if '.hgtags' not in self.dirstate:
595 if '.hgtags' not in self.dirstate:
596 self[None].add(['.hgtags'])
596 self[None].add(['.hgtags'])
597
597
598 m = matchmod.exact(self.root, '', ['.hgtags'])
598 m = matchmod.exact(self.root, '', ['.hgtags'])
599 tagnode = self.commit(message, user, date, extra=extra, match=m,
599 tagnode = self.commit(message, user, date, extra=extra, match=m,
600 editor=editor)
600 editor=editor)
601
601
602 for name in names:
602 for name in names:
603 self.hook('tag', node=hex(node), tag=name, local=local)
603 self.hook('tag', node=hex(node), tag=name, local=local)
604
604
605 return tagnode
605 return tagnode
606
606
607 def tag(self, names, node, message, local, user, date, editor=False):
607 def tag(self, names, node, message, local, user, date, editor=False):
608 '''tag a revision with one or more symbolic names.
608 '''tag a revision with one or more symbolic names.
609
609
610 names is a list of strings or, when adding a single tag, names may be a
610 names is a list of strings or, when adding a single tag, names may be a
611 string.
611 string.
612
612
613 if local is True, the tags are stored in a per-repository file.
613 if local is True, the tags are stored in a per-repository file.
614 otherwise, they are stored in the .hgtags file, and a new
614 otherwise, they are stored in the .hgtags file, and a new
615 changeset is committed with the change.
615 changeset is committed with the change.
616
616
617 keyword arguments:
617 keyword arguments:
618
618
619 local: whether to store tags in non-version-controlled file
619 local: whether to store tags in non-version-controlled file
620 (default False)
620 (default False)
621
621
622 message: commit message to use if committing
622 message: commit message to use if committing
623
623
624 user: name of user to use if committing
624 user: name of user to use if committing
625
625
626 date: date tuple to use if committing'''
626 date: date tuple to use if committing'''
627
627
628 if not local:
628 if not local:
629 m = matchmod.exact(self.root, '', ['.hgtags'])
629 m = matchmod.exact(self.root, '', ['.hgtags'])
630 if any(self.status(match=m, unknown=True, ignored=True)):
630 if any(self.status(match=m, unknown=True, ignored=True)):
631 raise util.Abort(_('working copy of .hgtags is changed'),
631 raise util.Abort(_('working copy of .hgtags is changed'),
632 hint=_('please commit .hgtags manually'))
632 hint=_('please commit .hgtags manually'))
633
633
634 self.tags() # instantiate the cache
634 self.tags() # instantiate the cache
635 self._tag(names, node, message, local, user, date, editor=editor)
635 self._tag(names, node, message, local, user, date, editor=editor)
636
636
637 @filteredpropertycache
637 @filteredpropertycache
638 def _tagscache(self):
638 def _tagscache(self):
639 '''Returns a tagscache object that contains various tags related
639 '''Returns a tagscache object that contains various tags related
640 caches.'''
640 caches.'''
641
641
642 # This simplifies its cache management by having one decorated
642 # This simplifies its cache management by having one decorated
643 # function (this one) and the rest simply fetch things from it.
643 # function (this one) and the rest simply fetch things from it.
644 class tagscache(object):
644 class tagscache(object):
645 def __init__(self):
645 def __init__(self):
646 # These two define the set of tags for this repository. tags
646 # These two define the set of tags for this repository. tags
647 # maps tag name to node; tagtypes maps tag name to 'global' or
647 # maps tag name to node; tagtypes maps tag name to 'global' or
648 # 'local'. (Global tags are defined by .hgtags across all
648 # 'local'. (Global tags are defined by .hgtags across all
649 # heads, and local tags are defined in .hg/localtags.)
649 # heads, and local tags are defined in .hg/localtags.)
650 # They constitute the in-memory cache of tags.
650 # They constitute the in-memory cache of tags.
651 self.tags = self.tagtypes = None
651 self.tags = self.tagtypes = None
652
652
653 self.nodetagscache = self.tagslist = None
653 self.nodetagscache = self.tagslist = None
654
654
655 cache = tagscache()
655 cache = tagscache()
656 cache.tags, cache.tagtypes = self._findtags()
656 cache.tags, cache.tagtypes = self._findtags()
657
657
658 return cache
658 return cache
659
659
660 def tags(self):
660 def tags(self):
661 '''return a mapping of tag to node'''
661 '''return a mapping of tag to node'''
662 t = {}
662 t = {}
663 if self.changelog.filteredrevs:
663 if self.changelog.filteredrevs:
664 tags, tt = self._findtags()
664 tags, tt = self._findtags()
665 else:
665 else:
666 tags = self._tagscache.tags
666 tags = self._tagscache.tags
667 for k, v in tags.iteritems():
667 for k, v in tags.iteritems():
668 try:
668 try:
669 # ignore tags to unknown nodes
669 # ignore tags to unknown nodes
670 self.changelog.rev(v)
670 self.changelog.rev(v)
671 t[k] = v
671 t[k] = v
672 except (error.LookupError, ValueError):
672 except (error.LookupError, ValueError):
673 pass
673 pass
674 return t
674 return t
675
675
676 def _findtags(self):
676 def _findtags(self):
677 '''Do the hard work of finding tags. Return a pair of dicts
677 '''Do the hard work of finding tags. Return a pair of dicts
678 (tags, tagtypes) where tags maps tag name to node, and tagtypes
678 (tags, tagtypes) where tags maps tag name to node, and tagtypes
679 maps tag name to a string like \'global\' or \'local\'.
679 maps tag name to a string like \'global\' or \'local\'.
680 Subclasses or extensions are free to add their own tags, but
680 Subclasses or extensions are free to add their own tags, but
681 should be aware that the returned dicts will be retained for the
681 should be aware that the returned dicts will be retained for the
682 duration of the localrepo object.'''
682 duration of the localrepo object.'''
683
683
684 # XXX what tagtype should subclasses/extensions use? Currently
684 # XXX what tagtype should subclasses/extensions use? Currently
685 # mq and bookmarks add tags, but do not set the tagtype at all.
685 # mq and bookmarks add tags, but do not set the tagtype at all.
686 # Should each extension invent its own tag type? Should there
686 # Should each extension invent its own tag type? Should there
687 # be one tagtype for all such "virtual" tags? Or is the status
687 # be one tagtype for all such "virtual" tags? Or is the status
688 # quo fine?
688 # quo fine?
689
689
690 alltags = {} # map tag name to (node, hist)
690 alltags = {} # map tag name to (node, hist)
691 tagtypes = {}
691 tagtypes = {}
692
692
693 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
693 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
694 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
694 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
695
695
696 # Build the return dicts. Have to re-encode tag names because
696 # Build the return dicts. Have to re-encode tag names because
697 # the tags module always uses UTF-8 (in order not to lose info
697 # the tags module always uses UTF-8 (in order not to lose info
698 # writing to the cache), but the rest of Mercurial wants them in
698 # writing to the cache), but the rest of Mercurial wants them in
699 # local encoding.
699 # local encoding.
700 tags = {}
700 tags = {}
701 for (name, (node, hist)) in alltags.iteritems():
701 for (name, (node, hist)) in alltags.iteritems():
702 if node != nullid:
702 if node != nullid:
703 tags[encoding.tolocal(name)] = node
703 tags[encoding.tolocal(name)] = node
704 tags['tip'] = self.changelog.tip()
704 tags['tip'] = self.changelog.tip()
705 tagtypes = dict([(encoding.tolocal(name), value)
705 tagtypes = dict([(encoding.tolocal(name), value)
706 for (name, value) in tagtypes.iteritems()])
706 for (name, value) in tagtypes.iteritems()])
707 return (tags, tagtypes)
707 return (tags, tagtypes)
708
708
709 def tagtype(self, tagname):
709 def tagtype(self, tagname):
710 '''
710 '''
711 return the type of the given tag. result can be:
711 return the type of the given tag. result can be:
712
712
713 'local' : a local tag
713 'local' : a local tag
714 'global' : a global tag
714 'global' : a global tag
715 None : tag does not exist
715 None : tag does not exist
716 '''
716 '''
717
717
718 return self._tagscache.tagtypes.get(tagname)
718 return self._tagscache.tagtypes.get(tagname)
719
719
720 def tagslist(self):
720 def tagslist(self):
721 '''return a list of tags ordered by revision'''
721 '''return a list of tags ordered by revision'''
722 if not self._tagscache.tagslist:
722 if not self._tagscache.tagslist:
723 l = []
723 l = []
724 for t, n in self.tags().iteritems():
724 for t, n in self.tags().iteritems():
725 l.append((self.changelog.rev(n), t, n))
725 l.append((self.changelog.rev(n), t, n))
726 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
726 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
727
727
728 return self._tagscache.tagslist
728 return self._tagscache.tagslist
729
729
730 def nodetags(self, node):
730 def nodetags(self, node):
731 '''return the tags associated with a node'''
731 '''return the tags associated with a node'''
732 if not self._tagscache.nodetagscache:
732 if not self._tagscache.nodetagscache:
733 nodetagscache = {}
733 nodetagscache = {}
734 for t, n in self._tagscache.tags.iteritems():
734 for t, n in self._tagscache.tags.iteritems():
735 nodetagscache.setdefault(n, []).append(t)
735 nodetagscache.setdefault(n, []).append(t)
736 for tags in nodetagscache.itervalues():
736 for tags in nodetagscache.itervalues():
737 tags.sort()
737 tags.sort()
738 self._tagscache.nodetagscache = nodetagscache
738 self._tagscache.nodetagscache = nodetagscache
739 return self._tagscache.nodetagscache.get(node, [])
739 return self._tagscache.nodetagscache.get(node, [])
740
740
741 def nodebookmarks(self, node):
741 def nodebookmarks(self, node):
742 marks = []
742 marks = []
743 for bookmark, n in self._bookmarks.iteritems():
743 for bookmark, n in self._bookmarks.iteritems():
744 if n == node:
744 if n == node:
745 marks.append(bookmark)
745 marks.append(bookmark)
746 return sorted(marks)
746 return sorted(marks)
747
747
748 def branchmap(self):
748 def branchmap(self):
749 '''returns a dictionary {branch: [branchheads]} with branchheads
749 '''returns a dictionary {branch: [branchheads]} with branchheads
750 ordered by increasing revision number'''
750 ordered by increasing revision number'''
751 branchmap.updatecache(self)
751 branchmap.updatecache(self)
752 return self._branchcaches[self.filtername]
752 return self._branchcaches[self.filtername]
753
753
754 @unfilteredmethod
754 @unfilteredmethod
755 def revbranchcache(self):
755 def revbranchcache(self):
756 if not self._revbranchcache:
756 if not self._revbranchcache:
757 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
757 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
758 return self._revbranchcache
758 return self._revbranchcache
759
759
760 def branchtip(self, branch, ignoremissing=False):
760 def branchtip(self, branch, ignoremissing=False):
761 '''return the tip node for a given branch
761 '''return the tip node for a given branch
762
762
763 If ignoremissing is True, then this method will not raise an error.
763 If ignoremissing is True, then this method will not raise an error.
764 This is helpful for callers that only expect None for a missing branch
764 This is helpful for callers that only expect None for a missing branch
765 (e.g. namespace).
765 (e.g. namespace).
766
766
767 '''
767 '''
768 try:
768 try:
769 return self.branchmap().branchtip(branch)
769 return self.branchmap().branchtip(branch)
770 except KeyError:
770 except KeyError:
771 if not ignoremissing:
771 if not ignoremissing:
772 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
772 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
773 else:
773 else:
774 pass
774 pass
775
775
776 def lookup(self, key):
776 def lookup(self, key):
777 return self[key].node()
777 return self[key].node()
778
778
779 def lookupbranch(self, key, remote=None):
779 def lookupbranch(self, key, remote=None):
780 repo = remote or self
780 repo = remote or self
781 if key in repo.branchmap():
781 if key in repo.branchmap():
782 return key
782 return key
783
783
784 repo = (remote and remote.local()) and remote or self
784 repo = (remote and remote.local()) and remote or self
785 return repo[key].branch()
785 return repo[key].branch()
786
786
787 def known(self, nodes):
787 def known(self, nodes):
788 nm = self.changelog.nodemap
788 nm = self.changelog.nodemap
789 pc = self._phasecache
789 pc = self._phasecache
790 result = []
790 result = []
791 for n in nodes:
791 for n in nodes:
792 r = nm.get(n)
792 r = nm.get(n)
793 resp = not (r is None or pc.phase(self, r) >= phases.secret)
793 resp = not (r is None or pc.phase(self, r) >= phases.secret)
794 result.append(resp)
794 result.append(resp)
795 return result
795 return result
796
796
797 def local(self):
797 def local(self):
798 return self
798 return self
799
799
800 def cancopy(self):
800 def cancopy(self):
801 # so statichttprepo's override of local() works
801 # so statichttprepo's override of local() works
802 if not self.local():
802 if not self.local():
803 return False
803 return False
804 if not self.ui.configbool('phases', 'publish', True):
804 if not self.ui.configbool('phases', 'publish', True):
805 return True
805 return True
806 # if publishing we can't copy if there is filtered content
806 # if publishing we can't copy if there is filtered content
807 return not self.filtered('visible').changelog.filteredrevs
807 return not self.filtered('visible').changelog.filteredrevs
808
808
809 def shared(self):
809 def shared(self):
810 '''the type of shared repository (None if not shared)'''
810 '''the type of shared repository (None if not shared)'''
811 if self.sharedpath != self.path:
811 if self.sharedpath != self.path:
812 return 'store'
812 return 'store'
813 return None
813 return None
814
814
815 def join(self, f, *insidef):
815 def join(self, f, *insidef):
816 return self.vfs.join(os.path.join(f, *insidef))
816 return self.vfs.join(os.path.join(f, *insidef))
817
817
818 def wjoin(self, f, *insidef):
818 def wjoin(self, f, *insidef):
819 return self.vfs.reljoin(self.root, f, *insidef)
819 return self.vfs.reljoin(self.root, f, *insidef)
820
820
821 def file(self, f):
821 def file(self, f):
822 if f[0] == '/':
822 if f[0] == '/':
823 f = f[1:]
823 f = f[1:]
824 return filelog.filelog(self.svfs, f)
824 return filelog.filelog(self.svfs, f)
825
825
826 def changectx(self, changeid):
826 def changectx(self, changeid):
827 return self[changeid]
827 return self[changeid]
828
828
829 def parents(self, changeid=None):
829 def parents(self, changeid=None):
830 '''get list of changectxs for parents of changeid'''
830 '''get list of changectxs for parents of changeid'''
831 return self[changeid].parents()
831 return self[changeid].parents()
832
832
833 def setparents(self, p1, p2=nullid):
833 def setparents(self, p1, p2=nullid):
834 self.dirstate.beginparentchange()
834 self.dirstate.beginparentchange()
835 copies = self.dirstate.setparents(p1, p2)
835 copies = self.dirstate.setparents(p1, p2)
836 pctx = self[p1]
836 pctx = self[p1]
837 if copies:
837 if copies:
838 # Adjust copy records, the dirstate cannot do it, it
838 # Adjust copy records, the dirstate cannot do it, it
839 # requires access to parents manifests. Preserve them
839 # requires access to parents manifests. Preserve them
840 # only for entries added to first parent.
840 # only for entries added to first parent.
841 for f in copies:
841 for f in copies:
842 if f not in pctx and copies[f] in pctx:
842 if f not in pctx and copies[f] in pctx:
843 self.dirstate.copy(copies[f], f)
843 self.dirstate.copy(copies[f], f)
844 if p2 == nullid:
844 if p2 == nullid:
845 for f, s in sorted(self.dirstate.copies().items()):
845 for f, s in sorted(self.dirstate.copies().items()):
846 if f not in pctx and s not in pctx:
846 if f not in pctx and s not in pctx:
847 self.dirstate.copy(None, f)
847 self.dirstate.copy(None, f)
848 self.dirstate.endparentchange()
848 self.dirstate.endparentchange()
849
849
850 def filectx(self, path, changeid=None, fileid=None):
850 def filectx(self, path, changeid=None, fileid=None):
851 """changeid can be a changeset revision, node, or tag.
851 """changeid can be a changeset revision, node, or tag.
852 fileid can be a file revision or node."""
852 fileid can be a file revision or node."""
853 return context.filectx(self, path, changeid, fileid)
853 return context.filectx(self, path, changeid, fileid)
854
854
855 def getcwd(self):
855 def getcwd(self):
856 return self.dirstate.getcwd()
856 return self.dirstate.getcwd()
857
857
858 def pathto(self, f, cwd=None):
858 def pathto(self, f, cwd=None):
859 return self.dirstate.pathto(f, cwd)
859 return self.dirstate.pathto(f, cwd)
860
860
861 def wfile(self, f, mode='r'):
861 def wfile(self, f, mode='r'):
862 return self.wvfs(f, mode)
862 return self.wvfs(f, mode)
863
863
864 def _link(self, f):
864 def _link(self, f):
865 return self.wvfs.islink(f)
865 return self.wvfs.islink(f)
866
866
867 def _loadfilter(self, filter):
867 def _loadfilter(self, filter):
868 if filter not in self.filterpats:
868 if filter not in self.filterpats:
869 l = []
869 l = []
870 for pat, cmd in self.ui.configitems(filter):
870 for pat, cmd in self.ui.configitems(filter):
871 if cmd == '!':
871 if cmd == '!':
872 continue
872 continue
873 mf = matchmod.match(self.root, '', [pat])
873 mf = matchmod.match(self.root, '', [pat])
874 fn = None
874 fn = None
875 params = cmd
875 params = cmd
876 for name, filterfn in self._datafilters.iteritems():
876 for name, filterfn in self._datafilters.iteritems():
877 if cmd.startswith(name):
877 if cmd.startswith(name):
878 fn = filterfn
878 fn = filterfn
879 params = cmd[len(name):].lstrip()
879 params = cmd[len(name):].lstrip()
880 break
880 break
881 if not fn:
881 if not fn:
882 fn = lambda s, c, **kwargs: util.filter(s, c)
882 fn = lambda s, c, **kwargs: util.filter(s, c)
883 # Wrap old filters not supporting keyword arguments
883 # Wrap old filters not supporting keyword arguments
884 if not inspect.getargspec(fn)[2]:
884 if not inspect.getargspec(fn)[2]:
885 oldfn = fn
885 oldfn = fn
886 fn = lambda s, c, **kwargs: oldfn(s, c)
886 fn = lambda s, c, **kwargs: oldfn(s, c)
887 l.append((mf, fn, params))
887 l.append((mf, fn, params))
888 self.filterpats[filter] = l
888 self.filterpats[filter] = l
889 return self.filterpats[filter]
889 return self.filterpats[filter]
890
890
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

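    # A sketch of where these filter patterns come from: the [encode] and
    # [decode] hgrc sections map file patterns to filter commands, e.g.
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #   [decode]
    #   *.gz = pipe: gzip
    #
    # With such a configuration, files matching *.gz are uncompressed on the
    # way into the store and recompressed on the way back out.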
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

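    # Note the direction of the two filter chains above: wread() runs data
    # through the 'encode' filters (working directory -> store form), while
    # wwrite() and wwritedata() run it through the 'decode' filters
    # (store -> working directory form).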
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                scmutil.develwarn(self.ui, 'transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        self.hook('pretxnopen', throw=True, txnname=desc)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            pending = lambda: tr.writepending() and self.root or ""
            reporef().hook('pretxnclose', throw=True, pending=pending,
                           txnname=desc, **tr.hookargs)

        tr = transaction.transaction(rp, self.sopener, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate)

        trid = 'TXN:' + util.sha1("%s#%f" % (id(tr), time.time())).hexdigest()
        tr.hookargs['TXNID'] = trid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        self._transref = weakref.ref(tr)
        return tr

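    # A typical caller pattern for transaction(), sketched here and mirrored
    # by commitctx() further below:
    #
    #     lock = repo.lock()
    #     try:
    #         tr = repo.transaction('my-operation')
    #         try:
    #             ...              # write data through tr
    #             tr.close()       # commit the transaction
    #         finally:
    #             tr.release()     # rolls back unless close() succeeded
    #     finally:
    #         lock.release()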
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

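    # When a transaction closes successfully, aftertrans() renames each
    # journal.* file above to its undo.* counterpart (see undoname() and the
    # 'renames' list in transaction()); those undo.* files are what makes a
    # later 'hg rollback' possible.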
    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            ms = mergemod.mergestate(self)
            ms.reset(self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

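    # commit() below relies on this mechanism: its 'commit' hook is queued
    # through _afterlock() so that it only fires once the outermost lock has
    # been released, rather than while the repository is still locked.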
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

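    # The ordering rule from the docstrings above, sketched out; rollback()
    # earlier in this class is an in-file example of a caller doing this:
    #
    #     wlock = repo.wlock()        # always first
    #     lock = repo.lock()          # then the store lock
    #     try:
    #         ...
    #     finally:
    #         release(lock, wlock)    # released in reverse order
    #
    # Taking them in the opposite order risks deadlock against another
    # process that follows the rule.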
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

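    # For a committed rename, the copy metadata built above ends up in the
    # filelog entry roughly as (illustrative values):
    #
    #     meta = {'copy': 'foo', 'copyrev': '<40-hex filelog node of foo>'}
    #
    # with fparent1 set to nullid, which tells readers to consult the copy
    # data instead of the first parent.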
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_('unresolved merge conflicts '
                                       '(see "hg help resolve")'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit);
            # the temporary commit may already have been stripped before the
            # hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

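    # Hook ordering for a commit, as implemented above and in commitctx():
    # 'precommit' fires before anything is written, 'pretxncommit' fires
    # inside the open transaction (and can still veto it), and 'commit'
    # fires via _afterlock() only once the outermost lock is released.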
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

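    # commitctx() is also how extensions create commits without touching the
    # working directory: build a context.memctx describing the parents and
    # file contents, then pass it in (a sketch; 'hg convert' works this way,
    # and 'filectxfn' here is the caller-supplied file-content callback):
    #
    #     ctx = context.memctx(repo, parents, text, files, filectxfn,
    #                          user=user, date=date)
    #     node = repo.commitctx(ctx)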
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

1710 def branches(self, nodes):
1710 def branches(self, nodes):
1711 if not nodes:
1711 if not nodes:
1712 nodes = [self.changelog.tip()]
1712 nodes = [self.changelog.tip()]
1713 b = []
1713 b = []
1714 for n in nodes:
1714 for n in nodes:
1715 t = n
1715 t = n
1716 while True:
1716 while True:
1717 p = self.changelog.parents(n)
1717 p = self.changelog.parents(n)
1718 if p[1] != nullid or p[0] == nullid:
1718 if p[1] != nullid or p[0] == nullid:
1719 b.append((t, n, p[0], p[1]))
1719 b.append((t, n, p[0], p[1]))
1720 break
1720 break
1721 n = p[0]
1721 n = p[0]
1722 return b
1722 return b
1723
1723
1724 def between(self, pairs):
1724 def between(self, pairs):
1725 r = []
1725 r = []
1726
1726
1727 for top, bottom in pairs:
1727 for top, bottom in pairs:
1728 n, l, i = top, [], 0
1728 n, l, i = top, [], 0
1729 f = 1
1729 f = 1
1730
1730
1731 while n != bottom and n != nullid:
1731 while n != bottom and n != nullid:
1732 p = self.changelog.parents(n)[0]
1732 p = self.changelog.parents(n)[0]
1733 if i == f:
1733 if i == f:
1734 l.append(n)
1734 l.append(n)
1735 f = f * 2
1735 f = f * 2
1736 n = p
1736 n = p
1737 i += 1
1737 i += 1
1738
1738
1739 r.append(l)
1739 r.append(l)
1740
1740
1741 return r
1741 return r
1742
1742
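Because f doubles every time the counter catches up to it, between collects nodes at exponentially growing distances (1, 2, 4, 8, ...) along the first-parent chain from each top towards bottom, which is what lets the old wire-protocol discovery bisect long chains cheaply. A standalone sketch of the same walk, assuming only a plain parents(n) callable (hypothetical, for illustration):

def sample_between(parents, top, bottom, nullid=None):
    # Keep the nodes at distances 1, 2, 4, 8, ... from top, walking
    # first parents until bottom (or the null node) is reached.
    sampled, n, i, f = [], top, 0, 1
    while n != bottom and n != nullid:
        if i == f:
            sampled.append(n)
            f *= 2
        n = parents(n)  # first parent only
        i += 1
    return sampled
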
1743     def checkpush(self, pushop):
1744         """Extensions can override this function if additional checks have
1745         to be performed before pushing, or call it if they override push
1746         command.
1747         """
1748         pass
1749 
1750     @unfilteredpropertycache
1751     def prepushoutgoinghooks(self):
1752         """Return util.hooks consisting of "(repo, remote, outgoing)"
1753         functions, which are called before pushing changesets.
1754         """
1755         return util.hooks()
1756 
1757     def stream_in(self, remote, remotereqs):
1758         lock = self.lock()
1759         try:
1760             # Save remote branchmap. We will use it later
1761             # to speed up branchcache creation
1762             rbranchmap = None
1763             if remote.capable("branchmap"):
1764                 rbranchmap = remote.branchmap()
1765 
1766             fp = remote.stream_out()
1767             l = fp.readline()
1768             try:
1769                 resp = int(l)
1770             except ValueError:
1771                 raise error.ResponseError(
1772                     _('unexpected response from remote server:'), l)
1773             if resp == 1:
1774                 raise util.Abort(_('operation forbidden by server'))
1775             elif resp == 2:
1776                 raise util.Abort(_('locking the remote repository failed'))
1777             elif resp != 0:
1778                 raise util.Abort(_('the server sent an unknown error code'))
1779             self.ui.status(_('streaming all changes\n'))
1780             l = fp.readline()
1781             try:
1782                 total_files, total_bytes = map(int, l.split(' ', 1))
1783             except (ValueError, TypeError):
1784                 raise error.ResponseError(
1785                     _('unexpected response from remote server:'), l)
1786             self.ui.status(_('%d files to transfer, %s of data\n') %
1787                            (total_files, util.bytecount(total_bytes)))
1788             handled_bytes = 0
1789             self.ui.progress(_('clone'), 0, total=total_bytes)
1790             start = time.time()
1791 
1792             tr = self.transaction(_('clone'))
1793             try:
1794                 for i in xrange(total_files):
1795                     # XXX doesn't support '\n' or '\r' in filenames
1796                     l = fp.readline()
1797                     try:
1798                         name, size = l.split('\0', 1)
1799                         size = int(size)
1800                     except (ValueError, TypeError):
1801                         raise error.ResponseError(
1802                             _('unexpected response from remote server:'), l)
1803                     if self.ui.debugflag:
1804                         self.ui.debug('adding %s (%s)\n' %
1805                                       (name, util.bytecount(size)))
1806                     # for backwards compat, name was partially encoded
1807                     ofp = self.svfs(store.decodedir(name), 'w')
1808                     for chunk in util.filechunkiter(fp, limit=size):
1809                         handled_bytes += len(chunk)
1810                         self.ui.progress(_('clone'), handled_bytes,
1811                                          total=total_bytes)
1812                         ofp.write(chunk)
1813                     ofp.close()
1814                 tr.close()
1815             finally:
1816                 tr.release()
1817 
1818             # Writing straight to files circumvented the in-memory caches
1819             self.invalidate()
1820 
1821             elapsed = time.time() - start
1822             if elapsed <= 0:
1823                 elapsed = 0.001
1824             self.ui.progress(_('clone'), None)
1825             self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1826                            (util.bytecount(total_bytes), elapsed,
1827                             util.bytecount(total_bytes / elapsed)))
1828 
1829             # new requirements = old non-format requirements +
1830             #                    new format-related remote requirements
1831             # requirements from the streamed-in repository
1832             self.requirements = remotereqs | (
1833                 self.requirements - self.supportedformats)
1834             self._applyopenerreqs()
1835             self._writerequirements()
1836 
1837             if rbranchmap:
1838                 rbheads = []
1839                 closed = []
1840                 for bheads in rbranchmap.itervalues():
1841                     rbheads.extend(bheads)
1842                     for h in bheads:
1843                         r = self.changelog.rev(h)
1844                         b, c = self.changelog.branchinfo(r)
1845                         if c:
1846                             closed.append(h)
1847 
1848                 if rbheads:
1849                     rtiprev = max((int(self.changelog.rev(node))
1850                                    for node in rbheads))
1851                     cache = branchmap.branchcache(rbranchmap,
1852                                                   self[rtiprev].node(),
1853                                                   rtiprev,
1854                                                   closednodes=closed)
1855                     # Try to stick it as low as possible
1856                     # filters above served are unlikely to be fetched from a clone
1857                     for candidate in ('base', 'immutable', 'served'):
1858                         rview = self.filtered(candidate)
1859                         if cache.validfor(rview):
1860                             self._branchcaches[candidate] = cache
1861                             cache.write(rview)
1862                             break
1863             self.invalidate()
1864             return len(self.heads()) + 1
1865         finally:
1866             lock.release()
1867 
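The stream_out wire format consumed above is line-oriented: a status-code line (0 ok, 1 forbidden, 2 remote lock failed), then a '<file count> <total bytes>' line, then for each file a 'name\0size' header followed by exactly size raw bytes. A minimal sketch of reading just the two header lines, assuming fp is the file-like object returned by remote.stream_out():

def read_stream_header(fp):
    # Status line: anything but 0 means the server refused the stream.
    resp = int(fp.readline())
    if resp != 0:
        raise IOError('stream refused with code %d' % resp)
    # Summary line: "<number of files> <total payload size in bytes>".
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    return total_files, total_bytes
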
1868     def clone(self, remote, heads=[], stream=None):
1869         '''clone remote repository.
1870 
1871         keyword arguments:
1872         heads: list of revs to clone (forces use of pull)
1873         stream: use streaming clone if possible'''
1874 
1875         # now, all clients that can request uncompressed clones can
1876         # read repo formats supported by all servers that can serve
1877         # them.
1878 
1879         # if revlog format changes, client will have to check version
1880         # and format flags on "stream" capability, and use
1881         # uncompressed only if compatible.
1882 
1883         if stream is None:
1884             # if the server explicitly prefers to stream (for fast LANs)
1885             stream = remote.capable('stream-preferred')
1886 
1887         if stream and not heads:
1888             # 'stream' means remote revlog format is revlogv1 only
1889             if remote.capable('stream'):
1890                 self.stream_in(remote, set(('revlogv1',)))
1891             else:
1892                 # otherwise, 'streamreqs' contains the remote revlog format
1893                 streamreqs = remote.capable('streamreqs')
1894                 if streamreqs:
1895                     streamreqs = set(streamreqs.split(','))
1896                     # if we support it, stream in and adjust our requirements
1897                     if not streamreqs - self.supportedformats:
1898                         self.stream_in(remote, streamreqs)
1899 
1900         quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1901         try:
1902             self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1903             ret = exchange.pull(self, remote, heads).cgresult
1904         finally:
1905             self.ui.restoreconfig(quiet)
1906         return ret
1907 
1908     def pushkey(self, namespace, key, old, new):
1909         try:
1910             tr = self.currenttransaction()
1911             hookargs = {}
1912             if tr is not None:
1913                 hookargs.update(tr.hookargs)
1914                 pending = lambda: tr.writepending() and self.root or ""
1915                 hookargs['pending'] = pending
1916             hookargs['namespace'] = namespace
1917             hookargs['key'] = key
1918             hookargs['old'] = old
1919             hookargs['new'] = new
1920             self.hook('prepushkey', throw=True, **hookargs)
1921         except error.HookAbort, exc:
1922             self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1923             if exc.hint:
1924                 self.ui.write_err(_("(%s)\n") % exc.hint)
1925             return False
1926         self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1927         ret = pushkey.push(self, namespace, key, old, new)
1928         def runhook():
1929             self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1930                       ret=ret)
1931         self._afterlock(runhook)
1932         return ret
1933 
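pushkey is the generic string-to-string channel that bookmarks and phases travel over; moving a bookmark, for instance, reduces to a single call. An illustrative sketch only: the key and both hex nodeids below are made up, and each namespace defines its own value conventions.

oldhex = '0' * 40   # where the bookmark currently points
newhex = 'f' * 40   # where it should point afterwards
ok = repo.pushkey('bookmarks', 'feature', oldhex, newhex)
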
1934     def listkeys(self, namespace):
1935         self.hook('prelistkeys', throw=True, namespace=namespace)
1936         self.ui.debug('listing keys for "%s"\n' % namespace)
1937         values = pushkey.list(self, namespace)
1938         self.hook('listkeys', namespace=namespace, values=values)
1939         return values
1940 
1941     def debugwireargs(self, one, two, three=None, four=None, five=None):
1942         '''used to test argument passing over the wire'''
1943         return "%s %s %s %s %s" % (one, two, three, four, five)
1944 
1945     def savecommitmessage(self, text):
1946         fp = self.vfs('last-message.txt', 'wb')
1947         try:
1948             fp.write(text)
1949         finally:
1950             fp.close()
1951         return self.pathto(fp.name[len(self.root) + 1:])
1952 
1953 # used to avoid circular references so destructors work
1954 def aftertrans(files):
1955     renamefiles = [tuple(t) for t in files]
1956     def a():
1957         for vfs, src, dest in renamefiles:
1958             try:
1959                 vfs.rename(src, dest)
1960             except OSError: # journal file does not yet exist
1961                 pass
1962     return a
1963 
1964 def undoname(fn):
1965     base, name = os.path.split(fn)
1966     assert name.startswith('journal')
1967     return os.path.join(base, name.replace('journal', 'undo', 1))
1968 
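undoname just maps a transaction journal path to the matching undo file, for example:

# undoname('.hg/store/journal')            -> '.hg/store/undo'
# undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'
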
1969 def instance(ui, path, create):
1970     return localrepository(ui, util.urllocalpath(path), create)
1971 
1972 def islocal(path):
1973     return True
@@ -1,946 +1,961 b''
1 # manifest.py - manifest revision class for mercurial
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7 
8 from i18n import _
9 import mdiff, parsers, error, revlog, util
10 import array, struct
11 import os
12 
13 propertycache = util.propertycache
14 
15 def _parsev1(data):
16     # This method does a little bit of excessive-looking
17     # precondition checking. This is so that the behavior of this
18     # class exactly matches its C counterpart to try and help
19     # prevent surprise breakage for anyone that develops against
20     # the pure version.
21     if data and data[-1] != '\n':
22         raise ValueError('Manifest did not end in a newline.')
23     prev = None
24     for l in data.splitlines():
25         if prev is not None and prev > l:
26             raise ValueError('Manifest lines not in sorted order.')
27         prev = l
28         f, n = l.split('\0')
29         if len(n) > 40:
30             yield f, revlog.bin(n[:40]), n[40:]
31         else:
32             yield f, revlog.bin(n), ''
33 
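Each v1 manifest line is '<path>\0<40 hex chars><flags>\n', sorted by path, with the optional flag character ('x' executable, 'l' symlink) riding directly behind the hash. A tiny sketch with made-up nodeids:

data = ("bar/baz.txt\0" + "a" * 40 + "\n" +   # no flags
        "foo.py\0" + "b" * 40 + "x\n")        # 'x' = executable
for path, node, flags in _parsev1(data):
    print path, flags    # node comes back as the 20-byte binary sha1
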
34 def _parsev2(data):
35     metadataend = data.find('\n')
36     # Just ignore metadata for now
37     pos = metadataend + 1
38     prevf = ''
39     while pos < len(data):
40         end = data.find('\n', pos + 1) # +1 to skip stem length byte
41         if end == -1:
42             raise ValueError('Manifest ended with incomplete file entry.')
43         stemlen = ord(data[pos])
44         items = data[pos + 1:end].split('\0')
45         f = prevf[:stemlen] + items[0]
46         if prevf > f:
47             raise ValueError('Manifest entries not in sorted order.')
48         fl = items[1]
49         # Just ignore metadata (items[2:]) for now
50         n = data[end + 1:end + 21]
51         yield f, n, fl
52         pos = end + 22
53         prevf = f
54 
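A v2 text opens with one metadata line, then encodes each entry as a stem-length byte, the path suffix, '\0<flags>\n', and the raw 20-byte node plus a trailing newline, so consecutive paths share their common prefix. A worked sketch with fabricated nodes, hand-checked against the parser above:

A, B = '\x11' * 20, '\x22' * 20
data = ('\0\n'                            # (empty) metadata line
        + chr(0) + 'foo\0\n' + A + '\n'   # full name 'foo', no flags
        + chr(2) + 'x\0x\n' + B + '\n')   # stem 'fo' + 'x', flag 'x'
assert list(_parsev2(data)) == [('foo', A, ''), ('fox', B, 'x')]
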
55 def _parse(data):
56     """Generates (path, node, flags) tuples from a manifest text"""
57     if data.startswith('\0'):
58         return iter(_parsev2(data))
59     else:
60         return iter(_parsev1(data))
61 
62 def _text(it, usemanifestv2):
63     """Given an iterator over (path, node, flags) tuples, returns a manifest
64     text"""
65     if usemanifestv2:
66         return _textv2(it)
67     else:
68         return _textv1(it)
69 
70 def _textv1(it):
71     files = []
72     lines = []
73     _hex = revlog.hex
74     for f, n, fl in it:
75         files.append(f)
76         # if this is changed to support newlines in filenames,
77         # be sure to check the templates/ dir again (especially *-raw.tmpl)
78         lines.append("%s\0%s%s\n" % (f, _hex(n), fl))
79 
80     _checkforbidden(files)
81     return ''.join(lines)
82 
83 def _textv2(it):
84     files = []
85     lines = ['\0\n']
86     prevf = ''
87     for f, n, fl in it:
88         files.append(f)
89         stem = os.path.commonprefix([prevf, f])
90         stemlen = min(len(stem), 255)
91         lines.append("%c%s\0%s\n%s\n" % (stemlen, f[stemlen:], fl, n))
92         prevf = f
93     _checkforbidden(files)
94     return ''.join(lines)
95 
96 class _lazymanifest(dict):
97     """This is the pure implementation of lazymanifest.
98 
99     It has not been optimized *at all* and is not lazy.
100     """
101 
102     def __init__(self, data):
103         dict.__init__(self)
104         for f, n, fl in _parse(data):
105             self[f] = n, fl
106 
107     def __setitem__(self, k, v):
108         node, flag = v
109         assert node is not None
110         if len(node) > 21:
111             node = node[:21] # match c implementation behavior
112         dict.__setitem__(self, k, (node, flag))
113 
114     def __iter__(self):
115         return iter(sorted(dict.keys(self)))
116 
117     def iterkeys(self):
118         return iter(sorted(dict.keys(self)))
119 
120     def iterentries(self):
121         return ((f, e[0], e[1]) for f, e in sorted(self.iteritems()))
122 
123     def copy(self):
124         c = _lazymanifest('')
125         c.update(self)
126         return c
127 
128     def diff(self, m2, clean=False):
129         '''Finds changes between the current manifest and m2.'''
130         diff = {}
131 
132         for fn, e1 in self.iteritems():
133             if fn not in m2:
134                 diff[fn] = e1, (None, '')
135             else:
136                 e2 = m2[fn]
137                 if e1 != e2:
138                     diff[fn] = e1, e2
139                 elif clean:
140                     diff[fn] = None
141 
142         for fn, e2 in m2.iteritems():
143             if fn not in self:
144                 diff[fn] = (None, ''), e2
145 
146         return diff
147 
148     def filtercopy(self, filterfn):
149         c = _lazymanifest('')
150         for f, n, fl in self.iterentries():
151             if filterfn(f):
152                 c[f] = n, fl
153         return c
154 
155     def text(self):
156         """Get the full data of this manifest as a bytestring."""
157         return _textv1(self.iterentries())
158 
159 try:
160     _lazymanifest = parsers.lazymanifest
161 except AttributeError:
162     pass
163 
164 class manifestdict(object):
165     def __init__(self, data=''):
166         if data.startswith('\0'):
167             # _lazymanifest cannot parse v2
168             self._lm = _lazymanifest('')
169             for f, n, fl in _parsev2(data):
170                 self._lm[f] = n, fl
171         else:
172             self._lm = _lazymanifest(data)
173 
174     def __getitem__(self, key):
175         return self._lm[key][0]
176 
177     def find(self, key):
178         return self._lm[key]
179 
180     def __len__(self):
181         return len(self._lm)
182 
183     def __setitem__(self, key, node):
184         self._lm[key] = node, self.flags(key, '')
185 
186     def __contains__(self, key):
187         return key in self._lm
188 
189     def __delitem__(self, key):
190         del self._lm[key]
191 
192     def __iter__(self):
193         return self._lm.__iter__()
194 
195     def iterkeys(self):
196         return self._lm.iterkeys()
197 
198     def keys(self):
199         return list(self.iterkeys())
200 
201     def filesnotin(self, m2):
202         '''Set of files in this manifest that are not in the other'''
203         files = set(self)
204         files.difference_update(m2)
205         return files
206 
207     @propertycache
208     def _dirs(self):
209         return util.dirs(self)
210 
211     def dirs(self):
212         return self._dirs
213 
214     def hasdir(self, dir):
215         return dir in self._dirs
216 
217     def _filesfastpath(self, match):
218         '''Checks whether we can correctly and quickly iterate over matcher
219         files instead of over manifest files.'''
220         files = match.files()
221         return (len(files) < 100 and (match.isexact() or
222                 (not match.anypats() and all(fn in self for fn in files))))
223 
224     def walk(self, match):
225         '''Generates matching file names.
226 
227         Equivalent to manifest.matches(match).iterkeys(), but without creating
228         an entirely new manifest.
229 
230         It also reports nonexistent files by marking them bad with match.bad().
231         '''
232         if match.always():
233             for f in iter(self):
234                 yield f
235             return
236 
237         fset = set(match.files())
238 
239         # avoid the entire walk if we're only looking for specific files
240         if self._filesfastpath(match):
241             for fn in sorted(fset):
242                 yield fn
243             return
244 
245         for fn in self:
246             if fn in fset:
247                 # specified pattern is the exact name
248                 fset.remove(fn)
249             if match(fn):
250                 yield fn
251 
252         # for dirstate.walk, files=['.'] means "walk the whole tree".
253         # follow that here, too
254         fset.discard('.')
255 
256         for fn in sorted(fset):
257             if not self.hasdir(fn):
258                 match.bad(fn, None)
259 
260     def matches(self, match):
261         '''generate a new manifest filtered by the match argument'''
262         if match.always():
263             return self.copy()
264 
265         if self._filesfastpath(match):
266             m = manifestdict()
267             lm = self._lm
268             for fn in match.files():
269                 if fn in lm:
270                     m._lm[fn] = lm[fn]
271             return m
272 
273         m = manifestdict()
274         m._lm = self._lm.filtercopy(match)
275         return m
276 
277     def diff(self, m2, clean=False):
278         '''Finds changes between the current manifest and m2.
279 
280         Args:
281           m2: the manifest to which this manifest should be compared.
282           clean: if true, include files unchanged between these manifests
283                  with a None value in the returned dictionary.
284 
285         The result is returned as a dict with filename as key and
286         values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
287         nodeid in the current/other manifest and fl1/fl2 is the flag
288         in the current/other manifest. Where the file does not exist,
289         the nodeid will be None and the flags will be the empty
290         string.
291         '''
292         return self._lm.diff(m2._lm, clean)
293 
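A quick sketch of that return shape in practice, with fabricated 20-byte nodeids:

n1, n2 = 'a' * 20, 'b' * 20
m1 = manifestdict()
m1['a.txt'] = n1
m2 = m1.copy()
m2['a.txt'] = n2
m2.setflag('a.txt', 'x')
assert m1.diff(m2) == {'a.txt': ((n1, ''), (n2, 'x'))}
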
294     def setflag(self, key, flag):
295         self._lm[key] = self[key], flag
296 
297     def get(self, key, default=None):
298         try:
299             return self._lm[key][0]
300         except KeyError:
301             return default
302 
303     def flags(self, key, default=''):
304         try:
305             return self._lm[key][1]
306         except KeyError:
307             return default
308 
309     def copy(self):
310         c = manifestdict()
311         c._lm = self._lm.copy()
312         return c
313 
314     def iteritems(self):
315         return (x[:2] for x in self._lm.iterentries())
316 
317     def text(self, usemanifestv2=False):
318         if usemanifestv2:
319             return _textv2(self._lm.iterentries())
320         else:
321             # use (probably) native version for v1
322             return self._lm.text()
323 
324     def fastdelta(self, base, changes):
325         """Given a base manifest text as an array.array and a list of changes
326         relative to that text, compute a delta that can be used by revlog.
327         """
328         delta = []
329         dstart = None
330         dend = None
331         dline = [""]
332         start = 0
333         # zero copy representation of base as a buffer
334         addbuf = util.buffer(base)
335 
336         # start with a readonly loop that finds the offset of
337         # each line and creates the deltas
338         for f, todelete in changes:
339             # bs will either be the index of the item or the insert point
340             start, end = _msearch(addbuf, f, start)
341             if not todelete:
342                 h, fl = self._lm[f]
343                 l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
344             else:
345                 if start == end:
346                     # item we want to delete was not found, error out
347                     raise AssertionError(
348                         _("failed to remove %s from manifest") % f)
349                 l = ""
350             if dstart is not None and dstart <= start and dend >= start:
351                 if dend < end:
352                     dend = end
353                 if l:
354                     dline.append(l)
355             else:
356                 if dstart is not None:
357                     delta.append([dstart, dend, "".join(dline)])
358                 dstart = start
359                 dend = end
360                 dline = [l]
361 
362         if dstart is not None:
363             delta.append([dstart, dend, "".join(dline)])
364         # apply the delta to the base, and get a delta for addrevision
365         deltatext, arraytext = _addlistdelta(base, delta)
366         return arraytext, deltatext
367 
368 def _msearch(m, s, lo=0, hi=None):
369     '''return a tuple (start, end) that says where to find s within m.
370 
371     If the string is found, m[start:end] is the line containing
372     that string. If start == end the string was not found and
373     they indicate the proper sorted insertion point.
374 
375     m should be a buffer or a string
376     s is a string'''
377     def advance(i, c):
378         while i < lenm and m[i] != c:
379             i += 1
380         return i
381     if not s:
382         return (lo, lo)
383     lenm = len(m)
384     if not hi:
385         hi = lenm
386     while lo < hi:
387         mid = (lo + hi) // 2
388         start = mid
389         while start > 0 and m[start - 1] != '\n':
390             start -= 1
391         end = advance(start, '\0')
392         if m[start:end] < s:
393             # we know that after the null there are 40 bytes of sha1
394             # this translates to the bisect lo = mid + 1
395             lo = advance(end + 40, '\n') + 1
396         else:
397             # this translates to the bisect hi = mid
398             hi = start
399     end = advance(lo, '\0')
400     found = m[lo:end]
401     if s == found:
402         # we know that after the null there are 40 bytes of sha1
403         end = advance(end + 40, '\n')
404         return (lo, end + 1)
405     else:
406         return (lo, lo)
407 
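For example, over a two-line manifest text with fake 40-character hashes (each line is 43 bytes: name, NUL, 40 hex digits, newline), hand-checked against the code above:

m = "a\0" + "1" * 40 + "\n" + "c\0" + "2" * 40 + "\n"
assert _msearch(m, "a") == (0, 43)    # m[0:43] is the 'a' line
assert _msearch(m, "b") == (43, 43)   # absent: sorted insertion point
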
408 def _checkforbidden(l):
409     """Check filenames for illegal characters."""
410     for f in l:
411         if '\n' in f or '\r' in f:
412             raise error.RevlogError(
413                 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
414 
415 
416 # apply the changes collected during the bisect loop to our addlist
417 # return a delta suitable for addrevision
418 def _addlistdelta(addlist, x):
419     # for large addlist arrays, building a new array is cheaper
420     # than repeatedly modifying the existing one
421     currentposition = 0
422     newaddlist = array.array('c')
423 
424     for start, end, content in x:
425         newaddlist += addlist[currentposition:start]
426         if content:
427             newaddlist += array.array('c', content)
428 
429         currentposition = end
430 
431     newaddlist += addlist[currentposition:]
432 
433     deltatext = "".join(struct.pack(">lll", start, end, len(content))
434                         + content for start, end, content in x)
435     return deltatext, newaddlist
436 
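Each chunk in x becomes a '>lll'-packed (start, end, content length) header followed by the content itself, while newaddlist accumulates the patched text. A hand-checked sketch replacing one line:

base = array.array('c', 'aaa\nbbb\n')
deltatext, newtext = _addlistdelta(base, [[4, 8, 'BBB\n']])
assert deltatext == struct.pack('>lll', 4, 8, 4) + 'BBB\n'
assert newtext.tostring() == 'aaa\nBBB\n'
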
437 def _splittopdir(f):
438     if '/' in f:
439         dir, subpath = f.split('/', 1)
440         return dir + '/', subpath
441     else:
442         return '', f
443 
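In other words:

# _splittopdir('foo/bar/baz.txt') -> ('foo/', 'bar/baz.txt')
# _splittopdir('baz.txt')         -> ('', 'baz.txt')
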
444 class treemanifest(object):
445     def __init__(self, dir='', text=''):
446         self._dir = dir
447         self._node = revlog.nullid
448         self._dirs = {}
449         # Using _lazymanifest here is a little slower than plain old dicts
450         self._files = {}
451         self._flags = {}
452         def readsubtree(subdir, subm):
453             raise AssertionError('treemanifest constructor only accepts '
454                                  'flat manifests')
455         self.parse(text, readsubtree)
456 
457     def _subpath(self, path):
458         return self._dir + path
459 
460     def __len__(self):
461         size = len(self._files)
462         for m in self._dirs.values():
463             size += m.__len__()
464         return size
465 
466     def _isempty(self):
467         return (not self._files and (not self._dirs or
468                 all(m._isempty() for m in self._dirs.values())))
469 
470     def __str__(self):
471         return ('<treemanifest dir=%s, node=%s>' %
472                 (self._dir, revlog.hex(self._node)))
473 
474     def dir(self):
475         '''The directory that this tree manifest represents, including a
476         trailing '/'. Empty string for the repo root directory.'''
477         return self._dir
478 
479     def node(self):
480         '''The node of this instance. nullid for unsaved instances. Should
481         be updated when the instance is read from or written to a revlog.
482         '''
483         return self._node
484 
485     def setnode(self, node):
486         self._node = node
487 
488     def iteritems(self):
489         for p, n in sorted(self._dirs.items() + self._files.items()):
490             if p in self._files:
491                 yield self._subpath(p), n
492             else:
493                 for f, sn in n.iteritems():
494                     yield f, sn
495 
496     def iterkeys(self):
497         for p in sorted(self._dirs.keys() + self._files.keys()):
498             if p in self._files:
499                 yield self._subpath(p)
500             else:
501                 for f in self._dirs[p].iterkeys():
502                     yield f
503 
504     def keys(self):
505         return list(self.iterkeys())
506 
507     def __iter__(self):
508         return self.iterkeys()
509 
510     def __contains__(self, f):
511         if f is None:
512             return False
513         dir, subpath = _splittopdir(f)
514         if dir:
515             if dir not in self._dirs:
516                 return False
517             return self._dirs[dir].__contains__(subpath)
518         else:
519             return f in self._files
520 
521     def get(self, f, default=None):
522         dir, subpath = _splittopdir(f)
523         if dir:
524             if dir not in self._dirs:
525                 return default
526             return self._dirs[dir].get(subpath, default)
527         else:
528             return self._files.get(f, default)
529 
530     def __getitem__(self, f):
531         dir, subpath = _splittopdir(f)
532         if dir:
533             return self._dirs[dir].__getitem__(subpath)
534         else:
535             return self._files[f]
536 
537     def flags(self, f):
538         dir, subpath = _splittopdir(f)
539         if dir:
540             if dir not in self._dirs:
541                 return ''
542             return self._dirs[dir].flags(subpath)
543         else:
544             if f in self._dirs:
545                 return ''
546             return self._flags.get(f, '')
547 
548     def find(self, f):
549         dir, subpath = _splittopdir(f)
550         if dir:
551             return self._dirs[dir].find(subpath)
552         else:
553             return self._files[f], self._flags.get(f, '')
554 
555     def __delitem__(self, f):
556         dir, subpath = _splittopdir(f)
557         if dir:
558             self._dirs[dir].__delitem__(subpath)
559             # If the directory is now empty, remove it
560             if self._dirs[dir]._isempty():
561                 del self._dirs[dir]
562         else:
563             del self._files[f]
564             if f in self._flags:
565                 del self._flags[f]
566 
567     def __setitem__(self, f, n):
568         assert n is not None
569         dir, subpath = _splittopdir(f)
570         if dir:
571             if dir not in self._dirs:
572                 self._dirs[dir] = treemanifest(self._subpath(dir))
573             self._dirs[dir].__setitem__(subpath, n)
574         else:
575             self._files[f] = n[:21] # to match manifestdict's behavior
576 
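Writes recurse one path component at a time, so assigning a nested path materializes the intermediate trees on demand. A sketch with fabricated nodes:

t = treemanifest()
t['a/b/c.txt'] = 'x' * 20
t['a/d.txt'] = 'y' * 20
# t._dirs now holds {'a/': <treemanifest dir=a/>}, which in turn holds
# 'b/' and the file 'd.txt'; files are counted recursively:
assert len(t) == 2
assert t.hasdir('a/b')
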
577 def setflag(self, f, flags):
577 def setflag(self, f, flags):
578 """Set the flags (symlink, executable) for path f."""
578 """Set the flags (symlink, executable) for path f."""
579 assert 'd' not in flags
579 assert 'd' not in flags
580 dir, subpath = _splittopdir(f)
580 dir, subpath = _splittopdir(f)
581 if dir:
581 if dir:
582 if dir not in self._dirs:
582 if dir not in self._dirs:
583 self._dirs[dir] = treemanifest(self._subpath(dir))
583 self._dirs[dir] = treemanifest(self._subpath(dir))
584 self._dirs[dir].setflag(subpath, flags)
584 self._dirs[dir].setflag(subpath, flags)
585 else:
585 else:
586 self._flags[f] = flags
586 self._flags[f] = flags
587
587
588 def copy(self):
588 def copy(self):
589 copy = treemanifest(self._dir)
589 copy = treemanifest(self._dir)
590 copy._node = self._node
590 copy._node = self._node
591 for d in self._dirs:
591 for d in self._dirs:
592 copy._dirs[d] = self._dirs[d].copy()
592 copy._dirs[d] = self._dirs[d].copy()
593 copy._files = dict.copy(self._files)
593 copy._files = dict.copy(self._files)
594 copy._flags = dict.copy(self._flags)
594 copy._flags = dict.copy(self._flags)
595 return copy
595 return copy
596
596
597 def filesnotin(self, m2):
597 def filesnotin(self, m2):
598 '''Set of files in this manifest that are not in the other'''
598 '''Set of files in this manifest that are not in the other'''
599 files = set()
599 files = set()
600 def _filesnotin(t1, t2):
600 def _filesnotin(t1, t2):
601 for d, m1 in t1._dirs.iteritems():
601 for d, m1 in t1._dirs.iteritems():
602 if d in t2._dirs:
602 if d in t2._dirs:
603 m2 = t2._dirs[d]
603 m2 = t2._dirs[d]
604 _filesnotin(m1, m2)
604 _filesnotin(m1, m2)
605 else:
605 else:
606 files.update(m1.iterkeys())
606 files.update(m1.iterkeys())
607
607
608 for fn in t1._files.iterkeys():
608 for fn in t1._files.iterkeys():
609 if fn not in t2._files:
609 if fn not in t2._files:
610 files.add(t1._subpath(fn))
610 files.add(t1._subpath(fn))
611
611
612 _filesnotin(self, m2)
612 _filesnotin(self, m2)
613 return files
613 return files
614
614
615 @propertycache
615 @propertycache
616 def _alldirs(self):
616 def _alldirs(self):
617 return util.dirs(self)
617 return util.dirs(self)
618
618
619 def dirs(self):
619 def dirs(self):
620 return self._alldirs
620 return self._alldirs
621
621
622 def hasdir(self, dir):
622 def hasdir(self, dir):
623 topdir, subdir = _splittopdir(dir)
623 topdir, subdir = _splittopdir(dir)
624 if topdir:
624 if topdir:
625 if topdir in self._dirs:
625 if topdir in self._dirs:
626 return self._dirs[topdir].hasdir(subdir)
626 return self._dirs[topdir].hasdir(subdir)
627 return False
627 return False
628 return (dir + '/') in self._dirs
628 return (dir + '/') in self._dirs
629
629
630 def walk(self, match):
630 def walk(self, match):
631 '''Generates matching file names.
631 '''Generates matching file names.
632
632
633 Equivalent to manifest.matches(match).iterkeys(), but without creating
633 Equivalent to manifest.matches(match).iterkeys(), but without creating
634 an entirely new manifest.
634 an entirely new manifest.
635
635
636 It also reports nonexistent files by marking them bad with match.bad().
636 It also reports nonexistent files by marking them bad with match.bad().
637 '''
637 '''
638 if match.always():
638 if match.always():
639 for f in iter(self):
639 for f in iter(self):
640 yield f
640 yield f
641 return
641 return
642
642
643 fset = set(match.files())
643 fset = set(match.files())
644
644
645 for fn in self._walk(match):
645 for fn in self._walk(match):
646 if fn in fset:
646 if fn in fset:
647 # specified pattern is the exact name
647 # specified pattern is the exact name
648 fset.remove(fn)
648 fset.remove(fn)
649 yield fn
649 yield fn
650
650
651 # for dirstate.walk, files=['.'] means "walk the whole tree".
651 # for dirstate.walk, files=['.'] means "walk the whole tree".
652 # follow that here, too
652 # follow that here, too
653 fset.discard('.')
653 fset.discard('.')
654
654
655 for fn in sorted(fset):
655 for fn in sorted(fset):
656 if not self.hasdir(fn):
656 if not self.hasdir(fn):
657 match.bad(fn, None)
657 match.bad(fn, None)
658
658
659 def _walk(self, match, alldirs=False):
659 def _walk(self, match, alldirs=False):
660 '''Recursively generates matching file names for walk().
660 '''Recursively generates matching file names for walk().
661
661
662 Will visit all subdirectories if alldirs is True, otherwise it will
662 Will visit all subdirectories if alldirs is True, otherwise it will
663 only visit subdirectories for which match.visitdir is True.'''
663 only visit subdirectories for which match.visitdir is True.'''
664
664
665 if not alldirs:
665 if not alldirs:
666 # substring to strip trailing slash
666 # substring to strip trailing slash
667 visit = match.visitdir(self._dir[:-1] or '.')
667 visit = match.visitdir(self._dir[:-1] or '.')
668 if not visit:
668 if not visit:
669 return
669 return
670 alldirs = (visit == 'all')
670 alldirs = (visit == 'all')
671
671
672 # yield this dir's files and walk its submanifests
672 # yield this dir's files and walk its submanifests
673 for p in sorted(self._dirs.keys() + self._files.keys()):
673 for p in sorted(self._dirs.keys() + self._files.keys()):
674 if p in self._files:
674 if p in self._files:
675 fullp = self._subpath(p)
675 fullp = self._subpath(p)
676 if match(fullp):
676 if match(fullp):
677 yield fullp
677 yield fullp
678 else:
678 else:
679 for f in self._dirs[p]._walk(match, alldirs):
679 for f in self._dirs[p]._walk(match, alldirs):
680 yield f
680 yield f
681
681
682 def matches(self, match):
682 def matches(self, match):
683 '''generate a new manifest filtered by the match argument'''
683 '''generate a new manifest filtered by the match argument'''
684 if match.always():
684 if match.always():
685 return self.copy()
685 return self.copy()
686
686
687 return self._matches(match)
687 return self._matches(match)
688
688
689 def _matches(self, match, alldirs=False):
689 def _matches(self, match, alldirs=False):
690 '''recursively generate a new manifest filtered by the match argument.
690 '''recursively generate a new manifest filtered by the match argument.
691
691
692 Will visit all subdirectories if alldirs is True, otherwise it will
692 Will visit all subdirectories if alldirs is True, otherwise it will
693 only visit subdirectories for which match.visitdir is True.'''
693 only visit subdirectories for which match.visitdir is True.'''
694
694
695 ret = treemanifest(self._dir)
695 ret = treemanifest(self._dir)
696 if not alldirs:
696 if not alldirs:
697 # substring to strip trailing slash
697 # substring to strip trailing slash
698 visit = match.visitdir(self._dir[:-1] or '.')
698 visit = match.visitdir(self._dir[:-1] or '.')
699 if not visit:
699 if not visit:
700 return ret
700 return ret
701 alldirs = (visit == 'all')
701 alldirs = (visit == 'all')
702
702
703 for fn in self._files:
703 for fn in self._files:
704 fullp = self._subpath(fn)
704 fullp = self._subpath(fn)
705 if not match(fullp):
705 if not match(fullp):
706 continue
706 continue
707 ret._files[fn] = self._files[fn]
707 ret._files[fn] = self._files[fn]
708 if fn in self._flags:
708 if fn in self._flags:
709 ret._flags[fn] = self._flags[fn]
709 ret._flags[fn] = self._flags[fn]
710
710
711 for dir, subm in self._dirs.iteritems():
711 for dir, subm in self._dirs.iteritems():
712 m = subm._matches(match, alldirs)
712 m = subm._matches(match, alldirs)
713 if not m._isempty():
713 if not m._isempty():
714 ret._dirs[dir] = m
714 ret._dirs[dir] = m
715
715
716 return ret
716 return ret
717
717
718 def diff(self, m2, clean=False):
718 def diff(self, m2, clean=False):
719 '''Finds changes between the current manifest and m2.
719 '''Finds changes between the current manifest and m2.
720
720
721 Args:
721 Args:
722 m2: the manifest to which this manifest should be compared.
722 m2: the manifest to which this manifest should be compared.
723 clean: if true, include files unchanged between these manifests
723 clean: if true, include files unchanged between these manifests
724 with a None value in the returned dictionary.
724 with a None value in the returned dictionary.
725
725
726 The result is returned as a dict with filename as key and
726 The result is returned as a dict with filename as key and
727 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
727 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
728 nodeid in the current/other manifest and fl1/fl2 is the flag
728 nodeid in the current/other manifest and fl1/fl2 is the flag
729 in the current/other manifest. Where the file does not exist,
729 in the current/other manifest. Where the file does not exist,
730 the nodeid will be None and the flags will be the empty
730 the nodeid will be None and the flags will be the empty
731 string.
731 string.
732 '''
732 '''
733 result = {}
733 result = {}
734 emptytree = treemanifest()
734 emptytree = treemanifest()
735 def _diff(t1, t2):
735 def _diff(t1, t2):
736 for d, m1 in t1._dirs.iteritems():
736 for d, m1 in t1._dirs.iteritems():
737 m2 = t2._dirs.get(d, emptytree)
737 m2 = t2._dirs.get(d, emptytree)
738 _diff(m1, m2)
738 _diff(m1, m2)
739
739
740 for d, m2 in t2._dirs.iteritems():
740 for d, m2 in t2._dirs.iteritems():
741 if d not in t1._dirs:
741 if d not in t1._dirs:
742 _diff(emptytree, m2)
742 _diff(emptytree, m2)
743
743
744 for fn, n1 in t1._files.iteritems():
744 for fn, n1 in t1._files.iteritems():
745 fl1 = t1._flags.get(fn, '')
745 fl1 = t1._flags.get(fn, '')
746 n2 = t2._files.get(fn, None)
746 n2 = t2._files.get(fn, None)
747 fl2 = t2._flags.get(fn, '')
747 fl2 = t2._flags.get(fn, '')
748 if n1 != n2 or fl1 != fl2:
748 if n1 != n2 or fl1 != fl2:
749 result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
749 result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
750 elif clean:
750 elif clean:
751 result[t1._subpath(fn)] = None
751 result[t1._subpath(fn)] = None
752
752
753 for fn, n2 in t2._files.iteritems():
753 for fn, n2 in t2._files.iteritems():
754 if fn not in t1._files:
754 if fn not in t1._files:
755 fl2 = t2._flags.get(fn, '')
755 fl2 = t2._flags.get(fn, '')
756 result[t2._subpath(fn)] = ((None, ''), (n2, fl2))
756 result[t2._subpath(fn)] = ((None, ''), (n2, fl2))
757
757
758 _diff(self, m2)
758 _diff(self, m2)
759 return result
759 return result
760
760
    def parse(self, text, readsubtree):
        for f, n, fl in _parse(text):
            if fl == 'd':
                f = f + '/'
                self._dirs[f] = readsubtree(self._subpath(f), n)
            else:
                # Use __setitem__ and setflag rather than assigning directly
                # to _files and _flags, thereby letting us parse flat manifests
                # as well as tree manifests.
                self[f] = n
                if fl:
                    self.setflag(f, fl)

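    # Editorial note (an assumption based on the classic v1 manifest format
    # used elsewhere in this file): each entry serializes roughly as
    # "<path>\0<40-hex-nodeid><flags>\n", so _parse() yields
    # (path, binary nodeid, flags) tuples. In a tree manifest, a 'd' flag
    # marks a subdirectory entry, and readsubtree() loads that directory's
    # own manifest from its sub-revlog.
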
    def text(self, usemanifestv2=False):
        """Get the full data of this manifest as a bytestring."""
        flags = self.flags
        return _text(((f, self[f], flags(f)) for f in self.keys()),
                     usemanifestv2)

    def dirtext(self, usemanifestv2=False):
        """Get the full data of this directory as a bytestring. Make sure that
        any submanifests have been written first, so their nodeids are correct.
        """
        flags = self.flags
        dirs = [(d[:-1], self._dirs[d]._node, 'd') for d in self._dirs]
        files = [(f, self._files[f], flags(f)) for f in self._files]
        return _text(sorted(dirs + files), usemanifestv2)

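    # A hedged sketch of the serialized form (nodeids abbreviated): for a
    # directory holding a file 'a.txt' and a subdirectory 'sub/', the sorted
    # entries would serialize roughly as
    #
    #   a.txt\0<hex nodeid>\n
    #   sub\0<hex nodeid of sub's manifest revision>d\n
    #
    # i.e. a subdirectory appears under its name without the trailing slash
    # and carries the 'd' flag.
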
    def writesubtrees(self, m1, m2, writesubtree):
        emptytree = treemanifest()
        for d, subm in self._dirs.iteritems():
            subp1 = m1._dirs.get(d, emptytree)._node
            subp2 = m2._dirs.get(d, emptytree)._node
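            # Editorial note: the swap below presumably ensures the subtree
            # revision gets a non-null first parent whenever one is
            # available, keeping revlog delta chains anchored to a real
            # revision.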
            if subp1 == revlog.nullid:
                subp1, subp2 = subp2, subp1
            writesubtree(subm, subp1, subp2)

class manifest(revlog.revlog):
    def __init__(self, opener, dir='', dirlogcache=None):
        '''The 'dir' and 'dirlogcache' arguments are for internal use by
        manifest.manifest only. External users should create a root manifest
        log with manifest.manifest(opener) and call dirlog() on it.
        '''
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        usetreemanifest = False
        usemanifestv2 = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get('manifestcachesize', cachesize)
            usetreemanifest = opts.get('treemanifest', usetreemanifest)
            usemanifestv2 = opts.get('manifestv2', usemanifestv2)
        self._mancache = util.lrucachedict(cachesize)
        self._treeinmem = usetreemanifest
        self._treeondisk = usetreemanifest
        self._usemanifestv2 = usemanifestv2
        indexfile = "00manifest.i"
        if dir:
            assert self._treeondisk
            if not dir.endswith('/'):
                dir = dir + '/'
            indexfile = "meta/" + dir + "00manifest.i"
        revlog.revlog.__init__(self, opener, indexfile)
        self._dir = dir
        # The dirlogcache is kept on the root manifest log
        if dir:
            self._dirlogcache = dirlogcache
        else:
            self._dirlogcache = {'': self}

    def _newmanifest(self, data=''):
        if self._treeinmem:
            return treemanifest(self._dir, data)
        return manifestdict(data)

    def dirlog(self, dir):
        assert self._treeondisk
        if dir not in self._dirlogcache:
            self._dirlogcache[dir] = manifest(self.opener, dir,
                                              self._dirlogcache)
        return self._dirlogcache[dir]

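    # A hedged usage sketch (identifiers taken from this file, and assuming
    # a repository configured for treemanifests, since dirlog() asserts
    # self._treeondisk). The root log owns the cache, so repeated lookups of
    # the same directory return the same revlog object instead of reopening
    # it:
    #
    #   mlog = manifest(opener)            # root log; cache = {'': mlog}
    #   sub = mlog.dirlog('foo/')          # opens meta/foo/00manifest.i once
    #   assert mlog.dirlog('foo/') is sub  # second call hits the cache
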
    def _slowreaddelta(self, node):
        r0 = self.deltaparent(self.rev(node))
        m0 = self.read(self.node(r0))
        m1 = self.read(node)
        md = self._newmanifest()
        for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
            if n1:
                md[f] = n1
                if fl1:
                    md.setflag(f, fl1)
        return md

    def readdelta(self, node):
        if self._usemanifestv2 or self._treeondisk:
            return self._slowreaddelta(node)
        r = self.rev(node)
        d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
        return self._newmanifest(d)

    def readfast(self, node):
        '''use the faster of readdelta or read

        This will return a manifest which is either only the files
        added/modified relative to p1, or all files in the
        manifest. Which one is returned depends on the codepath used
        to retrieve the data.
        '''
        r = self.rev(node)
        deltaparent = self.deltaparent(r)
        if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
            return self.readdelta(node)
        return self.read(node)

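    # Editorial note: "faster" here means avoiding a full-text read when the
    # revlog happens to store this revision as a delta against one of its
    # parents; in that case readdelta() only materializes the changed
    # entries. Callers therefore must not assume the result is a complete
    # manifest.
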
    def read(self, node):
        if node == revlog.nullid:
            return self._newmanifest() # don't upset local cache
        if node in self._mancache:
            return self._mancache[node][0]
        text = self.revision(node)
        if self._treeondisk:
            def readsubtree(dir, subm):
                return self.dirlog(dir).read(subm)
            m = self._newmanifest()
            m.parse(text, readsubtree)
            m.setnode(node)
            arraytext = None
        else:
            m = self._newmanifest(text)
            arraytext = array.array('c', text)
        self._mancache[node] = (m, arraytext)
        return m

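    # A hedged sketch of the caching behaviour: read() stores its result in
    # the LRU _mancache, so as long as a node stays in the cache, repeated
    # reads return the identical (shared) object:
    #
    #   m = mlog.read(somenode)
    #   assert mlog.read(somenode) is m
    #
    # With treemanifests, readsubtree() now goes through dirlog(), so both
    # the per-directory revlogs and their parsed manifests are cached.
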
    def find(self, node, f):
        '''look up entry for a single file efficiently.
        return (node, flags) pair if found, (None, None) if not.'''
        m = self.read(node)
        try:
            return m.find(f)
        except KeyError:
            return None, None

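    # A hedged usage sketch ('foo/bar.txt' is a made-up path):
    #
    #   n, flags = mlog.find(somenode, 'foo/bar.txt')
    #   if n is None:
    #       pass  # file absent in that manifest
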
    def add(self, m, transaction, link, p1, p2, added, removed):
        if (p1 in self._mancache and not self._treeinmem
            and not self._usemanifestv2):
            # If our first parent is in the manifest cache, we can
            # compute a delta here using properties we know about the
            # manifest up-front, which may save time later for the
            # revlog layer.

            _checkforbidden(added)
            # combine the changed lists into one list for sorting
            work = [(x, False) for x in added]
            work.extend((x, True) for x in removed)
            # this could use heapq.merge() (from Python 2.6+) or equivalent
            # since the lists are already sorted
            work.sort()

            arraytext, deltatext = m.fastdelta(self._mancache[p1][1], work)
            cachedelta = self.rev(p1), deltatext
            text = util.buffer(arraytext)
            n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
        else:
            # The first parent manifest isn't already loaded, so we'll
            # just encode a fulltext of the manifest and pass that
            # through to the revlog layer, and let it handle the delta
            # process.
            if self._treeondisk:
                m1 = self.read(p1)
                m2 = self.read(p2)
                n = self._addtree(m, transaction, link, m1, m2)
                arraytext = None
            else:
                text = m.text(self._usemanifestv2)
                n = self.addrevision(text, transaction, link, p1, p2)
                arraytext = array.array('c', text)

        self._mancache[n] = (m, arraytext)

        return n

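    # A hedged sketch of the fast path's work list (file names invented):
    # given added=['a.txt', 'c.txt'] and removed=['b.txt'], the combined
    # list sorts to
    #
    #   [('a.txt', False), ('b.txt', True), ('c.txt', False)]
    #
    # which fastdelta() consumes as (path, is-removal) instructions against
    # the cached array form of p1's manifest text.
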
    def _addtree(self, m, transaction, link, m1, m2):
        def writesubtree(subm, subp1, subp2):
            sublog = self.dirlog(subm.dir())
            sublog.add(subm, transaction, link, subp1, subp2, None, None)
        m.writesubtrees(m1, m2, writesubtree)
        text = m.dirtext(self._usemanifestv2)
        # If the manifest is unchanged compared to one parent,
        # don't write a new revision
        if text == m1.dirtext(self._usemanifestv2):
            n = m1.node()
        elif text == m2.dirtext(self._usemanifestv2):
            n = m2.node()
        else:
            n = self.addrevision(text, transaction, link, m1.node(), m2.node())
        # Save nodeid so parent manifest can calculate its nodeid
        m.setnode(n)
        return n
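    # Editorial note: subtrees are written bottom-up (writesubtrees before
    # dirtext), so each directory's serialized text already contains the
    # final nodeids of its children; and when a directory's text matches one
    # of its parents', that parent's node is reused instead of writing a
    # redundant revision to the directory's revlog.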