manifest: make lru size configurable...
Durham Goode
r24033:ed5e8a95 default
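
The new knob is read from the [format] section of hgrc and handed to the store's revlogs through svfs.options; see _applyrequirements in the hunk below, where the three added lines are marked with +. A minimal illustrative hgrc entry (the value 10 is an arbitrary example, not a recommended setting):

    [format]
    # cache up to 10 parsed manifests in memory (example value)
    manifestcachesize = 10
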
@@ -1,1853 +1,1856 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from node import hex, nullid, short
 from i18n import _
 import urllib
 import peer, changegroup, subrepo, pushkey, obsolete, repoview
 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
 import lock as lockmod
 import transaction, store, encoding, exchange, bundle2
 import scmutil, util, extensions, hook, error, revset
 import match as matchmod
 import merge as mergemod
 import tags as tagsmod
 from lock import release
 import weakref, errno, os, time, inspect
 import branchmap, pathutil
 import namespaces
 propertycache = util.propertycache
 filecache = scmutil.filecache
 
 class repofilecache(filecache):
     """All filecache usage on repo are done for logic that should be unfiltered
     """
 
     def __get__(self, repo, type=None):
         return super(repofilecache, self).__get__(repo.unfiltered(), type)
     def __set__(self, repo, value):
         return super(repofilecache, self).__set__(repo.unfiltered(), value)
     def __delete__(self, repo):
         return super(repofilecache, self).__delete__(repo.unfiltered())
 
 class storecache(repofilecache):
     """filecache for files in the store"""
     def join(self, obj, fname):
         return obj.sjoin(fname)
 
 class unfilteredpropertycache(propertycache):
     """propertycache that apply to unfiltered repo only"""
 
     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)
 
 class filteredpropertycache(propertycache):
     """propertycache that must take filtering in account"""
 
     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)
 
 
 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())
 
 def unfilteredmethod(orig):
     """decorate method that always need to be run on unfiltered version"""
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)
     return wrapper
 
 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                   'unbundle'))
 legacycaps = moderncaps.union(set(['changegroupsubset']))
 
 class localpeer(peer.peerrepository):
     '''peer for a local repo; reflects only the most recent API'''
 
     def __init__(self, repo, caps=moderncaps):
         peer.peerrepository.__init__(self)
         self._repo = repo.filtered('served')
         self.ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)
         self.requirements = repo.requirements
         self.supportedformats = repo.supportedformats
 
     def close(self):
         self._repo.close()
 
     def _capabilities(self):
         return self._caps
 
     def local(self):
         return self._repo
 
     def canpush(self):
         return True
 
     def url(self):
         return self._repo.url()
 
     def lookup(self, key):
         return self._repo.lookup(key)
 
     def branchmap(self):
         return self._repo.branchmap()
 
     def heads(self):
         return self._repo.heads()
 
     def known(self, nodes):
         return self._repo.known(nodes)
 
     def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                   format='HG10', **kwargs):
         cg = exchange.getbundle(self._repo, source, heads=heads,
                                 common=common, bundlecaps=bundlecaps, **kwargs)
         if bundlecaps is not None and 'HG2Y' in bundlecaps:
             # When requesting a bundle2, getbundle returns a stream to make the
             # wire level function happier. We need to build a proper object
             # from it in local peer.
             cg = bundle2.unbundle20(self.ui, cg)
         return cg
 
     # TODO We might want to move the next two calls into legacypeer and add
     # unbundle instead.
 
     def unbundle(self, cg, heads, url):
         """apply a bundle on a repo
 
         This function handles the repo locking itself."""
         try:
             cg = exchange.readbundle(self.ui, cg, None)
             ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
             if util.safehasattr(ret, 'getchunks'):
                 # This is a bundle20 object, turn it into an unbundler.
                 # This little dance should be dropped eventually when the API
                 # is finally improved.
                 stream = util.chunkbuffer(ret.getchunks())
                 ret = bundle2.unbundle20(self.ui, stream)
             return ret
         except error.PushRaced, exc:
             raise error.ResponseError(_('push failed:'), str(exc))
 
     def lock(self):
         return self._repo.lock()
 
     def addchangegroup(self, cg, source, url):
         return changegroup.addchangegroup(self._repo, cg, source, url)
 
     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)
 
     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)
 
     def debugwireargs(self, one, two, three=None, four=None, five=None):
         '''used to test argument passing over the wire'''
         return "%s %s %s %s %s" % (one, two, three, four, five)
 
 class locallegacypeer(localpeer):
     '''peer extension which implements legacy methods too; used for tests with
     restricted capabilities'''
 
     def __init__(self, repo):
         localpeer.__init__(self, repo, caps=legacycaps)
 
     def branches(self, nodes):
         return self._repo.branches(nodes)
 
     def between(self, pairs):
         return self._repo.between(pairs)
 
     def changegroup(self, basenodes, source):
         return changegroup.changegroup(self._repo, basenodes, source)
 
     def changegroupsubset(self, bases, heads, source):
         return changegroup.changegroupsubset(self._repo, bases, heads, source)
 
 class localrepository(object):
 
     supportedformats = set(('revlogv1', 'generaldelta'))
     _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                              'dotencode'))
     openerreqs = set(('revlogv1', 'generaldelta'))
     requirements = ['revlogv1']
     filtername = None
 
     # a list of (ui, featureset) functions.
     # only functions defined in module of enabled extensions are invoked
     featuresetupfuncs = set()
 
     def _baserequirements(self, create):
         return self.requirements[:]
 
     def __init__(self, baseui, path=None, create=False):
         self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
         self.wopener = self.wvfs
         self.root = self.wvfs.base
         self.path = self.wvfs.join(".hg")
         self.origroot = path
         self.auditor = pathutil.pathauditor(self.root, self._checknested)
         self.vfs = scmutil.vfs(self.path)
         self.opener = self.vfs
         self.baseui = baseui
         self.ui = baseui.copy()
         self.ui.copy = baseui.copy # prevent copying repo configuration
         # A list of callback to shape the phase if no data were found.
         # Callback are in the form: func(repo, roots) --> processed root.
         # This list it to be filled by extension during repo setup
         self._phasedefaults = []
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass
 
         if self.featuresetupfuncs:
             self.supported = set(self._basesupported) # use private copy
             extmods = set(m.__name__ for n, m
                           in extensions.extensions(self.ui))
             for setupfunc in self.featuresetupfuncs:
                 if setupfunc.__module__ in extmods:
                     setupfunc(self.ui, self.supported)
         else:
             self.supported = self._basesupported
 
         if not self.vfs.isdir():
             if create:
                 if not self.wvfs.exists():
                     self.wvfs.makedirs()
                 self.vfs.makedir(notindexed=True)
                 requirements = self._baserequirements(create)
                 if self.ui.configbool('format', 'usestore', True):
                     self.vfs.mkdir("store")
                     requirements.append("store")
                     if self.ui.configbool('format', 'usefncache', True):
                         requirements.append("fncache")
                         if self.ui.configbool('format', 'dotencode', True):
                             requirements.append('dotencode')
                     # create an invalid changelog
                     self.vfs.append(
                         "00changelog.i",
                         '\0\0\0\2' # represents revlogv2
                         ' dummy changelog to prevent using the old repo layout'
                     )
                 if self.ui.configbool('format', 'generaldelta', False):
                     requirements.append("generaldelta")
                 requirements = set(requirements)
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             try:
                 requirements = scmutil.readrequires(self.vfs, self.supported)
             except IOError, inst:
                 if inst.errno != errno.ENOENT:
                     raise
                 requirements = set()
 
         self.sharedpath = self.path
         try:
             vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                               realpath=True)
             s = vfs.base
             if not vfs.exists():
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError, inst:
             if inst.errno != errno.ENOENT:
                 raise
 
         self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
         self.spath = self.store.path
         self.svfs = self.store.vfs
         self.sopener = self.svfs
         self.sjoin = self.store.join
         self.vfs.createmode = self.store.createmode
         self._applyrequirements(requirements)
         if create:
             self._writerequirements()
 
 
         self._branchcaches = {}
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None
 
         # A cache for various files under .hg/ that tracks file changes,
         # (used by the filecache decorator)
         #
         # Maps a property name to its util.filecacheentry
         self._filecache = {}
 
         # hold sets of revision to be filtered
         # should be cleared when something might have changed the filter value:
         # - new changesets,
         # - phase change,
         # - new obsolescence marker,
         # - working directory parent change,
         # - bookmark changes
         self.filteredrevcache = {}
 
         # generic mapping between names and nodes
         self.names = namespaces.namespaces()
 
     def close(self):
         pass
 
     def _restrictcapabilities(self, caps):
         # bundle2 is not ready for prime time, drop it unless explicitly
         # required by the tests (or some brave tester)
         if self.ui.configbool('experimental', 'bundle2-exp', False):
             caps = set(caps)
             capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
             caps.add('bundle2-exp=' + urllib.quote(capsblob))
         return caps
 
     def _applyrequirements(self, requirements):
         self.requirements = requirements
         self.svfs.options = dict((r, 1) for r in requirements
                                  if r in self.openerreqs)
         chunkcachesize = self.ui.configint('format', 'chunkcachesize')
         if chunkcachesize is not None:
             self.svfs.options['chunkcachesize'] = chunkcachesize
         maxchainlen = self.ui.configint('format', 'maxchainlen')
         if maxchainlen is not None:
             self.svfs.options['maxchainlen'] = maxchainlen
+        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
+        if manifestcachesize is not None:
+            self.svfs.options['manifestcachesize'] = manifestcachesize
 
     def _writerequirements(self):
         reqfile = self.vfs("requires", "w")
         for r in sorted(self.requirements):
             reqfile.write("%s\n" % r)
         reqfile.close()
 
     def _checknested(self, path):
         """Determine if path is a legal nested repository."""
         if not path.startswith(self.root):
             return False
         subpath = path[len(self.root) + 1:]
         normsubpath = util.pconvert(subpath)
 
         # XXX: Checking against the current working copy is wrong in
         # the sense that it can reject things like
         #
         # $ hg cat -r 10 sub/x.txt
         #
         # if sub/ is no longer a subrepository in the working copy
         # parent revision.
         #
         # However, it can of course also allow things that would have
         # been rejected before, such as the above cat command if sub/
         # is a subrepository now, but was a normal directory before.
         # The old path auditor would have rejected by mistake since it
         # panics when it sees sub/.hg/.
         #
         # All in all, checking against the working copy seems sensible
         # since we want to prevent access to nested repositories on
         # the filesystem *now*.
         ctx = self[None]
         parts = util.splitpath(subpath)
         while parts:
             prefix = '/'.join(parts)
             if prefix in ctx.substate:
                 if prefix == normsubpath:
                     return True
                 else:
                     sub = ctx.sub(prefix)
                     return sub.checknested(subpath[len(prefix) + 1:])
             else:
                 parts.pop()
         return False
 
     def peer(self):
         return localpeer(self) # not cached to avoid reference cycle
 
     def unfiltered(self):
         """Return unfiltered version of the repository
 
         Intended to be overwritten by filtered repo."""
         return self
 
     def filtered(self, name):
         """Return a filtered version of a repository"""
         # build a new class with the mixin and the current class
         # (possibly subclass of the repo)
         class proxycls(repoview.repoview, self.unfiltered().__class__):
             pass
         return proxycls(self, name)
 
     @repofilecache('bookmarks')
     def _bookmarks(self):
         return bookmarks.bmstore(self)
 
     @repofilecache('bookmarks.current')
     def _bookmarkcurrent(self):
         return bookmarks.readcurrent(self)
 
     def bookmarkheads(self, bookmark):
         name = bookmark.split('@', 1)[0]
         heads = []
         for mark, n in self._bookmarks.iteritems():
             if mark.split('@', 1)[0] == name:
                 heads.append(n)
         return heads
 
     @storecache('phaseroots')
     def _phasecache(self):
         return phases.phasecache(self, self._phasedefaults)
 
     @storecache('obsstore')
     def obsstore(self):
         # read default format for new obsstore.
         defaultformat = self.ui.configint('format', 'obsstore-version', None)
         # rely on obsstore class default when possible.
         kwargs = {}
         if defaultformat is not None:
             kwargs['defaultformat'] = defaultformat
         readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
         store = obsolete.obsstore(self.svfs, readonly=readonly,
                                   **kwargs)
         if store and readonly:
             # message is rare enough to not be translated
             msg = 'obsolete feature not enabled but %i markers found!\n'
             self.ui.warn(msg % len(list(store)))
         return store
 
     @storecache('00changelog.i')
     def changelog(self):
         c = changelog.changelog(self.svfs)
         if 'HG_PENDING' in os.environ:
             p = os.environ['HG_PENDING']
             if p.startswith(self.root):
                 c.readpending('00changelog.i.a')
         return c
 
     @storecache('00manifest.i')
     def manifest(self):
         return manifest.manifest(self.svfs)
 
     @repofilecache('dirstate')
     def dirstate(self):
         warned = [0]
         def validate(node):
             try:
                 self.changelog.rev(node)
                 return node
             except error.LookupError:
                 if not warned[0]:
                     warned[0] = True
                     self.ui.warn(_("warning: ignoring unknown"
                                    " working parent %s!\n") % short(node))
                 return nullid
 
         return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
 
     def __getitem__(self, changeid):
         if changeid is None:
             return context.workingctx(self)
         if isinstance(changeid, slice):
             return [context.changectx(self, i)
                     for i in xrange(*changeid.indices(len(self)))
                     if i not in self.changelog.filteredrevs]
         return context.changectx(self, changeid)
 
     def __contains__(self, changeid):
         try:
             return bool(self.lookup(changeid))
         except error.RepoLookupError:
             return False
 
     def __nonzero__(self):
         return True
 
     def __len__(self):
         return len(self.changelog)
 
     def __iter__(self):
         return iter(self.changelog)
 
     def revs(self, expr, *args):
         '''Return a list of revisions matching the given revset'''
         expr = revset.formatspec(expr, *args)
         m = revset.match(None, expr)
         return m(self, revset.spanset(self))
 
     def set(self, expr, *args):
         '''
         Yield a context for each matching revision, after doing arg
         replacement via revset.formatspec
         '''
         for r in self.revs(expr, *args):
             yield self[r]
 
     def url(self):
         return 'file:' + self.root
 
     def hook(self, name, throw=False, **args):
         """Call a hook, passing this repo instance.
 
         This a convenience method to aid invoking hooks. Extensions likely
         won't call this unless they have registered a custom hook or are
         replacing code that is expected to call a hook.
         """
         return hook.hook(self.ui, self, name, throw, **args)
 
     @unfilteredmethod
     def _tag(self, names, node, message, local, user, date, extra={},
              editor=False):
         if isinstance(names, str):
             names = (names,)
 
         branches = self.branchmap()
         for name in names:
             self.hook('pretag', throw=True, node=hex(node), tag=name,
                       local=local)
             if name in branches:
                 self.ui.warn(_("warning: tag %s conflicts with existing"
                                " branch name\n") % name)
 
         def writetags(fp, names, munge, prevtags):
             fp.seek(0, 2)
             if prevtags and prevtags[-1] != '\n':
                 fp.write('\n')
             for name in names:
                 m = munge and munge(name) or name
                 if (self._tagscache.tagtypes and
                     name in self._tagscache.tagtypes):
                     old = self.tags().get(name, nullid)
                     fp.write('%s %s\n' % (hex(old), m))
                 fp.write('%s %s\n' % (hex(node), m))
             fp.close()
 
         prevtags = ''
         if local:
             try:
                 fp = self.vfs('localtags', 'r+')
             except IOError:
                 fp = self.vfs('localtags', 'a')
             else:
                 prevtags = fp.read()
 
             # local tags are stored in the current charset
             writetags(fp, names, None, prevtags)
             for name in names:
                 self.hook('tag', node=hex(node), tag=name, local=local)
             return
 
         try:
             fp = self.wfile('.hgtags', 'rb+')
         except IOError, e:
             if e.errno != errno.ENOENT:
                 raise
             fp = self.wfile('.hgtags', 'ab')
         else:
             prevtags = fp.read()
 
         # committed tags are stored in UTF-8
         writetags(fp, names, encoding.fromlocal, prevtags)
 
         fp.close()
 
         self.invalidatecaches()
 
         if '.hgtags' not in self.dirstate:
             self[None].add(['.hgtags'])
 
         m = matchmod.exact(self.root, '', ['.hgtags'])
         tagnode = self.commit(message, user, date, extra=extra, match=m,
                               editor=editor)
 
         for name in names:
             self.hook('tag', node=hex(node), tag=name, local=local)
 
         return tagnode
 
     def tag(self, names, node, message, local, user, date, editor=False):
         '''tag a revision with one or more symbolic names.
 
         names is a list of strings or, when adding a single tag, names may be a
         string.
 
         if local is True, the tags are stored in a per-repository file.
         otherwise, they are stored in the .hgtags file, and a new
         changeset is committed with the change.
 
         keyword arguments:
 
         local: whether to store tags in non-version-controlled file
         (default False)
 
         message: commit message to use if committing
 
         user: name of user to use if committing
 
         date: date tuple to use if committing'''
 
         if not local:
             m = matchmod.exact(self.root, '', ['.hgtags'])
             if util.any(self.status(match=m, unknown=True, ignored=True)):
                 raise util.Abort(_('working copy of .hgtags is changed'),
                                  hint=_('please commit .hgtags manually'))
 
         self.tags() # instantiate the cache
         self._tag(names, node, message, local, user, date, editor=editor)
 
     @filteredpropertycache
     def _tagscache(self):
         '''Returns a tagscache object that contains various tags related
         caches.'''
 
         # This simplifies its cache management by having one decorated
         # function (this one) and the rest simply fetch things from it.
         class tagscache(object):
             def __init__(self):
                 # These two define the set of tags for this repository. tags
                 # maps tag name to node; tagtypes maps tag name to 'global' or
                 # 'local'. (Global tags are defined by .hgtags across all
                 # heads, and local tags are defined in .hg/localtags.)
                 # They constitute the in-memory cache of tags.
                 self.tags = self.tagtypes = None
 
                 self.nodetagscache = self.tagslist = None
 
         cache = tagscache()
         cache.tags, cache.tagtypes = self._findtags()
 
         return cache
 
     def tags(self):
         '''return a mapping of tag to node'''
         t = {}
         if self.changelog.filteredrevs:
             tags, tt = self._findtags()
         else:
             tags = self._tagscache.tags
         for k, v in tags.iteritems():
             try:
                 # ignore tags to unknown nodes
                 self.changelog.rev(v)
                 t[k] = v
             except (error.LookupError, ValueError):
                 pass
         return t
 
     def _findtags(self):
         '''Do the hard work of finding tags. Return a pair of dicts
         (tags, tagtypes) where tags maps tag name to node, and tagtypes
         maps tag name to a string like \'global\' or \'local\'.
         Subclasses or extensions are free to add their own tags, but
         should be aware that the returned dicts will be retained for the
         duration of the localrepo object.'''
 
         # XXX what tagtype should subclasses/extensions use? Currently
         # mq and bookmarks add tags, but do not set the tagtype at all.
         # Should each extension invent its own tag type? Should there
         # be one tagtype for all such "virtual" tags? Or is the status
         # quo fine?
 
         alltags = {} # map tag name to (node, hist)
         tagtypes = {}
 
         tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
         tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
 
         # Build the return dicts. Have to re-encode tag names because
         # the tags module always uses UTF-8 (in order not to lose info
         # writing to the cache), but the rest of Mercurial wants them in
         # local encoding.
         tags = {}
         for (name, (node, hist)) in alltags.iteritems():
             if node != nullid:
                 tags[encoding.tolocal(name)] = node
         tags['tip'] = self.changelog.tip()
         tagtypes = dict([(encoding.tolocal(name), value)
                          for (name, value) in tagtypes.iteritems()])
         return (tags, tagtypes)
 
     def tagtype(self, tagname):
         '''
         return the type of the given tag. result can be:
 
         'local' : a local tag
         'global' : a global tag
         None : tag does not exist
         '''
 
         return self._tagscache.tagtypes.get(tagname)
 
     def tagslist(self):
         '''return a list of tags ordered by revision'''
         if not self._tagscache.tagslist:
             l = []
             for t, n in self.tags().iteritems():
                 l.append((self.changelog.rev(n), t, n))
             self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
 
         return self._tagscache.tagslist
 
     def nodetags(self, node):
         '''return the tags associated with a node'''
         if not self._tagscache.nodetagscache:
             nodetagscache = {}
             for t, n in self._tagscache.tags.iteritems():
                 nodetagscache.setdefault(n, []).append(t)
             for tags in nodetagscache.itervalues():
                 tags.sort()
             self._tagscache.nodetagscache = nodetagscache
         return self._tagscache.nodetagscache.get(node, [])
 
     def nodebookmarks(self, node):
         marks = []
         for bookmark, n in self._bookmarks.iteritems():
             if n == node:
                 marks.append(bookmark)
         return sorted(marks)
 
715 def branchmap(self):
718 def branchmap(self):
716 '''returns a dictionary {branch: [branchheads]} with branchheads
719 '''returns a dictionary {branch: [branchheads]} with branchheads
717 ordered by increasing revision number'''
720 ordered by increasing revision number'''
718 branchmap.updatecache(self)
721 branchmap.updatecache(self)
719 return self._branchcaches[self.filtername]
722 return self._branchcaches[self.filtername]
720
723
721 def branchtip(self, branch, ignoremissing=False):
724 def branchtip(self, branch, ignoremissing=False):
722 '''return the tip node for a given branch
725 '''return the tip node for a given branch
723
726
724 If ignoremissing is True, then this method will not raise an error.
727 If ignoremissing is True, then this method will not raise an error.
725 This is helpful for callers that only expect None for a missing branch
728 This is helpful for callers that only expect None for a missing branch
726 (e.g. namespace).
729 (e.g. namespace).
727
730
728 '''
731 '''
729 try:
732 try:
730 return self.branchmap().branchtip(branch)
733 return self.branchmap().branchtip(branch)
731 except KeyError:
734 except KeyError:
732 if not ignoremissing:
735 if not ignoremissing:
733 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
736 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
734 else:
737 else:
735 pass
738 pass
736
739
737 def lookup(self, key):
740 def lookup(self, key):
738 return self[key].node()
741 return self[key].node()
739
742
740 def lookupbranch(self, key, remote=None):
743 def lookupbranch(self, key, remote=None):
741 repo = remote or self
744 repo = remote or self
742 if key in repo.branchmap():
745 if key in repo.branchmap():
743 return key
746 return key
744
747
745 repo = (remote and remote.local()) and remote or self
748 repo = (remote and remote.local()) and remote or self
746 return repo[key].branch()
749 return repo[key].branch()
747
750
748 def known(self, nodes):
751 def known(self, nodes):
749 nm = self.changelog.nodemap
752 nm = self.changelog.nodemap
750 pc = self._phasecache
753 pc = self._phasecache
751 result = []
754 result = []
752 for n in nodes:
755 for n in nodes:
753 r = nm.get(n)
756 r = nm.get(n)
754 resp = not (r is None or pc.phase(self, r) >= phases.secret)
757 resp = not (r is None or pc.phase(self, r) >= phases.secret)
755 result.append(resp)
758 result.append(resp)
756 return result
759 return result
757
760
758 def local(self):
761 def local(self):
759 return self
762 return self
760
763
761 def cancopy(self):
764 def cancopy(self):
762 # so statichttprepo's override of local() works
765 # so statichttprepo's override of local() works
763 if not self.local():
766 if not self.local():
764 return False
767 return False
765 if not self.ui.configbool('phases', 'publish', True):
768 if not self.ui.configbool('phases', 'publish', True):
766 return True
769 return True
767 # if publishing we can't copy if there is filtered content
770 # if publishing we can't copy if there is filtered content
768 return not self.filtered('visible').changelog.filteredrevs
771 return not self.filtered('visible').changelog.filteredrevs
769
772
770 def shared(self):
773 def shared(self):
771 '''the type of shared repository (None if not shared)'''
774 '''the type of shared repository (None if not shared)'''
772 if self.sharedpath != self.path:
775 if self.sharedpath != self.path:
773 return 'store'
776 return 'store'
774 return None
777 return None
775
778
776 def join(self, f, *insidef):
779 def join(self, f, *insidef):
777 return self.vfs.join(os.path.join(f, *insidef))
780 return self.vfs.join(os.path.join(f, *insidef))
778
781
779 def wjoin(self, f, *insidef):
782 def wjoin(self, f, *insidef):
780 return self.vfs.reljoin(self.root, f, *insidef)
783 return self.vfs.reljoin(self.root, f, *insidef)
781
784
782 def file(self, f):
785 def file(self, f):
783 if f[0] == '/':
786 if f[0] == '/':
784 f = f[1:]
787 f = f[1:]
785 return filelog.filelog(self.svfs, f)
788 return filelog.filelog(self.svfs, f)
786
789
787 def changectx(self, changeid):
790 def changectx(self, changeid):
788 return self[changeid]
791 return self[changeid]
789
792
790 def parents(self, changeid=None):
793 def parents(self, changeid=None):
791 '''get list of changectxs for parents of changeid'''
794 '''get list of changectxs for parents of changeid'''
792 return self[changeid].parents()
795 return self[changeid].parents()
793
796
794 def setparents(self, p1, p2=nullid):
797 def setparents(self, p1, p2=nullid):
795 self.dirstate.beginparentchange()
798 self.dirstate.beginparentchange()
796 copies = self.dirstate.setparents(p1, p2)
799 copies = self.dirstate.setparents(p1, p2)
797 pctx = self[p1]
800 pctx = self[p1]
798 if copies:
801 if copies:
799 # Adjust copy records, the dirstate cannot do it, it
802 # Adjust copy records, the dirstate cannot do it, it
800 # requires access to parents manifests. Preserve them
803 # requires access to parents manifests. Preserve them
801 # only for entries added to first parent.
804 # only for entries added to first parent.
802 for f in copies:
805 for f in copies:
803 if f not in pctx and copies[f] in pctx:
806 if f not in pctx and copies[f] in pctx:
804 self.dirstate.copy(copies[f], f)
807 self.dirstate.copy(copies[f], f)
805 if p2 == nullid:
808 if p2 == nullid:
806 for f, s in sorted(self.dirstate.copies().items()):
809 for f, s in sorted(self.dirstate.copies().items()):
807 if f not in pctx and s not in pctx:
810 if f not in pctx and s not in pctx:
808 self.dirstate.copy(None, f)
811 self.dirstate.copy(None, f)
809 self.dirstate.endparentchange()
812 self.dirstate.endparentchange()
810
813
811 def filectx(self, path, changeid=None, fileid=None):
814 def filectx(self, path, changeid=None, fileid=None):
812 """changeid can be a changeset revision, node, or tag.
815 """changeid can be a changeset revision, node, or tag.
813 fileid can be a file revision or node."""
816 fileid can be a file revision or node."""
814 return context.filectx(self, path, changeid, fileid)
817 return context.filectx(self, path, changeid, fileid)
815
818
816 def getcwd(self):
819 def getcwd(self):
817 return self.dirstate.getcwd()
820 return self.dirstate.getcwd()
818
821
819 def pathto(self, f, cwd=None):
822 def pathto(self, f, cwd=None):
820 return self.dirstate.pathto(f, cwd)
823 return self.dirstate.pathto(f, cwd)
821
824
822 def wfile(self, f, mode='r'):
825 def wfile(self, f, mode='r'):
823 return self.wvfs(f, mode)
826 return self.wvfs(f, mode)
824
827
825 def _link(self, f):
828 def _link(self, f):
826 return self.wvfs.islink(f)
829 return self.wvfs.islink(f)
827
830
828 def _loadfilter(self, filter):
831 def _loadfilter(self, filter):
829 if filter not in self.filterpats:
832 if filter not in self.filterpats:
830 l = []
833 l = []
831 for pat, cmd in self.ui.configitems(filter):
834 for pat, cmd in self.ui.configitems(filter):
832 if cmd == '!':
835 if cmd == '!':
833 continue
836 continue
834 mf = matchmod.match(self.root, '', [pat])
837 mf = matchmod.match(self.root, '', [pat])
835 fn = None
838 fn = None
836 params = cmd
839 params = cmd
837 for name, filterfn in self._datafilters.iteritems():
840 for name, filterfn in self._datafilters.iteritems():
838 if cmd.startswith(name):
841 if cmd.startswith(name):
839 fn = filterfn
842 fn = filterfn
840 params = cmd[len(name):].lstrip()
843 params = cmd[len(name):].lstrip()
841 break
844 break
842 if not fn:
845 if not fn:
843 fn = lambda s, c, **kwargs: util.filter(s, c)
846 fn = lambda s, c, **kwargs: util.filter(s, c)
844 # Wrap old filters not supporting keyword arguments
847 # Wrap old filters not supporting keyword arguments
845 if not inspect.getargspec(fn)[2]:
848 if not inspect.getargspec(fn)[2]:
846 oldfn = fn
849 oldfn = fn
847 fn = lambda s, c, **kwargs: oldfn(s, c)
850 fn = lambda s, c, **kwargs: oldfn(s, c)
848 l.append((mf, fn, params))
851 l.append((mf, fn, params))
849 self.filterpats[filter] = l
852 self.filterpats[filter] = l
850 return self.filterpats[filter]
853 return self.filterpats[filter]
851
854
852 def _filter(self, filterpats, filename, data):
855 def _filter(self, filterpats, filename, data):
853 for mf, fn, cmd in filterpats:
856 for mf, fn, cmd in filterpats:
854 if mf(filename):
857 if mf(filename):
855 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
858 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
856 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
859 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
857 break
860 break
858
861
859 return data
862 return data
860
863
861 @unfilteredpropertycache
864 @unfilteredpropertycache
862 def _encodefilterpats(self):
865 def _encodefilterpats(self):
863 return self._loadfilter('encode')
866 return self._loadfilter('encode')
864
867
865 @unfilteredpropertycache
868 @unfilteredpropertycache
866 def _decodefilterpats(self):
869 def _decodefilterpats(self):
867 return self._loadfilter('decode')
870 return self._loadfilter('decode')
868
871
869 def adddatafilter(self, name, filter):
872 def adddatafilter(self, name, filter):
870 self._datafilters[name] = filter
873 self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode)
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        self._transref = weakref.ref(tr)
        return tr
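
    # Usage sketch (mirroring callers such as commitctx() and stream_in()
    # below): close() the transaction on success and release() it
    # unconditionally; release() without a prior close() aborts and undoes
    # the writes:
    #
    #     tr = repo.transaction("some-operation")  # hypothetical desc
    #     try:
    #         ...          # write to the store
    #         tr.close()
    #     finally:
    #         tr.release()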

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
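
    # Usage sketch (following the pattern used by rollback() above): when
    # both locks are needed, acquire wlock before lock and release them
    # together:
    #
    #     wlock = repo.wlock()
    #     lock = repo.lock()
    #     try:
    #         ...  # modify the store and working copy state
    #     finally:
    #         release(lock, wlock)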

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_('unresolved merge conflicts '
                                       '(see "hg help resolve")'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped before
            # the hook runs after the lock is released
            if node in self:
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
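
    # Usage sketch (hypothetical values): commit() is the working-directory
    # driven entry point and returns the new changeset node, or None when
    # there was nothing to commit:
    #
    #     node = repo.commit(text='fix a bug', user='me <me@example.com>')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')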

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retractboundary doesn't alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
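
    # Note: for each (top, bottom) pair, between() walks first parents from
    # top towards bottom and samples nodes at exponentially growing
    # distances (1, 2, 4, 8, ...), which lets the legacy discovery protocol
    # narrow down a common ancestor in a logarithmic number of round trips.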

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote,
        outgoing)" functions, which are called before pushing changesets.
        """
        return util.hooks()
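
    # Illustrative sketch (hypothetical extension code): util.hooks is a
    # callable registry, so an extension can tap in with something like:
    #
    #     def checkoutgoing(repo, remote, outgoing):
    #         ...  # veto the push by raising util.Abort
    #     repo.prepushoutgoinghooks.add('myextension', checkoutgoing)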

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()
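
            # Wire format consumed below (summary derived from the parsing
            # code): one line with an integer status (0 ok, 1 forbidden,
            # 2 remote lock failed), one line "<total_files> <total_bytes>",
            # then for each file a "<name>\0<size>" header line followed by
            # <size> bytes of raw store data.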

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.svfs(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the in-memory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                closed = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # try to stick it as low as possible: filters above
                    # 'served' are unlikely to be fetched from during a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
1756
1759
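For reference, the framing consumed by the receive loop above is one header line per file, '<name>\0<size>\n', followed by exactly <size> raw bytes of revlog data. A minimal self-contained sketch (encode_entry is a hypothetical helper, not part of Mercurial; Python 2 string semantics as in the codebase):

import io

def encode_entry(name, data):
    # hypothetical encoder for a single stream entry, illustration only
    return '%s\0%d\n' % (name, len(data)) + data

fp = io.BytesIO(encode_entry('data/foo.i', 'raw revlog bytes'))
l = fp.readline()
name, size = l.split('\0', 1)
assert (name, int(size)) == ('data/foo.i', 16)
assert fp.read(int(size)) == 'raw revlog bytes'
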
    def clone(self, remote, heads=[], stream=None):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream is None:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                self.stream_in(remote, set(('revlogv1',)))
            else:
                # otherwise, 'streamreqs' contains the remote revlog format
                streamreqs = remote.capable('streamreqs')
                if streamreqs:
                    streamreqs = set(streamreqs.split(','))
                    # if we support it, stream in and adjust our requirements
                    if not streamreqs - self.supportedformats:
                        self.stream_in(remote, streamreqs)

        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret

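To make the negotiation above concrete, here is a self-contained sketch of how a client decides it can stream. fakeremote is hypothetical, but its capable() contract (True for bare capabilities, the value string for name=value capabilities) matches what the code above relies on:

class fakeremote(object):
    # hypothetical stand-in for a wire peer
    caps = set(['stream-preferred', 'streamreqs=revlogv1,generaldelta'])
    def capable(self, name):
        for cap in self.caps:
            if cap == name:
                return True
            if cap.startswith(name + '='):
                return cap.split('=', 1)[1]
        return False

remote = fakeremote()
supportedformats = set(['revlogv1', 'generaldelta'])
streamreqs = set(remote.capable('streamreqs').split(','))
assert remote.capable('stream-preferred') is True
assert not streamreqs - supportedformats  # safe to stream_in
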
    def pushkey(self, namespace, key, old, new):
        try:
            self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                      old=old, new=new)
        except error.HookAbort, exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

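undoname is a pure path transform, so its contract is easy to pin down with a couple of checks (runnable against this module):

assert undoname('.hg/store/journal') == '.hg/store/undo'
assert undoname('.hg/store/journal.phaseroots') == '.hg/store/undo.phaseroots'
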
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
@@ -1,295 +1,300 b''
# manifest.py - manifest revision class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
import mdiff, parsers, error, revlog, util
import array, struct

class manifestdict(dict):
    def __init__(self, mapping=None, flags=None):
        if mapping is None:
            mapping = {}
        if flags is None:
            flags = {}
        dict.__init__(self, mapping)
        self._flags = flags
    def __setitem__(self, k, v):
        assert v is not None
        dict.__setitem__(self, k, v)
    def flags(self, f):
        return self._flags.get(f, "")
    def setflag(self, f, flags):
        """Set the flags (symlink, executable) for path f."""
        self._flags[f] = flags
    def copy(self):
        return manifestdict(self, dict.copy(self._flags))
    def intersectfiles(self, files):
        '''make a new manifestdict with the intersection of self with files

        The algorithm assumes that files is much smaller than self.'''
        ret = manifestdict()
        for fn in files:
            if fn in self:
                ret[fn] = self[fn]
                flags = self._flags.get(fn, None)
                if flags:
                    ret._flags[fn] = flags
        return ret

    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        files = match.files()
        if (match.matchfn == match.exact or
            (not match.anypats() and util.all(fn in self for fn in files))):
            return self.intersectfiles(files)

        mf = self.copy()
        for fn in mf.keys():
            if not match(fn):
                del mf[fn]
        return mf

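A small self-contained illustration of the dict-plus-flags design (nodes are normally 20-byte binary ids; the values below are placeholders):

m = manifestdict({'foo': '\x01' * 20, 'bar': '\x02' * 20})
m.setflag('foo', 'x')                   # mark foo as executable
sub = m.intersectfiles(['foo', 'baz'])  # 'baz' is silently skipped
assert sorted(sub) == ['foo']
assert sub.flags('foo') == 'x' and sub.flags('bar') == ''
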
    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        diff = {}

        for fn, n1 in self.iteritems():
            fl1 = self._flags.get(fn, '')
            n2 = m2.get(fn, None)
            fl2 = m2._flags.get(fn, '')
            if n2 is None:
                fl2 = ''
            if n1 != n2 or fl1 != fl2:
                diff[fn] = ((n1, fl1), (n2, fl2))
            elif clean:
                diff[fn] = None

        for fn, n2 in m2.iteritems():
            if fn not in self:
                fl2 = m2._flags.get(fn, '')
                diff[fn] = ((None, ''), (n2, fl2))

        return diff

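The return shape is easiest to see on a toy pair of manifests (placeholder node ids again):

n1, n2 = '\x01' * 20, '\x02' * 20
m1 = manifestdict({'a': n1, 'b': n1})
m2 = manifestdict({'a': n2})
d = m1.diff(m2)
assert d['a'] == ((n1, ''), (n2, ''))    # changed in m2
assert d['b'] == ((n1, ''), (None, ''))  # missing from m2
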
    def text(self):
        """Get the full data of this manifest as a bytestring."""
        fl = sorted(self)
        _checkforbidden(fl)

        hex, flags = revlog.hex, self.flags
        # if this is changed to support newlines in filenames,
        # be sure to check the templates/ dir again (especially *-raw.tmpl)
        return ''.join("%s\0%s%s\n" % (f, hex(self[f]), flags(f)) for f in fl)

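So the serialized form is one line per file, '<path>\0<40-hex-node><flags>\n', sorted by path. For example:

m = manifestdict({'a': '\x01' * 20})
m.setflag('a', 'l')  # symlink flag
assert m.text() == 'a\0' + '01' * 20 + 'l\n'
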
    def fastdelta(self, base, changes):
        """Given a base manifest text as an array.array and a list of changes
        relative to that text, compute a delta that can be used by revlog.
        """
        delta = []
        dstart = None
        dend = None
        dline = [""]
        start = 0
        # zero copy representation of base as a buffer
        addbuf = util.buffer(base)

        # start with a readonly loop that finds the offset of
        # each line and creates the deltas
        for f, todelete in changes:
            # bs will either be the index of the item or the insert point
            start, end = _msearch(addbuf, f, start)
            if not todelete:
                l = "%s\0%s%s\n" % (f, revlog.hex(self[f]), self.flags(f))
            else:
                if start == end:
                    # item we want to delete was not found, error out
                    raise AssertionError(
                        _("failed to remove %s from manifest") % f)
                l = ""
            if dstart is not None and dstart <= start and dend >= start:
                if dend < end:
                    dend = end
                if l:
                    dline.append(l)
            else:
                if dstart is not None:
                    delta.append([dstart, dend, "".join(dline)])
                dstart = start
                dend = end
                dline = [l]

        if dstart is not None:
            delta.append([dstart, dend, "".join(dline)])
        # apply the delta to the base, and get a delta for addrevision
        deltatext, arraytext = _addlistdelta(base, delta)
        return arraytext, deltatext

def _msearch(m, s, lo=0, hi=None):
    '''return a tuple (start, end) that says where to find s within m.

    If the string is found, m[start:end] is the line containing
    that string. If start == end the string was not found and
    they indicate the proper sorted insertion point.

    m should be a buffer or a string
    s is a string'''
    def advance(i, c):
        while i < lenm and m[i] != c:
            i += 1
        return i
    if not s:
        return (lo, lo)
    lenm = len(m)
    if not hi:
        hi = lenm
    while lo < hi:
        mid = (lo + hi) // 2
        start = mid
        while start > 0 and m[start - 1] != '\n':
            start -= 1
        end = advance(start, '\0')
        if m[start:end] < s:
            # we know that after the null there are 40 bytes of sha1
            # this translates to the bisect lo = mid + 1
            lo = advance(end + 40, '\n') + 1
        else:
            # this translates to the bisect hi = mid
            hi = start
    end = advance(lo, '\0')
    found = m[lo:end]
    if s == found:
        # we know that after the null there are 40 bytes of sha1
        end = advance(end + 40, '\n')
        return (lo, end + 1)
    else:
        return (lo, lo)

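A runnable illustration of _msearch over a two-entry manifest text; it returns the span of the matching line, or an equal start/end insertion point on a miss:

text = 'a\0' + '01' * 20 + '\n' + 'c\0' + '02' * 20 + '\n'
start, end = _msearch(text, 'a')
assert text[start:end] == 'a\0' + '01' * 20 + '\n'
start, end = _msearch(text, 'b')  # not present
assert start == end == 43         # insertion point before the 'c' line
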
def _checkforbidden(l):
    """Check filenames for illegal characters."""
    for f in l:
        if '\n' in f or '\r' in f:
            raise error.RevlogError(
                _("'\\n' and '\\r' disallowed in filenames: %r") % f)


# apply the changes collected during the bisect loop to our addlist
# return a delta suitable for addrevision
def _addlistdelta(addlist, x):
    # for large addlist arrays, building a new array is cheaper
    # than repeatedly modifying the existing one
    currentposition = 0
    newaddlist = array.array('c')

    for start, end, content in x:
        newaddlist += addlist[currentposition:start]
        if content:
            newaddlist += array.array('c', content)

        currentposition = end

    newaddlist += addlist[currentposition:]

    deltatext = "".join(struct.pack(">lll", start, end, len(content))
                        + content for start, end, content in x)
    return deltatext, newaddlist

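Each chunk in deltatext is a 12-byte big-endian header (start, end, length) followed by the replacement content, i.e. the binary patch format the revlog layer consumes:

import struct

start, end, content = 0, 5, 'abc'
chunk = struct.pack('>lll', start, end, len(content)) + content
assert len(chunk) == 12 + 3  # bytes [0, 5) of the base become 'abc'
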
def _parse(lines):
    mfdict = manifestdict()
    parsers.parse_manifest(mfdict, mfdict._flags, lines)
    return mfdict

class manifest(revlog.revlog):
    def __init__(self, opener):
-        # we expect to deal with not more than four revs at a time,
-        # during a commit --amend
-        self._mancache = util.lrucachedict(4)
+        # During normal operations, we expect to deal with not more than four
+        # revs at a time (such as during commit --amend). When rebasing large
+        # stacks of commits, the number can go up, hence the config knob below.
+        cachesize = 4
+        opts = getattr(opener, 'options', None)
+        if opts is not None:
+            cachesize = opts.get('manifestcachesize', cachesize)
+        self._mancache = util.lrucachedict(cachesize)
         revlog.revlog.__init__(self, opener, "00manifest.i")

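This hunk is the heart of the patch: the manifest LRU cache keeps its old default of four entries, but the size can now be injected through the opener's options dict under the key 'manifestcachesize'. The wiring from user configuration into opener.options lives on the localrepo.py side of the patch and is not visible in this hunk; assuming it reads the knob from an hgrc section named [format] (the exact section is an assumption here), raising the cache for large rebases would look roughly like:

    [format]
    manifestcachesize = 1000

Programmatically, the equivalent is simply setting opener.options['manifestcachesize'] = 1000 before the manifest revlog is constructed.
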
    def readdelta(self, node):
        r = self.rev(node)
        return _parse(mdiff.patchtext(self.revdiff(self.deltaparent(r), r)))

    def readfast(self, node):
        '''use the faster of readdelta or read'''
        r = self.rev(node)
        deltaparent = self.deltaparent(r)
        if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
            return self.readdelta(node)
        return self.read(node)

    def read(self, node):
        if node == revlog.nullid:
            return manifestdict() # don't upset local cache
        if node in self._mancache:
            return self._mancache[node][0]
        text = self.revision(node)
        arraytext = array.array('c', text)
        mapping = _parse(text)
        self._mancache[node] = (mapping, arraytext)
        return mapping

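read() caches both the parsed manifestdict and the raw array under the node, so a later add() against that parent can compute a fastdelta without re-parsing. The cache itself is a plain LRU; assuming mercurial.util is importable, a sketch of its eviction behavior:

from mercurial import util

c = util.lrucachedict(2)
c['a'] = 1
c['b'] = 2
c['c'] = 3  # capacity 2: least recently used entry 'a' is evicted
assert 'a' not in c and 'b' in c and 'c' in c
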
    def find(self, node, f):
        '''look up entry for a single file efficiently.
        return (node, flags) pair if found, (None, None) if not.'''
        if node in self._mancache:
            mapping = self._mancache[node][0]
            return mapping.get(f), mapping.flags(f)
        text = self.revision(node)
        start, end = _msearch(text, f)
        if start == end:
            return None, None
        l = text[start:end]
        f, n = l.split('\0')
        return revlog.bin(n[:40]), n[40:-1]

    def add(self, map, transaction, link, p1, p2, added, removed):
        if p1 in self._mancache:
            # If our first parent is in the manifest cache, we can
            # compute a delta here using properties we know about the
            # manifest up-front, which may save time later for the
            # revlog layer.

            _checkforbidden(added)
            # combine the changed lists into one list for sorting
            work = [(x, False) for x in added]
            work.extend((x, True) for x in removed)
            # this could use heapq.merge() (from Python 2.6+) or equivalent
            # since the lists are already sorted
            work.sort()

            arraytext, deltatext = map.fastdelta(self._mancache[p1][1], work)
            cachedelta = self.rev(p1), deltatext
            text = util.buffer(arraytext)
        else:
            # The first parent manifest isn't already loaded, so we'll
            # just encode a fulltext of the manifest and pass that
            # through to the revlog layer, and let it handle the delta
            # process.
            text = map.text()
            arraytext = array.array('c', text)
            cachedelta = None

        n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
        self._mancache[n] = (map, arraytext)

        return n
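The worklist handed to fastdelta is worth a quick look: additions and removals are merged into one sorted list of (path, delete?) pairs so the delta pass can walk the base text strictly left to right:

added, removed = ['b', 'd'], ['c']
work = [(x, False) for x in added]
work.extend((x, True) for x in removed)
work.sort()
assert work == [('b', False), ('c', True), ('d', False)]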