localrepo: use file API via vfs while ensuring repository directory...
FUJIWARA Katsunori
r17161:be016e96 default
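This changeset replaces direct os / os.path / util filesystem calls in localrepository.__init__ with calls through the repository's vfs objects: self.vfs, rooted at the .hg/ directory, and self.wvfs, rooted at the working directory. A vfs resolves every path against its base, so callers pass relative names such as "store", or no name at all to mean the base directory itself. Below is a minimal sketch of that pattern, using a hypothetical vfs class for illustration rather than Mercurial's actual scmutil.opener:

    import os

    class vfs(object):
        """Hypothetical stand-in for Mercurial's vfs/opener classes:
        every operation is resolved relative to self.base."""
        def __init__(self, base):
            self.base = base

        def join(self, path=None):
            # no argument means "the base directory itself"
            return os.path.join(self.base, path) if path else self.base

        def isdir(self, path=None):
            return os.path.isdir(self.join(path))

        def exists(self, path=None):
            return os.path.exists(self.join(path))

        def makedirs(self, path=None):
            os.makedirs(self.join(path))

        def mkdir(self, path=None):
            os.mkdir(self.join(path))

    # before: os.mkdir(os.path.join(self.path, "store"))
    # after:  self.vfs.mkdir("store")    # self.vfs is rooted at .hg/
    # before: if not os.path.isdir(self.path):
    # after:  if not self.vfs.isdir():   # no argument = the vfs base itself

Centralizing path resolution in the vfs keeps __init__ free of hand-built absolute paths and gives the vfs layer a single place to hook in path auditing or filename encoding.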
@@ -1,2457 +1,2457 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey, obsolete
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding, base85
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.wopener = scmutil.opener(path, expand=True)
        self.wvfs = self.wopener
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.vfs = self.opener
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

-        if not os.path.isdir(self.path):
+        if not self.vfs.isdir():
            if create:
-                if not os.path.exists(self.root):
-                    util.makedirs(self.root)
-                util.makedir(self.path, notindexed=True)
+                if not self.wvfs.exists():
+                    self.wvfs.makedirs()
+                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
-                    os.mkdir(os.path.join(self.path, "store"))
+                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.svfs = self.sopener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        for k, v in self._tagscache.tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if not node in self:
                    raise ValueError('invalidating branch cache because node '+
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset of
        heads missing, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial:
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
914
914

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it does not
        always reread the dirstate. Use dirstate.invalidate() if you
        want to explicitly reread the dirstate (i.e. restore it to a
        previously known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')
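
    # Sketch of the distinction documented above (illustrative only):
    #
    # >>> repo.invalidatedirstate()  # lazy: reread only if the file changed
    # >>> repo.dirstate.invalidate() # eager: unconditionally discard state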

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()
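
    # Illustrative sketch: deferring work until the store lock is released.
    # 'announce' is a hypothetical callback, not part of this module.
    #
    # >>> def announce():
    # ...     repo.ui.status('store lock released\n')
    # >>> repo._afterlock(announce)  # runs immediately if no lock is held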

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
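
    # Minimal usage sketch (assumes 'repo' is a localrepository): callers
    # in this class always pair lock() with release in a finally block.
    #
    # >>> l = repo.lock()
    # >>> try:
    # ...     pass  # modify .hg/store here
    # ... finally:
    # ...     l.release()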

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
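
    # Acquisition-order sketch: when both locks are needed, take wlock()
    # before lock(), exactly as rollback() above does; release() handles
    # None values, so partial acquisition unwinds cleanly.
    #
    # >>> wlock = lock = None
    # >>> try:
    # ...     wlock = repo.wlock()
    # ...     lock = repo.lock()
    # ... finally:
    # ...     release(lock, wlock)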

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
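
    # For illustration: after a rename, _filecommit() above records the
    # copy source in the filelog metadata rather than as a real parent,
    # e.g. meta == {'copy': 'foo', 'copyrev': '<40 hex digits>'} with
    # fparent1 forced to nullid so readers know to consult the copy data.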

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret
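
    # Usage sketch (illustrative; assumes pending working-directory
    # changes): commit() returns the new changeset node, or None when
    # there was nothing to commit.
    #
    # >>> node = repo.commit(text='fix parser', user='alice')
    # >>> node and hex(node)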

    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changeset; if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0, we don't need to retract
                # anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchhead cache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info (newheadnodes) on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)
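
    # Usage sketch: walking files under one directory of the working copy
    # (node=None). matchmod is the match module imported at the top of
    # this file; the 'path:' pattern prefix limits the walk.
    #
    # >>> m = matchmod.match(repo.root, repo.getcwd(), ['path:mercurial'])
    # >>> for f in repo.walk(m):
    # ...     repo.ui.write(f + '\n')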

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
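
    # Usage sketch: the seven lists come back in a fixed order, so callers
    # typically unpack the whole tuple.
    #
    # >>> (modified, added, removed, deleted,
    # ...  unknown, ignored, clean) = repo.status(unknown=True, clean=True)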

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads
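
    # Usage sketch: heads come back newest first, so element 0 is the
    # natural update target for a branch.
    #
    # >>> bheads = repo.branchheads('default')
    # >>> tip = bheads and bheads[0] or None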

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
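
    # Worked example of the sampling above: between() walks first parents
    # from 'top' toward 'bottom' and keeps the nodes at distances 1, 2, 4,
    # 8, ... (append when i == f, then double f), so each pair yields
    # O(log n) progressively sparser samples for the legacy discovery
    # protocol.
    #
    # >>> samples = repo.between([(repo.changelog.tip(), nullid)])[0]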

    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing, or you break future useful
        # rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            remoteobs = remote.listkeys('obsolete')
            if 'dump' in remoteobs:
                if tr is None:
                    tr = self.transaction(trname)
                data = base85.b85decode(remoteobs['dump'])
                self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result
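
    # Usage sketch, assuming 'other' is a peer repository object obtained
    # elsewhere (the acquisition is not shown here):
    #
    # >>> repo.pull(other)                  # pull everything new
    # >>> repo.pull(other, heads=[h])       # only ancestors of head 'h'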

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass
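
    # Sketch of the extension hook described above; the subclass name is
    # illustrative only. Calling the base implementation preserves any
    # future checks, then the override adds its own veto.
    #
    # >>> class strictrepo(localrepository):
    # ...     def checkpush(self, force, revs):
    # ...         localrepository.checkpush(self, force, revs)
    # ...         if not force and revs is None:
    # ...             raise util.Abort('specify revisions or use --force')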

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return one of:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed. Synchronize all common heads.
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs that are draft on the remote
                    # but public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
1820 newremotehead.hex(),
1820 newremotehead.hex(),
1821 str(phases.draft),
1821 str(phases.draft),
1822 str(phases.public))
1822 str(phases.public))
1823 if not r:
1823 if not r:
1824 self.ui.warn(_('updating %s to public failed!\n')
1824 self.ui.warn(_('updating %s to public failed!\n')
1825 % newremotehead)
1825 % newremotehead)
1826 if 'obsolete' in self.listkeys('namespaces') and self.obsstore:
1826 if 'obsolete' in self.listkeys('namespaces') and self.obsstore:
1827 data = self.listkeys('obsolete')['dump']
1827 data = self.listkeys('obsolete')['dump']
1828 r = remote.pushkey('obsolete', 'dump', '', data)
1828 r = remote.pushkey('obsolete', 'dump', '', data)
1829 if not r:
1829 if not r:
1830 self.ui.warn(_('failed to push obsolete markers!\n'))
1830 self.ui.warn(_('failed to push obsolete markers!\n'))
1831 finally:
1831 finally:
1832 if lock is not None:
1832 if lock is not None:
1833 lock.release()
1833 lock.release()
1834 finally:
1834 finally:
1835 locallock.release()
1835 locallock.release()
1836
1836
1837 self.ui.debug("checking for updated bookmarks\n")
1837 self.ui.debug("checking for updated bookmarks\n")
1838 rb = remote.listkeys('bookmarks')
1838 rb = remote.listkeys('bookmarks')
1839 for k in rb.keys():
1839 for k in rb.keys():
1840 if k in self._bookmarks:
1840 if k in self._bookmarks:
1841 nr, nl = rb[k], hex(self._bookmarks[k])
1841 nr, nl = rb[k], hex(self._bookmarks[k])
1842 if nr in self:
1842 if nr in self:
1843 cr = self[nr]
1843 cr = self[nr]
1844 cl = self[nl]
1844 cl = self[nl]
1845 if cl in cr.descendants():
1845 if cl in cr.descendants():
1846 r = remote.pushkey('bookmarks', k, nr, nl)
1846 r = remote.pushkey('bookmarks', k, nr, nl)
1847 if r:
1847 if r:
1848 self.ui.status(_("updating bookmark %s\n") % k)
1848 self.ui.status(_("updating bookmark %s\n") % k)
1849 else:
1849 else:
1850 self.ui.warn(_('updating bookmark %s'
1850 self.ui.warn(_('updating bookmark %s'
1851 ' failed!\n') % k)
1851 ' failed!\n') % k)
1852
1852
1853 return ret
1853 return ret
1854
1854
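    # Illustrative sketch (not part of the original module): how a caller
    # might drive a push and read the result, assuming `repo` is a
    # localrepository and `other` a peer repository (both hypothetical
    # names). None means nothing to push; 0 signals an HTTP-level error;
    # other values follow the addchangegroup() head-count encoding.
    #
    #     ret = repo.push(other, force=False, revs=None)
    #     if ret is None:
    #         repo.ui.status("nothing to push\n")
    #     elif ret == 0:
    #         repo.ui.warn("push failed on the remote side\n")
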
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

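    # Illustrative sketch (not part of the original module): bundling all
    # descendants of a hypothetical node `base` up to the current heads;
    # the returned chunkbuffer yields raw changegroup chunks.
    #
    #     cg = repo.changegroupsubset([base], repo.heads(), 'bundle')
    #     chunk = cg.read(4096)   # first bytes of the changegroup stream
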
    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

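    # Illustrative sketch (not part of the original module): with no
    # arguments, getbundle() bundles the whole repository, because the
    # ancestors of the local heads minus the ancestors of [nullid] is
    # everything. The call below is equivalent to building the outgoing
    # set by hand:
    #
    #     cg = repo.getbundle('pull')   # heads=None, common=None
    #     out = discovery.outgoing(repo.changelog, [nullid],
    #                              repo.changelog.heads())
    #     cg = repo.getlocalbundle('pull', out)
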
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

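    # Illustrative sketch (not part of the original module): gennodelst()
    # above keeps exactly the revlog entries introduced by the changesets
    # being sent, by testing each entry's linkrev against the changelog
    # revision set. Expanded for the manifest log `mf`:
    #
    #     revset = set([cl.rev(n) for n in nodes])
    #     wanted = [mf.node(r) for r in mf if mf.linkrev(r) in revset]
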
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers cannot push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret.
                #
                # We should not use `added` here but the list of all changes
                # in the bundle.
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

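    # Worked example (not part of the original module) of the return
    # encoding documented in addchangegroup(): with 1 head before and 3
    # after, dh = 2 and the result is dh + 1 = 3; if two heads vanish,
    # dh = -2 and the result is dh - 1 = -3; an unchanged head count gives
    # dh = 0 and a result of 1, so 0 is left to mean "nothing changed".
    #
    #     dh = len(newheads) - len(oldheads)      # hypothetical names
    #     ret = dh - 1 if dh < 0 else dh + 1
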
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

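    # Illustrative sketch (not part of the original module) of the wire
    # format consumed above, with made-up values: a numeric status line
    # ("0" on success), a "<files> <bytes>" summary line, then for every
    # file a "name\0size" header followed by exactly `size` raw bytes.
    #
    #     0
    #     2 8192
    #     data/foo.i\04096
    #     ... 4096 bytes of revlog data, then the next header ...
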
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

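    # Illustrative sketch (not part of the original module): 'streamreqs'
    # is a comma-separated requirements list, so streaming is chosen only
    # when every advertised format is also supported locally:
    #
    #     streamreqs = set('revlogv1,generaldelta'.split(','))
    #     can_stream = not (streamreqs - self.supportedformats)
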
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

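# Illustrative sketch (not part of the original module): because
# aftertrans() closes only over copied tuples, a transaction can keep the
# returned callable without holding a reference back to the repository.
# Hypothetical wiring, assuming the transaction accepts such a callback:
#
#     renames = [('journal', 'undo'),
#                ('journal.dirstate', 'undo.dirstate')]
#     onclose = aftertrans(renames)
#     onclose()   # renames each journal file to its undo counterpart
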
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

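# Example (not part of the original module): undoname() rewrites only the
# leading 'journal' in the basename:
#
#     undoname('/repo/.hg/journal.dirstate') == '/repo/.hg/undo.dirstate'
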
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
@@ -1,889 +1,907 b''
# scmutil.py - Mercurial core utility functions
#
# Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
import util, error, osutil, revset, similar, encoding
import match as matchmod
import os, errno, re, stat, sys, glob

def nochangesfound(ui, secretlist=None):
    '''report no changes for push/pull'''
    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))

def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if '\r' in f or '\n' in f:
        raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)

def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = "%s: %r" % (msg, f)
            if abort:
                raise util.Abort(msg)
            ui.warn(_("warning: %s\n") % msg)

def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    abort = os.name == 'nt' or lval == 'abort'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn

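# Illustrative sketch (not part of the original module): the recognized
# ui.portablefilenames values map to (abort, warn) as follows on a POSIX
# host; on Windows (os.name == 'nt') abort is always True.
#
#     'abort'  -> (True,  False)
#     'warn'   -> (False, True)
#     'true'   -> (False, True)     # util.parsebool('true') is True
#     'ignore' -> (False, False)
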
class casecollisionauditor(object):
    def __init__(self, ui, abort, existingiter):
        self._ui = ui
        self._abort = abort
        self._map = {}
        for f in existingiter:
            self._map[encoding.lower(f)] = f

    def __call__(self, f):
        fl = encoding.lower(f)
        map = self._map
        if fl in map and map[fl] != f:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        map[fl] = f

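# Illustrative sketch (not part of the original module): the auditor flags
# names that differ from an already-seen file only by case, assuming a ui
# instance `u` (hypothetical name):
#
#     auditor = casecollisionauditor(u, False, iter(['README']))
#     auditor('readme')   # warns: possible case-folding collision
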
class pathauditor(object):
    '''ensure that a filesystem path contains no banned components.
    the following properties of a path are checked:

    - ends with a directory separator
    - under top-level .hg
    - starts at the root of a windows drive
    - contains ".."
    - traverses a symlink (e.g. a/symlink_here/b)
    - inside a nested repository (a callback can be used to approve
      some nested repositories, e.g., subrepositories)
    '''

    def __init__(self, root, callback=None):
        self.audited = set()
        self.auditeddir = set()
        self.root = root
        self.callback = callback
        if os.path.lexists(root) and not util.checkcase(root):
            self.normcase = util.normcase
        else:
            self.normcase = lambda x: x

    def __call__(self, path):
        '''Check the relative path.
        path may contain a pattern (e.g. foodir/**.txt)'''

        path = util.localpath(path)
        normpath = self.normcase(path)
        if normpath in self.audited:
            return
        # AIX ignores "/" at end of path, others raise EISDIR.
        if util.endswithsep(path):
            raise util.Abort(_("path ends in directory separator: %s") % path)
        parts = util.splitpath(path)
        if (os.path.splitdrive(path)[0]
            or parts[0].lower() in ('.hg', '.hg.', '')
            or os.pardir in parts):
            raise util.Abort(_("path contains illegal component: %s") % path)
        if '.hg' in path.lower():
            lparts = [p.lower() for p in parts]
            for p in '.hg', '.hg.':
                if p in lparts[1:]:
                    pos = lparts.index(p)
                    base = os.path.join(*parts[:pos])
                    raise util.Abort(_("path '%s' is inside nested repo %r")
                                     % (path, base))

        normparts = util.splitpath(normpath)
        assert len(parts) == len(normparts)

        parts.pop()
        normparts.pop()
        prefixes = []
        while parts:
            prefix = os.sep.join(parts)
            normprefix = os.sep.join(normparts)
            if normprefix in self.auditeddir:
                break
            curpath = os.path.join(self.root, prefix)
            try:
                st = os.lstat(curpath)
            except OSError, err:
                # EINVAL can be raised as invalid path syntax under win32.
                # It must be ignored so that patterns can be checked too.
                if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
                    raise
            else:
                if stat.S_ISLNK(st.st_mode):
                    raise util.Abort(
                        _('path %r traverses symbolic link %r')
                        % (path, prefix))
                elif (stat.S_ISDIR(st.st_mode) and
                      os.path.isdir(os.path.join(curpath, '.hg'))):
                    if not self.callback or not self.callback(curpath):
                        raise util.Abort(_("path '%s' is inside nested "
                                           "repo %r")
                                         % (path, prefix))
            prefixes.append(normprefix)
            parts.pop()
            normparts.pop()

        self.audited.add(normpath)
        # only add prefixes to the cache after checking everything: we don't
        # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
        self.auditeddir.update(prefixes)

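# Illustrative sketch (not part of the original module): auditing rejects
# paths that escape the root, assuming `root` names a real directory with
# no symlinks or nested repositories along the checked path:
#
#     audit = pathauditor(root)
#     audit('src/module.py')    # passes; the result is cached
#     audit('../etc/passwd')    # raises util.Abort (illegal component)
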
class abstractopener(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            return ""

    def read(self, path):
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def write(self, path, data):
        fp = self(path, 'wb')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def append(self, path, data):
        fp = self(path, 'ab')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

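# A minimal usage sketch for the vfs-style API above (hypothetical paths;
# 'opener' is the concrete subclass defined next). write() creates missing
# parent directories on demand, and tryread() swallows only ENOENT:
#
#     vfs = opener('/tmp/vfs-demo')
#     vfs.write('notes/todo.txt', 'hello\n')  # makes notes/ on demand
#     assert vfs.read('notes/todo.txt') == 'hello\n'
#     assert vfs.tryread('missing.txt') == ''
#     vfs.makedirs('a/b/c')
#     assert vfs.isdir('a/b/c') and vfs.exists('a/b/c')
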
class opener(abstractopener):
    '''Open files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expand=False):
        if expand:
            base = os.path.realpath(util.expandpath(base))
        self.base = base
        self._audit = audit
        if audit:
            self.auditor = pathauditor(base)
        else:
            self.auditor = util.always
        self.createmode = None
        self._trustnlink = None

    @util.propertycache
    def _cansymlink(self):
        return util.checklink(self.base)

    def _fixfilemode(self, name):
        if self.createmode is None:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False):
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
            self.auditor(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        dirname, basename = os.path.split(f)
        # If basename is empty, then the path is malformed because it points
        # to a directory. Let the posixfile() call below raise IOError.
        if basename and mode not in ('r', 'rb'):
            if atomictemp:
                if not os.path.isdir(dirname):
                    util.makedirs(dirname, self.createmode)
                return util.atomictempfile(f, mode, self.createmode)
            try:
                if 'w' in mode:
                    util.unlink(f)
                    nlink = 0
                else:
                    # nlinks() may behave differently for files on Windows
                    # shares if the file is open.
                    fd = util.posixfile(f)
                    nlink = util.nlinks(f)
                    if nlink < 1:
                        nlink = 2 # force mktempcopy (issue1922)
                    fd.close()
            except (OSError, IOError), e:
                if e.errno != errno.ENOENT:
                    raise
                nlink = 0
            if not os.path.isdir(dirname):
                util.makedirs(dirname, self.createmode)
            if nlink > 0:
                if self._trustnlink is None:
                    self._trustnlink = nlink > 1 or util.checknlink(f)
                if nlink > 1 or not self._trustnlink:
                    util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        self.auditor(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        dirname = os.path.dirname(linkname)
        if not os.path.exists(dirname):
            util.makedirs(dirname, self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            f = self(dst, "w")
            f.write(src)
            f.close()
            self._fixfilemode(dst)

    def audit(self, path):
        self.auditor(path)

    def join(self, path):
        if path:
            return os.path.join(self.base, path)
        else:
            return self.base

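# join() treats a missing or empty path as "the base itself", so the
# directory helpers inherited from abstractopener can also target the
# base directory (illustrative):
#
#     vfs = opener('/srv/repos/foo', audit=False)
#     vfs.join(None)      # -> '/srv/repos/foo'
#     vfs.join('hgrc')    # -> '/srv/repos/foo/hgrc'
#     vfs.exists(None)    # does the base directory itself exist?
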
class filteropener(abstractopener):
    '''Wrapper opener for filtering filenames with a function.'''

    def __init__(self, opener, filter):
        self._filter = filter
        self._orig = opener

    def __call__(self, path, *args, **kwargs):
        return self._orig(self._filter(path), *args, **kwargs)

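# filteropener rewrites only the name handed to __call__; read()/write()
# from abstractopener funnel through __call__, so they are filtered too.
# A toy filter (not the real store filename encoding):
#
#     raw = opener('/tmp/store-demo', audit=False)
#     enc = filteropener(raw, str.upper)
#     enc.write('abc', 'data')  # actually creates '/tmp/store-demo/ABC'
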
def canonpath(root, cwd, myname, auditor=None):
    '''return the canonical path of myname, given cwd and root'''
    if util.endswithsep(root):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    if auditor is None:
        auditor = pathauditor(root)
    if name != rootsep and name.startswith(rootsep):
        name = name[len(rootsep):]
        auditor(name)
        return util.pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows). The list
        # `rel' holds the reversed list of components making up the relative
        # file name we want.
        rel = []
        while True:
            try:
                s = util.samefile(name, root)
            except OSError:
                s = False
            if s:
                if not rel:
                    # name was actually the same as root (maybe a symlink)
                    return ''
                rel.reverse()
                name = os.path.join(*rel)
                auditor(name)
                return util.pconvert(name)
            dirname, basename = os.path.split(name)
            rel.append(basename)
            if dirname == name:
                break
            name = dirname

        raise util.Abort('%s not under root' % myname)

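# canonpath() reduces user-supplied names to repo-relative form
# (illustrative results; auditing may stat the filesystem):
#
#     canonpath('/repo', 'src', 'main.c')    # -> 'src/main.c'
#     canonpath('/repo', '', '/repo/a/b')    # -> 'a/b'
#     canonpath('/repo', '', '/etc/passwd')  # raises util.Abort
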
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs

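# Enumerating repositories under a tree, e.g. for hgwebdir-style
# publishing (hypothetical path):
#
#     for repopath in walkrepos('/srv/repos', followsym=True):
#         print repopath
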
def osrcpath():
    '''return default os-specific hgrc search path'''
    path = systemrcpath()
    path.extend(userrcpath())
    path = [os.path.normpath(f) for f in path]
    return path

_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath

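# HGRCPATH overrides the OS defaults; directory entries contribute their
# *.rc files, and the result is cached in _rcpath for the lifetime of the
# process (illustrative):
#
#     os.environ['HGRCPATH'] = '/etc/hg-extra.rc'
#     rcpath()  # -> ['/etc/hg-extra.rc']
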
if os.name != 'nt':

    def rcfiles(path):
        rcs = [os.path.join(path, 'hgrc')]
        rcdir = os.path.join(path, 'hgrc.d')
        try:
            rcs.extend([os.path.join(rcdir, f)
                        for f, kind in osutil.listdir(rcdir)
                        if f.endswith(".rc")])
        except OSError:
            pass
        return rcs

    def systemrcpath():
        path = []
        if sys.platform == 'plan9':
            root = 'lib/mercurial'
        else:
            root = 'etc/mercurial'
        # old mod_python does not set sys.argv
        if len(getattr(sys, 'argv', [])) > 0:
            p = os.path.dirname(os.path.dirname(sys.argv[0]))
            path.extend(rcfiles(os.path.join(p, root)))
        path.extend(rcfiles('/' + root))
        return path

    def userrcpath():
        if sys.platform == 'plan9':
            return [os.environ['home'] + '/lib/hgrc']
        else:
            return [os.path.expanduser('~/.hgrc')]

else:

    import _winreg

    def systemrcpath():
        '''return default os-specific hgrc search path'''
        rcpath = []
        filename = util.executablepath()
        # Use mercurial.ini found in directory with hg.exe
        progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
        if os.path.isfile(progrc):
            rcpath.append(progrc)
            return rcpath
        # Use hgrc.d found in directory with hg.exe
        progrcd = os.path.join(os.path.dirname(filename), 'hgrc.d')
        if os.path.isdir(progrcd):
            for f, kind in osutil.listdir(progrcd):
                if f.endswith('.rc'):
                    rcpath.append(os.path.join(progrcd, f))
            return rcpath
        # else look for a system rcpath in the registry
        value = util.lookupreg('SOFTWARE\\Mercurial', None,
                               _winreg.HKEY_LOCAL_MACHINE)
        if not isinstance(value, str) or not value:
            return rcpath
        value = util.localpath(value)
        for p in value.split(os.pathsep):
            if p.lower().endswith('mercurial.ini'):
                rcpath.append(p)
            elif os.path.isdir(p):
                for f, kind in osutil.listdir(p):
                    if f.endswith('.rc'):
                        rcpath.append(os.path.join(p, f))
        return rcpath

    def userrcpath():
        '''return os-specific hgrc search path to the user dir'''
        home = os.path.expanduser('~')
        path = [os.path.join(home, 'mercurial.ini'),
                os.path.join(home, '.hgrc')]
        userprofile = os.environ.get('USERPROFILE')
        if userprofile:
            path.append(os.path.join(userprofile, 'mercurial.ini'))
            path.append(os.path.join(userprofile, '.hgrc'))
        return path

def revsingle(repo, revspec, default='.'):
    if not revspec:
        return repo[default]

    l = revrange(repo, [revspec])
    if len(l) < 1:
        raise util.Abort(_('empty revision set'))
    return repo[l[-1]]

def revpair(repo, revs):
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    if len(l) == 0:
        if revs:
            raise util.Abort(_('empty revision range'))
        return repo.dirstate.p1(), None

    if len(l) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
        return repo.lookup(l[0]), None

    return repo.lookup(l[0]), repo.lookup(l[-1])

_revrangesep = ':'

def revrange(repo, revs):
    """Return a list of revision numbers built from a list of revision
    specifications."""

    def revfix(repo, val, defval):
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    seen, l = set(), []
    for spec in revs:
        if l and not seen:
            seen = set(l)
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            if isinstance(spec, int):
                seen.add(spec)
                l.append(spec)
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                step = start > end and -1 or 1
                if not seen and not l:
                    # by far the most common case: revs = ["-1:0"]
                    l = range(start, end + step, step)
                    # defer syncing seen until next iteration
                    continue
                newrevs = set(xrange(start, end + step, step))
                if seen:
                    newrevs.difference_update(seen)
                    seen.update(newrevs)
                else:
                    seen = newrevs
                l.extend(sorted(newrevs, reverse=start > end))
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l.append(rev)
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec)
        dl = [r for r in m(repo, xrange(len(repo))) if r not in seen]
        l.extend(dl)
        seen.update(dl)

    return l

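# The revision-spec helpers in practice ('repo' is an open repository
# object; the specs are illustrative):
#
#     ctx = revsingle(repo, '.')       # a changectx; here the working parent
#     a, b = revpair(repo, ['0:tip'])  # endpoint nodes of a range
#     revrange(repo, ['2:0', 'tip'])   # ordered, de-duplicated rev numbers
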
def expandpats(pats):
    if not util.expandglobs:
        return list(pats)
    ret = []
    for p in pats:
        kind, name = matchmod._patsplit(p, None)
        if kind is None:
            try:
                globbed = glob.glob(name)
            except re.error:
                globbed = [name]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(p)
    return ret

def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default)
    def badfn(f, msg):
        ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    return m, pats

def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    return matchandpats(ctx, pats, opts, globbed, default)[0]

def matchall(repo):
    return matchmod.always(repo.root, repo.getcwd())

def matchfiles(repo, files):
    return matchmod.exact(repo.root, repo.getcwd(), files)

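# A matcher built from an exact file list is itself callable and
# remembers its file set (illustrative):
#
#     m = matchfiles(repo, ['a.txt', 'b/c.txt'])
#     m('a.txt')  # -> True
#     m.files()   # -> ['a.txt', 'b/c.txt']
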
def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)
    # we'd use status here, except handling of symlinks and ignore is tricky
    added, unknown, deleted, removed = [], [], [], []
    audit_path = pathauditor(repo.root)
    m = match(repo[None], pats, opts)
    rejected = []
    m.bad = lambda x, y: rejected.append(x)

    for abs in repo.walk(m):
        target = repo.wjoin(abs)
        good = True
        try:
            audit_path(abs)
        except (OSError, util.Abort):
            good = False
        rel = m.rel(abs)
        exact = m.exact(abs)
        if good and abs not in repo.dirstate:
            unknown.append(abs)
            if repo.ui.verbose or not exact:
                repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
        elif (repo.dirstate[abs] != 'r' and
              (not good or not os.path.lexists(target) or
               (os.path.isdir(target) and not os.path.islink(target)))):
            deleted.append(abs)
            if repo.ui.verbose or not exact:
                repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
        # for finding renames
        elif repo.dirstate[abs] == 'r':
            removed.append(abs)
        elif repo.dirstate[abs] == 'a':
            added.append(abs)
    copies = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo,
                added + unknown, removed + deleted, similarity):
            if repo.ui.verbose or not m.exact(old) or not m.exact(new):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (m.rel(old), m.rel(new), score * 100))
            copies[new] = old

    if not dry_run:
        wctx = repo[None]
        wlock = repo.wlock()
        try:
            wctx.forget(deleted)
            wctx.add(unknown)
            for new, old in copies.iteritems():
                wctx.copy(old, new)
        finally:
            wlock.release()

    for f in rejected:
        if f in m.files():
            return 1
    return 0

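# addremove() with rename detection; note that 'similarity' here is a
# 0..1 fraction, not the 0..100 percentage taken on the command line
# (illustrative):
#
#     addremove(repo, similarity=0.75)  # pair adds/removes >= 75% similar
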
def updatedir(ui, repo, patches, similarity=0):
    '''Update dirstate after patch application according to metadata'''
    if not patches:
        return []
    copies = []
    removes = set()
    cfiles = patches.keys()
    cwd = repo.getcwd()
    if cwd:
        cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
    for f in patches:
        gp = patches[f]
        if not gp:
            continue
        if gp.op == 'RENAME':
            copies.append((gp.oldpath, gp.path))
            removes.add(gp.oldpath)
        elif gp.op == 'COPY':
            copies.append((gp.oldpath, gp.path))
        elif gp.op == 'DELETE':
            removes.add(gp.path)

    wctx = repo[None]
    for src, dst in copies:
        dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
    if (not similarity) and removes:
        wctx.remove(sorted(removes), True)

    for f in patches:
        gp = patches[f]
        if gp and gp.mode:
            islink, isexec = gp.mode
            dst = repo.wjoin(gp.path)
            # patch won't create empty files
            if gp.op == 'ADD' and not os.path.lexists(dst):
                flags = (isexec and 'x' or '') + (islink and 'l' or '')
                repo.wwrite(gp.path, '', flags)
                util.setflags(dst, islink, isexec)
    addremove(repo, cfiles, similarity=similarity)
    files = patches.keys()
    files.extend([r for r in removes if r not in files])
    return sorted(files)

def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    various reasons, it might not end with dst being marked as copied from
    src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)

def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r not in supported:
            if not r or not r[0].isalnum():
                raise error.RequirementError(_(".hg/requires file is corrupt"))
            missings.append(r)
    missings.sort()
    if missings:
        raise error.RequirementError(
            _("unknown repository format: requires features '%s' (upgrade "
              "Mercurial)") % "', '".join(missings))
    return requirements

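# Reading the requirements of an existing repository (hypothetical path;
# the 'supported' set would normally come from the repository class):
#
#     hgvfs = opener('/srv/repos/foo/.hg', audit=False)
#     readrequires(hgvfs, set(['revlogv1', 'store', 'fncache', 'dotencode']))
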
class filecacheentry(object):
    def __init__(self, path):
        self.path = path
        self.cachestat = filecacheentry.stat(self.path)

        if self.cachestat:
            self._cacheable = self.cachestat.cacheable()
        else:
            # None means we don't know yet
            self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecacheentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecacheentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise

class filecache(object):
    '''A property-like decorator that tracks a file under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates
    the object when needed, updating the new stat info in _filecache.

    Mercurial either atomically renames or appends to files under .hg, so
    to ensure the cache is reliable we need the filesystem to be able to
    tell us if a file has been replaced. If it can't, we fall back to
    recreating the object on every call (essentially the same behaviour as
    propertycache).'''
    def __init__(self, path):
        self.path = path

    def join(self, obj, fname):
        """Used to compute the runtime path of the cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            path = self.join(obj, self.path)

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(path)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name in obj._filecache:
            obj._filecache[self.name].obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError, self.name
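
# Typical wiring of the filecache decorator (a sketch; 'repofilecache',
# 'somerepo' and 'parse' are hypothetical stand-ins):
#
#     class repofilecache(filecache):
#         def join(self, obj, fname):
#             return obj.join(fname)  # resolve the file under .hg/
#
#     class somerepo(object):
#         def __init__(self):
#             self._filecache = {}  # required by filecache.__get__
#         def join(self, fname):
#             return os.path.join('/repo/.hg', fname)
#         @repofilecache('bookmarks')
#         def bookmarks(self):
#             return parse(self.join('bookmarks'))  # re-run only on change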