localrepo: optimize internode status calls using match.always...
Jesse Glick
r16645:9a21fc2c default
@@ -1,2348 +1,2350 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()
        self._dirtyphases = False
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        openerreqs = set(('revlogv1', 'generaldelta'))
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    @storecache('phaseroots')
    def _phaseroots(self):
        phaseroots, self._dirtyphases = phases.readroots(
            self, self._phasedefaults)
        return phaseroots

    @propertycache
    def _phaserev(self):
        cache = [phases.public] * len(self)
        for phase in phases.trackedphases:
            roots = map(self.changelog.rev, self._phaseroots[phase])
            if roots:
                for rev in roots:
                    cache[rev] = phase
                for rev in self.changelog.descendants(*roots):
                    cache[rev] = phase
        return cache

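    # A minimal sketch of the result, assuming Mercurial's phase constants
    # (phases.public == 0, draft == 1, secret == 2) and a five-revision
    # repo whose only draft root is rev 3:
    #
    #     repo._phaserev  # -> [0, 0, 0, 1, 1]  (rev 4 descends from rev 3)
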
    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

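    # A minimal usage sketch: __getitem__ accepts the usual changeids, and
    # None selects the working directory:
    #
    #     repo[0]       # changectx for revision 0
    #     repo['tip']   # changectx for the tip
    #     repo[None]    # workingctx for the working directory
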
    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

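    # A minimal usage sketch for revs()/set(); revset.formatspec escapes
    # the arguments (%d for an int, %s for a string) before matching:
    #
    #     repo.revs('ancestors(%d)', 5)            # -> list of rev numbers
    #     for ctx in repo.set('branch(%s)', 'default'):
    #         pass                                 # changectx per match
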
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

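    # A minimal usage sketch, assuming 'v1.0' is a new tag name and the
    # working copy of .hgtags is clean:
    #
    #     repo.tag('v1.0', repo['.'].node(), 'Added tag v1.0',
    #              False, 'user <user@example.com>', None)
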
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        for k, v in self._tagscache.tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except error.LookupError:
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt

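    # A minimal sketch of the two views, assuming a repo whose 'default'
    # branch has two heads h1 and h2, with h2 the tipmost open head:
    #
    #     repo.branchmap()   # -> {'default': [h1, h2]}  (all branch heads)
    #     repo.branchtags()  # -> {'default': h2}        (one tipmost head)
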
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhnode = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhnode)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or self._phaserev[r] >= phases.secret)
            result.append(resp)
        return result

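    # A minimal sketch of known(), assuming node1 is a public changeset in
    # the repo and node2 is not present at all (both names hypothetical):
    #
    #     repo.known([node1, node2])  # -> [True, False]
    #
    # Secret changesets are reported as unknown, so they stay invisible to
    # the 'known' wire-protocol capability advertised above.
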
    def local(self):
        return self

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

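    # A minimal hgrc sketch of what _loadfilter consumes: each section maps
    # a file pattern to either a registered data filter (the win32text
    # extension registers 'cleverencode:'/'cleverdecode:') or a shell
    # command run through util.filter; '!' disables a pattern:
    #
    #     [encode]
    #     **.txt = cleverencode:
    #
    #     [decode]
    #     **.txt = cleverdecode:
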
    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

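    # A minimal usage sketch (desc is an arbitrary label recorded in
    # journal.desc): close() commits the transaction, release() aborts it
    # if close() was never reached:
    #
    #     tr = repo.transaction('example')
    #     try:
    #         # ... append to revlogs via tr ...
    #         tr.close()
    #     finally:
    #         tr.release()
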
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')
        delcache('_phaserev')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
927 l = self._lockref and self._lockref()
927 l = self._lockref and self._lockref()
928 if l is not None and l.held:
928 if l is not None and l.held:
929 l.lock()
929 l.lock()
930 return l
930 return l
931
931
932 def unlock():
932 def unlock():
933 self.store.write()
933 self.store.write()
934 if self._dirtyphases:
934 if self._dirtyphases:
935 phases.writeroots(self, self._phaseroots)
935 phases.writeroots(self, self._phaseroots)
936 self._dirtyphases = False
936 self._dirtyphases = False
937 for k, ce in self._filecache.items():
937 for k, ce in self._filecache.items():
938 if k == 'dirstate':
938 if k == 'dirstate':
939 continue
939 continue
940 ce.refresh()
940 ce.refresh()
941
941
942 l = self._lock(self.sjoin("lock"), wait, unlock,
942 l = self._lock(self.sjoin("lock"), wait, unlock,
943 self.invalidate, _('repository %s') % self.origroot)
943 self.invalidate, _('repository %s') % self.origroot)
944 self._lockref = weakref.ref(l)
944 self._lockref = weakref.ref(l)
945 return l
945 return l
946
946
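    # A typical caller pattern for lock() (a sketch, not code from this
    # file): take the store lock around a transaction so the unlock
    # callback above flushes the store and dirty phase data on release:
    #
    #   l = repo.lock()
    #   tr = None
    #   try:
    #       tr = repo.transaction('example')
    #       # ... write to the store ...
    #       tr.close()
    #   finally:
    #       if tr:
    #           tr.release()
    #       l.release()
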
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

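    # Lock-ordering note (an observation from the code below, not enforced
    # here): callers that need both locks take wlock before lock; commit()
    # acquires the working-directory lock and then commitctx() acquires the
    # store lock, keeping acquisition order consistent.
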
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

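        # content is unchanged: reuse the parent filenode (flags live in
        # the manifest, so a flags-only change needs no new filelog entry)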
        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
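            # status tuples are (modified, added, removed, deleted,
            # unknown, ignored, clean); the changes[N] indexing below
            # follows that order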
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
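                # the new changeset will be appended at this revision
                # number; file and manifest entries written below point
                # back to it through this linkrev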
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in the proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract
                # anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes, or between a node
        and the working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
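            # fast path: when the matcher matches everything there is
            # nothing to filter out, so return the manifest copy as is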
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
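        # three comparison modes from here on: working directory against
        # its first parent (parentworking), working directory against an
        # arbitrary revision (working), or two stored revisions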
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
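        # walk first parents from each node until a merge or the root is
        # hit; report (starting node, stop node, stop's p1, stop's p2)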
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
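        # for each (top, bottom) pair, walk the first-parent chain from
        # top towards bottom, recording nodes at exponentially growing
        # distances (1, 2, 4, ...) to keep the sample small on long chains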
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def pull(self, remote, heads=None, force=False):
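        '''pull changes from remote into this repository.

        Returns 0 when there was nothing to fetch, otherwise the value
        returned by addchangegroup(). Phase information is synchronized
        with the remote either way.'''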
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and unpublishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)
        finally:
            lock.release()

        return result

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # the push succeeded; synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # our whole push failed; synchronize on all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changeset filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents of missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs that are draft on the remote
                    # but public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly a set of roots; we may want to ensure
                    # XXX that, but it is costly.
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
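                    # ask the remote to move each such head from draft to
                    # public (pushkey arguments: namespace, key, old value,
                    # new value)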
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

1796 def getbundle(self, source, heads=None, common=None):
1798 def getbundle(self, source, heads=None, common=None):
1797 """Like changegroupsubset, but returns the set difference between the
1799 """Like changegroupsubset, but returns the set difference between the
1798 ancestors of heads and the ancestors of common.
1800 ancestors of heads and the ancestors of common.
1799
1801
1800 If heads is None, use the local heads. If common is None, use [nullid].
1802 If heads is None, use the local heads. If common is None, use [nullid].
1801
1803
1802 The nodes in common might not all be known locally due to the way the
1804 The nodes in common might not all be known locally due to the way the
1803 current discovery protocol works.
1805 current discovery protocol works.
1804 """
1806 """
1805 cl = self.changelog
1807 cl = self.changelog
1806 if common:
1808 if common:
1807 nm = cl.nodemap
1809 nm = cl.nodemap
1808 common = [n for n in common if n in nm]
1810 common = [n for n in common if n in nm]
1809 else:
1811 else:
1810 common = [nullid]
1812 common = [nullid]
1811 if not heads:
1813 if not heads:
1812 heads = cl.heads()
1814 heads = cl.heads()
1813 return self.getlocalbundle(source,
1815 return self.getlocalbundle(source,
1814 discovery.outgoing(cl, common, heads))
1816 discovery.outgoing(cl, common, heads))
1815
1817
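Both parameters are optional, as the docstring says; a hedged sketch (repo and commonnodes are illustrative names, not from this diff):

    # full bundle: heads default to the local heads, common to [nullid]
    bundle = repo.getbundle('pull')
    # incremental bundle relative to a known common set of nodes
    bundle = repo.getbundle('pull', heads=repo.heads(), common=commonnodes)
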
1816 def _changegroupsubset(self, commonrevs, csets, heads, source):
1818 def _changegroupsubset(self, commonrevs, csets, heads, source):
1817
1819
1818 cl = self.changelog
1820 cl = self.changelog
1819 mf = self.manifest
1821 mf = self.manifest
1820 mfs = {} # needed manifests
1822 mfs = {} # needed manifests
1821 fnodes = {} # needed file nodes
1823 fnodes = {} # needed file nodes
1822 changedfiles = set()
1824 changedfiles = set()
1823 fstate = ['', {}]
1825 fstate = ['', {}]
1824 count = [0, 0]
1826 count = [0, 0]
1825
1827
1826 # can we go through the fast path?
1828 # can we go through the fast path?
1827 heads.sort()
1829 heads.sort()
1828 if heads == sorted(self.heads()):
1830 if heads == sorted(self.heads()):
1829 return self._changegroup(csets, source)
1831 return self._changegroup(csets, source)
1830
1832
1831 # slow path
1833 # slow path
1832 self.hook('preoutgoing', throw=True, source=source)
1834 self.hook('preoutgoing', throw=True, source=source)
1833 self.changegroupinfo(csets, source)
1835 self.changegroupinfo(csets, source)
1834
1836
1835 # filter any nodes that claim to be part of the known set
1837 # filter any nodes that claim to be part of the known set
1836 def prune(revlog, missing):
1838 def prune(revlog, missing):
1837 rr, rl = revlog.rev, revlog.linkrev
1839 rr, rl = revlog.rev, revlog.linkrev
1838 return [n for n in missing
1840 return [n for n in missing
1839 if rl(rr(n)) not in commonrevs]
1841 if rl(rr(n)) not in commonrevs]
1840
1842
1841 progress = self.ui.progress
1843 progress = self.ui.progress
1842 _bundling = _('bundling')
1844 _bundling = _('bundling')
1843 _changesets = _('changesets')
1845 _changesets = _('changesets')
1844 _manifests = _('manifests')
1846 _manifests = _('manifests')
1845 _files = _('files')
1847 _files = _('files')
1846
1848
1847 def lookup(revlog, x):
1849 def lookup(revlog, x):
1848 if revlog == cl:
1850 if revlog == cl:
1849 c = cl.read(x)
1851 c = cl.read(x)
1850 changedfiles.update(c[3])
1852 changedfiles.update(c[3])
1851 mfs.setdefault(c[0], x)
1853 mfs.setdefault(c[0], x)
1852 count[0] += 1
1854 count[0] += 1
1853 progress(_bundling, count[0],
1855 progress(_bundling, count[0],
1854 unit=_changesets, total=count[1])
1856 unit=_changesets, total=count[1])
1855 return x
1857 return x
1856 elif revlog == mf:
1858 elif revlog == mf:
1857 clnode = mfs[x]
1859 clnode = mfs[x]
1858 mdata = mf.readfast(x)
1860 mdata = mf.readfast(x)
1859 for f, n in mdata.iteritems():
1861 for f, n in mdata.iteritems():
1860 if f in changedfiles:
1862 if f in changedfiles:
1861 fnodes[f].setdefault(n, clnode)
1863 fnodes[f].setdefault(n, clnode)
1862 count[0] += 1
1864 count[0] += 1
1863 progress(_bundling, count[0],
1865 progress(_bundling, count[0],
1864 unit=_manifests, total=count[1])
1866 unit=_manifests, total=count[1])
1865 return clnode
1867 return clnode
1866 else:
1868 else:
1867 progress(_bundling, count[0], item=fstate[0],
1869 progress(_bundling, count[0], item=fstate[0],
1868 unit=_files, total=count[1])
1870 unit=_files, total=count[1])
1869 return fstate[1][x]
1871 return fstate[1][x]
1870
1872
1871 bundler = changegroup.bundle10(lookup)
1873 bundler = changegroup.bundle10(lookup)
1872 reorder = self.ui.config('bundle', 'reorder', 'auto')
1874 reorder = self.ui.config('bundle', 'reorder', 'auto')
1873 if reorder == 'auto':
1875 if reorder == 'auto':
1874 reorder = None
1876 reorder = None
1875 else:
1877 else:
1876 reorder = util.parsebool(reorder)
1878 reorder = util.parsebool(reorder)
1877
1879
1878 def gengroup():
1880 def gengroup():
1879 # Create a changenode group generator that will call our functions
1881 # Create a changenode group generator that will call our functions
1880 # back to lookup the owning changenode and collect information.
1882 # back to lookup the owning changenode and collect information.
1881 count[:] = [0, len(csets)]
1883 count[:] = [0, len(csets)]
1882 for chunk in cl.group(csets, bundler, reorder=reorder):
1884 for chunk in cl.group(csets, bundler, reorder=reorder):
1883 yield chunk
1885 yield chunk
1884 progress(_bundling, None)
1886 progress(_bundling, None)
1885
1887
1886 # Create a generator for the manifestnodes that calls our lookup
1888 # Create a generator for the manifestnodes that calls our lookup
1887 # and data collection functions back.
1889 # and data collection functions back.
1888 for f in changedfiles:
1890 for f in changedfiles:
1889 fnodes[f] = {}
1891 fnodes[f] = {}
1890 count[:] = [0, len(mfs)]
1892 count[:] = [0, len(mfs)]
1891 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1893 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1892 yield chunk
1894 yield chunk
1893 progress(_bundling, None)
1895 progress(_bundling, None)
1894
1896
1895 mfs.clear()
1897 mfs.clear()
1896
1898
1897 # Go through all our files in order sorted by name.
1899 # Go through all our files in order sorted by name.
1898 count[:] = [0, len(changedfiles)]
1900 count[:] = [0, len(changedfiles)]
1899 for fname in sorted(changedfiles):
1901 for fname in sorted(changedfiles):
1900 filerevlog = self.file(fname)
1902 filerevlog = self.file(fname)
1901 if not len(filerevlog):
1903 if not len(filerevlog):
1902 raise util.Abort(_("empty or missing revlog for %s") % fname)
1904 raise util.Abort(_("empty or missing revlog for %s") % fname)
1903 fstate[0] = fname
1905 fstate[0] = fname
1904 fstate[1] = fnodes.pop(fname, {})
1906 fstate[1] = fnodes.pop(fname, {})
1905
1907
1906 nodelist = prune(filerevlog, fstate[1])
1908 nodelist = prune(filerevlog, fstate[1])
1907 if nodelist:
1909 if nodelist:
1908 count[0] += 1
1910 count[0] += 1
1909 yield bundler.fileheader(fname)
1911 yield bundler.fileheader(fname)
1910 for chunk in filerevlog.group(nodelist, bundler, reorder):
1912 for chunk in filerevlog.group(nodelist, bundler, reorder):
1911 yield chunk
1913 yield chunk
1912
1914
1913 # Signal that no more groups are left.
1915 # Signal that no more groups are left.
1914 yield bundler.close()
1916 yield bundler.close()
1915 progress(_bundling, None)
1917 progress(_bundling, None)
1916
1918
1917 if csets:
1919 if csets:
1918 self.hook('outgoing', node=hex(csets[0]), source=source)
1920 self.hook('outgoing', node=hex(csets[0]), source=source)
1919
1921
1920 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1922 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1921
1923
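The bundle.reorder handling that appears in both bundling methods reduces to a small decision; a standalone sketch of that logic, using the same util.parsebool helper the code above calls:

    from mercurial import util

    def parsereorder(value):
        if value == 'auto':            # default: let the revlog code decide
            return None
        return util.parsebool(value)   # '1'/'yes' -> True, '0'/'no' -> False

    assert parsereorder('auto') is None
    assert parsereorder('yes') is True
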
1922 def changegroup(self, basenodes, source):
1924 def changegroup(self, basenodes, source):
1923 # to avoid a race we use changegroupsubset() (issue1320)
1925 # to avoid a race we use changegroupsubset() (issue1320)
1924 return self.changegroupsubset(basenodes, self.heads(), source)
1926 return self.changegroupsubset(basenodes, self.heads(), source)
1925
1927
1926 def _changegroup(self, nodes, source):
1928 def _changegroup(self, nodes, source):
1927 """Compute the changegroup of all nodes that we have that a recipient
1929 """Compute the changegroup of all nodes that we have that a recipient
1928 doesn't. Return a chunkbuffer object whose read() method will return
1930 doesn't. Return a chunkbuffer object whose read() method will return
1929 successive changegroup chunks.
1931 successive changegroup chunks.
1930
1932
1931 This is much easier than the previous function as we can assume that
1933 This is much easier than the previous function as we can assume that
1932 the recipient has any changenode we aren't sending them.
1934 the recipient has any changenode we aren't sending them.
1933
1935
1934 nodes is the set of nodes to send"""
1936 nodes is the set of nodes to send"""
1935
1937
1936 cl = self.changelog
1938 cl = self.changelog
1937 mf = self.manifest
1939 mf = self.manifest
1938 mfs = {}
1940 mfs = {}
1939 changedfiles = set()
1941 changedfiles = set()
1940 fstate = ['']
1942 fstate = ['']
1941 count = [0, 0]
1943 count = [0, 0]
1942
1944
1943 self.hook('preoutgoing', throw=True, source=source)
1945 self.hook('preoutgoing', throw=True, source=source)
1944 self.changegroupinfo(nodes, source)
1946 self.changegroupinfo(nodes, source)
1945
1947
1946 revset = set([cl.rev(n) for n in nodes])
1948 revset = set([cl.rev(n) for n in nodes])
1947
1949
1948 def gennodelst(log):
1950 def gennodelst(log):
1949 ln, llr = log.node, log.linkrev
1951 ln, llr = log.node, log.linkrev
1950 return [ln(r) for r in log if llr(r) in revset]
1952 return [ln(r) for r in log if llr(r) in revset]
1951
1953
1952 progress = self.ui.progress
1954 progress = self.ui.progress
1953 _bundling = _('bundling')
1955 _bundling = _('bundling')
1954 _changesets = _('changesets')
1956 _changesets = _('changesets')
1955 _manifests = _('manifests')
1957 _manifests = _('manifests')
1956 _files = _('files')
1958 _files = _('files')
1957
1959
1958 def lookup(revlog, x):
1960 def lookup(revlog, x):
1959 if revlog == cl:
1961 if revlog == cl:
1960 c = cl.read(x)
1962 c = cl.read(x)
1961 changedfiles.update(c[3])
1963 changedfiles.update(c[3])
1962 mfs.setdefault(c[0], x)
1964 mfs.setdefault(c[0], x)
1963 count[0] += 1
1965 count[0] += 1
1964 progress(_bundling, count[0],
1966 progress(_bundling, count[0],
1965 unit=_changesets, total=count[1])
1967 unit=_changesets, total=count[1])
1966 return x
1968 return x
1967 elif revlog == mf:
1969 elif revlog == mf:
1968 count[0] += 1
1970 count[0] += 1
1969 progress(_bundling, count[0],
1971 progress(_bundling, count[0],
1970 unit=_manifests, total=count[1])
1972 unit=_manifests, total=count[1])
1971 return cl.node(revlog.linkrev(revlog.rev(x)))
1973 return cl.node(revlog.linkrev(revlog.rev(x)))
1972 else:
1974 else:
1973 progress(_bundling, count[0], item=fstate[0],
1975 progress(_bundling, count[0], item=fstate[0],
1974 total=count[1], unit=_files)
1976 total=count[1], unit=_files)
1975 return cl.node(revlog.linkrev(revlog.rev(x)))
1977 return cl.node(revlog.linkrev(revlog.rev(x)))
1976
1978
1977 bundler = changegroup.bundle10(lookup)
1979 bundler = changegroup.bundle10(lookup)
1978 reorder = self.ui.config('bundle', 'reorder', 'auto')
1980 reorder = self.ui.config('bundle', 'reorder', 'auto')
1979 if reorder == 'auto':
1981 if reorder == 'auto':
1980 reorder = None
1982 reorder = None
1981 else:
1983 else:
1982 reorder = util.parsebool(reorder)
1984 reorder = util.parsebool(reorder)
1983
1985
1984 def gengroup():
1986 def gengroup():
1985 '''yield a sequence of changegroup chunks (strings)'''
1987 '''yield a sequence of changegroup chunks (strings)'''
1986 # construct a list of all changed files
1988 # construct a list of all changed files
1987
1989
1988 count[:] = [0, len(nodes)]
1990 count[:] = [0, len(nodes)]
1989 for chunk in cl.group(nodes, bundler, reorder=reorder):
1991 for chunk in cl.group(nodes, bundler, reorder=reorder):
1990 yield chunk
1992 yield chunk
1991 progress(_bundling, None)
1993 progress(_bundling, None)
1992
1994
1993 count[:] = [0, len(mfs)]
1995 count[:] = [0, len(mfs)]
1994 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1996 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1995 yield chunk
1997 yield chunk
1996 progress(_bundling, None)
1998 progress(_bundling, None)
1997
1999
1998 count[:] = [0, len(changedfiles)]
2000 count[:] = [0, len(changedfiles)]
1999 for fname in sorted(changedfiles):
2001 for fname in sorted(changedfiles):
2000 filerevlog = self.file(fname)
2002 filerevlog = self.file(fname)
2001 if not len(filerevlog):
2003 if not len(filerevlog):
2002 raise util.Abort(_("empty or missing revlog for %s") % fname)
2004 raise util.Abort(_("empty or missing revlog for %s") % fname)
2003 fstate[0] = fname
2005 fstate[0] = fname
2004 nodelist = gennodelst(filerevlog)
2006 nodelist = gennodelst(filerevlog)
2005 if nodelist:
2007 if nodelist:
2006 count[0] += 1
2008 count[0] += 1
2007 yield bundler.fileheader(fname)
2009 yield bundler.fileheader(fname)
2008 for chunk in filerevlog.group(nodelist, bundler, reorder):
2010 for chunk in filerevlog.group(nodelist, bundler, reorder):
2009 yield chunk
2011 yield chunk
2010 yield bundler.close()
2012 yield bundler.close()
2011 progress(_bundling, None)
2013 progress(_bundling, None)
2012
2014
2013 if nodes:
2015 if nodes:
2014 self.hook('outgoing', node=hex(nodes[0]), source=source)
2016 self.hook('outgoing', node=hex(nodes[0]), source=source)
2015
2017
2016 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2018 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2017
2019
2018 def addchangegroup(self, source, srctype, url, emptyok=False):
2020 def addchangegroup(self, source, srctype, url, emptyok=False):
2019 """Add the changegroup returned by source.read() to this repo.
2021 """Add the changegroup returned by source.read() to this repo.
2020 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2022 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2021 the URL of the repo where this changegroup is coming from.
2023 the URL of the repo where this changegroup is coming from.
2022
2024
2023 Return an integer summarizing the change to this repo:
2025 Return an integer summarizing the change to this repo:
2024 - nothing changed or no source: 0
2026 - nothing changed or no source: 0
2025 - more heads than before: 1+added heads (2..n)
2027 - more heads than before: 1+added heads (2..n)
2026 - fewer heads than before: -1-removed heads (-2..-n)
2028 - fewer heads than before: -1-removed heads (-2..-n)
2027 - number of heads stays the same: 1
2029 - number of heads stays the same: 1
2028 """
2030 """
2029 def csmap(x):
2031 def csmap(x):
2030 self.ui.debug("add changeset %s\n" % short(x))
2032 self.ui.debug("add changeset %s\n" % short(x))
2031 return len(cl)
2033 return len(cl)
2032
2034
2033 def revmap(x):
2035 def revmap(x):
2034 return cl.rev(x)
2036 return cl.rev(x)
2035
2037
2036 if not source:
2038 if not source:
2037 return 0
2039 return 0
2038
2040
2039 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2041 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2040
2042
2041 changesets = files = revisions = 0
2043 changesets = files = revisions = 0
2042 efiles = set()
2044 efiles = set()
2043
2045
2044 # write changelog data to temp files so concurrent readers will not see
2046 # write changelog data to temp files so concurrent readers will not see
2045 # inconsistent view
2047 # inconsistent view
2046 cl = self.changelog
2048 cl = self.changelog
2047 cl.delayupdate()
2049 cl.delayupdate()
2048 oldheads = cl.heads()
2050 oldheads = cl.heads()
2049
2051
2050 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2052 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2051 try:
2053 try:
2052 trp = weakref.proxy(tr)
2054 trp = weakref.proxy(tr)
2053 # pull off the changeset group
2055 # pull off the changeset group
2054 self.ui.status(_("adding changesets\n"))
2056 self.ui.status(_("adding changesets\n"))
2055 clstart = len(cl)
2057 clstart = len(cl)
2056 class prog(object):
2058 class prog(object):
2057 step = _('changesets')
2059 step = _('changesets')
2058 count = 1
2060 count = 1
2059 ui = self.ui
2061 ui = self.ui
2060 total = None
2062 total = None
2061 def __call__(self):
2063 def __call__(self):
2062 self.ui.progress(self.step, self.count, unit=_('chunks'),
2064 self.ui.progress(self.step, self.count, unit=_('chunks'),
2063 total=self.total)
2065 total=self.total)
2064 self.count += 1
2066 self.count += 1
2065 pr = prog()
2067 pr = prog()
2066 source.callback = pr
2068 source.callback = pr
2067
2069
2068 source.changelogheader()
2070 source.changelogheader()
2069 srccontent = cl.addgroup(source, csmap, trp)
2071 srccontent = cl.addgroup(source, csmap, trp)
2070 if not (srccontent or emptyok):
2072 if not (srccontent or emptyok):
2071 raise util.Abort(_("received changelog group is empty"))
2073 raise util.Abort(_("received changelog group is empty"))
2072 clend = len(cl)
2074 clend = len(cl)
2073 changesets = clend - clstart
2075 changesets = clend - clstart
2074 for c in xrange(clstart, clend):
2076 for c in xrange(clstart, clend):
2075 efiles.update(self[c].files())
2077 efiles.update(self[c].files())
2076 efiles = len(efiles)
2078 efiles = len(efiles)
2077 self.ui.progress(_('changesets'), None)
2079 self.ui.progress(_('changesets'), None)
2078
2080
2079 # pull off the manifest group
2081 # pull off the manifest group
2080 self.ui.status(_("adding manifests\n"))
2082 self.ui.status(_("adding manifests\n"))
2081 pr.step = _('manifests')
2083 pr.step = _('manifests')
2082 pr.count = 1
2084 pr.count = 1
2083 pr.total = changesets # manifests <= changesets
2085 pr.total = changesets # manifests <= changesets
2084 # no need to check for empty manifest group here:
2086 # no need to check for empty manifest group here:
2085 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2087 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2086 # no new manifest will be created and the manifest group will
2088 # no new manifest will be created and the manifest group will
2087 # be empty during the pull
2089 # be empty during the pull
2088 source.manifestheader()
2090 source.manifestheader()
2089 self.manifest.addgroup(source, revmap, trp)
2091 self.manifest.addgroup(source, revmap, trp)
2090 self.ui.progress(_('manifests'), None)
2092 self.ui.progress(_('manifests'), None)
2091
2093
2092 needfiles = {}
2094 needfiles = {}
2093 if self.ui.configbool('server', 'validate', default=False):
2095 if self.ui.configbool('server', 'validate', default=False):
2094 # validate incoming csets have their manifests
2096 # validate incoming csets have their manifests
2095 for cset in xrange(clstart, clend):
2097 for cset in xrange(clstart, clend):
2096 mfest = self.changelog.read(self.changelog.node(cset))[0]
2098 mfest = self.changelog.read(self.changelog.node(cset))[0]
2097 mfest = self.manifest.readdelta(mfest)
2099 mfest = self.manifest.readdelta(mfest)
2098 # store file nodes we must see
2100 # store file nodes we must see
2099 for f, n in mfest.iteritems():
2101 for f, n in mfest.iteritems():
2100 needfiles.setdefault(f, set()).add(n)
2102 needfiles.setdefault(f, set()).add(n)
2101
2103
2102 # process the files
2104 # process the files
2103 self.ui.status(_("adding file changes\n"))
2105 self.ui.status(_("adding file changes\n"))
2104 pr.step = _('files')
2106 pr.step = _('files')
2105 pr.count = 1
2107 pr.count = 1
2106 pr.total = efiles
2108 pr.total = efiles
2107 source.callback = None
2109 source.callback = None
2108
2110
2109 while True:
2111 while True:
2110 chunkdata = source.filelogheader()
2112 chunkdata = source.filelogheader()
2111 if not chunkdata:
2113 if not chunkdata:
2112 break
2114 break
2113 f = chunkdata["filename"]
2115 f = chunkdata["filename"]
2114 self.ui.debug("adding %s revisions\n" % f)
2116 self.ui.debug("adding %s revisions\n" % f)
2115 pr()
2117 pr()
2116 fl = self.file(f)
2118 fl = self.file(f)
2117 o = len(fl)
2119 o = len(fl)
2118 if not fl.addgroup(source, revmap, trp):
2120 if not fl.addgroup(source, revmap, trp):
2119 raise util.Abort(_("received file revlog group is empty"))
2121 raise util.Abort(_("received file revlog group is empty"))
2120 revisions += len(fl) - o
2122 revisions += len(fl) - o
2121 files += 1
2123 files += 1
2122 if f in needfiles:
2124 if f in needfiles:
2123 needs = needfiles[f]
2125 needs = needfiles[f]
2124 for new in xrange(o, len(fl)):
2126 for new in xrange(o, len(fl)):
2125 n = fl.node(new)
2127 n = fl.node(new)
2126 if n in needs:
2128 if n in needs:
2127 needs.remove(n)
2129 needs.remove(n)
2128 if not needs:
2130 if not needs:
2129 del needfiles[f]
2131 del needfiles[f]
2130 self.ui.progress(_('files'), None)
2132 self.ui.progress(_('files'), None)
2131
2133
2132 for f, needs in needfiles.iteritems():
2134 for f, needs in needfiles.iteritems():
2133 fl = self.file(f)
2135 fl = self.file(f)
2134 for n in needs:
2136 for n in needs:
2135 try:
2137 try:
2136 fl.rev(n)
2138 fl.rev(n)
2137 except error.LookupError:
2139 except error.LookupError:
2138 raise util.Abort(
2140 raise util.Abort(
2139 _('missing file data for %s:%s - run hg verify') %
2141 _('missing file data for %s:%s - run hg verify') %
2140 (f, hex(n)))
2142 (f, hex(n)))
2141
2143
2142 dh = 0
2144 dh = 0
2143 if oldheads:
2145 if oldheads:
2144 heads = cl.heads()
2146 heads = cl.heads()
2145 dh = len(heads) - len(oldheads)
2147 dh = len(heads) - len(oldheads)
2146 for h in heads:
2148 for h in heads:
2147 if h not in oldheads and 'close' in self[h].extra():
2149 if h not in oldheads and 'close' in self[h].extra():
2148 dh -= 1
2150 dh -= 1
2149 htext = ""
2151 htext = ""
2150 if dh:
2152 if dh:
2151 htext = _(" (%+d heads)") % dh
2153 htext = _(" (%+d heads)") % dh
2152
2154
2153 self.ui.status(_("added %d changesets"
2155 self.ui.status(_("added %d changesets"
2154 " with %d changes to %d files%s\n")
2156 " with %d changes to %d files%s\n")
2155 % (changesets, revisions, files, htext))
2157 % (changesets, revisions, files, htext))
2156
2158
2157 if changesets > 0:
2159 if changesets > 0:
2158 p = lambda: cl.writepending() and self.root or ""
2160 p = lambda: cl.writepending() and self.root or ""
2159 self.hook('pretxnchangegroup', throw=True,
2161 self.hook('pretxnchangegroup', throw=True,
2160 node=hex(cl.node(clstart)), source=srctype,
2162 node=hex(cl.node(clstart)), source=srctype,
2161 url=url, pending=p)
2163 url=url, pending=p)
2162
2164
2163 added = [cl.node(r) for r in xrange(clstart, clend)]
2165 added = [cl.node(r) for r in xrange(clstart, clend)]
2164 publishing = self.ui.configbool('phases', 'publish', True)
2166 publishing = self.ui.configbool('phases', 'publish', True)
2165 if srctype == 'push':
2167 if srctype == 'push':
2166 # Old servers cannot push the boundary themselves.
2168 # Old servers cannot push the boundary themselves.
2167 # New servers won't push the boundary if the changeset already
2169 # New servers won't push the boundary if the changeset already
2168 # existed locally as secret
2170 # existed locally as secret
2169 #
2171 #
2170 # We should not use 'added' here but the list of all changes in
2172 # We should not use 'added' here but the list of all changes in
2171 # the bundle
2173 # the bundle
2172 if publishing:
2174 if publishing:
2173 phases.advanceboundary(self, phases.public, srccontent)
2175 phases.advanceboundary(self, phases.public, srccontent)
2174 else:
2176 else:
2175 phases.advanceboundary(self, phases.draft, srccontent)
2177 phases.advanceboundary(self, phases.draft, srccontent)
2176 phases.retractboundary(self, phases.draft, added)
2178 phases.retractboundary(self, phases.draft, added)
2177 elif srctype != 'strip':
2179 elif srctype != 'strip':
2178 # publishing only alters behavior during push
2180 # publishing only alters behavior during push
2179 #
2181 #
2180 # strip should not touch boundary at all
2182 # strip should not touch boundary at all
2181 phases.retractboundary(self, phases.draft, added)
2183 phases.retractboundary(self, phases.draft, added)
2182
2184
2183 # make changelog see real files again
2185 # make changelog see real files again
2184 cl.finalize(trp)
2186 cl.finalize(trp)
2185
2187
2186 tr.close()
2188 tr.close()
2187
2189
2188 if changesets > 0:
2190 if changesets > 0:
2189 def runhooks():
2191 def runhooks():
2190 # forcefully update the on-disk branch cache
2192 # forcefully update the on-disk branch cache
2191 self.ui.debug("updating the branch cache\n")
2193 self.ui.debug("updating the branch cache\n")
2192 self.updatebranchcache()
2194 self.updatebranchcache()
2193 self.hook("changegroup", node=hex(cl.node(clstart)),
2195 self.hook("changegroup", node=hex(cl.node(clstart)),
2194 source=srctype, url=url)
2196 source=srctype, url=url)
2195
2197
2196 for n in added:
2198 for n in added:
2197 self.hook("incoming", node=hex(n), source=srctype,
2199 self.hook("incoming", node=hex(n), source=srctype,
2198 url=url)
2200 url=url)
2199 self._afterlock(runhooks)
2201 self._afterlock(runhooks)
2200
2202
2201 finally:
2203 finally:
2202 tr.release()
2204 tr.release()
2203 # never return 0 here:
2205 # never return 0 here:
2204 if dh < 0:
2206 if dh < 0:
2205 return dh - 1
2207 return dh - 1
2206 else:
2208 else:
2207 return dh + 1
2209 return dh + 1
2208
2210
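A hedged helper decoding the integer contract from addchangegroup's docstring (the function is illustrative, not part of this diff):

    def describechange(ret):
        # interpret the value returned by addchangegroup()
        if ret == 0:
            return 'nothing changed'
        if ret == 1:
            return 'changed, same number of heads'
        if ret > 1:
            return 'changed, %d head(s) added' % (ret - 1)
        return 'changed, %d head(s) removed' % (-ret - 1)
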
2209 def stream_in(self, remote, requirements):
2211 def stream_in(self, remote, requirements):
2210 lock = self.lock()
2212 lock = self.lock()
2211 try:
2213 try:
2212 fp = remote.stream_out()
2214 fp = remote.stream_out()
2213 l = fp.readline()
2215 l = fp.readline()
2214 try:
2216 try:
2215 resp = int(l)
2217 resp = int(l)
2216 except ValueError:
2218 except ValueError:
2217 raise error.ResponseError(
2219 raise error.ResponseError(
2218 _('Unexpected response from remote server:'), l)
2220 _('Unexpected response from remote server:'), l)
2219 if resp == 1:
2221 if resp == 1:
2220 raise util.Abort(_('operation forbidden by server'))
2222 raise util.Abort(_('operation forbidden by server'))
2221 elif resp == 2:
2223 elif resp == 2:
2222 raise util.Abort(_('locking the remote repository failed'))
2224 raise util.Abort(_('locking the remote repository failed'))
2223 elif resp != 0:
2225 elif resp != 0:
2224 raise util.Abort(_('the server sent an unknown error code'))
2226 raise util.Abort(_('the server sent an unknown error code'))
2225 self.ui.status(_('streaming all changes\n'))
2227 self.ui.status(_('streaming all changes\n'))
2226 l = fp.readline()
2228 l = fp.readline()
2227 try:
2229 try:
2228 total_files, total_bytes = map(int, l.split(' ', 1))
2230 total_files, total_bytes = map(int, l.split(' ', 1))
2229 except (ValueError, TypeError):
2231 except (ValueError, TypeError):
2230 raise error.ResponseError(
2232 raise error.ResponseError(
2231 _('Unexpected response from remote server:'), l)
2233 _('Unexpected response from remote server:'), l)
2232 self.ui.status(_('%d files to transfer, %s of data\n') %
2234 self.ui.status(_('%d files to transfer, %s of data\n') %
2233 (total_files, util.bytecount(total_bytes)))
2235 (total_files, util.bytecount(total_bytes)))
2234 start = time.time()
2236 start = time.time()
2235 for i in xrange(total_files):
2237 for i in xrange(total_files):
2236 # XXX doesn't support '\n' or '\r' in filenames
2238 # XXX doesn't support '\n' or '\r' in filenames
2237 l = fp.readline()
2239 l = fp.readline()
2238 try:
2240 try:
2239 name, size = l.split('\0', 1)
2241 name, size = l.split('\0', 1)
2240 size = int(size)
2242 size = int(size)
2241 except (ValueError, TypeError):
2243 except (ValueError, TypeError):
2242 raise error.ResponseError(
2244 raise error.ResponseError(
2243 _('Unexpected response from remote server:'), l)
2245 _('Unexpected response from remote server:'), l)
2244 if self.ui.debugflag:
2246 if self.ui.debugflag:
2245 self.ui.debug('adding %s (%s)\n' %
2247 self.ui.debug('adding %s (%s)\n' %
2246 (name, util.bytecount(size)))
2248 (name, util.bytecount(size)))
2247 # for backwards compat, name was partially encoded
2249 # for backwards compat, name was partially encoded
2248 ofp = self.sopener(store.decodedir(name), 'w')
2250 ofp = self.sopener(store.decodedir(name), 'w')
2249 for chunk in util.filechunkiter(fp, limit=size):
2251 for chunk in util.filechunkiter(fp, limit=size):
2250 ofp.write(chunk)
2252 ofp.write(chunk)
2251 ofp.close()
2253 ofp.close()
2252 elapsed = time.time() - start
2254 elapsed = time.time() - start
2253 if elapsed <= 0:
2255 if elapsed <= 0:
2254 elapsed = 0.001
2256 elapsed = 0.001
2255 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2257 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2256 (util.bytecount(total_bytes), elapsed,
2258 (util.bytecount(total_bytes), elapsed,
2257 util.bytecount(total_bytes / elapsed)))
2259 util.bytecount(total_bytes / elapsed)))
2258
2260
2259 # new requirements = old non-format requirements + new format-related
2261 # new requirements = old non-format requirements + new format-related
2260 # requirements from the streamed-in repository
2262 # requirements from the streamed-in repository
2261 requirements.update(set(self.requirements) - self.supportedformats)
2263 requirements.update(set(self.requirements) - self.supportedformats)
2262 self._applyrequirements(requirements)
2264 self._applyrequirements(requirements)
2263 self._writerequirements()
2265 self._writerequirements()
2264
2266
2265 self.invalidate()
2267 self.invalidate()
2266 return len(self.heads()) + 1
2268 return len(self.heads()) + 1
2267 finally:
2269 finally:
2268 lock.release()
2270 lock.release()
2269
2271
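stream_in consumes the framing produced by the server's stream_out: a numeric status line, a 'files bytes' line, then a 'name\0size' header before each file's raw bytes. A hedged sketch of just that framing (fp is any file-like object; the helper names are illustrative):

    def readstreamheader(fp):
        resp = int(fp.readline())    # 0 ok, 1 forbidden, 2 remote lock failed
        if resp != 0:
            raise ValueError('server refused stream: %d' % resp)
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        return total_files, total_bytes

    def readfilehead(fp):
        name, size = fp.readline().split('\0', 1)
        return name, int(size)       # caller then reads exactly that many bytes
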
2270 def clone(self, remote, heads=[], stream=False):
2272 def clone(self, remote, heads=[], stream=False):
2271 '''clone remote repository.
2273 '''clone remote repository.
2272
2274
2273 keyword arguments:
2275 keyword arguments:
2274 heads: list of revs to clone (forces use of pull)
2276 heads: list of revs to clone (forces use of pull)
2275 stream: use streaming clone if possible'''
2277 stream: use streaming clone if possible'''
2276
2278
2277 # now, all clients that can request uncompressed clones can
2279 # now, all clients that can request uncompressed clones can
2278 # read repo formats supported by all servers that can serve
2280 # read repo formats supported by all servers that can serve
2279 # them.
2281 # them.
2280
2282
2281 # if revlog format changes, client will have to check version
2283 # if revlog format changes, client will have to check version
2282 # and format flags on "stream" capability, and use
2284 # and format flags on "stream" capability, and use
2283 # uncompressed only if compatible.
2285 # uncompressed only if compatible.
2284
2286
2285 if not stream:
2287 if not stream:
2286 # if the server explicitly prefers to stream (for fast LANs)
2288 # if the server explicitly prefers to stream (for fast LANs)
2287 stream = remote.capable('stream-preferred')
2289 stream = remote.capable('stream-preferred')
2288
2290
2289 if stream and not heads:
2291 if stream and not heads:
2290 # 'stream' means remote revlog format is revlogv1 only
2292 # 'stream' means remote revlog format is revlogv1 only
2291 if remote.capable('stream'):
2293 if remote.capable('stream'):
2292 return self.stream_in(remote, set(('revlogv1',)))
2294 return self.stream_in(remote, set(('revlogv1',)))
2293 # otherwise, 'streamreqs' contains the remote revlog format
2295 # otherwise, 'streamreqs' contains the remote revlog format
2294 streamreqs = remote.capable('streamreqs')
2296 streamreqs = remote.capable('streamreqs')
2295 if streamreqs:
2297 if streamreqs:
2296 streamreqs = set(streamreqs.split(','))
2298 streamreqs = set(streamreqs.split(','))
2297 # if we support it, stream in and adjust our requirements
2299 # if we support it, stream in and adjust our requirements
2298 if not streamreqs - self.supportedformats:
2300 if not streamreqs - self.supportedformats:
2299 return self.stream_in(remote, streamreqs)
2301 return self.stream_in(remote, streamreqs)
2300 return self.pull(remote, heads)
2302 return self.pull(remote, heads)
2301
2303
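The streamreqs check above is a set-subset test: stream only if the remote's requirements are all locally supported. In isolation:

    supportedformats = set(['revlogv1', 'generaldelta'])
    streamreqs = set('revlogv1,generaldelta'.split(','))
    assert not streamreqs - supportedformats      # nothing missing: stream
    assert set(['revlogv9']) - supportedformats   # unknown format: fall back to pull
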
2302 def pushkey(self, namespace, key, old, new):
2304 def pushkey(self, namespace, key, old, new):
2303 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2305 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2304 old=old, new=new)
2306 old=old, new=new)
2305 ret = pushkey.push(self, namespace, key, old, new)
2307 ret = pushkey.push(self, namespace, key, old, new)
2306 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2308 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2307 ret=ret)
2309 ret=ret)
2308 return ret
2310 return ret
2309
2311
2310 def listkeys(self, namespace):
2312 def listkeys(self, namespace):
2311 self.hook('prelistkeys', throw=True, namespace=namespace)
2313 self.hook('prelistkeys', throw=True, namespace=namespace)
2312 values = pushkey.list(self, namespace)
2314 values = pushkey.list(self, namespace)
2313 self.hook('listkeys', namespace=namespace, values=values)
2315 self.hook('listkeys', namespace=namespace, values=values)
2314 return values
2316 return values
2315
2317
2316 def debugwireargs(self, one, two, three=None, four=None, five=None):
2318 def debugwireargs(self, one, two, three=None, four=None, five=None):
2317 '''used to test argument passing over the wire'''
2319 '''used to test argument passing over the wire'''
2318 return "%s %s %s %s %s" % (one, two, three, four, five)
2320 return "%s %s %s %s %s" % (one, two, three, four, five)
2319
2321
2320 def savecommitmessage(self, text):
2322 def savecommitmessage(self, text):
2321 fp = self.opener('last-message.txt', 'wb')
2323 fp = self.opener('last-message.txt', 'wb')
2322 try:
2324 try:
2323 fp.write(text)
2325 fp.write(text)
2324 finally:
2326 finally:
2325 fp.close()
2327 fp.close()
2326 return self.pathto(fp.name[len(self.root)+1:])
2328 return self.pathto(fp.name[len(self.root)+1:])
2327
2329
2328 # used to avoid circular references so destructors work
2330 # used to avoid circular references so destructors work
2329 def aftertrans(files):
2331 def aftertrans(files):
2330 renamefiles = [tuple(t) for t in files]
2332 renamefiles = [tuple(t) for t in files]
2331 def a():
2333 def a():
2332 for src, dest in renamefiles:
2334 for src, dest in renamefiles:
2333 try:
2335 try:
2334 util.rename(src, dest)
2336 util.rename(src, dest)
2335 except OSError: # journal file does not yet exist
2337 except OSError: # journal file does not yet exist
2336 pass
2338 pass
2337 return a
2339 return a
2338
2340
2339 def undoname(fn):
2341 def undoname(fn):
2340 base, name = os.path.split(fn)
2342 base, name = os.path.split(fn)
2341 assert name.startswith('journal')
2343 assert name.startswith('journal')
2342 return os.path.join(base, name.replace('journal', 'undo', 1))
2344 return os.path.join(base, name.replace('journal', 'undo', 1))
2343
2345
2344 def instance(ui, path, create):
2346 def instance(ui, path, create):
2345 return localrepository(ui, util.urllocalpath(path), create)
2347 return localrepository(ui, util.urllocalpath(path), create)
2346
2348
2347 def islocal(path):
2349 def islocal(path):
2348 return True
2350 return True
@@ -1,337 +1,341 b''
1 # match.py - filename matching
1 # match.py - filename matching
2 #
2 #
3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import re
8 import re
9 import scmutil, util, fileset
9 import scmutil, util, fileset
10 from i18n import _
10 from i18n import _
11
11
12 def _expandsets(pats, ctx):
12 def _expandsets(pats, ctx):
13 '''convert set: patterns into a list of files in the given context'''
13 '''convert set: patterns into a list of files in the given context'''
14 fset = set()
14 fset = set()
15 other = []
15 other = []
16
16
17 for kind, expr in pats:
17 for kind, expr in pats:
18 if kind == 'set':
18 if kind == 'set':
19 if not ctx:
19 if not ctx:
20 raise util.Abort("fileset expression with no context")
20 raise util.Abort("fileset expression with no context")
21 s = fileset.getfileset(ctx, expr)
21 s = fileset.getfileset(ctx, expr)
22 fset.update(s)
22 fset.update(s)
23 continue
23 continue
24 other.append((kind, expr))
24 other.append((kind, expr))
25 return fset, other
25 return fset, other
26
26
27 class match(object):
27 class match(object):
28 def __init__(self, root, cwd, patterns, include=[], exclude=[],
28 def __init__(self, root, cwd, patterns, include=[], exclude=[],
29 default='glob', exact=False, auditor=None, ctx=None):
29 default='glob', exact=False, auditor=None, ctx=None):
30 """build an object to match a set of file patterns
30 """build an object to match a set of file patterns
31
31
32 arguments:
32 arguments:
33 root - the canonical root of the tree you're matching against
33 root - the canonical root of the tree you're matching against
34 cwd - the current working directory, if relevant
34 cwd - the current working directory, if relevant
35 patterns - patterns to find
35 patterns - patterns to find
36 include - patterns to include
36 include - patterns to include
37 exclude - patterns to exclude
37 exclude - patterns to exclude
38 default - if a pattern in patterns has no explicit type, assume this one
38 default - if a pattern in patterns has no explicit type, assume this one
39 exact - patterns are actually literals
39 exact - patterns are actually literals
40
40
41 a pattern is one of:
41 a pattern is one of:
42 'glob:<glob>' - a glob relative to cwd
42 'glob:<glob>' - a glob relative to cwd
43 're:<regexp>' - a regular expression
43 're:<regexp>' - a regular expression
44 'path:<path>' - a path relative to canonroot
44 'path:<path>' - a path relative to canonroot
45 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
45 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
46 'relpath:<path>' - a path relative to cwd
46 'relpath:<path>' - a path relative to cwd
47 'relre:<regexp>' - a regexp that needn't match the start of a name
47 'relre:<regexp>' - a regexp that needn't match the start of a name
48 'set:<fileset>' - a fileset expression
48 'set:<fileset>' - a fileset expression
49 '<something>' - a pattern of the specified default type
49 '<something>' - a pattern of the specified default type
50 """
50 """
51
51
52 self._root = root
52 self._root = root
53 self._cwd = cwd
53 self._cwd = cwd
54 self._files = []
54 self._files = []
55 self._anypats = bool(include or exclude)
55 self._anypats = bool(include or exclude)
56 self._ctx = ctx
56 self._ctx = ctx
57
57
58 if include:
58 if include:
59 pats = _normalize(include, 'glob', root, cwd, auditor)
59 pats = _normalize(include, 'glob', root, cwd, auditor)
60 self.includepat, im = _buildmatch(ctx, pats, '(?:/|$)')
60 self.includepat, im = _buildmatch(ctx, pats, '(?:/|$)')
61 if exclude:
61 if exclude:
62 pats = _normalize(exclude, 'glob', root, cwd, auditor)
62 pats = _normalize(exclude, 'glob', root, cwd, auditor)
63 self.excludepat, em = _buildmatch(ctx, pats, '(?:/|$)')
63 self.excludepat, em = _buildmatch(ctx, pats, '(?:/|$)')
64 if exact:
64 if exact:
65 self._files = patterns
65 self._files = patterns
66 pm = self.exact
66 pm = self.exact
67 elif patterns:
67 elif patterns:
68 pats = _normalize(patterns, default, root, cwd, auditor)
68 pats = _normalize(patterns, default, root, cwd, auditor)
69 self._files = _roots(pats)
69 self._files = _roots(pats)
70 self._anypats = self._anypats or _anypats(pats)
70 self._anypats = self._anypats or _anypats(pats)
71 self.patternspat, pm = _buildmatch(ctx, pats, '$')
71 self.patternspat, pm = _buildmatch(ctx, pats, '$')
72
72
73 if patterns or exact:
73 if patterns or exact:
74 if include:
74 if include:
75 if exclude:
75 if exclude:
76 m = lambda f: im(f) and not em(f) and pm(f)
76 m = lambda f: im(f) and not em(f) and pm(f)
77 else:
77 else:
78 m = lambda f: im(f) and pm(f)
78 m = lambda f: im(f) and pm(f)
79 else:
79 else:
80 if exclude:
80 if exclude:
81 m = lambda f: not em(f) and pm(f)
81 m = lambda f: not em(f) and pm(f)
82 else:
82 else:
83 m = pm
83 m = pm
84 else:
84 else:
85 if include:
85 if include:
86 if exclude:
86 if exclude:
87 m = lambda f: im(f) and not em(f)
87 m = lambda f: im(f) and not em(f)
88 else:
88 else:
89 m = im
89 m = im
90 else:
90 else:
91 if exclude:
91 if exclude:
92 m = lambda f: not em(f)
92 m = lambda f: not em(f)
93 else:
93 else:
94 m = lambda f: True
94 m = lambda f: True
95
95
96 self.matchfn = m
96 self.matchfn = m
97 self._fmap = set(self._files)
97 self._fmap = set(self._files)
98
98
99 def __call__(self, fn):
99 def __call__(self, fn):
100 return self.matchfn(fn)
100 return self.matchfn(fn)
101 def __iter__(self):
101 def __iter__(self):
102 for f in self._files:
102 for f in self._files:
103 yield f
103 yield f
104 def bad(self, f, msg):
104 def bad(self, f, msg):
105 '''callback for each explicit file that can't be
105 '''callback for each explicit file that can't be
106 found/accessed, with an error message
106 found/accessed, with an error message
107 '''
107 '''
108 pass
108 pass
109 def dir(self, f):
109 def dir(self, f):
110 pass
110 pass
111 def missing(self, f):
111 def missing(self, f):
112 pass
112 pass
113 def exact(self, f):
113 def exact(self, f):
114 return f in self._fmap
114 return f in self._fmap
115 def rel(self, f):
115 def rel(self, f):
116 return util.pathto(self._root, self._cwd, f)
116 return util.pathto(self._root, self._cwd, f)
117 def files(self):
117 def files(self):
118 return self._files
118 return self._files
119 def anypats(self):
119 def anypats(self):
120 return self._anypats
120 return self._anypats
121 def always(self):
122 return False
121
123
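The pattern kinds documented in match's docstring can be exercised directly; a minimal sketch using 're:' patterns, which bypass path canonicalization (run from a Mercurial checkout; the paths are illustrative):

    from mercurial import match as matchmod
    m = matchmod.match('/repo', '', ['re:.*\\.py$'])
    bool(m('setup.py'))   # True: the regex matches
    bool(m('README'))     # False
    m.always()            # False: a real pattern restricts the match
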
122 class exact(match):
124 class exact(match):
123 def __init__(self, root, cwd, files):
125 def __init__(self, root, cwd, files):
124 match.__init__(self, root, cwd, files, exact = True)
126 match.__init__(self, root, cwd, files, exact = True)
125
127
126 class always(match):
128 class always(match):
127 def __init__(self, root, cwd):
129 def __init__(self, root, cwd):
128 match.__init__(self, root, cwd, [])
130 match.__init__(self, root, cwd, [])
131 def always(self):
132 return True
129
133
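This always() override is the hook the commit summary refers to: a caller can skip per-file matcher invocations entirely when the matcher accepts everything. A hedged caller-side sketch (the loop is illustrative, not code from this diff):

    from mercurial import match as matchmod
    m = matchmod.always('/repo', '')
    files = ['a.txt', 'sub/b.txt']
    if m.always():
        matched = files                        # fast path: no per-file calls
    else:
        matched = [f for f in files if m(f)]
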
130 class narrowmatcher(match):
134 class narrowmatcher(match):
131 """Adapt a matcher to work on a subdirectory only.
135 """Adapt a matcher to work on a subdirectory only.
132
136
133 The paths are remapped to remove/insert the path as needed:
137 The paths are remapped to remove/insert the path as needed:
134
138
135 >>> m1 = match('root', '', ['a.txt', 'sub/b.txt'])
139 >>> m1 = match('root', '', ['a.txt', 'sub/b.txt'])
136 >>> m2 = narrowmatcher('sub', m1)
140 >>> m2 = narrowmatcher('sub', m1)
137 >>> bool(m2('a.txt'))
141 >>> bool(m2('a.txt'))
138 False
142 False
139 >>> bool(m2('b.txt'))
143 >>> bool(m2('b.txt'))
140 True
144 True
141 >>> bool(m2.matchfn('a.txt'))
145 >>> bool(m2.matchfn('a.txt'))
142 False
146 False
143 >>> bool(m2.matchfn('b.txt'))
147 >>> bool(m2.matchfn('b.txt'))
144 True
148 True
145 >>> m2.files()
149 >>> m2.files()
146 ['b.txt']
150 ['b.txt']
147 >>> m2.exact('b.txt')
151 >>> m2.exact('b.txt')
148 True
152 True
149 >>> m2.rel('b.txt')
153 >>> m2.rel('b.txt')
150 'b.txt'
154 'b.txt'
151 >>> def bad(f, msg):
155 >>> def bad(f, msg):
152 ... print "%s: %s" % (f, msg)
156 ... print "%s: %s" % (f, msg)
153 >>> m1.bad = bad
157 >>> m1.bad = bad
154 >>> m2.bad('x.txt', 'No such file')
158 >>> m2.bad('x.txt', 'No such file')
155 sub/x.txt: No such file
159 sub/x.txt: No such file
156 """
160 """
157
161
158 def __init__(self, path, matcher):
162 def __init__(self, path, matcher):
159 self._root = matcher._root
163 self._root = matcher._root
160 self._cwd = matcher._cwd
164 self._cwd = matcher._cwd
161 self._path = path
165 self._path = path
162 self._matcher = matcher
166 self._matcher = matcher
163
167
164 self._files = [f[len(path) + 1:] for f in matcher._files
168 self._files = [f[len(path) + 1:] for f in matcher._files
165 if f.startswith(path + "/")]
169 if f.startswith(path + "/")]
166 self._anypats = matcher._anypats
170 self._anypats = matcher._anypats
167 self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn)
171 self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn)
168 self._fmap = set(self._files)
172 self._fmap = set(self._files)
169
173
170 def bad(self, f, msg):
174 def bad(self, f, msg):
171 self._matcher.bad(self._path + "/" + f, msg)
175 self._matcher.bad(self._path + "/" + f, msg)
172
176
173 def patkind(pat):
177 def patkind(pat):
174 return _patsplit(pat, None)[0]
178 return _patsplit(pat, None)[0]
175
179
176 def _patsplit(pat, default):
180 def _patsplit(pat, default):
177 """Split a string into an optional pattern kind prefix and the
181 """Split a string into an optional pattern kind prefix and the
178 actual pattern."""
182 actual pattern."""
179 if ':' in pat:
183 if ':' in pat:
180 kind, val = pat.split(':', 1)
184 kind, val = pat.split(':', 1)
181 if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
185 if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
182 'listfile', 'listfile0', 'set'):
186 'listfile', 'listfile0', 'set'):
183 return kind, val
187 return kind, val
184 return default, pat
188 return default, pat
185
189
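_patsplit only strips a prefix that names a known kind, which leaves things like Windows drive letters alone:

    _patsplit('re:a.*', 'glob')       # ('re', 'a.*')
    _patsplit('readme.txt', 'glob')   # ('glob', 'readme.txt')
    _patsplit('C:\\tmp\\x', 'glob')   # ('glob', 'C:\\tmp\\x'): 'C' is not a kind
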
186 def _globre(pat):
190 def _globre(pat):
187 "convert a glob pattern into a regexp"
191 "convert a glob pattern into a regexp"
188 i, n = 0, len(pat)
192 i, n = 0, len(pat)
189 res = ''
193 res = ''
190 group = 0
194 group = 0
191 escape = re.escape
195 escape = re.escape
192 def peek():
196 def peek():
193 return i < n and pat[i]
197 return i < n and pat[i]
194 while i < n:
198 while i < n:
195 c = pat[i]
199 c = pat[i]
196 i += 1
200 i += 1
197 if c not in '*?[{},\\':
201 if c not in '*?[{},\\':
198 res += escape(c)
202 res += escape(c)
199 elif c == '*':
203 elif c == '*':
200 if peek() == '*':
204 if peek() == '*':
201 i += 1
205 i += 1
202 res += '.*'
206 res += '.*'
203 else:
207 else:
204 res += '[^/]*'
208 res += '[^/]*'
205 elif c == '?':
209 elif c == '?':
206 res += '.'
210 res += '.'
207 elif c == '[':
211 elif c == '[':
208 j = i
212 j = i
209 if j < n and pat[j] in '!]':
213 if j < n and pat[j] in '!]':
210 j += 1
214 j += 1
211 while j < n and pat[j] != ']':
215 while j < n and pat[j] != ']':
212 j += 1
216 j += 1
213 if j >= n:
217 if j >= n:
214 res += '\\['
218 res += '\\['
215 else:
219 else:
216 stuff = pat[i:j].replace('\\','\\\\')
220 stuff = pat[i:j].replace('\\','\\\\')
217 i = j + 1
221 i = j + 1
218 if stuff[0] == '!':
222 if stuff[0] == '!':
219 stuff = '^' + stuff[1:]
223 stuff = '^' + stuff[1:]
220 elif stuff[0] == '^':
224 elif stuff[0] == '^':
221 stuff = '\\' + stuff
225 stuff = '\\' + stuff
222 res = '%s[%s]' % (res, stuff)
226 res = '%s[%s]' % (res, stuff)
223 elif c == '{':
227 elif c == '{':
224 group += 1
228 group += 1
225 res += '(?:'
229 res += '(?:'
226 elif c == '}' and group:
230 elif c == '}' and group:
227 res += ')'
231 res += ')'
228 group -= 1
232 group -= 1
229 elif c == ',' and group:
233 elif c == ',' and group:
230 res += '|'
234 res += '|'
231 elif c == '\\':
235 elif c == '\\':
232 p = peek()
236 p = peek()
233 if p:
237 if p:
234 i += 1
238 i += 1
235 res += escape(p)
239 res += escape(p)
236 else:
240 else:
237 res += escape(c)
241 res += escape(c)
238 else:
242 else:
239 res += escape(c)
243 res += escape(c)
240 return res
244 return res
241
245
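A few concrete conversions performed by _globre (dots and other metacharacters are escaped via re.escape):

    _globre('*.py')    # '[^/]*\\.py'  : '*' does not cross slashes
    _globre('**.py')   # '.*\\.py'     : '**' does
    _globre('{a,b}')   # '(?:a|b)'     : brace alternation becomes a group
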
242 def _regex(kind, name, tail):
246 def _regex(kind, name, tail):
243 '''convert a pattern into a regular expression'''
247 '''convert a pattern into a regular expression'''
244 if not name:
248 if not name:
245 return ''
249 return ''
246 if kind == 're':
250 if kind == 're':
247 return name
251 return name
248 elif kind == 'path':
252 elif kind == 'path':
249 return '^' + re.escape(name) + '(?:/|$)'
253 return '^' + re.escape(name) + '(?:/|$)'
250 elif kind == 'relglob':
254 elif kind == 'relglob':
251 return '(?:|.*/)' + _globre(name) + tail
255 return '(?:|.*/)' + _globre(name) + tail
252 elif kind == 'relpath':
256 elif kind == 'relpath':
253 return re.escape(name) + '(?:/|$)'
257 return re.escape(name) + '(?:/|$)'
254 elif kind == 'relre':
258 elif kind == 'relre':
255 if name.startswith('^'):
259 if name.startswith('^'):
256 return name
260 return name
257 return '.*' + name
261 return '.*' + name
258 return _globre(name) + tail
262 return _globre(name) + tail
259
263
260 def _buildmatch(ctx, pats, tail):
264 def _buildmatch(ctx, pats, tail):
261 fset, pats = _expandsets(pats, ctx)
265 fset, pats = _expandsets(pats, ctx)
262 if not pats:
266 if not pats:
263 return "", fset.__contains__
267 return "", fset.__contains__
264
268
265 pat, mf = _buildregexmatch(pats, tail)
269 pat, mf = _buildregexmatch(pats, tail)
266 if fset:
270 if fset:
267 return pat, lambda f: f in fset or mf(f)
271 return pat, lambda f: f in fset or mf(f)
268 return pat, mf
272 return pat, mf
269
273
270 def _buildregexmatch(pats, tail):
274 def _buildregexmatch(pats, tail):
271 """build a matching function from a set of patterns"""
275 """build a matching function from a set of patterns"""
272 try:
276 try:
273 pat = '(?:%s)' % '|'.join([_regex(k, p, tail) for (k, p) in pats])
277 pat = '(?:%s)' % '|'.join([_regex(k, p, tail) for (k, p) in pats])
274 if len(pat) > 20000:
278 if len(pat) > 20000:
275 raise OverflowError()
279 raise OverflowError()
276 return pat, re.compile(pat).match
280 return pat, re.compile(pat).match
277 except OverflowError:
281 except OverflowError:
278 # We're using a Python with a tiny regex engine and we
282 # We're using a Python with a tiny regex engine and we
279 # made it explode, so we'll divide the pattern list in two
283 # made it explode, so we'll divide the pattern list in two
280 # until it works
284 # until it works
281 l = len(pats)
285 l = len(pats)
282 if l < 2:
286 if l < 2:
283 raise
287 raise
284 pata, a = _buildregexmatch(pats[:l//2], tail)
288 pata, a = _buildregexmatch(pats[:l//2], tail)
285 patb, b = _buildregexmatch(pats[l//2:], tail)
289 patb, b = _buildregexmatch(pats[l//2:], tail)
286 return pat, lambda s: a(s) or b(s)
290 return pat, lambda s: a(s) or b(s)
287 except re.error:
291 except re.error:
288 for k, p in pats:
292 for k, p in pats:
289 try:
293 try:
290 re.compile('(?:%s)' % _regex(k, p, tail))
294 re.compile('(?:%s)' % _regex(k, p, tail))
291 except re.error:
295 except re.error:
292 raise util.Abort(_("invalid pattern (%s): %s") % (k, p))
296 raise util.Abort(_("invalid pattern (%s): %s") % (k, p))
293 raise util.Abort(_("invalid pattern"))
297 raise util.Abort(_("invalid pattern"))
294
298
295 def _normalize(names, default, root, cwd, auditor):
299 def _normalize(names, default, root, cwd, auditor):
296 pats = []
300 pats = []
297 for kind, name in [_patsplit(p, default) for p in names]:
301 for kind, name in [_patsplit(p, default) for p in names]:
298 if kind in ('glob', 'relpath'):
302 if kind in ('glob', 'relpath'):
299 name = scmutil.canonpath(root, cwd, name, auditor)
303 name = scmutil.canonpath(root, cwd, name, auditor)
300 elif kind in ('relglob', 'path'):
304 elif kind in ('relglob', 'path'):
301 name = util.normpath(name)
305 name = util.normpath(name)
302 elif kind in ('listfile', 'listfile0'):
306 elif kind in ('listfile', 'listfile0'):
303 try:
307 try:
304 files = util.readfile(name)
308 files = util.readfile(name)
305 if kind == 'listfile0':
309 if kind == 'listfile0':
306 files = files.split('\0')
310 files = files.split('\0')
307 else:
311 else:
308 files = files.splitlines()
312 files = files.splitlines()
309 files = [f for f in files if f]
313 files = [f for f in files if f]
310 except EnvironmentError:
314 except EnvironmentError:
311 raise util.Abort(_("unable to read file list (%s)") % name)
315 raise util.Abort(_("unable to read file list (%s)") % name)
312 pats += _normalize(files, default, root, cwd, auditor)
316 pats += _normalize(files, default, root, cwd, auditor)
313 continue
317 continue
314
318
315 pats.append((kind, name))
319 pats.append((kind, name))
316 return pats
320 return pats
317
321
318 def _roots(patterns):
322 def _roots(patterns):
319 r = []
323 r = []
320 for kind, name in patterns:
324 for kind, name in patterns:
321 if kind == 'glob': # find the non-glob prefix
325 if kind == 'glob': # find the non-glob prefix
322 root = []
326 root = []
323 for p in name.split('/'):
327 for p in name.split('/'):
324 if '[' in p or '{' in p or '*' in p or '?' in p:
328 if '[' in p or '{' in p or '*' in p or '?' in p:
325 break
329 break
326 root.append(p)
330 root.append(p)
327 r.append('/'.join(root) or '.')
331 r.append('/'.join(root) or '.')
328 elif kind in ('relpath', 'path'):
332 elif kind in ('relpath', 'path'):
329 r.append(name or '.')
333 r.append(name or '.')
330 elif kind == 'relglob':
334 elif kind == 'relglob':
331 r.append('.')
335 r.append('.')
332 return r
336 return r
333
337
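_roots extracts the longest literal prefix of each pattern so directory walking can start as deep as possible:

    _roots([('glob', 'foo/*/bar'), ('relpath', ''), ('relglob', '*.c')])
    # ['foo', '.', '.']
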
334 def _anypats(patterns):
338 def _anypats(patterns):
335 for kind, name in patterns:
339 for kind, name in patterns:
336 if kind in ('glob', 're', 'relglob', 'relre', 'set'):
340 if kind in ('glob', 're', 'relglob', 'relre', 'set'):
337 return True
341 return True