tag: don't check .hgtags status if --local passed...
Kevin Bullock
r13133:c1492615 stable
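
The change below narrows the .hgtags dirty-check in localrepository.tag() so
that it only runs for global tags: a local tag (hg tag --local) is written to
.hg/localtags and never touches .hgtags, so uncommitted changes to .hgtags are
no reason to abort. A minimal sketch of the guarded check, assuming a
localrepository instance named repo (the helper name is ours, for
illustration; status()[:5] is the modified/added/removed/deleted/unknown
lists):

    def hgtags_blocks_tag(repo, local):
        # local tags go to .hg/localtags, so a dirty .hgtags is irrelevant
        if local:
            return False
        # a global tag commits .hgtags, so refuse while it has pending edits
        return any('.hgtags' in changed for changed in repo.status()[:5])
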
@@ -1,1905 +1,1906 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as matchmod
import merge as mergemod
import tags as tagsmod
import url as urlmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
    supportedformats = set(('revlogv1', 'parentdelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = util.path_auditor(self.root, self._checknested)
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'parentdelta', False):
                    requirements.append("parentdelta")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = {}
        if 'parentdelta' in requirements:
            self.sopener.options['parentdelta'] = 1

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False


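    # Illustrative walk-through of _checknested (an example of ours, not
    # upstream text): in a repository rooted at /repo with a subrepo 'sub',
    # _checknested('/repo/sub/dir') computes subpath 'sub/dir', finds the
    # prefix 'sub' in ctx.substate, and delegates checknested('dir') to the
    # subrepository.
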
    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.options['defversion'] = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        return dirstate.dirstate(self.opener, self.ui, self.root)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

-        for x in self.status()[:5]:
-            if '.hgtags' in x:
-                raise util.Abort(_('working copy of .hgtags is changed '
-                                   '(please commit .hgtags manually)'))
+        if not local:
+            for x in self.status()[:5]:
+                if '.hgtags' in x:
+                    raise util.Abort(_('working copy of .hgtags is changed '
+                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

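    # Effect of the guard above (a hedged sketch, not upstream source): with
    # uncommitted changes to .hgtags in the working copy,
    #
    #   repo.tag('v1.0', node, 'msg', local=True, user=u, date=d)
    #
    # now succeeds and writes to .hg/localtags, while local=False still
    # aborts until .hgtags is committed.
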
    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
            for tags in self.nodetagscache.itervalues():
                tags.sort()
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

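    # Shape of the branchmap() result (illustrative values, not upstream
    # code):
    #
    #   {'default': [head1, head2], 'stable': [head3]}
    #
    # Each list holds a branch's head nodes with the tipmost head last,
    # which is why branchtags() below starts from heads[-1].
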
    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt


    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

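    # Illustrative hgrc wiring for the filter machinery above (pattern and
    # command are examples of ours):
    #
    #   [encode]
    #   **.txt = unix2dos
    #
    # _loadfilter('encode') then yields [(matcher, fn, 'unix2dos')], and
    # wread() pipes the contents of matching files through the command via
    # util.filter().
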
    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    args = self.opener("undo.desc", "r").read().splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("rolling back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("rolling back to revision %s (undo %s)\n") % (
                               int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

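    # Journal/undo life-cycle implemented above, summarized (a hedged
    # sketch, not upstream text): transaction() snapshots the dirstate,
    # branch and a description into .hg/journal.*; when the transaction
    # completes, aftertrans() renames them to .hg/undo.*, which rollback()
    # later replays. recover() instead rolls back a journal left behind by
    # an interrupted transaction.
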
    def invalidatecaches(self):
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

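    # Resulting filelog metadata for a rename, per the logic above
    # (illustrative values):
    #
    #   meta = {'copy': 'foo', 'copyrev': hex(crev)}
    #
    # with fparent1 set to nullid, so readers look up the copy source
    # instead of a direct first parent.
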
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                        subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

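    # Illustrative usage sketch (not part of this module): driving commit()
    # through the API, assuming a repository at the placeholder path
    # '/path/to/repo' and a ui object from mercurial.ui.
    #
    #   from mercurial import ui as uimod, hg
    #   repo = hg.repository(uimod.ui(), '/path/to/repo')
    #   node = repo.commit(text='example message', user='alice')
    #   if node is None:
    #       print 'nothing changed'   # see the early "return None" above
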
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

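    # Note on linkrevs (explanatory summary of the code above): commitctx
    # captures linkrev = len(self) *before* the new changelog entry exists,
    # so every filelog and manifest revision written in this transaction
    # points back at the changelog revision about to be created. E.g. in a
    # repository with revisions 0..4, len(self) is 5, and the new commit
    # becomes rev 5 -- the linkrev recorded for its file revisions.
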
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        [l.sort() for l in r]
        return r

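    # Illustrative sketch (not part of this module): consuming the 7-tuple
    # returned by status(), assuming an existing repo object as above.
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(unknown=True, clean=True)
    #   for f in modified:
    #       print 'M %s' % f
    #   # unknown/ignored/clean stay empty unless requested via the
    #   # corresponding keyword arguments, as implemented above.
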
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

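    # Worked example (explanatory): negating revs before sorting gives a
    # descending order without a custom comparator. Heads a and b at revs
    # 5 and 9 become [(-5, a), (-9, b)], which sorts to [(-9, b), (-5, a)],
    # so the method returns [b, a] -- newest head first.
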
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

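    # Illustrative sketch (not part of this module): listing the open heads
    # of the 'default' branch, newest first, for an existing repo object.
    #
    #   from mercurial.node import short
    #   for h in repo.branchheads('default'):
    #       print short(h)   # topologically newest head prints first
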
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

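    # Worked example (explanatory): walking first parents from top towards
    # bottom, the loop above appends the nodes at distances 1, 2, 4, 8, ...
    # (whenever i == f, after which f doubles), i.e. an exponentially
    # thinning sample of the chain. For a chain 10 deep, the returned list
    # holds the nodes 1, 2, 4 and 8 steps below top -- enough for a remote
    # to bisect the gap.
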
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))
            elif heads is None and remote.capable('changegroupsubset'):
                # issue1320, avoid a race if remote changed after discovery
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
        finally:
            lock.release()

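    # Illustrative sketch (not part of this module): pulling from another
    # repository, assuming a local repo object and a placeholder URL.
    #
    #   from mercurial import hg
    #   other = hg.repository(repo.ui, 'http://example.com/repo')
    #   r = repo.pull(other)   # returns 0 when no changes are found,
    #                          # otherwise addchangegroup()'s return value
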
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            ret = discovery.prepush(self, remote, force, revs, newbranch)
            if ret[0] is None:
                # and here we return 0 for "nothing to push" or 1 for
                # "something to push but I refuse"
                return ret[1]

            cg, remote_heads = ret
            if unbundle:
                # local repo finds heads on server, finds out what revs it must
                # push.  once revs transferred, if server finds it has
                # different heads (someone else won commit/push race), server
                # aborts.
                if force:
                    remote_heads = ['force']
                # ssh: return remote's addchangegroup()
                # http: return remote's addchangegroup() or 0 for error
                return remote.unbundle(cg, remote_heads, 'push')
            else:
                # we return an integer indicating remote head count change
                return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
        finally:
            if lock is not None:
                lock.release()

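    # Illustrative sketch (not part of this module): interpreting push()'s
    # return value, assuming `other` is a peer repository as above.
    #
    #   r = repo.push(other)
    #   if r == 0:
    #       print 'HTTP error or nothing to push'
    #   elif r == 1:
    #       print 'pushed; remote head count unchanged (or push refused)'
    #   else:
    #       print 'pushed; head count delta encoded as in addchangegroup()'
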
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

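        # Shape of the extranodes argument described above (illustrative,
        # with made-up placeholder names and nodes):
        #
        #   extranodes = {
        #       'foo/bar.txt': [(filenode, linknode)],  # per-file revlog nodes
        #       1: [(manifestnode, linknode)],          # key 1 == the manifest
        #   }
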
        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # Compute the list of changesets in this changegroup.
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)

        # We assume that all ancestors of bases are known
        commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # that the first manifest referencing it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if mnfst.deltaparent(r) in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        # And if the file is in the list of files we care
                        # about.
                        if f in changedfiles:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune(revlog, missingnodes):
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in missingnodes:
                clrev = revlog.linkrev(revlog.rev(n))
                if clrev in commonrevs:
                    hasset.add(n)
            for n in hasset:
                missingnodes.pop(n, None)
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                missingnodes.pop(revlog.node(r), None)

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = set()
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling changes'), None)

            prune(mnfst, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst,
                                lambda mnode: msng_mnfst_set[mnode],
                                filenode_collector(changedfiles))
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling manifests'), None)

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles.add(fname)
            # Go through all our files in order sorted by name.
            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                missingfnodes = msng_filenode_set.pop(fname, {})
                prune(filerevlog, missingfnodes)
                add_extra_nodes(fname, missingfnodes)
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if missingfnodes:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision # (topological order)
                    nodeiter = list(missingfnodes)
                    nodeiter.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(nodeiter,
                                             lambda fnode: missingfnodes[fnode])
                    for chnk in group:
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundling files'), None)

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't.  Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookuplinkrev_func(revlog):
            def lookuplinkrev(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuplinkrev

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling changes'), None)

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                                   lookuplinkrev_func(mnfst))):
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling manifests'), None)

            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuplinkrev_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            self.ui.progress(_('bundling files'), None)

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'.  url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
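        # Illustrative reading of the return value (explanatory, matching
        # the arithmetic at the end of this method): with 1 head before and
        # 3 after, this returns 1 + 2 = 3; with 3 before and 1 after, it
        # returns -1 - 2 = -3; if the head count is unchanged, it returns 1.
        # Per the docstring, only "nothing changed or no source" yields 0.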
1652 def csmap(x):
1653 def csmap(x):
1653 self.ui.debug("add changeset %s\n" % short(x))
1654 self.ui.debug("add changeset %s\n" % short(x))
1654 return len(cl)
1655 return len(cl)
1655
1656
1656 def revmap(x):
1657 def revmap(x):
1657 return cl.rev(x)
1658 return cl.rev(x)
1658
1659
1659 if not source:
1660 if not source:
1660 return 0
1661 return 0
1661
1662
1662 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1663 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1663
1664
1664 changesets = files = revisions = 0
1665 changesets = files = revisions = 0
1665 efiles = set()
1666 efiles = set()
1666
1667
1667 # write changelog data to temp files so concurrent readers will not see
1668 # write changelog data to temp files so concurrent readers will not see
1668 # inconsistent view
1669 # inconsistent view
1669 cl = self.changelog
1670 cl = self.changelog
1670 cl.delayupdate()
1671 cl.delayupdate()
1671 oldheads = len(cl.heads())
1672 oldheads = len(cl.heads())
1672
1673
1673 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1674 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1674 try:
1675 try:
1675 trp = weakref.proxy(tr)
1676 trp = weakref.proxy(tr)
1676 # pull off the changeset group
1677 # pull off the changeset group
1677 self.ui.status(_("adding changesets\n"))
1678 self.ui.status(_("adding changesets\n"))
1678 clstart = len(cl)
1679 clstart = len(cl)
1679 class prog(object):
1680 class prog(object):
1680 step = _('changesets')
1681 step = _('changesets')
1681 count = 1
1682 count = 1
1682 ui = self.ui
1683 ui = self.ui
1683 total = None
1684 total = None
1684 def __call__(self):
1685 def __call__(self):
1685 self.ui.progress(self.step, self.count, unit=_('chunks'),
1686 self.ui.progress(self.step, self.count, unit=_('chunks'),
1686 total=self.total)
1687 total=self.total)
1687 self.count += 1
1688 self.count += 1
1688 pr = prog()
1689 pr = prog()
1689 source.callback = pr
1690 source.callback = pr
1690
1691
1691 if (cl.addgroup(source, csmap, trp) is None
1692 if (cl.addgroup(source, csmap, trp) is None
1692 and not emptyok):
1693 and not emptyok):
1693 raise util.Abort(_("received changelog group is empty"))
1694 raise util.Abort(_("received changelog group is empty"))
1694 clend = len(cl)
1695 clend = len(cl)
1695 changesets = clend - clstart
1696 changesets = clend - clstart
1696 for c in xrange(clstart, clend):
1697 for c in xrange(clstart, clend):
1697 efiles.update(self[c].files())
1698 efiles.update(self[c].files())
1698 efiles = len(efiles)
1699 efiles = len(efiles)
1699 self.ui.progress(_('changesets'), None)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = 'files'
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while 1:
                f = source.chunk()
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
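The asymmetric return above is deliberate: command code treats 0 as failure, so the method biases the head-count delta away from zero in both directions. A decoding sketch, for illustration only (describe_result is a hypothetical helper, not part of Mercurial):

def describe_result(ret):
    # ret = newheads - oldheads + 1 when heads grew or stayed level,
    #       newheads - oldheads - 1 when heads shrank; never 0
    assert ret != 0
    if ret > 0:
        return '%d new heads' % (ret - 1)    # ret == 1: head count unchanged
    return '%d heads removed' % -(ret + 1)   # ret == -2: one head removed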
    def stream_in(self, remote, requirements):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))

        # new requirements = old non-format requirements + new format-related
        # requirements from the streamed-in repository
        requirements.update(set(self.requirements) - self.supportedformats)
        self._applyrequirements(requirements)
        self._writerequirements()

        self.invalidate()
        return len(self.heads()) + 1
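For reference, the byte stream consumed above uses a simple line-oriented framing: one status line, one '<total_files> <total_bytes>' summary line, then for each file a '<name>\0<size>' header followed by exactly size bytes of raw data. A toy producer makes the framing concrete (illustration only; the real server side is the remote's stream_out, and this helper is hypothetical):

def toy_stream_out(entries):
    # entries: list of (store_path, data_string) pairs -- a made-up shape
    yield '0\n'                               # status line: 0 means OK
    total = sum(len(data) for name, data in entries)
    yield '%d %d\n' % (len(entries), total)   # "<total_files> <total_bytes>"
    for name, data in entries:
        yield '%s\0%d\n' % (name, len(data))  # per-file header, NUL-separated
        yield data                            # raw bytes, no terminator

Because each header carries the exact size, stream_in() can pull the payload with util.filechunkiter(fp, limit=size) and needs no end-of-data marker.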
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
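The two capabilities probed above make different promises: plain 'stream' asserts the remote store is revlogv1 only, while 'streamreqs' carries an explicit comma-separated list of format requirements that the client checks against its own supportedformats. In miniature (the capability value is made up for illustration):

# a server that advertises explicit stream requirements
caps = {'streamreqs': 'revlogv1,parentdelta'}

supportedformats = set(('revlogv1', 'parentdelta'))
streamreqs = set(caps['streamreqs'].split(','))

# stream only if the server requires nothing this client cannot read
can_stream = not (streamreqs - supportedformats)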
    def pushkey(self, namespace, key, old, new):
        return pushkey.push(self, namespace, key, old, new)

    def listkeys(self, namespace):
        return pushkey.list(self, namespace)
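pushkey is a deliberately small interface: each namespace maps string keys to string values, listkeys enumerates them, and push is a compare-and-set that succeeds only if the stored value still matches old. A hedged usage sketch (in this era the 'bookmarks' namespace comes from the bookmarks extension; the key and node values here are placeholders):

marks = repo.listkeys('bookmarks')        # {name: hex node}
old = marks.get('feature', '')
new = 'acb14030fe0a21b60322c440ad2d20cf7685a376'   # placeholder node
ok = repo.pushkey('bookmarks', 'feature', old, new)  # falsy if old is stale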
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
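As its comment says, aftertrans exists to avoid reference cycles: it hands the transaction a plain closure that captures only the rename list, never the repository, so destructor-based cleanup keeps working. The same pattern in miniature (hypothetical names, standalone):

def make_after(renames):
    renames = [tuple(t) for t in renames]   # snapshot; no owner captured
    def run():
        for src, dest in renames:
            print('would rename %s -> %s' % (src, dest))
    return run

after = make_after([('journal', 'undo')])
after()  # still safe to call once the object that built the list is gone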
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
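instance() and islocal() are the module-level entry points through which Mercurial's repository-opening code picks a backend for a URL scheme; every backend module exports the same pair. A sketch of the call shape (hedged: the actual scheme dispatch lives in mercurial/hg.py, and `ui` is assumed to be an existing mercurial.ui.ui object):

from mercurial import localrepo  # this module

path = 'file:///srv/repos/test'          # made-up path
if localrepo.islocal(path):              # the local backend always says True
    repo = localrepo.instance(ui, path, create=False)  # drops the file: scheme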
@@ -1,198 +1,210 @@
  $ hg init test
  $ cd test

  $ echo a > a
  $ hg add a
  $ hg commit -m "test"
  $ hg history
  changeset: 0:acb14030fe0a
  tag: tip
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: test


  $ hg tag ' '
  abort: tag names cannot consist entirely of whitespace
  [255]

  $ hg tag "bleah"
  $ hg history
  changeset: 1:d4f0d2909abc
  tag: tip
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: Added tag bleah for changeset acb14030fe0a

  changeset: 0:acb14030fe0a
  tag: bleah
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: test


  $ echo foo >> .hgtags
  $ hg tag "bleah2"
  abort: working copy of .hgtags is changed (please commit .hgtags manually)
  [255]

  $ hg revert .hgtags
  $ hg tag -r 0 x y z y y z
  abort: tag names must be unique
  [255]
  $ hg tag tap nada dot tip null .
  abort: the name 'tip' is reserved
  [255]
  $ hg tag "bleah"
  abort: tag 'bleah' already exists (use -f to force)
  [255]
  $ hg tag "blecch" "bleah"
  abort: tag 'bleah' already exists (use -f to force)
  [255]

  $ hg tag --remove "blecch"
  abort: tag 'blecch' does not exist
  [255]
  $ hg tag --remove "bleah" "blecch" "blough"
  abort: tag 'blecch' does not exist
  [255]

  $ hg tag -r 0 "bleah0"
  $ hg tag -l -r 1 "bleah1"
  $ hg tag gack gawk gorp
  $ hg tag -f gack
  $ hg tag --remove gack gorp

  $ cat .hgtags
  acb14030fe0a21b60322c440ad2d20cf7685a376 bleah
  acb14030fe0a21b60322c440ad2d20cf7685a376 bleah0
  336fccc858a4eb69609a291105009e484a6b6b8d gack
  336fccc858a4eb69609a291105009e484a6b6b8d gawk
  336fccc858a4eb69609a291105009e484a6b6b8d gorp
  336fccc858a4eb69609a291105009e484a6b6b8d gack
  799667b6f2d9b957f73fa644a918c2df22bab58f gack
  799667b6f2d9b957f73fa644a918c2df22bab58f gack
  0000000000000000000000000000000000000000 gack
  336fccc858a4eb69609a291105009e484a6b6b8d gorp
  0000000000000000000000000000000000000000 gorp
  $ cat .hg/localtags
  d4f0d2909abc9290e2773c08837d70c1794e3f5a bleah1

  $ hg update 0
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ hg tag "foobar"
  $ cat .hgtags
  acb14030fe0a21b60322c440ad2d20cf7685a376 foobar
  $ cat .hg/localtags
  d4f0d2909abc9290e2773c08837d70c1794e3f5a bleah1

  $ hg tag -l 'xx
  > newline'
  abort: '\n' cannot be used in a tag name
  [255]
  $ hg tag -l 'xx:xx'
  abort: ':' cannot be used in a tag name
  [255]

cloning local tags

  $ cd ..
  $ hg -R test log -r0:5
  changeset: 0:acb14030fe0a
  tag: bleah
  tag: bleah0
  tag: foobar
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: test

  changeset: 1:d4f0d2909abc
  tag: bleah1
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: Added tag bleah for changeset acb14030fe0a

  changeset: 2:336fccc858a4
  tag: gawk
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: Added tag bleah0 for changeset acb14030fe0a

  changeset: 3:799667b6f2d9
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: Added tag gack, gawk, gorp for changeset 336fccc858a4

  changeset: 4:154eeb7c0138
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: Added tag gack for changeset 799667b6f2d9

  changeset: 5:b4bb47aaff09
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: Removed tag gack, gorp

  $ hg clone -q -rbleah1 test test1
  $ hg -R test1 parents --style=compact
  1[tip] d4f0d2909abc 1970-01-01 00:00 +0000 test
  Added tag bleah for changeset acb14030fe0a

  $ hg clone -q -r5 test#bleah1 test2
  $ hg -R test2 parents --style=compact
  5[tip] b4bb47aaff09 1970-01-01 00:00 +0000 test
  Removed tag gack, gorp

  $ hg clone -q -U test#bleah1 test3
  $ hg -R test3 parents --style=compact

  $ cd test

Issue601: hg tag doesn't do the right thing if .hgtags or localtags
doesn't end with EOL

  $ python << EOF
  > f = file('.hg/localtags'); last = f.readlines()[-1][:-1]; f.close()
  > f = file('.hg/localtags', 'w'); f.write(last); f.close()
  > EOF
  $ cat .hg/localtags; echo
  d4f0d2909abc9290e2773c08837d70c1794e3f5a bleah1
  $ hg tag -l localnewline
  $ cat .hg/localtags; echo
  d4f0d2909abc9290e2773c08837d70c1794e3f5a bleah1
  c2899151f4e76890c602a2597a650a72666681bf localnewline


  $ python << EOF
  > f = file('.hgtags'); last = f.readlines()[-1][:-1]; f.close()
  > f = file('.hgtags', 'w'); f.write(last); f.close()
  > EOF
  $ hg ci -m'broken manual edit of .hgtags'
  $ cat .hgtags; echo
  acb14030fe0a21b60322c440ad2d20cf7685a376 foobar
  $ hg tag newline
  $ cat .hgtags; echo
  acb14030fe0a21b60322c440ad2d20cf7685a376 foobar
  a0eea09de1eeec777b46f2085260a373b2fbc293 newline


tag and branch using same name

  $ hg branch tag-and-branch-same-name
  marked working directory as branch tag-and-branch-same-name
  $ hg ci -m"discouraged"
  $ hg tag tag-and-branch-same-name
  warning: tag tag-and-branch-same-name conflicts with existing branch name

test custom commit messages

  $ cat > editor << '__EOF__'
  > #!/bin/sh
  > echo "custom tag message" > "$1"
  > echo "second line" >> "$1"
  > __EOF__
  $ chmod +x editor
  $ HGEDITOR="'`pwd`'"/editor hg tag custom-tag -e
  $ hg log -l1 --template "{desc}\n"
  custom tag message
  second line

local tag with .hgtags modified

  $ hg tag hgtags-modified
  $ hg rollback
  rolling back to revision 11 (undo commit)
  $ hg st
  M .hgtags
  ? .hgtags.orig
  ? editor
  $ hg tag --local baz
  $ hg revert --no-backup .hgtags
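The new stanza pins down the behavior under test: because --local writes only .hg/localtags, a modified .hgtags in the working copy must no longer abort the command (hg st above shows .hgtags still dirty when the local tag succeeds). What that localtags write amounts to, inferred from the cat output earlier in this test (hypothetical helper, not Mercurial code):

def write_local_tag(repo_root, node_hex, name):
    # append "<40-hex node> <name>\n", the line format shown by
    # 'cat .hg/localtags' above
    fp = open(repo_root + '/.hg/localtags', 'a')
    fp.write('%s %s\n' % (node_hex, name))
    fp.close()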