##// END OF EJS Templates
localrepo: ignore tags to unknown nodes (issue2750)
Idan Kamara -
r14499:a281981e stable
parent child Browse files
Show More
@@ -1,2046 +1,2051 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks
11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 supportedformats = set(('revlogv1', 'parentdelta'))
24 supportedformats = set(('revlogv1', 'parentdelta'))
25 supported = supportedformats | set(('store', 'fncache', 'shared',
25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 'dotencode'))
26 'dotencode'))
27
27
28 def __init__(self, baseui, path=None, create=0):
28 def __init__(self, baseui, path=None, create=0):
29 repo.repository.__init__(self)
29 repo.repository.__init__(self)
30 self.root = os.path.realpath(util.expandpath(path))
30 self.root = os.path.realpath(util.expandpath(path))
31 self.path = os.path.join(self.root, ".hg")
31 self.path = os.path.join(self.root, ".hg")
32 self.origroot = path
32 self.origroot = path
33 self.auditor = util.path_auditor(self.root, self._checknested)
33 self.auditor = util.path_auditor(self.root, self._checknested)
34 self.opener = util.opener(self.path)
34 self.opener = util.opener(self.path)
35 self.wopener = util.opener(self.root)
35 self.wopener = util.opener(self.root)
36 self.baseui = baseui
36 self.baseui = baseui
37 self.ui = baseui.copy()
37 self.ui = baseui.copy()
38
38
39 try:
39 try:
40 self.ui.readconfig(self.join("hgrc"), self.root)
40 self.ui.readconfig(self.join("hgrc"), self.root)
41 extensions.loadall(self.ui)
41 extensions.loadall(self.ui)
42 except IOError:
42 except IOError:
43 pass
43 pass
44
44
45 if not os.path.isdir(self.path):
45 if not os.path.isdir(self.path):
46 if create:
46 if create:
47 if not os.path.exists(path):
47 if not os.path.exists(path):
48 util.makedirs(path)
48 util.makedirs(path)
49 os.mkdir(self.path)
49 os.mkdir(self.path)
50 requirements = ["revlogv1"]
50 requirements = ["revlogv1"]
51 if self.ui.configbool('format', 'usestore', True):
51 if self.ui.configbool('format', 'usestore', True):
52 os.mkdir(os.path.join(self.path, "store"))
52 os.mkdir(os.path.join(self.path, "store"))
53 requirements.append("store")
53 requirements.append("store")
54 if self.ui.configbool('format', 'usefncache', True):
54 if self.ui.configbool('format', 'usefncache', True):
55 requirements.append("fncache")
55 requirements.append("fncache")
56 if self.ui.configbool('format', 'dotencode', True):
56 if self.ui.configbool('format', 'dotencode', True):
57 requirements.append('dotencode')
57 requirements.append('dotencode')
58 # create an invalid changelog
58 # create an invalid changelog
59 self.opener("00changelog.i", "a").write(
59 self.opener("00changelog.i", "a").write(
60 '\0\0\0\2' # represents revlogv2
60 '\0\0\0\2' # represents revlogv2
61 ' dummy changelog to prevent using the old repo layout'
61 ' dummy changelog to prevent using the old repo layout'
62 )
62 )
63 if self.ui.configbool('format', 'parentdelta', False):
63 if self.ui.configbool('format', 'parentdelta', False):
64 requirements.append("parentdelta")
64 requirements.append("parentdelta")
65 else:
65 else:
66 raise error.RepoError(_("repository %s not found") % path)
66 raise error.RepoError(_("repository %s not found") % path)
67 elif create:
67 elif create:
68 raise error.RepoError(_("repository %s already exists") % path)
68 raise error.RepoError(_("repository %s already exists") % path)
69 else:
69 else:
70 # find requirements
70 # find requirements
71 requirements = set()
71 requirements = set()
72 try:
72 try:
73 requirements = set(self.opener("requires").read().splitlines())
73 requirements = set(self.opener("requires").read().splitlines())
74 except IOError, inst:
74 except IOError, inst:
75 if inst.errno != errno.ENOENT:
75 if inst.errno != errno.ENOENT:
76 raise
76 raise
77 for r in requirements - self.supported:
77 for r in requirements - self.supported:
78 raise error.RequirementError(
78 raise error.RequirementError(
79 _("requirement '%s' not supported") % r)
79 _("requirement '%s' not supported") % r)
80
80
81 self.sharedpath = self.path
81 self.sharedpath = self.path
82 try:
82 try:
83 s = os.path.realpath(self.opener("sharedpath").read())
83 s = os.path.realpath(self.opener("sharedpath").read())
84 if not os.path.exists(s):
84 if not os.path.exists(s):
85 raise error.RepoError(
85 raise error.RepoError(
86 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 _('.hg/sharedpath points to nonexistent directory %s') % s)
87 self.sharedpath = s
87 self.sharedpath = s
88 except IOError, inst:
88 except IOError, inst:
89 if inst.errno != errno.ENOENT:
89 if inst.errno != errno.ENOENT:
90 raise
90 raise
91
91
92 self.store = store.store(requirements, self.sharedpath, util.opener)
92 self.store = store.store(requirements, self.sharedpath, util.opener)
93 self.spath = self.store.path
93 self.spath = self.store.path
94 self.sopener = self.store.opener
94 self.sopener = self.store.opener
95 self.sjoin = self.store.join
95 self.sjoin = self.store.join
96 self.opener.createmode = self.store.createmode
96 self.opener.createmode = self.store.createmode
97 self._applyrequirements(requirements)
97 self._applyrequirements(requirements)
98 if create:
98 if create:
99 self._writerequirements()
99 self._writerequirements()
100
100
101 # These two define the set of tags for this repository. _tags
101 # These two define the set of tags for this repository. _tags
102 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 # maps tag name to node; _tagtypes maps tag name to 'global' or
103 # 'local'. (Global tags are defined by .hgtags across all
103 # 'local'. (Global tags are defined by .hgtags across all
104 # heads, and local tags are defined in .hg/localtags.) They
104 # heads, and local tags are defined in .hg/localtags.) They
105 # constitute the in-memory cache of tags.
105 # constitute the in-memory cache of tags.
106 self._tags = None
106 self._tags = None
107 self._tagtypes = None
107 self._tagtypes = None
108
108
109 self._branchcache = None
109 self._branchcache = None
110 self._branchcachetip = None
110 self._branchcachetip = None
111 self.nodetagscache = None
111 self.nodetagscache = None
112 self.filterpats = {}
112 self.filterpats = {}
113 self._datafilters = {}
113 self._datafilters = {}
114 self._transref = self._lockref = self._wlockref = None
114 self._transref = self._lockref = self._wlockref = None
115
115
116 def _applyrequirements(self, requirements):
116 def _applyrequirements(self, requirements):
117 self.requirements = requirements
117 self.requirements = requirements
118 self.sopener.options = {}
118 self.sopener.options = {}
119 if 'parentdelta' in requirements:
119 if 'parentdelta' in requirements:
120 self.sopener.options['parentdelta'] = 1
120 self.sopener.options['parentdelta'] = 1
121
121
122 def _writerequirements(self):
122 def _writerequirements(self):
123 reqfile = self.opener("requires", "w")
123 reqfile = self.opener("requires", "w")
124 for r in self.requirements:
124 for r in self.requirements:
125 reqfile.write("%s\n" % r)
125 reqfile.write("%s\n" % r)
126 reqfile.close()
126 reqfile.close()
127
127
128 def _checknested(self, path):
128 def _checknested(self, path):
129 """Determine if path is a legal nested repository."""
129 """Determine if path is a legal nested repository."""
130 if not path.startswith(self.root):
130 if not path.startswith(self.root):
131 return False
131 return False
132 subpath = path[len(self.root) + 1:]
132 subpath = path[len(self.root) + 1:]
133
133
134 # XXX: Checking against the current working copy is wrong in
134 # XXX: Checking against the current working copy is wrong in
135 # the sense that it can reject things like
135 # the sense that it can reject things like
136 #
136 #
137 # $ hg cat -r 10 sub/x.txt
137 # $ hg cat -r 10 sub/x.txt
138 #
138 #
139 # if sub/ is no longer a subrepository in the working copy
139 # if sub/ is no longer a subrepository in the working copy
140 # parent revision.
140 # parent revision.
141 #
141 #
142 # However, it can of course also allow things that would have
142 # However, it can of course also allow things that would have
143 # been rejected before, such as the above cat command if sub/
143 # been rejected before, such as the above cat command if sub/
144 # is a subrepository now, but was a normal directory before.
144 # is a subrepository now, but was a normal directory before.
145 # The old path auditor would have rejected by mistake since it
145 # The old path auditor would have rejected by mistake since it
146 # panics when it sees sub/.hg/.
146 # panics when it sees sub/.hg/.
147 #
147 #
148 # All in all, checking against the working copy seems sensible
148 # All in all, checking against the working copy seems sensible
149 # since we want to prevent access to nested repositories on
149 # since we want to prevent access to nested repositories on
150 # the filesystem *now*.
150 # the filesystem *now*.
151 ctx = self[None]
151 ctx = self[None]
152 parts = util.splitpath(subpath)
152 parts = util.splitpath(subpath)
153 while parts:
153 while parts:
154 prefix = os.sep.join(parts)
154 prefix = os.sep.join(parts)
155 if prefix in ctx.substate:
155 if prefix in ctx.substate:
156 if prefix == subpath:
156 if prefix == subpath:
157 return True
157 return True
158 else:
158 else:
159 sub = ctx.sub(prefix)
159 sub = ctx.sub(prefix)
160 return sub.checknested(subpath[len(prefix) + 1:])
160 return sub.checknested(subpath[len(prefix) + 1:])
161 else:
161 else:
162 parts.pop()
162 parts.pop()
163 return False
163 return False
164
164
165 @util.propertycache
165 @util.propertycache
166 def _bookmarks(self):
166 def _bookmarks(self):
167 return bookmarks.read(self)
167 return bookmarks.read(self)
168
168
169 @util.propertycache
169 @util.propertycache
170 def _bookmarkcurrent(self):
170 def _bookmarkcurrent(self):
171 return bookmarks.readcurrent(self)
171 return bookmarks.readcurrent(self)
172
172
173 @propertycache
173 @propertycache
174 def changelog(self):
174 def changelog(self):
175 c = changelog.changelog(self.sopener)
175 c = changelog.changelog(self.sopener)
176 if 'HG_PENDING' in os.environ:
176 if 'HG_PENDING' in os.environ:
177 p = os.environ['HG_PENDING']
177 p = os.environ['HG_PENDING']
178 if p.startswith(self.root):
178 if p.startswith(self.root):
179 c.readpending('00changelog.i.a')
179 c.readpending('00changelog.i.a')
180 self.sopener.options['defversion'] = c.version
180 self.sopener.options['defversion'] = c.version
181 return c
181 return c
182
182
183 @propertycache
183 @propertycache
184 def manifest(self):
184 def manifest(self):
185 return manifest.manifest(self.sopener)
185 return manifest.manifest(self.sopener)
186
186
187 @propertycache
187 @propertycache
188 def dirstate(self):
188 def dirstate(self):
189 warned = [0]
189 warned = [0]
190 def validate(node):
190 def validate(node):
191 try:
191 try:
192 r = self.changelog.rev(node)
192 r = self.changelog.rev(node)
193 return node
193 return node
194 except error.LookupError:
194 except error.LookupError:
195 if not warned[0]:
195 if not warned[0]:
196 warned[0] = True
196 warned[0] = True
197 self.ui.warn(_("warning: ignoring unknown"
197 self.ui.warn(_("warning: ignoring unknown"
198 " working parent %s!\n") % short(node))
198 " working parent %s!\n") % short(node))
199 return nullid
199 return nullid
200
200
201 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
201 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
202
202
203 def __getitem__(self, changeid):
203 def __getitem__(self, changeid):
204 if changeid is None:
204 if changeid is None:
205 return context.workingctx(self)
205 return context.workingctx(self)
206 return context.changectx(self, changeid)
206 return context.changectx(self, changeid)
207
207
208 def __contains__(self, changeid):
208 def __contains__(self, changeid):
209 try:
209 try:
210 return bool(self.lookup(changeid))
210 return bool(self.lookup(changeid))
211 except error.RepoLookupError:
211 except error.RepoLookupError:
212 return False
212 return False
213
213
214 def __nonzero__(self):
214 def __nonzero__(self):
215 return True
215 return True
216
216
217 def __len__(self):
217 def __len__(self):
218 return len(self.changelog)
218 return len(self.changelog)
219
219
220 def __iter__(self):
220 def __iter__(self):
221 for i in xrange(len(self)):
221 for i in xrange(len(self)):
222 yield i
222 yield i
223
223
224 def url(self):
224 def url(self):
225 return 'file:' + self.root
225 return 'file:' + self.root
226
226
227 def hook(self, name, throw=False, **args):
227 def hook(self, name, throw=False, **args):
228 return hook.hook(self.ui, self, name, throw, **args)
228 return hook.hook(self.ui, self, name, throw, **args)
229
229
230 tag_disallowed = ':\r\n'
230 tag_disallowed = ':\r\n'
231
231
232 def _tag(self, names, node, message, local, user, date, extra={}):
232 def _tag(self, names, node, message, local, user, date, extra={}):
233 if isinstance(names, str):
233 if isinstance(names, str):
234 allchars = names
234 allchars = names
235 names = (names,)
235 names = (names,)
236 else:
236 else:
237 allchars = ''.join(names)
237 allchars = ''.join(names)
238 for c in self.tag_disallowed:
238 for c in self.tag_disallowed:
239 if c in allchars:
239 if c in allchars:
240 raise util.Abort(_('%r cannot be used in a tag name') % c)
240 raise util.Abort(_('%r cannot be used in a tag name') % c)
241
241
242 branches = self.branchmap()
242 branches = self.branchmap()
243 for name in names:
243 for name in names:
244 self.hook('pretag', throw=True, node=hex(node), tag=name,
244 self.hook('pretag', throw=True, node=hex(node), tag=name,
245 local=local)
245 local=local)
246 if name in branches:
246 if name in branches:
247 self.ui.warn(_("warning: tag %s conflicts with existing"
247 self.ui.warn(_("warning: tag %s conflicts with existing"
248 " branch name\n") % name)
248 " branch name\n") % name)
249
249
250 def writetags(fp, names, munge, prevtags):
250 def writetags(fp, names, munge, prevtags):
251 fp.seek(0, 2)
251 fp.seek(0, 2)
252 if prevtags and prevtags[-1] != '\n':
252 if prevtags and prevtags[-1] != '\n':
253 fp.write('\n')
253 fp.write('\n')
254 for name in names:
254 for name in names:
255 m = munge and munge(name) or name
255 m = munge and munge(name) or name
256 if self._tagtypes and name in self._tagtypes:
256 if self._tagtypes and name in self._tagtypes:
257 old = self._tags.get(name, nullid)
257 old = self._tags.get(name, nullid)
258 fp.write('%s %s\n' % (hex(old), m))
258 fp.write('%s %s\n' % (hex(old), m))
259 fp.write('%s %s\n' % (hex(node), m))
259 fp.write('%s %s\n' % (hex(node), m))
260 fp.close()
260 fp.close()
261
261
262 prevtags = ''
262 prevtags = ''
263 if local:
263 if local:
264 try:
264 try:
265 fp = self.opener('localtags', 'r+')
265 fp = self.opener('localtags', 'r+')
266 except IOError:
266 except IOError:
267 fp = self.opener('localtags', 'a')
267 fp = self.opener('localtags', 'a')
268 else:
268 else:
269 prevtags = fp.read()
269 prevtags = fp.read()
270
270
271 # local tags are stored in the current charset
271 # local tags are stored in the current charset
272 writetags(fp, names, None, prevtags)
272 writetags(fp, names, None, prevtags)
273 for name in names:
273 for name in names:
274 self.hook('tag', node=hex(node), tag=name, local=local)
274 self.hook('tag', node=hex(node), tag=name, local=local)
275 return
275 return
276
276
277 try:
277 try:
278 fp = self.wfile('.hgtags', 'rb+')
278 fp = self.wfile('.hgtags', 'rb+')
279 except IOError:
279 except IOError:
280 fp = self.wfile('.hgtags', 'ab')
280 fp = self.wfile('.hgtags', 'ab')
281 else:
281 else:
282 prevtags = fp.read()
282 prevtags = fp.read()
283
283
284 # committed tags are stored in UTF-8
284 # committed tags are stored in UTF-8
285 writetags(fp, names, encoding.fromlocal, prevtags)
285 writetags(fp, names, encoding.fromlocal, prevtags)
286
286
287 fp.close()
287 fp.close()
288
288
289 if '.hgtags' not in self.dirstate:
289 if '.hgtags' not in self.dirstate:
290 self[None].add(['.hgtags'])
290 self[None].add(['.hgtags'])
291
291
292 m = matchmod.exact(self.root, '', ['.hgtags'])
292 m = matchmod.exact(self.root, '', ['.hgtags'])
293 tagnode = self.commit(message, user, date, extra=extra, match=m)
293 tagnode = self.commit(message, user, date, extra=extra, match=m)
294
294
295 for name in names:
295 for name in names:
296 self.hook('tag', node=hex(node), tag=name, local=local)
296 self.hook('tag', node=hex(node), tag=name, local=local)
297
297
298 return tagnode
298 return tagnode
299
299
300 def tag(self, names, node, message, local, user, date):
300 def tag(self, names, node, message, local, user, date):
301 '''tag a revision with one or more symbolic names.
301 '''tag a revision with one or more symbolic names.
302
302
303 names is a list of strings or, when adding a single tag, names may be a
303 names is a list of strings or, when adding a single tag, names may be a
304 string.
304 string.
305
305
306 if local is True, the tags are stored in a per-repository file.
306 if local is True, the tags are stored in a per-repository file.
307 otherwise, they are stored in the .hgtags file, and a new
307 otherwise, they are stored in the .hgtags file, and a new
308 changeset is committed with the change.
308 changeset is committed with the change.
309
309
310 keyword arguments:
310 keyword arguments:
311
311
312 local: whether to store tags in non-version-controlled file
312 local: whether to store tags in non-version-controlled file
313 (default False)
313 (default False)
314
314
315 message: commit message to use if committing
315 message: commit message to use if committing
316
316
317 user: name of user to use if committing
317 user: name of user to use if committing
318
318
319 date: date tuple to use if committing'''
319 date: date tuple to use if committing'''
320
320
321 if not local:
321 if not local:
322 for x in self.status()[:5]:
322 for x in self.status()[:5]:
323 if '.hgtags' in x:
323 if '.hgtags' in x:
324 raise util.Abort(_('working copy of .hgtags is changed '
324 raise util.Abort(_('working copy of .hgtags is changed '
325 '(please commit .hgtags manually)'))
325 '(please commit .hgtags manually)'))
326
326
327 self.tags() # instantiate the cache
327 self.tags() # instantiate the cache
328 self._tag(names, node, message, local, user, date)
328 self._tag(names, node, message, local, user, date)
329
329
330 def tags(self):
330 def tags(self):
331 '''return a mapping of tag to node'''
331 '''return a mapping of tag to node'''
332 if self._tags is None:
332 if self._tags is None:
333 (self._tags, self._tagtypes) = self._findtags()
333 (self._tags, self._tagtypes) = self._findtags()
334
334
335 return self._tags
335 return self._tags
336
336
337 def _findtags(self):
337 def _findtags(self):
338 '''Do the hard work of finding tags. Return a pair of dicts
338 '''Do the hard work of finding tags. Return a pair of dicts
339 (tags, tagtypes) where tags maps tag name to node, and tagtypes
339 (tags, tagtypes) where tags maps tag name to node, and tagtypes
340 maps tag name to a string like \'global\' or \'local\'.
340 maps tag name to a string like \'global\' or \'local\'.
341 Subclasses or extensions are free to add their own tags, but
341 Subclasses or extensions are free to add their own tags, but
342 should be aware that the returned dicts will be retained for the
342 should be aware that the returned dicts will be retained for the
343 duration of the localrepo object.'''
343 duration of the localrepo object.'''
344
344
345 # XXX what tagtype should subclasses/extensions use? Currently
345 # XXX what tagtype should subclasses/extensions use? Currently
346 # mq and bookmarks add tags, but do not set the tagtype at all.
346 # mq and bookmarks add tags, but do not set the tagtype at all.
347 # Should each extension invent its own tag type? Should there
347 # Should each extension invent its own tag type? Should there
348 # be one tagtype for all such "virtual" tags? Or is the status
348 # be one tagtype for all such "virtual" tags? Or is the status
349 # quo fine?
349 # quo fine?
350
350
351 alltags = {} # map tag name to (node, hist)
351 alltags = {} # map tag name to (node, hist)
352 tagtypes = {}
352 tagtypes = {}
353
353
354 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
354 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
355 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
355 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
356
356
357 # Build the return dicts. Have to re-encode tag names because
357 # Build the return dicts. Have to re-encode tag names because
358 # the tags module always uses UTF-8 (in order not to lose info
358 # the tags module always uses UTF-8 (in order not to lose info
359 # writing to the cache), but the rest of Mercurial wants them in
359 # writing to the cache), but the rest of Mercurial wants them in
360 # local encoding.
360 # local encoding.
361 tags = {}
361 tags = {}
362 for (name, (node, hist)) in alltags.iteritems():
362 for (name, (node, hist)) in alltags.iteritems():
363 if node != nullid:
363 if node != nullid:
364 tags[encoding.tolocal(name)] = node
364 try:
365 # ignore tags to unknown nodes
366 self.changelog.lookup(node)
367 tags[encoding.tolocal(name)] = node
368 except error.LookupError:
369 pass
365 tags['tip'] = self.changelog.tip()
370 tags['tip'] = self.changelog.tip()
366 tagtypes = dict([(encoding.tolocal(name), value)
371 tagtypes = dict([(encoding.tolocal(name), value)
367 for (name, value) in tagtypes.iteritems()])
372 for (name, value) in tagtypes.iteritems()])
368 return (tags, tagtypes)
373 return (tags, tagtypes)
369
374
370 def tagtype(self, tagname):
375 def tagtype(self, tagname):
371 '''
376 '''
372 return the type of the given tag. result can be:
377 return the type of the given tag. result can be:
373
378
374 'local' : a local tag
379 'local' : a local tag
375 'global' : a global tag
380 'global' : a global tag
376 None : tag does not exist
381 None : tag does not exist
377 '''
382 '''
378
383
379 self.tags()
384 self.tags()
380
385
381 return self._tagtypes.get(tagname)
386 return self._tagtypes.get(tagname)
382
387
383 def tagslist(self):
388 def tagslist(self):
384 '''return a list of tags ordered by revision'''
389 '''return a list of tags ordered by revision'''
385 l = []
390 l = []
386 for t, n in self.tags().iteritems():
391 for t, n in self.tags().iteritems():
387 try:
392 try:
388 r = self.changelog.rev(n)
393 r = self.changelog.rev(n)
389 except:
394 except:
390 r = -2 # sort to the beginning of the list if unknown
395 r = -2 # sort to the beginning of the list if unknown
391 l.append((r, t, n))
396 l.append((r, t, n))
392 return [(t, n) for r, t, n in sorted(l)]
397 return [(t, n) for r, t, n in sorted(l)]
393
398
394 def nodetags(self, node):
399 def nodetags(self, node):
395 '''return the tags associated with a node'''
400 '''return the tags associated with a node'''
396 if not self.nodetagscache:
401 if not self.nodetagscache:
397 self.nodetagscache = {}
402 self.nodetagscache = {}
398 for t, n in self.tags().iteritems():
403 for t, n in self.tags().iteritems():
399 self.nodetagscache.setdefault(n, []).append(t)
404 self.nodetagscache.setdefault(n, []).append(t)
400 for tags in self.nodetagscache.itervalues():
405 for tags in self.nodetagscache.itervalues():
401 tags.sort()
406 tags.sort()
402 return self.nodetagscache.get(node, [])
407 return self.nodetagscache.get(node, [])
403
408
404 def nodebookmarks(self, node):
409 def nodebookmarks(self, node):
405 marks = []
410 marks = []
406 for bookmark, n in self._bookmarks.iteritems():
411 for bookmark, n in self._bookmarks.iteritems():
407 if n == node:
412 if n == node:
408 marks.append(bookmark)
413 marks.append(bookmark)
409 return sorted(marks)
414 return sorted(marks)
410
415
411 def _branchtags(self, partial, lrev):
416 def _branchtags(self, partial, lrev):
412 # TODO: rename this function?
417 # TODO: rename this function?
413 tiprev = len(self) - 1
418 tiprev = len(self) - 1
414 if lrev != tiprev:
419 if lrev != tiprev:
415 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
420 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
416 self._updatebranchcache(partial, ctxgen)
421 self._updatebranchcache(partial, ctxgen)
417 self._writebranchcache(partial, self.changelog.tip(), tiprev)
422 self._writebranchcache(partial, self.changelog.tip(), tiprev)
418
423
419 return partial
424 return partial
420
425
421 def updatebranchcache(self):
426 def updatebranchcache(self):
422 tip = self.changelog.tip()
427 tip = self.changelog.tip()
423 if self._branchcache is not None and self._branchcachetip == tip:
428 if self._branchcache is not None and self._branchcachetip == tip:
424 return self._branchcache
429 return self._branchcache
425
430
426 oldtip = self._branchcachetip
431 oldtip = self._branchcachetip
427 self._branchcachetip = tip
432 self._branchcachetip = tip
428 if oldtip is None or oldtip not in self.changelog.nodemap:
433 if oldtip is None or oldtip not in self.changelog.nodemap:
429 partial, last, lrev = self._readbranchcache()
434 partial, last, lrev = self._readbranchcache()
430 else:
435 else:
431 lrev = self.changelog.rev(oldtip)
436 lrev = self.changelog.rev(oldtip)
432 partial = self._branchcache
437 partial = self._branchcache
433
438
434 self._branchtags(partial, lrev)
439 self._branchtags(partial, lrev)
435 # this private cache holds all heads (not just tips)
440 # this private cache holds all heads (not just tips)
436 self._branchcache = partial
441 self._branchcache = partial
437
442
438 def branchmap(self):
443 def branchmap(self):
439 '''returns a dictionary {branch: [branchheads]}'''
444 '''returns a dictionary {branch: [branchheads]}'''
440 self.updatebranchcache()
445 self.updatebranchcache()
441 return self._branchcache
446 return self._branchcache
442
447
443 def branchtags(self):
448 def branchtags(self):
444 '''return a dict where branch names map to the tipmost head of
449 '''return a dict where branch names map to the tipmost head of
445 the branch, open heads come before closed'''
450 the branch, open heads come before closed'''
446 bt = {}
451 bt = {}
447 for bn, heads in self.branchmap().iteritems():
452 for bn, heads in self.branchmap().iteritems():
448 tip = heads[-1]
453 tip = heads[-1]
449 for h in reversed(heads):
454 for h in reversed(heads):
450 if 'close' not in self.changelog.read(h)[5]:
455 if 'close' not in self.changelog.read(h)[5]:
451 tip = h
456 tip = h
452 break
457 break
453 bt[bn] = tip
458 bt[bn] = tip
454 return bt
459 return bt
455
460
456 def _readbranchcache(self):
461 def _readbranchcache(self):
457 partial = {}
462 partial = {}
458 try:
463 try:
459 f = self.opener("cache/branchheads")
464 f = self.opener("cache/branchheads")
460 lines = f.read().split('\n')
465 lines = f.read().split('\n')
461 f.close()
466 f.close()
462 except (IOError, OSError):
467 except (IOError, OSError):
463 return {}, nullid, nullrev
468 return {}, nullid, nullrev
464
469
465 try:
470 try:
466 last, lrev = lines.pop(0).split(" ", 1)
471 last, lrev = lines.pop(0).split(" ", 1)
467 last, lrev = bin(last), int(lrev)
472 last, lrev = bin(last), int(lrev)
468 if lrev >= len(self) or self[lrev].node() != last:
473 if lrev >= len(self) or self[lrev].node() != last:
469 # invalidate the cache
474 # invalidate the cache
470 raise ValueError('invalidating branch cache (tip differs)')
475 raise ValueError('invalidating branch cache (tip differs)')
471 for l in lines:
476 for l in lines:
472 if not l:
477 if not l:
473 continue
478 continue
474 node, label = l.split(" ", 1)
479 node, label = l.split(" ", 1)
475 label = encoding.tolocal(label.strip())
480 label = encoding.tolocal(label.strip())
476 partial.setdefault(label, []).append(bin(node))
481 partial.setdefault(label, []).append(bin(node))
477 except KeyboardInterrupt:
482 except KeyboardInterrupt:
478 raise
483 raise
479 except Exception, inst:
484 except Exception, inst:
480 if self.ui.debugflag:
485 if self.ui.debugflag:
481 self.ui.warn(str(inst), '\n')
486 self.ui.warn(str(inst), '\n')
482 partial, last, lrev = {}, nullid, nullrev
487 partial, last, lrev = {}, nullid, nullrev
483 return partial, last, lrev
488 return partial, last, lrev
484
489
485 def _writebranchcache(self, branches, tip, tiprev):
490 def _writebranchcache(self, branches, tip, tiprev):
486 try:
491 try:
487 f = self.opener("cache/branchheads", "w", atomictemp=True)
492 f = self.opener("cache/branchheads", "w", atomictemp=True)
488 f.write("%s %s\n" % (hex(tip), tiprev))
493 f.write("%s %s\n" % (hex(tip), tiprev))
489 for label, nodes in branches.iteritems():
494 for label, nodes in branches.iteritems():
490 for node in nodes:
495 for node in nodes:
491 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
496 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
492 f.rename()
497 f.rename()
493 except (IOError, OSError):
498 except (IOError, OSError):
494 pass
499 pass
495
500
496 def _updatebranchcache(self, partial, ctxgen):
501 def _updatebranchcache(self, partial, ctxgen):
497 # collect new branch entries
502 # collect new branch entries
498 newbranches = {}
503 newbranches = {}
499 for c in ctxgen:
504 for c in ctxgen:
500 newbranches.setdefault(c.branch(), []).append(c.node())
505 newbranches.setdefault(c.branch(), []).append(c.node())
501 # if older branchheads are reachable from new ones, they aren't
506 # if older branchheads are reachable from new ones, they aren't
502 # really branchheads. Note checking parents is insufficient:
507 # really branchheads. Note checking parents is insufficient:
503 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
508 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
504 for branch, newnodes in newbranches.iteritems():
509 for branch, newnodes in newbranches.iteritems():
505 bheads = partial.setdefault(branch, [])
510 bheads = partial.setdefault(branch, [])
506 bheads.extend(newnodes)
511 bheads.extend(newnodes)
507 if len(bheads) <= 1:
512 if len(bheads) <= 1:
508 continue
513 continue
509 # starting from tip means fewer passes over reachable
514 # starting from tip means fewer passes over reachable
510 while newnodes:
515 while newnodes:
511 latest = newnodes.pop()
516 latest = newnodes.pop()
512 if latest not in bheads:
517 if latest not in bheads:
513 continue
518 continue
514 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
519 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
515 reachable = self.changelog.reachable(latest, minbhrev)
520 reachable = self.changelog.reachable(latest, minbhrev)
516 reachable.remove(latest)
521 reachable.remove(latest)
517 bheads = [b for b in bheads if b not in reachable]
522 bheads = [b for b in bheads if b not in reachable]
518 partial[branch] = bheads
523 partial[branch] = bheads
519
524
520 def lookup(self, key):
525 def lookup(self, key):
521 if isinstance(key, int):
526 if isinstance(key, int):
522 return self.changelog.node(key)
527 return self.changelog.node(key)
523 elif key == '.':
528 elif key == '.':
524 return self.dirstate.parents()[0]
529 return self.dirstate.parents()[0]
525 elif key == 'null':
530 elif key == 'null':
526 return nullid
531 return nullid
527 elif key == 'tip':
532 elif key == 'tip':
528 return self.changelog.tip()
533 return self.changelog.tip()
529 n = self.changelog._match(key)
534 n = self.changelog._match(key)
530 if n:
535 if n:
531 return n
536 return n
532 if key in self._bookmarks:
537 if key in self._bookmarks:
533 return self._bookmarks[key]
538 return self._bookmarks[key]
534 if key in self.tags():
539 if key in self.tags():
535 return self.tags()[key]
540 return self.tags()[key]
536 if key in self.branchtags():
541 if key in self.branchtags():
537 return self.branchtags()[key]
542 return self.branchtags()[key]
538 n = self.changelog._partialmatch(key)
543 n = self.changelog._partialmatch(key)
539 if n:
544 if n:
540 return n
545 return n
541
546
542 # can't find key, check if it might have come from damaged dirstate
547 # can't find key, check if it might have come from damaged dirstate
543 if key in self.dirstate.parents():
548 if key in self.dirstate.parents():
544 raise error.Abort(_("working directory has unknown parent '%s'!")
549 raise error.Abort(_("working directory has unknown parent '%s'!")
545 % short(key))
550 % short(key))
546 try:
551 try:
547 if len(key) == 20:
552 if len(key) == 20:
548 key = hex(key)
553 key = hex(key)
549 except:
554 except:
550 pass
555 pass
551 raise error.RepoLookupError(_("unknown revision '%s'") % key)
556 raise error.RepoLookupError(_("unknown revision '%s'") % key)
552
557
553 def lookupbranch(self, key, remote=None):
558 def lookupbranch(self, key, remote=None):
554 repo = remote or self
559 repo = remote or self
555 if key in repo.branchmap():
560 if key in repo.branchmap():
556 return key
561 return key
557
562
558 repo = (remote and remote.local()) and remote or self
563 repo = (remote and remote.local()) and remote or self
559 return repo[key].branch()
564 return repo[key].branch()
560
565
561 def local(self):
566 def local(self):
562 return True
567 return True
563
568
564 def join(self, f):
569 def join(self, f):
565 return os.path.join(self.path, f)
570 return os.path.join(self.path, f)
566
571
567 def wjoin(self, f):
572 def wjoin(self, f):
568 return os.path.join(self.root, f)
573 return os.path.join(self.root, f)
569
574
570 def file(self, f):
575 def file(self, f):
571 if f[0] == '/':
576 if f[0] == '/':
572 f = f[1:]
577 f = f[1:]
573 return filelog.filelog(self.sopener, f)
578 return filelog.filelog(self.sopener, f)
574
579
575 def changectx(self, changeid):
580 def changectx(self, changeid):
576 return self[changeid]
581 return self[changeid]
577
582
578 def parents(self, changeid=None):
583 def parents(self, changeid=None):
579 '''get list of changectxs for parents of changeid'''
584 '''get list of changectxs for parents of changeid'''
580 return self[changeid].parents()
585 return self[changeid].parents()
581
586
582 def filectx(self, path, changeid=None, fileid=None):
587 def filectx(self, path, changeid=None, fileid=None):
583 """changeid can be a changeset revision, node, or tag.
588 """changeid can be a changeset revision, node, or tag.
584 fileid can be a file revision or node."""
589 fileid can be a file revision or node."""
585 return context.filectx(self, path, changeid, fileid)
590 return context.filectx(self, path, changeid, fileid)
586
591
587 def getcwd(self):
592 def getcwd(self):
588 return self.dirstate.getcwd()
593 return self.dirstate.getcwd()
589
594
590 def pathto(self, f, cwd=None):
595 def pathto(self, f, cwd=None):
591 return self.dirstate.pathto(f, cwd)
596 return self.dirstate.pathto(f, cwd)
592
597
593 def wfile(self, f, mode='r'):
598 def wfile(self, f, mode='r'):
594 return self.wopener(f, mode)
599 return self.wopener(f, mode)
595
600
596 def _link(self, f):
601 def _link(self, f):
597 return os.path.islink(self.wjoin(f))
602 return os.path.islink(self.wjoin(f))
598
603
599 def _loadfilter(self, filter):
604 def _loadfilter(self, filter):
600 if filter not in self.filterpats:
605 if filter not in self.filterpats:
601 l = []
606 l = []
602 for pat, cmd in self.ui.configitems(filter):
607 for pat, cmd in self.ui.configitems(filter):
603 if cmd == '!':
608 if cmd == '!':
604 continue
609 continue
605 mf = matchmod.match(self.root, '', [pat])
610 mf = matchmod.match(self.root, '', [pat])
606 fn = None
611 fn = None
607 params = cmd
612 params = cmd
608 for name, filterfn in self._datafilters.iteritems():
613 for name, filterfn in self._datafilters.iteritems():
609 if cmd.startswith(name):
614 if cmd.startswith(name):
610 fn = filterfn
615 fn = filterfn
611 params = cmd[len(name):].lstrip()
616 params = cmd[len(name):].lstrip()
612 break
617 break
613 if not fn:
618 if not fn:
614 fn = lambda s, c, **kwargs: util.filter(s, c)
619 fn = lambda s, c, **kwargs: util.filter(s, c)
615 # Wrap old filters not supporting keyword arguments
620 # Wrap old filters not supporting keyword arguments
616 if not inspect.getargspec(fn)[2]:
621 if not inspect.getargspec(fn)[2]:
617 oldfn = fn
622 oldfn = fn
618 fn = lambda s, c, **kwargs: oldfn(s, c)
623 fn = lambda s, c, **kwargs: oldfn(s, c)
619 l.append((mf, fn, params))
624 l.append((mf, fn, params))
620 self.filterpats[filter] = l
625 self.filterpats[filter] = l
621 return self.filterpats[filter]
626 return self.filterpats[filter]
622
627
623 def _filter(self, filterpats, filename, data):
628 def _filter(self, filterpats, filename, data):
624 for mf, fn, cmd in filterpats:
629 for mf, fn, cmd in filterpats:
625 if mf(filename):
630 if mf(filename):
626 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
631 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
627 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
632 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
628 break
633 break
629
634
630 return data
635 return data
631
636
632 @propertycache
637 @propertycache
633 def _encodefilterpats(self):
638 def _encodefilterpats(self):
634 return self._loadfilter('encode')
639 return self._loadfilter('encode')
635
640
636 @propertycache
641 @propertycache
637 def _decodefilterpats(self):
642 def _decodefilterpats(self):
638 return self._loadfilter('decode')
643 return self._loadfilter('decode')
639
644
640 def adddatafilter(self, name, filter):
645 def adddatafilter(self, name, filter):
641 self._datafilters[name] = filter
646 self._datafilters[name] = filter
642
647
643 def wread(self, filename):
648 def wread(self, filename):
644 if self._link(filename):
649 if self._link(filename):
645 data = os.readlink(self.wjoin(filename))
650 data = os.readlink(self.wjoin(filename))
646 else:
651 else:
647 data = self.wopener(filename, 'r').read()
652 data = self.wopener(filename, 'r').read()
648 return self._filter(self._encodefilterpats, filename, data)
653 return self._filter(self._encodefilterpats, filename, data)
649
654
650 def wwrite(self, filename, data, flags):
655 def wwrite(self, filename, data, flags):
651 data = self._filter(self._decodefilterpats, filename, data)
656 data = self._filter(self._decodefilterpats, filename, data)
652 if 'l' in flags:
657 if 'l' in flags:
653 self.wopener.symlink(data, filename)
658 self.wopener.symlink(data, filename)
654 else:
659 else:
655 self.wopener(filename, 'w').write(data)
660 self.wopener(filename, 'w').write(data)
656 if 'x' in flags:
661 if 'x' in flags:
657 util.set_flags(self.wjoin(filename), False, True)
662 util.set_flags(self.wjoin(filename), False, True)
658
663
659 def wwritedata(self, filename, data):
664 def wwritedata(self, filename, data):
660 return self._filter(self._decodefilterpats, filename, data)
665 return self._filter(self._decodefilterpats, filename, data)
661
666
662 def transaction(self, desc):
667 def transaction(self, desc):
663 tr = self._transref and self._transref() or None
668 tr = self._transref and self._transref() or None
664 if tr and tr.running():
669 if tr and tr.running():
665 return tr.nest()
670 return tr.nest()
666
671
667 # abort here if the journal already exists
672 # abort here if the journal already exists
668 if os.path.exists(self.sjoin("journal")):
673 if os.path.exists(self.sjoin("journal")):
669 raise error.RepoError(
674 raise error.RepoError(
670 _("abandoned transaction found - run hg recover"))
675 _("abandoned transaction found - run hg recover"))
671
676
672 journalfiles = self._writejournal(desc)
677 journalfiles = self._writejournal(desc)
673 renames = [(x, undoname(x)) for x in journalfiles]
678 renames = [(x, undoname(x)) for x in journalfiles]
674
679
675 tr = transaction.transaction(self.ui.warn, self.sopener,
680 tr = transaction.transaction(self.ui.warn, self.sopener,
676 self.sjoin("journal"),
681 self.sjoin("journal"),
677 aftertrans(renames),
682 aftertrans(renames),
678 self.store.createmode)
683 self.store.createmode)
679 self._transref = weakref.ref(tr)
684 self._transref = weakref.ref(tr)
680 return tr
685 return tr
681
686
682 def _writejournal(self, desc):
687 def _writejournal(self, desc):
683 # save dirstate for rollback
688 # save dirstate for rollback
684 try:
689 try:
685 ds = self.opener("dirstate").read()
690 ds = self.opener("dirstate").read()
686 except IOError:
691 except IOError:
687 ds = ""
692 ds = ""
688 self.opener("journal.dirstate", "w").write(ds)
693 self.opener("journal.dirstate", "w").write(ds)
689 self.opener("journal.branch", "w").write(
694 self.opener("journal.branch", "w").write(
690 encoding.fromlocal(self.dirstate.branch()))
695 encoding.fromlocal(self.dirstate.branch()))
691 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
696 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
692
697
693 bkname = self.join('bookmarks')
698 bkname = self.join('bookmarks')
694 if os.path.exists(bkname):
699 if os.path.exists(bkname):
695 util.copyfile(bkname, self.join('journal.bookmarks'))
700 util.copyfile(bkname, self.join('journal.bookmarks'))
696 else:
701 else:
697 self.opener('journal.bookmarks', 'w').write('')
702 self.opener('journal.bookmarks', 'w').write('')
698
703
699 return (self.sjoin('journal'), self.join('journal.dirstate'),
704 return (self.sjoin('journal'), self.join('journal.dirstate'),
700 self.join('journal.branch'), self.join('journal.desc'),
705 self.join('journal.branch'), self.join('journal.desc'),
701 self.join('journal.bookmarks'))
706 self.join('journal.bookmarks'))
702
707
703 def recover(self):
708 def recover(self):
704 lock = self.lock()
709 lock = self.lock()
705 try:
710 try:
706 if os.path.exists(self.sjoin("journal")):
711 if os.path.exists(self.sjoin("journal")):
707 self.ui.status(_("rolling back interrupted transaction\n"))
712 self.ui.status(_("rolling back interrupted transaction\n"))
708 transaction.rollback(self.sopener, self.sjoin("journal"),
713 transaction.rollback(self.sopener, self.sjoin("journal"),
709 self.ui.warn)
714 self.ui.warn)
710 self.invalidate()
715 self.invalidate()
711 return True
716 return True
712 else:
717 else:
713 self.ui.warn(_("no interrupted transaction available\n"))
718 self.ui.warn(_("no interrupted transaction available\n"))
714 return False
719 return False
715 finally:
720 finally:
716 lock.release()
721 lock.release()
717
722
718 def rollback(self, dryrun=False):
723 def rollback(self, dryrun=False):
719 wlock = lock = None
724 wlock = lock = None
720 try:
725 try:
721 wlock = self.wlock()
726 wlock = self.wlock()
722 lock = self.lock()
727 lock = self.lock()
723 if os.path.exists(self.sjoin("undo")):
728 if os.path.exists(self.sjoin("undo")):
724 try:
729 try:
725 args = self.opener("undo.desc", "r").read().splitlines()
730 args = self.opener("undo.desc", "r").read().splitlines()
726 if len(args) >= 3 and self.ui.verbose:
731 if len(args) >= 3 and self.ui.verbose:
727 desc = _("repository tip rolled back to revision %s"
732 desc = _("repository tip rolled back to revision %s"
728 " (undo %s: %s)\n") % (
733 " (undo %s: %s)\n") % (
729 int(args[0]) - 1, args[1], args[2])
734 int(args[0]) - 1, args[1], args[2])
730 elif len(args) >= 2:
735 elif len(args) >= 2:
731 desc = _("repository tip rolled back to revision %s"
736 desc = _("repository tip rolled back to revision %s"
732 " (undo %s)\n") % (
737 " (undo %s)\n") % (
733 int(args[0]) - 1, args[1])
738 int(args[0]) - 1, args[1])
734 except IOError:
739 except IOError:
735 desc = _("rolling back unknown transaction\n")
740 desc = _("rolling back unknown transaction\n")
736 self.ui.status(desc)
741 self.ui.status(desc)
737 if dryrun:
742 if dryrun:
738 return
743 return
739 transaction.rollback(self.sopener, self.sjoin("undo"),
744 transaction.rollback(self.sopener, self.sjoin("undo"),
740 self.ui.warn)
745 self.ui.warn)
741 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
746 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
742 if os.path.exists(self.join('undo.bookmarks')):
747 if os.path.exists(self.join('undo.bookmarks')):
743 util.rename(self.join('undo.bookmarks'),
748 util.rename(self.join('undo.bookmarks'),
744 self.join('bookmarks'))
749 self.join('bookmarks'))
745 try:
750 try:
746 branch = self.opener("undo.branch").read()
751 branch = self.opener("undo.branch").read()
747 self.dirstate.setbranch(branch)
752 self.dirstate.setbranch(branch)
748 except IOError:
753 except IOError:
749 self.ui.warn(_("named branch could not be reset, "
754 self.ui.warn(_("named branch could not be reset, "
750 "current branch is still: %s\n")
755 "current branch is still: %s\n")
751 % self.dirstate.branch())
756 % self.dirstate.branch())
752 self.invalidate()
757 self.invalidate()
753 self.dirstate.invalidate()
758 self.dirstate.invalidate()
754 self.destroyed()
759 self.destroyed()
755 parents = tuple([p.rev() for p in self.parents()])
760 parents = tuple([p.rev() for p in self.parents()])
756 if len(parents) > 1:
761 if len(parents) > 1:
757 self.ui.status(_("working directory now based on "
762 self.ui.status(_("working directory now based on "
758 "revisions %d and %d\n") % parents)
763 "revisions %d and %d\n") % parents)
759 else:
764 else:
760 self.ui.status(_("working directory now based on "
765 self.ui.status(_("working directory now based on "
761 "revision %d\n") % parents)
766 "revision %d\n") % parents)
762 else:
767 else:
763 self.ui.warn(_("no rollback information available\n"))
768 self.ui.warn(_("no rollback information available\n"))
764 return 1
769 return 1
765 finally:
770 finally:
766 release(lock, wlock)
771 release(lock, wlock)
767
772
768 def invalidatecaches(self):
773 def invalidatecaches(self):
769 self._tags = None
774 self._tags = None
770 self._tagtypes = None
775 self._tagtypes = None
771 self.nodetagscache = None
776 self.nodetagscache = None
772 self._branchcache = None # in UTF-8
777 self._branchcache = None # in UTF-8
773 self._branchcachetip = None
778 self._branchcachetip = None
774
779
775 def invalidate(self):
780 def invalidate(self):
776 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
781 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
777 if a in self.__dict__:
782 if a in self.__dict__:
778 delattr(self, a)
783 delattr(self, a)
779 self.invalidatecaches()
784 self.invalidatecaches()
780
785
781 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
786 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
782 try:
787 try:
783 l = lock.lock(lockname, 0, releasefn, desc=desc)
788 l = lock.lock(lockname, 0, releasefn, desc=desc)
784 except error.LockHeld, inst:
789 except error.LockHeld, inst:
785 if not wait:
790 if not wait:
786 raise
791 raise
787 self.ui.warn(_("waiting for lock on %s held by %r\n") %
792 self.ui.warn(_("waiting for lock on %s held by %r\n") %
788 (desc, inst.locker))
793 (desc, inst.locker))
789 # default to 600 seconds timeout
794 # default to 600 seconds timeout
790 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
795 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
791 releasefn, desc=desc)
796 releasefn, desc=desc)
792 if acquirefn:
797 if acquirefn:
793 acquirefn()
798 acquirefn()
794 return l
799 return l
795
800
796 def lock(self, wait=True):
801 def lock(self, wait=True):
797 '''Lock the repository store (.hg/store) and return a weak reference
802 '''Lock the repository store (.hg/store) and return a weak reference
798 to the lock. Use this before modifying the store (e.g. committing or
803 to the lock. Use this before modifying the store (e.g. committing or
799 stripping). If you are opening a transaction, get a lock as well.)'''
804 stripping). If you are opening a transaction, get a lock as well.)'''
800 l = self._lockref and self._lockref()
805 l = self._lockref and self._lockref()
801 if l is not None and l.held:
806 if l is not None and l.held:
802 l.lock()
807 l.lock()
803 return l
808 return l
804
809
805 l = self._lock(self.sjoin("lock"), wait, self.store.write,
810 l = self._lock(self.sjoin("lock"), wait, self.store.write,
806 self.invalidate, _('repository %s') % self.origroot)
811 self.invalidate, _('repository %s') % self.origroot)
807 self._lockref = weakref.ref(l)
812 self._lockref = weakref.ref(l)
808 return l
813 return l
809
814
810 def wlock(self, wait=True):
815 def wlock(self, wait=True):
811 '''Lock the non-store parts of the repository (everything under
816 '''Lock the non-store parts of the repository (everything under
812 .hg except .hg/store) and return a weak reference to the lock.
817 .hg except .hg/store) and return a weak reference to the lock.
813 Use this before modifying files in .hg.'''
818 Use this before modifying files in .hg.'''
814 l = self._wlockref and self._wlockref()
819 l = self._wlockref and self._wlockref()
815 if l is not None and l.held:
820 if l is not None and l.held:
816 l.lock()
821 l.lock()
817 return l
822 return l
818
823
819 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
824 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
820 self.dirstate.invalidate, _('working directory of %s') %
825 self.dirstate.invalidate, _('working directory of %s') %
821 self.origroot)
826 self.origroot)
822 self._wlockref = weakref.ref(l)
827 self._wlockref = weakref.ref(l)
823 return l
828 return l
824
829
825 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
830 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
826 """
831 """
827 commit an individual file as part of a larger transaction
832 commit an individual file as part of a larger transaction
828 """
833 """
829
834
830 fname = fctx.path()
835 fname = fctx.path()
831 text = fctx.data()
836 text = fctx.data()
832 flog = self.file(fname)
837 flog = self.file(fname)
833 fparent1 = manifest1.get(fname, nullid)
838 fparent1 = manifest1.get(fname, nullid)
834 fparent2 = fparent2o = manifest2.get(fname, nullid)
839 fparent2 = fparent2o = manifest2.get(fname, nullid)
835
840
836 meta = {}
841 meta = {}
837 copy = fctx.renamed()
842 copy = fctx.renamed()
838 if copy and copy[0] != fname:
843 if copy and copy[0] != fname:
839 # Mark the new revision of this file as a copy of another
844 # Mark the new revision of this file as a copy of another
840 # file. This copy data will effectively act as a parent
845 # file. This copy data will effectively act as a parent
841 # of this new revision. If this is a merge, the first
846 # of this new revision. If this is a merge, the first
842 # parent will be the nullid (meaning "look up the copy data")
847 # parent will be the nullid (meaning "look up the copy data")
843 # and the second one will be the other parent. For example:
848 # and the second one will be the other parent. For example:
844 #
849 #
845 # 0 --- 1 --- 3 rev1 changes file foo
850 # 0 --- 1 --- 3 rev1 changes file foo
846 # \ / rev2 renames foo to bar and changes it
851 # \ / rev2 renames foo to bar and changes it
847 # \- 2 -/ rev3 should have bar with all changes and
852 # \- 2 -/ rev3 should have bar with all changes and
848 # should record that bar descends from
853 # should record that bar descends from
849 # bar in rev2 and foo in rev1
854 # bar in rev2 and foo in rev1
850 #
855 #
851 # this allows this merge to succeed:
856 # this allows this merge to succeed:
852 #
857 #
853 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
858 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
854 # \ / merging rev3 and rev4 should use bar@rev2
859 # \ / merging rev3 and rev4 should use bar@rev2
855 # \- 2 --- 4 as the merge base
860 # \- 2 --- 4 as the merge base
856 #
861 #
857
862
858 cfname = copy[0]
863 cfname = copy[0]
859 crev = manifest1.get(cfname)
864 crev = manifest1.get(cfname)
860 newfparent = fparent2
865 newfparent = fparent2
861
866
862 if manifest2: # branch merge
867 if manifest2: # branch merge
863 if fparent2 == nullid or crev is None: # copied on remote side
868 if fparent2 == nullid or crev is None: # copied on remote side
864 if cfname in manifest2:
869 if cfname in manifest2:
865 crev = manifest2[cfname]
870 crev = manifest2[cfname]
866 newfparent = fparent1
871 newfparent = fparent1
867
872
868 # find source in nearest ancestor if we've lost track
873 # find source in nearest ancestor if we've lost track
869 if not crev:
874 if not crev:
870 self.ui.debug(" %s: searching for copy revision for %s\n" %
875 self.ui.debug(" %s: searching for copy revision for %s\n" %
871 (fname, cfname))
876 (fname, cfname))
872 for ancestor in self[None].ancestors():
877 for ancestor in self[None].ancestors():
873 if cfname in ancestor:
878 if cfname in ancestor:
874 crev = ancestor[cfname].filenode()
879 crev = ancestor[cfname].filenode()
875 break
880 break
876
881
877 if crev:
882 if crev:
878 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
883 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
879 meta["copy"] = cfname
884 meta["copy"] = cfname
880 meta["copyrev"] = hex(crev)
885 meta["copyrev"] = hex(crev)
881 fparent1, fparent2 = nullid, newfparent
886 fparent1, fparent2 = nullid, newfparent
882 else:
887 else:
883 self.ui.warn(_("warning: can't find ancestor for '%s' "
888 self.ui.warn(_("warning: can't find ancestor for '%s' "
884 "copied from '%s'!\n") % (fname, cfname))
889 "copied from '%s'!\n") % (fname, cfname))
885
890
886 elif fparent2 != nullid:
891 elif fparent2 != nullid:
887 # is one parent an ancestor of the other?
892 # is one parent an ancestor of the other?
888 fparentancestor = flog.ancestor(fparent1, fparent2)
893 fparentancestor = flog.ancestor(fparent1, fparent2)
889 if fparentancestor == fparent1:
894 if fparentancestor == fparent1:
890 fparent1, fparent2 = fparent2, nullid
895 fparent1, fparent2 = fparent2, nullid
891 elif fparentancestor == fparent2:
896 elif fparentancestor == fparent2:
892 fparent2 = nullid
897 fparent2 = nullid
893
898
894 # is the file changed?
899 # is the file changed?
895 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
900 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
896 changelist.append(fname)
901 changelist.append(fname)
897 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
902 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
898
903
899 # are just the flags changed during merge?
904 # are just the flags changed during merge?
900 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
905 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
901 changelist.append(fname)
906 changelist.append(fname)
902
907
903 return fparent1
908 return fparent1
904
909
905 def commit(self, text="", user=None, date=None, match=None, force=False,
910 def commit(self, text="", user=None, date=None, match=None, force=False,
906 editor=False, extra={}):
911 editor=False, extra={}):
907 """Add a new revision to current repository.
912 """Add a new revision to current repository.
908
913
909 Revision information is gathered from the working directory,
914 Revision information is gathered from the working directory,
910 match can be used to filter the committed files. If editor is
915 match can be used to filter the committed files. If editor is
911 supplied, it is called to get a commit message.
916 supplied, it is called to get a commit message.
912 """
917 """
913
918
914 def fail(f, msg):
919 def fail(f, msg):
915 raise util.Abort('%s: %s' % (f, msg))
920 raise util.Abort('%s: %s' % (f, msg))
916
921
917 if not match:
922 if not match:
918 match = matchmod.always(self.root, '')
923 match = matchmod.always(self.root, '')
919
924
920 if not force:
925 if not force:
921 vdirs = []
926 vdirs = []
922 match.dir = vdirs.append
927 match.dir = vdirs.append
923 match.bad = fail
928 match.bad = fail
924
929
925 wlock = self.wlock()
930 wlock = self.wlock()
926 try:
931 try:
927 wctx = self[None]
932 wctx = self[None]
928 merge = len(wctx.parents()) > 1
933 merge = len(wctx.parents()) > 1
929
934
930 if (not force and merge and match and
935 if (not force and merge and match and
931 (match.files() or match.anypats())):
936 (match.files() or match.anypats())):
932 raise util.Abort(_('cannot partially commit a merge '
937 raise util.Abort(_('cannot partially commit a merge '
933 '(do not specify files or patterns)'))
938 '(do not specify files or patterns)'))
934
939
935 changes = self.status(match=match, clean=force)
940 changes = self.status(match=match, clean=force)
936 if force:
941 if force:
937 changes[0].extend(changes[6]) # mq may commit unchanged files
942 changes[0].extend(changes[6]) # mq may commit unchanged files
938
943
939 # check subrepos
944 # check subrepos
940 subs = []
945 subs = []
941 removedsubs = set()
946 removedsubs = set()
942 for p in wctx.parents():
947 for p in wctx.parents():
943 removedsubs.update(s for s in p.substate if match(s))
948 removedsubs.update(s for s in p.substate if match(s))
944 for s in wctx.substate:
949 for s in wctx.substate:
945 removedsubs.discard(s)
950 removedsubs.discard(s)
946 if match(s) and wctx.sub(s).dirty():
951 if match(s) and wctx.sub(s).dirty():
947 subs.append(s)
952 subs.append(s)
948 if (subs or removedsubs):
953 if (subs or removedsubs):
949 if (not match('.hgsub') and
954 if (not match('.hgsub') and
950 '.hgsub' in (wctx.modified() + wctx.added())):
955 '.hgsub' in (wctx.modified() + wctx.added())):
951 raise util.Abort(_("can't commit subrepos without .hgsub"))
956 raise util.Abort(_("can't commit subrepos without .hgsub"))
952 if '.hgsubstate' not in changes[0]:
957 if '.hgsubstate' not in changes[0]:
953 changes[0].insert(0, '.hgsubstate')
958 changes[0].insert(0, '.hgsubstate')
954
959
955 if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
960 if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
956 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
961 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
957 if changedsubs:
962 if changedsubs:
958 raise util.Abort(_("uncommitted changes in subrepo %s")
963 raise util.Abort(_("uncommitted changes in subrepo %s")
959 % changedsubs[0])
964 % changedsubs[0])
960
965
961 # make sure all explicit patterns are matched
966 # make sure all explicit patterns are matched
962 if not force and match.files():
967 if not force and match.files():
963 matched = set(changes[0] + changes[1] + changes[2])
968 matched = set(changes[0] + changes[1] + changes[2])
964
969
965 for f in match.files():
970 for f in match.files():
966 if f == '.' or f in matched or f in wctx.substate:
971 if f == '.' or f in matched or f in wctx.substate:
967 continue
972 continue
968 if f in changes[3]: # missing
973 if f in changes[3]: # missing
969 fail(f, _('file not found!'))
974 fail(f, _('file not found!'))
970 if f in vdirs: # visited directory
975 if f in vdirs: # visited directory
971 d = f + '/'
976 d = f + '/'
972 for mf in matched:
977 for mf in matched:
973 if mf.startswith(d):
978 if mf.startswith(d):
974 break
979 break
975 else:
980 else:
976 fail(f, _("no match under directory!"))
981 fail(f, _("no match under directory!"))
977 elif f not in self.dirstate:
982 elif f not in self.dirstate:
978 fail(f, _("file not tracked!"))
983 fail(f, _("file not tracked!"))
979
984
980 if (not force and not extra.get("close") and not merge
985 if (not force and not extra.get("close") and not merge
981 and not (changes[0] or changes[1] or changes[2])
986 and not (changes[0] or changes[1] or changes[2])
982 and wctx.branch() == wctx.p1().branch()):
987 and wctx.branch() == wctx.p1().branch()):
983 return None
988 return None
984
989
985 ms = mergemod.mergestate(self)
990 ms = mergemod.mergestate(self)
986 for f in changes[0]:
991 for f in changes[0]:
987 if f in ms and ms[f] == 'u':
992 if f in ms and ms[f] == 'u':
988 raise util.Abort(_("unresolved merge conflicts "
993 raise util.Abort(_("unresolved merge conflicts "
989 "(see hg help resolve)"))
994 "(see hg help resolve)"))
990
995
991 cctx = context.workingctx(self, text, user, date, extra, changes)
996 cctx = context.workingctx(self, text, user, date, extra, changes)
992 if editor:
997 if editor:
993 cctx._text = editor(self, cctx, subs)
998 cctx._text = editor(self, cctx, subs)
994 edited = (text != cctx._text)
999 edited = (text != cctx._text)
995
1000
996 # commit subs
1001 # commit subs
997 if subs or removedsubs:
1002 if subs or removedsubs:
998 state = wctx.substate.copy()
1003 state = wctx.substate.copy()
999 for s in sorted(subs):
1004 for s in sorted(subs):
1000 sub = wctx.sub(s)
1005 sub = wctx.sub(s)
1001 self.ui.status(_('committing subrepository %s\n') %
1006 self.ui.status(_('committing subrepository %s\n') %
1002 subrepo.subrelpath(sub))
1007 subrepo.subrelpath(sub))
1003 sr = sub.commit(cctx._text, user, date)
1008 sr = sub.commit(cctx._text, user, date)
1004 state[s] = (state[s][0], sr)
1009 state[s] = (state[s][0], sr)
1005 subrepo.writestate(self, state)
1010 subrepo.writestate(self, state)
1006
1011
1007 # Save commit message in case this transaction gets rolled back
1012 # Save commit message in case this transaction gets rolled back
1008 # (e.g. by a pretxncommit hook). Leave the content alone on
1013 # (e.g. by a pretxncommit hook). Leave the content alone on
1009 # the assumption that the user will use the same editor again.
1014 # the assumption that the user will use the same editor again.
1010 msgfile = self.opener('last-message.txt', 'wb')
1015 msgfile = self.opener('last-message.txt', 'wb')
1011 msgfile.write(cctx._text)
1016 msgfile.write(cctx._text)
1012 msgfile.close()
1017 msgfile.close()
1013
1018
1014 p1, p2 = self.dirstate.parents()
1019 p1, p2 = self.dirstate.parents()
1015 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1020 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1016 try:
1021 try:
1017 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1022 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1018 ret = self.commitctx(cctx, True)
1023 ret = self.commitctx(cctx, True)
1019 except:
1024 except:
1020 if edited:
1025 if edited:
1021 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
1026 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
1022 self.ui.write(
1027 self.ui.write(
1023 _('note: commit message saved in %s\n') % msgfn)
1028 _('note: commit message saved in %s\n') % msgfn)
1024 raise
1029 raise
1025
1030
1026 # update bookmarks, dirstate and mergestate
1031 # update bookmarks, dirstate and mergestate
1027 bookmarks.update(self, p1, ret)
1032 bookmarks.update(self, p1, ret)
1028 for f in changes[0] + changes[1]:
1033 for f in changes[0] + changes[1]:
1029 self.dirstate.normal(f)
1034 self.dirstate.normal(f)
1030 for f in changes[2]:
1035 for f in changes[2]:
1031 self.dirstate.forget(f)
1036 self.dirstate.forget(f)
1032 self.dirstate.setparents(ret)
1037 self.dirstate.setparents(ret)
1033 ms.reset()
1038 ms.reset()
1034 finally:
1039 finally:
1035 wlock.release()
1040 wlock.release()
1036
1041
1037 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1042 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1038 return ret
1043 return ret
1039
1044
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        Returns the node of the new changeset.  When error is true, an
        IOError while committing an individual file is fatal; otherwise a
        vanished file (ENOENT) is silently recorded as removed.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        # copy p1's manifest; it is updated in place below and becomes the
        # manifest of the new revision
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # a weak proxy so the transaction is not kept alive by the
            # revlogs that receive it
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        # file disappeared while committing: treat it as
                        # removed rather than aborting the whole commit
                        removed.append(f)

            # update manifest
            m1.update(new)
            # only report files removed that actually existed in a parent
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # lazily flush pending changelog data so pretxncommit hooks can
            # see the new revision; returns the repo root when data exists
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1107
1112
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.

        # drop every in-memory cache so it is rebuilt against the new
        # (post-destruction) history on next access
        self.invalidatecaches()
1126
1131
1127 def walk(self, match, node=None):
1132 def walk(self, match, node=None):
1128 '''
1133 '''
1129 walk recursively through the directory tree or a given
1134 walk recursively through the directory tree or a given
1130 changeset, finding all files matched by the match
1135 changeset, finding all files matched by the match
1131 function
1136 function
1132 '''
1137 '''
1133 return self[node].walk(match)
1138 return self[node].walk(match)
1134
1139
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean)
        """

        def mfmatches(ctx):
            # manifest of ctx restricted to the files accepted by match
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # node1/node2 may already be changectx objects; normalize both
        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        # fast path: comparing the working dir against its own parent
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # only warn about files that are not part of ctx1
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # recompute modified/added/clean by walking mf2 against mf1;
            # whatever is left in mf1 afterwards was removed
            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    # fold each subrepo's status into the corresponding
                    # lists, prefixing file names with the subrepo path
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
1264
1269
1265 def heads(self, start=None):
1270 def heads(self, start=None):
1266 heads = self.changelog.heads(start)
1271 heads = self.changelog.heads(start)
1267 # sort the output in rev descending order
1272 # sort the output in rev descending order
1268 return sorted(heads, key=self.changelog.rev, reverse=True)
1273 return sorted(heads, key=self.changelog.rev, reverse=True)
1269
1274
1270 def branchheads(self, branch=None, start=None, closed=False):
1275 def branchheads(self, branch=None, start=None, closed=False):
1271 '''return a (possibly filtered) list of heads for the given branch
1276 '''return a (possibly filtered) list of heads for the given branch
1272
1277
1273 Heads are returned in topological order, from newest to oldest.
1278 Heads are returned in topological order, from newest to oldest.
1274 If branch is None, use the dirstate branch.
1279 If branch is None, use the dirstate branch.
1275 If start is not None, return only heads reachable from start.
1280 If start is not None, return only heads reachable from start.
1276 If closed is True, return heads that are marked as closed as well.
1281 If closed is True, return heads that are marked as closed as well.
1277 '''
1282 '''
1278 if branch is None:
1283 if branch is None:
1279 branch = self[None].branch()
1284 branch = self[None].branch()
1280 branches = self.branchmap()
1285 branches = self.branchmap()
1281 if branch not in branches:
1286 if branch not in branches:
1282 return []
1287 return []
1283 # the cache returns heads ordered lowest to highest
1288 # the cache returns heads ordered lowest to highest
1284 bheads = list(reversed(branches[branch]))
1289 bheads = list(reversed(branches[branch]))
1285 if start is not None:
1290 if start is not None:
1286 # filter out the heads that cannot be reached from startrev
1291 # filter out the heads that cannot be reached from startrev
1287 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1292 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1288 bheads = [h for h in bheads if h in fbheads]
1293 bheads = [h for h in bheads if h in fbheads]
1289 if not closed:
1294 if not closed:
1290 bheads = [h for h in bheads if
1295 bheads = [h for h in bheads if
1291 ('close' not in self.changelog.read(h)[5])]
1296 ('close' not in self.changelog.read(h)[5])]
1292 return bheads
1297 return bheads
1293
1298
1294 def branches(self, nodes):
1299 def branches(self, nodes):
1295 if not nodes:
1300 if not nodes:
1296 nodes = [self.changelog.tip()]
1301 nodes = [self.changelog.tip()]
1297 b = []
1302 b = []
1298 for n in nodes:
1303 for n in nodes:
1299 t = n
1304 t = n
1300 while 1:
1305 while 1:
1301 p = self.changelog.parents(n)
1306 p = self.changelog.parents(n)
1302 if p[1] != nullid or p[0] == nullid:
1307 if p[1] != nullid or p[0] == nullid:
1303 b.append((t, n, p[0], p[1]))
1308 b.append((t, n, p[0], p[1]))
1304 break
1309 break
1305 n = p[0]
1310 n = p[0]
1306 return b
1311 return b
1307
1312
1308 def between(self, pairs):
1313 def between(self, pairs):
1309 r = []
1314 r = []
1310
1315
1311 for top, bottom in pairs:
1316 for top, bottom in pairs:
1312 n, l, i = top, [], 0
1317 n, l, i = top, [], 0
1313 f = 1
1318 f = 1
1314
1319
1315 while n != bottom and n != nullid:
1320 while n != bottom and n != nullid:
1316 p = self.changelog.parents(n)[0]
1321 p = self.changelog.parents(n)[0]
1317 if i == f:
1322 if i == f:
1318 l.append(n)
1323 l.append(n)
1319 f = f * 2
1324 f = f * 2
1320 n = p
1325 n = p
1321 i += 1
1326 i += 1
1322
1327
1323 r.append(l)
1328 r.append(l)
1324
1329
1325 return r
1330 return r
1326
1331
    def pull(self, remote, heads=None, force=False):
        """Pull changes from a remote repository into this one.

        heads restricts the pull to ancestors of the given nodes; force
        allows pulling even when the repositories look unrelated.
        Returns addchangegroup()'s result, or 0 when nothing was fetched.
        Also fast-forwards any local bookmarks that moved on the remote.
        """
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and fetch == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        changed = False
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], self._bookmarks[k]
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl.rev() >= cr.rev():
                        # local bookmark already at or ahead of remote
                        continue
                    if cr in cl.descendants():
                        # remote position is a fast-forward of ours
                        self._bookmarks[k] = cr.node()
                        changed = True
                        self.ui.status(_("updating bookmark %s\n") % k)
                    else:
                        self.ui.warn(_("not updating divergent"
                                       " bookmark %s\n") % k)
        if changed:
            bookmarks.write(self)

        return result
1378
1383
1379 def checkpush(self, force, revs):
1384 def checkpush(self, force, revs):
1380 """Extensions can override this function if additional checks have
1385 """Extensions can override this function if additional checks have
1381 to be performed before pushing, or call it if they override push
1386 to be performed before pushing, or call it if they override push
1382 command.
1387 command.
1383 """
1388 """
1384 pass
1389 pass
1385
1390
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            # addchangegroup path requires us to hold the remote's lock
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            # prepush returned no changegroup: propagate its head info
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    # only advance the remote bookmark when our local
                    # position is a descendant of the remote one
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
1447
1452
1448 def changegroupinfo(self, nodes, source):
1453 def changegroupinfo(self, nodes, source):
1449 if self.ui.verbose or source == 'bundle':
1454 if self.ui.verbose or source == 'bundle':
1450 self.ui.status(_("%d changesets found\n") % len(nodes))
1455 self.ui.status(_("%d changesets found\n") % len(nodes))
1451 if self.ui.debugflag:
1456 if self.ui.debugflag:
1452 self.ui.debug("list of changesets:\n")
1457 self.ui.debug("list of changesets:\n")
1453 for node in nodes:
1458 for node in nodes:
1454 self.ui.debug("%s\n" % hex(node))
1459 self.ui.debug("%s\n" % hex(node))
1455
1460
1456 def changegroupsubset(self, bases, heads, source, extranodes=None):
1461 def changegroupsubset(self, bases, heads, source, extranodes=None):
1457 """Compute a changegroup consisting of all the nodes that are
1462 """Compute a changegroup consisting of all the nodes that are
1458 descendents of any of the bases and ancestors of any of the heads.
1463 descendents of any of the bases and ancestors of any of the heads.
1459 Return a chunkbuffer object whose read() method will return
1464 Return a chunkbuffer object whose read() method will return
1460 successive changegroup chunks.
1465 successive changegroup chunks.
1461
1466
1462 It is fairly complex as determining which filenodes and which
1467 It is fairly complex as determining which filenodes and which
1463 manifest nodes need to be included for the changeset to be complete
1468 manifest nodes need to be included for the changeset to be complete
1464 is non-trivial.
1469 is non-trivial.
1465
1470
1466 Another wrinkle is doing the reverse, figuring out which changeset in
1471 Another wrinkle is doing the reverse, figuring out which changeset in
1467 the changegroup a particular filenode or manifestnode belongs to.
1472 the changegroup a particular filenode or manifestnode belongs to.
1468
1473
1469 The caller can specify some nodes that must be included in the
1474 The caller can specify some nodes that must be included in the
1470 changegroup using the extranodes argument. It should be a dict
1475 changegroup using the extranodes argument. It should be a dict
1471 where the keys are the filenames (or 1 for the manifest), and the
1476 where the keys are the filenames (or 1 for the manifest), and the
1472 values are lists of (node, linknode) tuples, where node is a wanted
1477 values are lists of (node, linknode) tuples, where node is a wanted
1473 node and linknode is the changelog node that should be transmitted as
1478 node and linknode is the changelog node that should be transmitted as
1474 the linkrev.
1479 the linkrev.
1475 """
1480 """
1476
1481
1477 # Set up some initial variables
1482 # Set up some initial variables
1478 # Make it easy to refer to self.changelog
1483 # Make it easy to refer to self.changelog
1479 cl = self.changelog
1484 cl = self.changelog
1480 # Compute the list of changesets in this changegroup.
1485 # Compute the list of changesets in this changegroup.
1481 # Some bases may turn out to be superfluous, and some heads may be
1486 # Some bases may turn out to be superfluous, and some heads may be
1482 # too. nodesbetween will return the minimal set of bases and heads
1487 # too. nodesbetween will return the minimal set of bases and heads
1483 # necessary to re-create the changegroup.
1488 # necessary to re-create the changegroup.
1484 if not bases:
1489 if not bases:
1485 bases = [nullid]
1490 bases = [nullid]
1486 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1491 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1487
1492
1488 if extranodes is None:
1493 if extranodes is None:
1489 # can we go through the fast path ?
1494 # can we go through the fast path ?
1490 heads.sort()
1495 heads.sort()
1491 allheads = self.heads()
1496 allheads = self.heads()
1492 allheads.sort()
1497 allheads.sort()
1493 if heads == allheads:
1498 if heads == allheads:
1494 return self._changegroup(msng_cl_lst, source)
1499 return self._changegroup(msng_cl_lst, source)
1495
1500
1496 # slow path
1501 # slow path
1497 self.hook('preoutgoing', throw=True, source=source)
1502 self.hook('preoutgoing', throw=True, source=source)
1498
1503
1499 self.changegroupinfo(msng_cl_lst, source)
1504 self.changegroupinfo(msng_cl_lst, source)
1500
1505
1501 # We assume that all ancestors of bases are known
1506 # We assume that all ancestors of bases are known
1502 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1507 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1503
1508
1504 # Make it easy to refer to self.manifest
1509 # Make it easy to refer to self.manifest
1505 mnfst = self.manifest
1510 mnfst = self.manifest
1506 # We don't know which manifests are missing yet
1511 # We don't know which manifests are missing yet
1507 msng_mnfst_set = {}
1512 msng_mnfst_set = {}
1508 # Nor do we know which filenodes are missing.
1513 # Nor do we know which filenodes are missing.
1509 msng_filenode_set = {}
1514 msng_filenode_set = {}
1510
1515
1511 # A changeset always belongs to itself, so the changenode lookup
1516 # A changeset always belongs to itself, so the changenode lookup
1512 # function for a changenode is identity.
1517 # function for a changenode is identity.
1513 def identity(x):
1518 def identity(x):
1514 return x
1519 return x
1515
1520
1516 # A function generating function that sets up the initial environment
1521 # A function generating function that sets up the initial environment
1517 # the inner function.
1522 # the inner function.
1518 def filenode_collector(changedfiles):
1523 def filenode_collector(changedfiles):
1519 # This gathers information from each manifestnode included in the
1524 # This gathers information from each manifestnode included in the
1520 # changegroup about which filenodes the manifest node references
1525 # changegroup about which filenodes the manifest node references
1521 # so we can include those in the changegroup too.
1526 # so we can include those in the changegroup too.
1522 #
1527 #
1523 # It also remembers which changenode each filenode belongs to. It
1528 # It also remembers which changenode each filenode belongs to. It
1524 # does this by assuming the a filenode belongs to the changenode
1529 # does this by assuming the a filenode belongs to the changenode
1525 # the first manifest that references it belongs to.
1530 # the first manifest that references it belongs to.
1526 def collect_msng_filenodes(mnfstnode):
1531 def collect_msng_filenodes(mnfstnode):
1527 r = mnfst.rev(mnfstnode)
1532 r = mnfst.rev(mnfstnode)
1528 if mnfst.deltaparent(r) in mnfst.parentrevs(r):
1533 if mnfst.deltaparent(r) in mnfst.parentrevs(r):
1529 # If the previous rev is one of the parents,
1534 # If the previous rev is one of the parents,
1530 # we only need to see a diff.
1535 # we only need to see a diff.
1531 deltamf = mnfst.readdelta(mnfstnode)
1536 deltamf = mnfst.readdelta(mnfstnode)
1532 # For each line in the delta
1537 # For each line in the delta
1533 for f, fnode in deltamf.iteritems():
1538 for f, fnode in deltamf.iteritems():
1534 # And if the file is in the list of files we care
1539 # And if the file is in the list of files we care
1535 # about.
1540 # about.
1536 if f in changedfiles:
1541 if f in changedfiles:
1537 # Get the changenode this manifest belongs to
1542 # Get the changenode this manifest belongs to
1538 clnode = msng_mnfst_set[mnfstnode]
1543 clnode = msng_mnfst_set[mnfstnode]
1539 # Create the set of filenodes for the file if
1544 # Create the set of filenodes for the file if
1540 # there isn't one already.
1545 # there isn't one already.
1541 ndset = msng_filenode_set.setdefault(f, {})
1546 ndset = msng_filenode_set.setdefault(f, {})
1542 # And set the filenode's changelog node to the
1547 # And set the filenode's changelog node to the
1543 # manifest's if it hasn't been set already.
1548 # manifest's if it hasn't been set already.
1544 ndset.setdefault(fnode, clnode)
1549 ndset.setdefault(fnode, clnode)
1545 else:
1550 else:
1546 # Otherwise we need a full manifest.
1551 # Otherwise we need a full manifest.
1547 m = mnfst.read(mnfstnode)
1552 m = mnfst.read(mnfstnode)
1548 # For every file in we care about.
1553 # For every file in we care about.
1549 for f in changedfiles:
1554 for f in changedfiles:
1550 fnode = m.get(f, None)
1555 fnode = m.get(f, None)
1551 # If it's in the manifest
1556 # If it's in the manifest
1552 if fnode is not None:
1557 if fnode is not None:
1553 # See comments above.
1558 # See comments above.
1554 clnode = msng_mnfst_set[mnfstnode]
1559 clnode = msng_mnfst_set[mnfstnode]
1555 ndset = msng_filenode_set.setdefault(f, {})
1560 ndset = msng_filenode_set.setdefault(f, {})
1556 ndset.setdefault(fnode, clnode)
1561 ndset.setdefault(fnode, clnode)
1557 return collect_msng_filenodes
1562 return collect_msng_filenodes
1558
1563
1559 # If we determine that a particular file or manifest node must be a
1564 # If we determine that a particular file or manifest node must be a
1560 # node that the recipient of the changegroup will already have, we can
1565 # node that the recipient of the changegroup will already have, we can
1561 # also assume the recipient will have all the parents. This function
1566 # also assume the recipient will have all the parents. This function
1562 # prunes them from the set of missing nodes.
1567 # prunes them from the set of missing nodes.
1563 def prune(revlog, missingnodes):
1568 def prune(revlog, missingnodes):
1564 hasset = set()
1569 hasset = set()
1565 # If a 'missing' filenode thinks it belongs to a changenode we
1570 # If a 'missing' filenode thinks it belongs to a changenode we
1566 # assume the recipient must have, then the recipient must have
1571 # assume the recipient must have, then the recipient must have
1567 # that filenode.
1572 # that filenode.
1568 for n in missingnodes:
1573 for n in missingnodes:
1569 clrev = revlog.linkrev(revlog.rev(n))
1574 clrev = revlog.linkrev(revlog.rev(n))
1570 if clrev in commonrevs:
1575 if clrev in commonrevs:
1571 hasset.add(n)
1576 hasset.add(n)
1572 for n in hasset:
1577 for n in hasset:
1573 missingnodes.pop(n, None)
1578 missingnodes.pop(n, None)
1574 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1579 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1575 missingnodes.pop(revlog.node(r), None)
1580 missingnodes.pop(revlog.node(r), None)
1576
1581
1577 # Add the nodes that were explicitly requested.
1582 # Add the nodes that were explicitly requested.
1578 def add_extra_nodes(name, nodes):
1583 def add_extra_nodes(name, nodes):
1579 if not extranodes or name not in extranodes:
1584 if not extranodes or name not in extranodes:
1580 return
1585 return
1581
1586
1582 for node, linknode in extranodes[name]:
1587 for node, linknode in extranodes[name]:
1583 if node not in nodes:
1588 if node not in nodes:
1584 nodes[node] = linknode
1589 nodes[node] = linknode
1585
1590
1586 # Now that we have all theses utility functions to help out and
1591 # Now that we have all theses utility functions to help out and
1587 # logically divide up the task, generate the group.
1592 # logically divide up the task, generate the group.
1588 def gengroup():
1593 def gengroup():
1589 # The set of changed files starts empty.
1594 # The set of changed files starts empty.
1590 changedfiles = set()
1595 changedfiles = set()
1591 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1596 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1592
1597
1593 # Create a changenode group generator that will call our functions
1598 # Create a changenode group generator that will call our functions
1594 # back to lookup the owning changenode and collect information.
1599 # back to lookup the owning changenode and collect information.
1595 group = cl.group(msng_cl_lst, identity, collect)
1600 group = cl.group(msng_cl_lst, identity, collect)
1596 for cnt, chnk in enumerate(group):
1601 for cnt, chnk in enumerate(group):
1597 yield chnk
1602 yield chnk
1598 # revlog.group yields three entries per node, so
1603 # revlog.group yields three entries per node, so
1599 # dividing by 3 gives an approximation of how many
1604 # dividing by 3 gives an approximation of how many
1600 # nodes have been processed.
1605 # nodes have been processed.
1601 self.ui.progress(_('bundling'), cnt / 3,
1606 self.ui.progress(_('bundling'), cnt / 3,
1602 unit=_('changesets'))
1607 unit=_('changesets'))
1603 changecount = cnt / 3
1608 changecount = cnt / 3
1604 self.ui.progress(_('bundling'), None)
1609 self.ui.progress(_('bundling'), None)
1605
1610
1606 prune(mnfst, msng_mnfst_set)
1611 prune(mnfst, msng_mnfst_set)
1607 add_extra_nodes(1, msng_mnfst_set)
1612 add_extra_nodes(1, msng_mnfst_set)
1608 msng_mnfst_lst = msng_mnfst_set.keys()
1613 msng_mnfst_lst = msng_mnfst_set.keys()
1609 # Sort the manifestnodes by revision number.
1614 # Sort the manifestnodes by revision number.
1610 msng_mnfst_lst.sort(key=mnfst.rev)
1615 msng_mnfst_lst.sort(key=mnfst.rev)
1611 # Create a generator for the manifestnodes that calls our lookup
1616 # Create a generator for the manifestnodes that calls our lookup
1612 # and data collection functions back.
1617 # and data collection functions back.
1613 group = mnfst.group(msng_mnfst_lst,
1618 group = mnfst.group(msng_mnfst_lst,
1614 lambda mnode: msng_mnfst_set[mnode],
1619 lambda mnode: msng_mnfst_set[mnode],
1615 filenode_collector(changedfiles))
1620 filenode_collector(changedfiles))
1616 efiles = {}
1621 efiles = {}
1617 for cnt, chnk in enumerate(group):
1622 for cnt, chnk in enumerate(group):
1618 if cnt % 3 == 1:
1623 if cnt % 3 == 1:
1619 mnode = chnk[:20]
1624 mnode = chnk[:20]
1620 efiles.update(mnfst.readdelta(mnode))
1625 efiles.update(mnfst.readdelta(mnode))
1621 yield chnk
1626 yield chnk
1622 # see above comment for why we divide by 3
1627 # see above comment for why we divide by 3
1623 self.ui.progress(_('bundling'), cnt / 3,
1628 self.ui.progress(_('bundling'), cnt / 3,
1624 unit=_('manifests'), total=changecount)
1629 unit=_('manifests'), total=changecount)
1625 self.ui.progress(_('bundling'), None)
1630 self.ui.progress(_('bundling'), None)
1626 efiles = len(efiles)
1631 efiles = len(efiles)
1627
1632
1628 # These are no longer needed, dereference and toss the memory for
1633 # These are no longer needed, dereference and toss the memory for
1629 # them.
1634 # them.
1630 msng_mnfst_lst = None
1635 msng_mnfst_lst = None
1631 msng_mnfst_set.clear()
1636 msng_mnfst_set.clear()
1632
1637
1633 if extranodes:
1638 if extranodes:
1634 for fname in extranodes:
1639 for fname in extranodes:
1635 if isinstance(fname, int):
1640 if isinstance(fname, int):
1636 continue
1641 continue
1637 msng_filenode_set.setdefault(fname, {})
1642 msng_filenode_set.setdefault(fname, {})
1638 changedfiles.add(fname)
1643 changedfiles.add(fname)
1639 # Go through all our files in order sorted by name.
1644 # Go through all our files in order sorted by name.
1640 for idx, fname in enumerate(sorted(changedfiles)):
1645 for idx, fname in enumerate(sorted(changedfiles)):
1641 filerevlog = self.file(fname)
1646 filerevlog = self.file(fname)
1642 if not len(filerevlog):
1647 if not len(filerevlog):
1643 raise util.Abort(_("empty or missing revlog for %s") % fname)
1648 raise util.Abort(_("empty or missing revlog for %s") % fname)
1644 # Toss out the filenodes that the recipient isn't really
1649 # Toss out the filenodes that the recipient isn't really
1645 # missing.
1650 # missing.
1646 missingfnodes = msng_filenode_set.pop(fname, {})
1651 missingfnodes = msng_filenode_set.pop(fname, {})
1647 prune(filerevlog, missingfnodes)
1652 prune(filerevlog, missingfnodes)
1648 add_extra_nodes(fname, missingfnodes)
1653 add_extra_nodes(fname, missingfnodes)
1649 # If any filenodes are left, generate the group for them,
1654 # If any filenodes are left, generate the group for them,
1650 # otherwise don't bother.
1655 # otherwise don't bother.
1651 if missingfnodes:
1656 if missingfnodes:
1652 yield changegroup.chunkheader(len(fname))
1657 yield changegroup.chunkheader(len(fname))
1653 yield fname
1658 yield fname
1654 # Sort the filenodes by their revision # (topological order)
1659 # Sort the filenodes by their revision # (topological order)
1655 nodeiter = list(missingfnodes)
1660 nodeiter = list(missingfnodes)
1656 nodeiter.sort(key=filerevlog.rev)
1661 nodeiter.sort(key=filerevlog.rev)
1657 # Create a group generator and only pass in a changenode
1662 # Create a group generator and only pass in a changenode
1658 # lookup function as we need to collect no information
1663 # lookup function as we need to collect no information
1659 # from filenodes.
1664 # from filenodes.
1660 group = filerevlog.group(nodeiter,
1665 group = filerevlog.group(nodeiter,
1661 lambda fnode: missingfnodes[fnode])
1666 lambda fnode: missingfnodes[fnode])
1662 for chnk in group:
1667 for chnk in group:
1663 # even though we print the same progress on
1668 # even though we print the same progress on
1664 # most loop iterations, put the progress call
1669 # most loop iterations, put the progress call
1665 # here so that time estimates (if any) can be updated
1670 # here so that time estimates (if any) can be updated
1666 self.ui.progress(
1671 self.ui.progress(
1667 _('bundling'), idx, item=fname,
1672 _('bundling'), idx, item=fname,
1668 unit=_('files'), total=efiles)
1673 unit=_('files'), total=efiles)
1669 yield chnk
1674 yield chnk
1670 # Signal that no more groups are left.
1675 # Signal that no more groups are left.
1671 yield changegroup.closechunk()
1676 yield changegroup.closechunk()
1672 self.ui.progress(_('bundling'), None)
1677 self.ui.progress(_('bundling'), None)
1673
1678
1674 if msng_cl_lst:
1679 if msng_cl_lst:
1675 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1680 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1676
1681
1677 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1682 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1678
1683
1679 def changegroup(self, basenodes, source):
1684 def changegroup(self, basenodes, source):
1680 # to avoid a race we use changegroupsubset() (issue1320)
1685 # to avoid a race we use changegroupsubset() (issue1320)
1681 return self.changegroupsubset(basenodes, self.heads(), source)
1686 return self.changegroupsubset(basenodes, self.heads(), source)
1682
1687
1683 def _changegroup(self, nodes, source):
1688 def _changegroup(self, nodes, source):
1684 """Compute the changegroup of all nodes that we have that a recipient
1689 """Compute the changegroup of all nodes that we have that a recipient
1685 doesn't. Return a chunkbuffer object whose read() method will return
1690 doesn't. Return a chunkbuffer object whose read() method will return
1686 successive changegroup chunks.
1691 successive changegroup chunks.
1687
1692
1688 This is much easier than the previous function as we can assume that
1693 This is much easier than the previous function as we can assume that
1689 the recipient has any changenode we aren't sending them.
1694 the recipient has any changenode we aren't sending them.
1690
1695
1691 nodes is the set of nodes to send"""
1696 nodes is the set of nodes to send"""
1692
1697
1693 self.hook('preoutgoing', throw=True, source=source)
1698 self.hook('preoutgoing', throw=True, source=source)
1694
1699
1695 cl = self.changelog
1700 cl = self.changelog
1696 revset = set([cl.rev(n) for n in nodes])
1701 revset = set([cl.rev(n) for n in nodes])
1697 self.changegroupinfo(nodes, source)
1702 self.changegroupinfo(nodes, source)
1698
1703
1699 def identity(x):
1704 def identity(x):
1700 return x
1705 return x
1701
1706
1702 def gennodelst(log):
1707 def gennodelst(log):
1703 for r in log:
1708 for r in log:
1704 if log.linkrev(r) in revset:
1709 if log.linkrev(r) in revset:
1705 yield log.node(r)
1710 yield log.node(r)
1706
1711
1707 def lookuplinkrev_func(revlog):
1712 def lookuplinkrev_func(revlog):
1708 def lookuplinkrev(n):
1713 def lookuplinkrev(n):
1709 return cl.node(revlog.linkrev(revlog.rev(n)))
1714 return cl.node(revlog.linkrev(revlog.rev(n)))
1710 return lookuplinkrev
1715 return lookuplinkrev
1711
1716
1712 def gengroup():
1717 def gengroup():
1713 '''yield a sequence of changegroup chunks (strings)'''
1718 '''yield a sequence of changegroup chunks (strings)'''
1714 # construct a list of all changed files
1719 # construct a list of all changed files
1715 changedfiles = set()
1720 changedfiles = set()
1716 mmfs = {}
1721 mmfs = {}
1717 collect = changegroup.collector(cl, mmfs, changedfiles)
1722 collect = changegroup.collector(cl, mmfs, changedfiles)
1718
1723
1719 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1724 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1720 # revlog.group yields three entries per node, so
1725 # revlog.group yields three entries per node, so
1721 # dividing by 3 gives an approximation of how many
1726 # dividing by 3 gives an approximation of how many
1722 # nodes have been processed.
1727 # nodes have been processed.
1723 self.ui.progress(_('bundling'), cnt / 3, unit=_('changesets'))
1728 self.ui.progress(_('bundling'), cnt / 3, unit=_('changesets'))
1724 yield chnk
1729 yield chnk
1725 changecount = cnt / 3
1730 changecount = cnt / 3
1726 self.ui.progress(_('bundling'), None)
1731 self.ui.progress(_('bundling'), None)
1727
1732
1728 mnfst = self.manifest
1733 mnfst = self.manifest
1729 nodeiter = gennodelst(mnfst)
1734 nodeiter = gennodelst(mnfst)
1730 efiles = {}
1735 efiles = {}
1731 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1736 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1732 lookuplinkrev_func(mnfst))):
1737 lookuplinkrev_func(mnfst))):
1733 if cnt % 3 == 1:
1738 if cnt % 3 == 1:
1734 mnode = chnk[:20]
1739 mnode = chnk[:20]
1735 efiles.update(mnfst.readdelta(mnode))
1740 efiles.update(mnfst.readdelta(mnode))
1736 # see above comment for why we divide by 3
1741 # see above comment for why we divide by 3
1737 self.ui.progress(_('bundling'), cnt / 3,
1742 self.ui.progress(_('bundling'), cnt / 3,
1738 unit=_('manifests'), total=changecount)
1743 unit=_('manifests'), total=changecount)
1739 yield chnk
1744 yield chnk
1740 efiles = len(efiles)
1745 efiles = len(efiles)
1741 self.ui.progress(_('bundling'), None)
1746 self.ui.progress(_('bundling'), None)
1742
1747
1743 for idx, fname in enumerate(sorted(changedfiles)):
1748 for idx, fname in enumerate(sorted(changedfiles)):
1744 filerevlog = self.file(fname)
1749 filerevlog = self.file(fname)
1745 if not len(filerevlog):
1750 if not len(filerevlog):
1746 raise util.Abort(_("empty or missing revlog for %s") % fname)
1751 raise util.Abort(_("empty or missing revlog for %s") % fname)
1747 nodeiter = gennodelst(filerevlog)
1752 nodeiter = gennodelst(filerevlog)
1748 nodeiter = list(nodeiter)
1753 nodeiter = list(nodeiter)
1749 if nodeiter:
1754 if nodeiter:
1750 yield changegroup.chunkheader(len(fname))
1755 yield changegroup.chunkheader(len(fname))
1751 yield fname
1756 yield fname
1752 lookup = lookuplinkrev_func(filerevlog)
1757 lookup = lookuplinkrev_func(filerevlog)
1753 for chnk in filerevlog.group(nodeiter, lookup):
1758 for chnk in filerevlog.group(nodeiter, lookup):
1754 self.ui.progress(
1759 self.ui.progress(
1755 _('bundling'), idx, item=fname,
1760 _('bundling'), idx, item=fname,
1756 total=efiles, unit=_('files'))
1761 total=efiles, unit=_('files'))
1757 yield chnk
1762 yield chnk
1758 self.ui.progress(_('bundling'), None)
1763 self.ui.progress(_('bundling'), None)
1759
1764
1760 yield changegroup.closechunk()
1765 yield changegroup.closechunk()
1761
1766
1762 if nodes:
1767 if nodes:
1763 self.hook('outgoing', node=hex(nodes[0]), source=source)
1768 self.hook('outgoing', node=hex(nodes[0]), source=source)
1764
1769
1765 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1770 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1766
1771
1767 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1772 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1768 """Add the changegroup returned by source.read() to this repo.
1773 """Add the changegroup returned by source.read() to this repo.
1769 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1774 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1770 the URL of the repo where this changegroup is coming from.
1775 the URL of the repo where this changegroup is coming from.
1771 If lock is not None, the function takes ownership of the lock
1776 If lock is not None, the function takes ownership of the lock
1772 and releases it after the changegroup is added.
1777 and releases it after the changegroup is added.
1773
1778
1774 Return an integer summarizing the change to this repo:
1779 Return an integer summarizing the change to this repo:
1775 - nothing changed or no source: 0
1780 - nothing changed or no source: 0
1776 - more heads than before: 1+added heads (2..n)
1781 - more heads than before: 1+added heads (2..n)
1777 - fewer heads than before: -1-removed heads (-2..-n)
1782 - fewer heads than before: -1-removed heads (-2..-n)
1778 - number of heads stays the same: 1
1783 - number of heads stays the same: 1
1779 """
1784 """
1780 def csmap(x):
1785 def csmap(x):
1781 self.ui.debug("add changeset %s\n" % short(x))
1786 self.ui.debug("add changeset %s\n" % short(x))
1782 return len(cl)
1787 return len(cl)
1783
1788
1784 def revmap(x):
1789 def revmap(x):
1785 return cl.rev(x)
1790 return cl.rev(x)
1786
1791
1787 if not source:
1792 if not source:
1788 return 0
1793 return 0
1789
1794
1790 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1795 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1791
1796
1792 changesets = files = revisions = 0
1797 changesets = files = revisions = 0
1793 efiles = set()
1798 efiles = set()
1794
1799
1795 # write changelog data to temp files so concurrent readers will not see
1800 # write changelog data to temp files so concurrent readers will not see
1796 # inconsistent view
1801 # inconsistent view
1797 cl = self.changelog
1802 cl = self.changelog
1798 cl.delayupdate()
1803 cl.delayupdate()
1799 oldheads = len(cl.heads())
1804 oldheads = len(cl.heads())
1800
1805
1801 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1806 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1802 try:
1807 try:
1803 trp = weakref.proxy(tr)
1808 trp = weakref.proxy(tr)
1804 # pull off the changeset group
1809 # pull off the changeset group
1805 self.ui.status(_("adding changesets\n"))
1810 self.ui.status(_("adding changesets\n"))
1806 clstart = len(cl)
1811 clstart = len(cl)
1807 class prog(object):
1812 class prog(object):
1808 step = _('changesets')
1813 step = _('changesets')
1809 count = 1
1814 count = 1
1810 ui = self.ui
1815 ui = self.ui
1811 total = None
1816 total = None
1812 def __call__(self):
1817 def __call__(self):
1813 self.ui.progress(self.step, self.count, unit=_('chunks'),
1818 self.ui.progress(self.step, self.count, unit=_('chunks'),
1814 total=self.total)
1819 total=self.total)
1815 self.count += 1
1820 self.count += 1
1816 pr = prog()
1821 pr = prog()
1817 source.callback = pr
1822 source.callback = pr
1818
1823
1819 if (cl.addgroup(source, csmap, trp) is None
1824 if (cl.addgroup(source, csmap, trp) is None
1820 and not emptyok):
1825 and not emptyok):
1821 raise util.Abort(_("received changelog group is empty"))
1826 raise util.Abort(_("received changelog group is empty"))
1822 clend = len(cl)
1827 clend = len(cl)
1823 changesets = clend - clstart
1828 changesets = clend - clstart
1824 for c in xrange(clstart, clend):
1829 for c in xrange(clstart, clend):
1825 efiles.update(self[c].files())
1830 efiles.update(self[c].files())
1826 efiles = len(efiles)
1831 efiles = len(efiles)
1827 self.ui.progress(_('changesets'), None)
1832 self.ui.progress(_('changesets'), None)
1828
1833
1829 # pull off the manifest group
1834 # pull off the manifest group
1830 self.ui.status(_("adding manifests\n"))
1835 self.ui.status(_("adding manifests\n"))
1831 pr.step = _('manifests')
1836 pr.step = _('manifests')
1832 pr.count = 1
1837 pr.count = 1
1833 pr.total = changesets # manifests <= changesets
1838 pr.total = changesets # manifests <= changesets
1834 # no need to check for empty manifest group here:
1839 # no need to check for empty manifest group here:
1835 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1840 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1836 # no new manifest will be created and the manifest group will
1841 # no new manifest will be created and the manifest group will
1837 # be empty during the pull
1842 # be empty during the pull
1838 self.manifest.addgroup(source, revmap, trp)
1843 self.manifest.addgroup(source, revmap, trp)
1839 self.ui.progress(_('manifests'), None)
1844 self.ui.progress(_('manifests'), None)
1840
1845
1841 needfiles = {}
1846 needfiles = {}
1842 if self.ui.configbool('server', 'validate', default=False):
1847 if self.ui.configbool('server', 'validate', default=False):
1843 # validate incoming csets have their manifests
1848 # validate incoming csets have their manifests
1844 for cset in xrange(clstart, clend):
1849 for cset in xrange(clstart, clend):
1845 mfest = self.changelog.read(self.changelog.node(cset))[0]
1850 mfest = self.changelog.read(self.changelog.node(cset))[0]
1846 mfest = self.manifest.readdelta(mfest)
1851 mfest = self.manifest.readdelta(mfest)
1847 # store file nodes we must see
1852 # store file nodes we must see
1848 for f, n in mfest.iteritems():
1853 for f, n in mfest.iteritems():
1849 needfiles.setdefault(f, set()).add(n)
1854 needfiles.setdefault(f, set()).add(n)
1850
1855
1851 # process the files
1856 # process the files
1852 self.ui.status(_("adding file changes\n"))
1857 self.ui.status(_("adding file changes\n"))
1853 pr.step = 'files'
1858 pr.step = 'files'
1854 pr.count = 1
1859 pr.count = 1
1855 pr.total = efiles
1860 pr.total = efiles
1856 source.callback = None
1861 source.callback = None
1857
1862
1858 while 1:
1863 while 1:
1859 f = source.chunk()
1864 f = source.chunk()
1860 if not f:
1865 if not f:
1861 break
1866 break
1862 self.ui.debug("adding %s revisions\n" % f)
1867 self.ui.debug("adding %s revisions\n" % f)
1863 pr()
1868 pr()
1864 fl = self.file(f)
1869 fl = self.file(f)
1865 o = len(fl)
1870 o = len(fl)
1866 if fl.addgroup(source, revmap, trp) is None:
1871 if fl.addgroup(source, revmap, trp) is None:
1867 raise util.Abort(_("received file revlog group is empty"))
1872 raise util.Abort(_("received file revlog group is empty"))
1868 revisions += len(fl) - o
1873 revisions += len(fl) - o
1869 files += 1
1874 files += 1
1870 if f in needfiles:
1875 if f in needfiles:
1871 needs = needfiles[f]
1876 needs = needfiles[f]
1872 for new in xrange(o, len(fl)):
1877 for new in xrange(o, len(fl)):
1873 n = fl.node(new)
1878 n = fl.node(new)
1874 if n in needs:
1879 if n in needs:
1875 needs.remove(n)
1880 needs.remove(n)
1876 if not needs:
1881 if not needs:
1877 del needfiles[f]
1882 del needfiles[f]
1878 self.ui.progress(_('files'), None)
1883 self.ui.progress(_('files'), None)
1879
1884
1880 for f, needs in needfiles.iteritems():
1885 for f, needs in needfiles.iteritems():
1881 fl = self.file(f)
1886 fl = self.file(f)
1882 for n in needs:
1887 for n in needs:
1883 try:
1888 try:
1884 fl.rev(n)
1889 fl.rev(n)
1885 except error.LookupError:
1890 except error.LookupError:
1886 raise util.Abort(
1891 raise util.Abort(
1887 _('missing file data for %s:%s - run hg verify') %
1892 _('missing file data for %s:%s - run hg verify') %
1888 (f, hex(n)))
1893 (f, hex(n)))
1889
1894
1890 newheads = len(cl.heads())
1895 newheads = len(cl.heads())
1891 heads = ""
1896 heads = ""
1892 if oldheads and newheads != oldheads:
1897 if oldheads and newheads != oldheads:
1893 heads = _(" (%+d heads)") % (newheads - oldheads)
1898 heads = _(" (%+d heads)") % (newheads - oldheads)
1894
1899
1895 self.ui.status(_("added %d changesets"
1900 self.ui.status(_("added %d changesets"
1896 " with %d changes to %d files%s\n")
1901 " with %d changes to %d files%s\n")
1897 % (changesets, revisions, files, heads))
1902 % (changesets, revisions, files, heads))
1898
1903
1899 if changesets > 0:
1904 if changesets > 0:
1900 p = lambda: cl.writepending() and self.root or ""
1905 p = lambda: cl.writepending() and self.root or ""
1901 self.hook('pretxnchangegroup', throw=True,
1906 self.hook('pretxnchangegroup', throw=True,
1902 node=hex(cl.node(clstart)), source=srctype,
1907 node=hex(cl.node(clstart)), source=srctype,
1903 url=url, pending=p)
1908 url=url, pending=p)
1904
1909
1905 # make changelog see real files again
1910 # make changelog see real files again
1906 cl.finalize(trp)
1911 cl.finalize(trp)
1907
1912
1908 tr.close()
1913 tr.close()
1909 finally:
1914 finally:
1910 tr.release()
1915 tr.release()
1911 if lock:
1916 if lock:
1912 lock.release()
1917 lock.release()
1913
1918
1914 if changesets > 0:
1919 if changesets > 0:
1915 # forcefully update the on-disk branch cache
1920 # forcefully update the on-disk branch cache
1916 self.ui.debug("updating the branch cache\n")
1921 self.ui.debug("updating the branch cache\n")
1917 self.updatebranchcache()
1922 self.updatebranchcache()
1918 self.hook("changegroup", node=hex(cl.node(clstart)),
1923 self.hook("changegroup", node=hex(cl.node(clstart)),
1919 source=srctype, url=url)
1924 source=srctype, url=url)
1920
1925
1921 for i in xrange(clstart, clend):
1926 for i in xrange(clstart, clend):
1922 self.hook("incoming", node=hex(cl.node(i)),
1927 self.hook("incoming", node=hex(cl.node(i)),
1923 source=srctype, url=url)
1928 source=srctype, url=url)
1924
1929
1925 # FIXME - why does this care about tip?
1930 # FIXME - why does this care about tip?
1926 if newheads == oldheads:
1931 if newheads == oldheads:
1927 bookmarks.update(self, self.dirstate.parents(), self['tip'].node())
1932 bookmarks.update(self, self.dirstate.parents(), self['tip'].node())
1928
1933
1929 # never return 0 here:
1934 # never return 0 here:
1930 if newheads < oldheads:
1935 if newheads < oldheads:
1931 return newheads - oldheads - 1
1936 return newheads - oldheads - 1
1932 else:
1937 else:
1933 return newheads - oldheads + 1
1938 return newheads - oldheads + 1
1934
1939
1935
1940
1936 def stream_in(self, remote, requirements):
1941 def stream_in(self, remote, requirements):
1937 lock = self.lock()
1942 lock = self.lock()
1938 try:
1943 try:
1939 fp = remote.stream_out()
1944 fp = remote.stream_out()
1940 l = fp.readline()
1945 l = fp.readline()
1941 try:
1946 try:
1942 resp = int(l)
1947 resp = int(l)
1943 except ValueError:
1948 except ValueError:
1944 raise error.ResponseError(
1949 raise error.ResponseError(
1945 _('Unexpected response from remote server:'), l)
1950 _('Unexpected response from remote server:'), l)
1946 if resp == 1:
1951 if resp == 1:
1947 raise util.Abort(_('operation forbidden by server'))
1952 raise util.Abort(_('operation forbidden by server'))
1948 elif resp == 2:
1953 elif resp == 2:
1949 raise util.Abort(_('locking the remote repository failed'))
1954 raise util.Abort(_('locking the remote repository failed'))
1950 elif resp != 0:
1955 elif resp != 0:
1951 raise util.Abort(_('the server sent an unknown error code'))
1956 raise util.Abort(_('the server sent an unknown error code'))
1952 self.ui.status(_('streaming all changes\n'))
1957 self.ui.status(_('streaming all changes\n'))
1953 l = fp.readline()
1958 l = fp.readline()
1954 try:
1959 try:
1955 total_files, total_bytes = map(int, l.split(' ', 1))
1960 total_files, total_bytes = map(int, l.split(' ', 1))
1956 except (ValueError, TypeError):
1961 except (ValueError, TypeError):
1957 raise error.ResponseError(
1962 raise error.ResponseError(
1958 _('Unexpected response from remote server:'), l)
1963 _('Unexpected response from remote server:'), l)
1959 self.ui.status(_('%d files to transfer, %s of data\n') %
1964 self.ui.status(_('%d files to transfer, %s of data\n') %
1960 (total_files, util.bytecount(total_bytes)))
1965 (total_files, util.bytecount(total_bytes)))
1961 start = time.time()
1966 start = time.time()
1962 for i in xrange(total_files):
1967 for i in xrange(total_files):
1963 # XXX doesn't support '\n' or '\r' in filenames
1968 # XXX doesn't support '\n' or '\r' in filenames
1964 l = fp.readline()
1969 l = fp.readline()
1965 try:
1970 try:
1966 name, size = l.split('\0', 1)
1971 name, size = l.split('\0', 1)
1967 size = int(size)
1972 size = int(size)
1968 except (ValueError, TypeError):
1973 except (ValueError, TypeError):
1969 raise error.ResponseError(
1974 raise error.ResponseError(
1970 _('Unexpected response from remote server:'), l)
1975 _('Unexpected response from remote server:'), l)
1971 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1976 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1972 # for backwards compat, name was partially encoded
1977 # for backwards compat, name was partially encoded
1973 ofp = self.sopener(store.decodedir(name), 'w')
1978 ofp = self.sopener(store.decodedir(name), 'w')
1974 for chunk in util.filechunkiter(fp, limit=size):
1979 for chunk in util.filechunkiter(fp, limit=size):
1975 ofp.write(chunk)
1980 ofp.write(chunk)
1976 ofp.close()
1981 ofp.close()
1977 elapsed = time.time() - start
1982 elapsed = time.time() - start
1978 if elapsed <= 0:
1983 if elapsed <= 0:
1979 elapsed = 0.001
1984 elapsed = 0.001
1980 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1985 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1981 (util.bytecount(total_bytes), elapsed,
1986 (util.bytecount(total_bytes), elapsed,
1982 util.bytecount(total_bytes / elapsed)))
1987 util.bytecount(total_bytes / elapsed)))
1983
1988
1984 # new requirements = old non-format requirements + new format-related
1989 # new requirements = old non-format requirements + new format-related
1985 # requirements from the streamed-in repository
1990 # requirements from the streamed-in repository
1986 requirements.update(set(self.requirements) - self.supportedformats)
1991 requirements.update(set(self.requirements) - self.supportedformats)
1987 self._applyrequirements(requirements)
1992 self._applyrequirements(requirements)
1988 self._writerequirements()
1993 self._writerequirements()
1989
1994
1990 self.invalidate()
1995 self.invalidate()
1991 return len(self.heads()) + 1
1996 return len(self.heads()) + 1
1992 finally:
1997 finally:
1993 lock.release()
1998 lock.release()
1994
1999
def clone(self, remote, heads=None, stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible'''

    # Avoid a mutable default argument (shared across calls); callers
    # that omit 'heads' still get the historical empty-list behavior
    # when the value is forwarded to self.pull below.
    if heads is None:
        heads = []

    # now, all clients that can request uncompressed clones can
    # read repo formats supported by all servers that can serve
    # them.

    # if revlog format changes, client will have to check version
    # and format flags on "stream" capability, and use
    # uncompressed only if compatible.

    if stream and not heads:
        # 'stream' means remote revlog format is revlogv1 only
        if remote.capable('stream'):
            return self.stream_in(remote, set(('revlogv1',)))
        # otherwise, 'streamreqs' contains the remote revlog format
        streamreqs = remote.capable('streamreqs')
        if streamreqs:
            streamreqs = set(streamreqs.split(','))
            # if we support it, stream in and adjust our requirements
            if not streamreqs - self.supportedformats:
                return self.stream_in(remote, streamreqs)
    return self.pull(remote, heads)
2022
2027
def pushkey(self, namespace, key, old, new):
    '''Delegate a pushkey write for this repo to the pushkey module.'''
    return pushkey.push(self, namespace, key, old, new)
2025
2030
def listkeys(self, namespace):
    '''Delegate listing of pushkey namespace contents to the pushkey module.'''
    return pushkey.list(self, namespace)
2028
2033
# used to avoid circular references so destructors work
def aftertrans(files):
    '''Return a callback that performs the queued (src, dest) renames.

    The pairs are snapshotted into tuples immediately, so later mutation
    of 'files' by the caller cannot change what gets renamed.
    '''
    pending = [tuple(pair) for pair in files]
    def renamer():
        for source, destination in pending:
            util.rename(source, destination)
    return renamer
2036
2041
def undoname(fn):
    '''Map a journal file path to the corresponding undo file path.

    The basename must start with 'journal'; that prefix is replaced
    by 'undo' while the directory part is kept unchanged.
    '''
    directory, basename = os.path.split(fn)
    assert basename.startswith('journal')
    return os.path.join(directory, 'undo' + basename[len('journal'):])
2041
2046
def instance(ui, path, create):
    '''Build a localrepository for 'path', stripping any 'file:' scheme.'''
    localpath = util.drop_scheme('file', path)
    return localrepository(ui, localpath, create)
2044
2049
def islocal(path):
    '''Repositories of this class always live on the local filesystem.'''
    return True
General Comments 0
You need to be logged in to leave comments. Login now