localrepo: ignore tags to unknown nodes (issue2750)
Idan Kamara
r13892:31d15f76 default
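For context: the hunk below fixes issue2750 by making _findtags verify each tag's target node against the changelog, so entries in .hgtags or .hg/localtags that name a changeset the repository no longer has (for example after a strip) are silently dropped instead of being returned as broken tags. A minimal standalone sketch of the pattern, for illustration only (the Changelog class and filtertags helper here are hypothetical stand-ins, not Mercurial APIs; the real change calls self.changelog.lookup(node) and catches error.LookupError):

    # Hypothetical stand-in for Mercurial's changelog (illustration only).
    class Changelog(object):
        def __init__(self, nodes):
            self._nodes = set(nodes)

        def lookup(self, node):
            # like changelog.lookup(): raise LookupError for unknown nodes
            if node not in self._nodes:
                raise LookupError(node)
            return node

    def filtertags(changelog, alltags):
        '''keep only tags whose node is known to the changelog'''
        tags = {}
        for name, node in alltags.items():
            try:
                changelog.lookup(node)
                tags[name] = node
            except LookupError:
                # ignore tags to unknown nodes (the behavior this commit adds)
                pass
        return tags

    # a tag left behind by e.g. 'hg strip' points at a missing node and is dropped
    cl = Changelog(['n1', 'n2'])
    print(filtertags(cl, {'good': 'n1', 'stale': 'n3'}))  # -> {'good': 'n1'}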
@@ -1,1932 +1,1937 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as matchmod
import merge as mergemod
import tags as tagsmod
import url as urlmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'parentdelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = util.path_auditor(self.root, self._checknested)
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'parentdelta', False):
                    requirements.append("parentdelta")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RequirementError(
                    _("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = {}
        if 'parentdelta' in requirements:
            self.sopener.options['parentdelta'] = 1

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @util.propertycache
    def _bookmarks(self):
        return bookmarks.read(self)

    @util.propertycache
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.options['defversion'] = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                r = self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {}  # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
-                tags[encoding.tolocal(name)] = node
+                try:
+                    # ignore tags to unknown nodes
+                    self.changelog.lookup(node)
+                    tags[encoding.tolocal(name)] = node
+                except error.LookupError:
+                    pass
366 tags['tip'] = self.changelog.tip()
371 tags['tip'] = self.changelog.tip()
367 tagtypes = dict([(encoding.tolocal(name), value)
372 tagtypes = dict([(encoding.tolocal(name), value)
368 for (name, value) in tagtypes.iteritems()])
373 for (name, value) in tagtypes.iteritems()])
369 return (tags, tagtypes)
374 return (tags, tagtypes)
370
375
371 def tagtype(self, tagname):
376 def tagtype(self, tagname):
372 '''
377 '''
373 return the type of the given tag. result can be:
378 return the type of the given tag. result can be:
374
379
375 'local' : a local tag
380 'local' : a local tag
376 'global' : a global tag
381 'global' : a global tag
377 None : tag does not exist
382 None : tag does not exist
378 '''
383 '''
379
384
380 self.tags()
385 self.tags()
381
386
382 return self._tagtypes.get(tagname)
387 return self._tagtypes.get(tagname)
383
388
384 def tagslist(self):
389 def tagslist(self):
385 '''return a list of tags ordered by revision'''
390 '''return a list of tags ordered by revision'''
386 l = []
391 l = []
387 for t, n in self.tags().iteritems():
392 for t, n in self.tags().iteritems():
388 try:
393 try:
389 r = self.changelog.rev(n)
394 r = self.changelog.rev(n)
390 except error.LookupError:
395 except error.LookupError:
391 r = -2 # sort to the beginning of the list if unknown
396 r = -2 # sort to the beginning of the list if unknown
392 l.append((r, t, n))
397 l.append((r, t, n))
393 return [(t, n) for r, t, n in sorted(l)]
398 return [(t, n) for r, t, n in sorted(l)]
394
399
395 def nodetags(self, node):
400 def nodetags(self, node):
396 '''return the tags associated with a node'''
401 '''return the tags associated with a node'''
397 if not self.nodetagscache:
402 if not self.nodetagscache:
398 self.nodetagscache = {}
403 self.nodetagscache = {}
399 for t, n in self.tags().iteritems():
404 for t, n in self.tags().iteritems():
400 self.nodetagscache.setdefault(n, []).append(t)
405 self.nodetagscache.setdefault(n, []).append(t)
401 for tags in self.nodetagscache.itervalues():
406 for tags in self.nodetagscache.itervalues():
402 tags.sort()
407 tags.sort()
403 return self.nodetagscache.get(node, [])
408 return self.nodetagscache.get(node, [])
404
409
405 def nodebookmarks(self, node):
410 def nodebookmarks(self, node):
406 marks = []
411 marks = []
407 for bookmark, n in self._bookmarks.iteritems():
412 for bookmark, n in self._bookmarks.iteritems():
408 if n == node:
413 if n == node:
409 marks.append(bookmark)
414 marks.append(bookmark)
410 return sorted(marks)
415 return sorted(marks)
411
416
412 def _branchtags(self, partial, lrev):
417 def _branchtags(self, partial, lrev):
413 # TODO: rename this function?
418 # TODO: rename this function?
414 tiprev = len(self) - 1
419 tiprev = len(self) - 1
415 if lrev != tiprev:
420 if lrev != tiprev:
416 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
421 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
417 self._updatebranchcache(partial, ctxgen)
422 self._updatebranchcache(partial, ctxgen)
418 self._writebranchcache(partial, self.changelog.tip(), tiprev)
423 self._writebranchcache(partial, self.changelog.tip(), tiprev)
419
424
420 return partial
425 return partial
421
426
422 def updatebranchcache(self):
427 def updatebranchcache(self):
423 tip = self.changelog.tip()
428 tip = self.changelog.tip()
424 if self._branchcache is not None and self._branchcachetip == tip:
429 if self._branchcache is not None and self._branchcachetip == tip:
425 return self._branchcache
430 return self._branchcache
426
431
427 oldtip = self._branchcachetip
432 oldtip = self._branchcachetip
428 self._branchcachetip = tip
433 self._branchcachetip = tip
429 if oldtip is None or oldtip not in self.changelog.nodemap:
434 if oldtip is None or oldtip not in self.changelog.nodemap:
430 partial, last, lrev = self._readbranchcache()
435 partial, last, lrev = self._readbranchcache()
431 else:
436 else:
432 lrev = self.changelog.rev(oldtip)
437 lrev = self.changelog.rev(oldtip)
433 partial = self._branchcache
438 partial = self._branchcache
434
439
435 self._branchtags(partial, lrev)
440 self._branchtags(partial, lrev)
436 # this private cache holds all heads (not just tips)
441 # this private cache holds all heads (not just tips)
437 self._branchcache = partial
442 self._branchcache = partial
438
443
439 def branchmap(self):
444 def branchmap(self):
440 '''returns a dictionary {branch: [branchheads]}'''
445 '''returns a dictionary {branch: [branchheads]}'''
441 self.updatebranchcache()
446 self.updatebranchcache()
442 return self._branchcache
447 return self._branchcache
443
448
444 def branchtags(self):
449 def branchtags(self):
445 '''return a dict where branch names map to the tipmost head of
450 '''return a dict where branch names map to the tipmost head of
446 the branch, open heads come before closed'''
451 the branch, open heads come before closed'''
447 bt = {}
452 bt = {}
448 for bn, heads in self.branchmap().iteritems():
453 for bn, heads in self.branchmap().iteritems():
449 tip = heads[-1]
454 tip = heads[-1]
450 for h in reversed(heads):
455 for h in reversed(heads):
451 if 'close' not in self.changelog.read(h)[5]:
456 if 'close' not in self.changelog.read(h)[5]:
452 tip = h
457 tip = h
453 break
458 break
454 bt[bn] = tip
459 bt[bn] = tip
455 return bt
460 return bt
456
461
457 def _readbranchcache(self):
462 def _readbranchcache(self):
458 partial = {}
463 partial = {}
459 try:
464 try:
460 f = self.opener("cache/branchheads")
465 f = self.opener("cache/branchheads")
461 lines = f.read().split('\n')
466 lines = f.read().split('\n')
462 f.close()
467 f.close()
463 except (IOError, OSError):
468 except (IOError, OSError):
464 return {}, nullid, nullrev
469 return {}, nullid, nullrev
465
470
466 try:
471 try:
467 last, lrev = lines.pop(0).split(" ", 1)
472 last, lrev = lines.pop(0).split(" ", 1)
468 last, lrev = bin(last), int(lrev)
473 last, lrev = bin(last), int(lrev)
469 if lrev >= len(self) or self[lrev].node() != last:
474 if lrev >= len(self) or self[lrev].node() != last:
470 # invalidate the cache
475 # invalidate the cache
471 raise ValueError('invalidating branch cache (tip differs)')
476 raise ValueError('invalidating branch cache (tip differs)')
472 for l in lines:
477 for l in lines:
473 if not l:
478 if not l:
474 continue
479 continue
475 node, label = l.split(" ", 1)
480 node, label = l.split(" ", 1)
476 label = encoding.tolocal(label.strip())
481 label = encoding.tolocal(label.strip())
477 partial.setdefault(label, []).append(bin(node))
482 partial.setdefault(label, []).append(bin(node))
478 except KeyboardInterrupt:
483 except KeyboardInterrupt:
479 raise
484 raise
480 except Exception, inst:
485 except Exception, inst:
481 if self.ui.debugflag:
486 if self.ui.debugflag:
482 self.ui.warn(str(inst), '\n')
487 self.ui.warn(str(inst), '\n')
483 partial, last, lrev = {}, nullid, nullrev
488 partial, last, lrev = {}, nullid, nullrev
484 return partial, last, lrev
489 return partial, last, lrev
485
490
486 def _writebranchcache(self, branches, tip, tiprev):
491 def _writebranchcache(self, branches, tip, tiprev):
487 try:
492 try:
488 f = self.opener("cache/branchheads", "w", atomictemp=True)
493 f = self.opener("cache/branchheads", "w", atomictemp=True)
489 f.write("%s %s\n" % (hex(tip), tiprev))
494 f.write("%s %s\n" % (hex(tip), tiprev))
490 for label, nodes in branches.iteritems():
495 for label, nodes in branches.iteritems():
491 for node in nodes:
496 for node in nodes:
492 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
497 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
493 f.rename()
498 f.rename()
494 except (IOError, OSError):
499 except (IOError, OSError):
495 pass
500 pass
496
501
497 def _updatebranchcache(self, partial, ctxgen):
502 def _updatebranchcache(self, partial, ctxgen):
498 # collect new branch entries
503 # collect new branch entries
499 newbranches = {}
504 newbranches = {}
500 for c in ctxgen:
505 for c in ctxgen:
501 newbranches.setdefault(c.branch(), []).append(c.node())
506 newbranches.setdefault(c.branch(), []).append(c.node())
502 # if older branchheads are reachable from new ones, they aren't
507 # if older branchheads are reachable from new ones, they aren't
503 # really branchheads. Note checking parents is insufficient:
508 # really branchheads. Note checking parents is insufficient:
504 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
509 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
505 for branch, newnodes in newbranches.iteritems():
510 for branch, newnodes in newbranches.iteritems():
506 bheads = partial.setdefault(branch, [])
511 bheads = partial.setdefault(branch, [])
507 bheads.extend(newnodes)
512 bheads.extend(newnodes)
508 if len(bheads) <= 1:
513 if len(bheads) <= 1:
509 continue
514 continue
510 # starting from tip means fewer passes over reachable
515 # starting from tip means fewer passes over reachable
511 while newnodes:
516 while newnodes:
512 latest = newnodes.pop()
517 latest = newnodes.pop()
513 if latest not in bheads:
518 if latest not in bheads:
514 continue
519 continue
515 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
520 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
516 reachable = self.changelog.reachable(latest, minbhrev)
521 reachable = self.changelog.reachable(latest, minbhrev)
517 reachable.remove(latest)
522 reachable.remove(latest)
518 bheads = [b for b in bheads if b not in reachable]
523 bheads = [b for b in bheads if b not in reachable]
519 partial[branch] = bheads
524 partial[branch] = bheads
520
525
521 def lookup(self, key):
526 def lookup(self, key):
522 if isinstance(key, int):
527 if isinstance(key, int):
523 return self.changelog.node(key)
528 return self.changelog.node(key)
524 elif key == '.':
529 elif key == '.':
525 return self.dirstate.p1()
530 return self.dirstate.p1()
526 elif key == 'null':
531 elif key == 'null':
527 return nullid
532 return nullid
528 elif key == 'tip':
533 elif key == 'tip':
529 return self.changelog.tip()
534 return self.changelog.tip()
530 n = self.changelog._match(key)
535 n = self.changelog._match(key)
531 if n:
536 if n:
532 return n
537 return n
533 if key in self._bookmarks:
538 if key in self._bookmarks:
534 return self._bookmarks[key]
539 return self._bookmarks[key]
535 if key in self.tags():
540 if key in self.tags():
536 return self.tags()[key]
541 return self.tags()[key]
537 if key in self.branchtags():
542 if key in self.branchtags():
538 return self.branchtags()[key]
543 return self.branchtags()[key]
539 n = self.changelog._partialmatch(key)
544 n = self.changelog._partialmatch(key)
540 if n:
545 if n:
541 return n
546 return n
542
547
543 # can't find key, check if it might have come from damaged dirstate
548 # can't find key, check if it might have come from damaged dirstate
544 if key in self.dirstate.parents():
549 if key in self.dirstate.parents():
545 raise error.Abort(_("working directory has unknown parent '%s'!")
550 raise error.Abort(_("working directory has unknown parent '%s'!")
546 % short(key))
551 % short(key))
547 try:
552 try:
548 if len(key) == 20:
553 if len(key) == 20:
549 key = hex(key)
554 key = hex(key)
550 except:
555 except:
551 pass
556 pass
552 raise error.RepoLookupError(_("unknown revision '%s'") % key)
557 raise error.RepoLookupError(_("unknown revision '%s'") % key)
553
558
554 def lookupbranch(self, key, remote=None):
559 def lookupbranch(self, key, remote=None):
555 repo = remote or self
560 repo = remote or self
556 if key in repo.branchmap():
561 if key in repo.branchmap():
557 return key
562 return key
558
563
559 repo = (remote and remote.local()) and remote or self
564 repo = (remote and remote.local()) and remote or self
560 return repo[key].branch()
565 return repo[key].branch()
561
566
562 def known(self, nodes):
567 def known(self, nodes):
563 nm = self.changelog.nodemap
568 nm = self.changelog.nodemap
564 return [(n in nm) for n in nodes]
569 return [(n in nm) for n in nodes]
565
570
566 def local(self):
571 def local(self):
567 return True
572 return True
568
573
569 def join(self, f):
574 def join(self, f):
570 return os.path.join(self.path, f)
575 return os.path.join(self.path, f)
571
576
572 def wjoin(self, f):
577 def wjoin(self, f):
573 return os.path.join(self.root, f)
578 return os.path.join(self.root, f)
574
579
575 def file(self, f):
580 def file(self, f):
576 if f[0] == '/':
581 if f[0] == '/':
577 f = f[1:]
582 f = f[1:]
578 return filelog.filelog(self.sopener, f)
583 return filelog.filelog(self.sopener, f)
579
584
580 def changectx(self, changeid):
585 def changectx(self, changeid):
581 return self[changeid]
586 return self[changeid]
582
587
583 def parents(self, changeid=None):
588 def parents(self, changeid=None):
584 '''get list of changectxs for parents of changeid'''
589 '''get list of changectxs for parents of changeid'''
585 return self[changeid].parents()
590 return self[changeid].parents()
586
591
587 def filectx(self, path, changeid=None, fileid=None):
592 def filectx(self, path, changeid=None, fileid=None):
588 """changeid can be a changeset revision, node, or tag.
593 """changeid can be a changeset revision, node, or tag.
589 fileid can be a file revision or node."""
594 fileid can be a file revision or node."""
590 return context.filectx(self, path, changeid, fileid)
595 return context.filectx(self, path, changeid, fileid)
591
596
592 def getcwd(self):
597 def getcwd(self):
593 return self.dirstate.getcwd()
598 return self.dirstate.getcwd()
594
599
595 def pathto(self, f, cwd=None):
600 def pathto(self, f, cwd=None):
596 return self.dirstate.pathto(f, cwd)
601 return self.dirstate.pathto(f, cwd)
597
602
598 def wfile(self, f, mode='r'):
603 def wfile(self, f, mode='r'):
599 return self.wopener(f, mode)
604 return self.wopener(f, mode)
600
605
601 def _link(self, f):
606 def _link(self, f):
602 return os.path.islink(self.wjoin(f))
607 return os.path.islink(self.wjoin(f))
603
608
604 def _loadfilter(self, filter):
609 def _loadfilter(self, filter):
605 if filter not in self.filterpats:
610 if filter not in self.filterpats:
606 l = []
611 l = []
607 for pat, cmd in self.ui.configitems(filter):
612 for pat, cmd in self.ui.configitems(filter):
608 if cmd == '!':
613 if cmd == '!':
609 continue
614 continue
610 mf = matchmod.match(self.root, '', [pat])
615 mf = matchmod.match(self.root, '', [pat])
611 fn = None
616 fn = None
612 params = cmd
617 params = cmd
613 for name, filterfn in self._datafilters.iteritems():
618 for name, filterfn in self._datafilters.iteritems():
614 if cmd.startswith(name):
619 if cmd.startswith(name):
615 fn = filterfn
620 fn = filterfn
616 params = cmd[len(name):].lstrip()
621 params = cmd[len(name):].lstrip()
617 break
622 break
618 if not fn:
623 if not fn:
619 fn = lambda s, c, **kwargs: util.filter(s, c)
624 fn = lambda s, c, **kwargs: util.filter(s, c)
620 # Wrap old filters not supporting keyword arguments
625 # Wrap old filters not supporting keyword arguments
621 if not inspect.getargspec(fn)[2]:
626 if not inspect.getargspec(fn)[2]:
622 oldfn = fn
627 oldfn = fn
623 fn = lambda s, c, **kwargs: oldfn(s, c)
628 fn = lambda s, c, **kwargs: oldfn(s, c)
624 l.append((mf, fn, params))
629 l.append((mf, fn, params))
625 self.filterpats[filter] = l
630 self.filterpats[filter] = l
626 return self.filterpats[filter]
631 return self.filterpats[filter]
627
632
628 def _filter(self, filterpats, filename, data):
633 def _filter(self, filterpats, filename, data):
629 for mf, fn, cmd in filterpats:
634 for mf, fn, cmd in filterpats:
630 if mf(filename):
635 if mf(filename):
631 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
636 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
632 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
637 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
633 break
638 break
634
639
635 return data
640 return data
636
641
637 @propertycache
642 @propertycache
638 def _encodefilterpats(self):
643 def _encodefilterpats(self):
639 return self._loadfilter('encode')
644 return self._loadfilter('encode')
640
645
641 @propertycache
646 @propertycache
642 def _decodefilterpats(self):
647 def _decodefilterpats(self):
643 return self._loadfilter('decode')
648 return self._loadfilter('decode')
644
649
645 def adddatafilter(self, name, filter):
650 def adddatafilter(self, name, filter):
646 self._datafilters[name] = filter
651 self._datafilters[name] = filter
647
652
648 def wread(self, filename):
653 def wread(self, filename):
649 if self._link(filename):
654 if self._link(filename):
650 data = os.readlink(self.wjoin(filename))
655 data = os.readlink(self.wjoin(filename))
651 else:
656 else:
652 data = self.wopener(filename, 'r').read()
657 data = self.wopener(filename, 'r').read()
653 return self._filter(self._encodefilterpats, filename, data)
658 return self._filter(self._encodefilterpats, filename, data)
654
659
655 def wwrite(self, filename, data, flags):
660 def wwrite(self, filename, data, flags):
656 data = self._filter(self._decodefilterpats, filename, data)
661 data = self._filter(self._decodefilterpats, filename, data)
657 if 'l' in flags:
662 if 'l' in flags:
658 self.wopener.symlink(data, filename)
663 self.wopener.symlink(data, filename)
659 else:
664 else:
660 self.wopener(filename, 'w').write(data)
665 self.wopener(filename, 'w').write(data)
661 if 'x' in flags:
666 if 'x' in flags:
662 util.set_flags(self.wjoin(filename), False, True)
667 util.set_flags(self.wjoin(filename), False, True)
663
668
664 def wwritedata(self, filename, data):
669 def wwritedata(self, filename, data):
665 return self._filter(self._decodefilterpats, filename, data)
670 return self._filter(self._decodefilterpats, filename, data)
666
671
667 def transaction(self, desc):
672 def transaction(self, desc):
668 tr = self._transref and self._transref() or None
673 tr = self._transref and self._transref() or None
669 if tr and tr.running():
674 if tr and tr.running():
670 return tr.nest()
675 return tr.nest()
671
676
672 # abort here if the journal already exists
677 # abort here if the journal already exists
673 if os.path.exists(self.sjoin("journal")):
678 if os.path.exists(self.sjoin("journal")):
674 raise error.RepoError(
679 raise error.RepoError(
675 _("abandoned transaction found - run hg recover"))
680 _("abandoned transaction found - run hg recover"))
676
681
677 # save dirstate for rollback
682 # save dirstate for rollback
678 try:
683 try:
679 ds = self.opener("dirstate").read()
684 ds = self.opener("dirstate").read()
680 except IOError:
685 except IOError:
681 ds = ""
686 ds = ""
682 self.opener("journal.dirstate", "w").write(ds)
687 self.opener("journal.dirstate", "w").write(ds)
683 self.opener("journal.branch", "w").write(
688 self.opener("journal.branch", "w").write(
684 encoding.fromlocal(self.dirstate.branch()))
689 encoding.fromlocal(self.dirstate.branch()))
685 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
690 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
686
691
687 renames = [(self.sjoin("journal"), self.sjoin("undo")),
692 renames = [(self.sjoin("journal"), self.sjoin("undo")),
688 (self.join("journal.dirstate"), self.join("undo.dirstate")),
693 (self.join("journal.dirstate"), self.join("undo.dirstate")),
689 (self.join("journal.branch"), self.join("undo.branch")),
694 (self.join("journal.branch"), self.join("undo.branch")),
690 (self.join("journal.desc"), self.join("undo.desc"))]
695 (self.join("journal.desc"), self.join("undo.desc"))]
691 tr = transaction.transaction(self.ui.warn, self.sopener,
696 tr = transaction.transaction(self.ui.warn, self.sopener,
692 self.sjoin("journal"),
697 self.sjoin("journal"),
693 aftertrans(renames),
698 aftertrans(renames),
694 self.store.createmode)
699 self.store.createmode)
695 self._transref = weakref.ref(tr)
700 self._transref = weakref.ref(tr)
696 return tr
701 return tr
697
702
698 def recover(self):
703 def recover(self):
699 lock = self.lock()
704 lock = self.lock()
700 try:
705 try:
701 if os.path.exists(self.sjoin("journal")):
706 if os.path.exists(self.sjoin("journal")):
702 self.ui.status(_("rolling back interrupted transaction\n"))
707 self.ui.status(_("rolling back interrupted transaction\n"))
703 transaction.rollback(self.sopener, self.sjoin("journal"),
708 transaction.rollback(self.sopener, self.sjoin("journal"),
704 self.ui.warn)
709 self.ui.warn)
705 self.invalidate()
710 self.invalidate()
706 return True
711 return True
707 else:
712 else:
708 self.ui.warn(_("no interrupted transaction available\n"))
713 self.ui.warn(_("no interrupted transaction available\n"))
709 return False
714 return False
710 finally:
715 finally:
711 lock.release()
716 lock.release()
712
717
713 def rollback(self, dryrun=False):
718 def rollback(self, dryrun=False):
714 wlock = lock = None
719 wlock = lock = None
715 try:
720 try:
716 wlock = self.wlock()
721 wlock = self.wlock()
717 lock = self.lock()
722 lock = self.lock()
718 if os.path.exists(self.sjoin("undo")):
723 if os.path.exists(self.sjoin("undo")):
719 try:
724 try:
720 args = self.opener("undo.desc", "r").read().splitlines()
725 args = self.opener("undo.desc", "r").read().splitlines()
721 if len(args) >= 3 and self.ui.verbose:
726 if len(args) >= 3 and self.ui.verbose:
722 desc = _("repository tip rolled back to revision %s"
727 desc = _("repository tip rolled back to revision %s"
723 " (undo %s: %s)\n") % (
728 " (undo %s: %s)\n") % (
724 int(args[0]) - 1, args[1], args[2])
729 int(args[0]) - 1, args[1], args[2])
725 elif len(args) >= 2:
730 elif len(args) >= 2:
726 desc = _("repository tip rolled back to revision %s"
731 desc = _("repository tip rolled back to revision %s"
727 " (undo %s)\n") % (
732 " (undo %s)\n") % (
728 int(args[0]) - 1, args[1])
733 int(args[0]) - 1, args[1])
729 except IOError:
734 except IOError:
730 desc = _("rolling back unknown transaction\n")
735 desc = _("rolling back unknown transaction\n")
731 self.ui.status(desc)
736 self.ui.status(desc)
732 if dryrun:
737 if dryrun:
733 return
738 return
734 transaction.rollback(self.sopener, self.sjoin("undo"),
739 transaction.rollback(self.sopener, self.sjoin("undo"),
735 self.ui.warn)
740 self.ui.warn)
736 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
741 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
737 if os.path.exists(self.join('undo.bookmarks')):
742 if os.path.exists(self.join('undo.bookmarks')):
738 util.rename(self.join('undo.bookmarks'),
743 util.rename(self.join('undo.bookmarks'),
739 self.join('bookmarks'))
744 self.join('bookmarks'))
740 try:
745 try:
741 branch = self.opener("undo.branch").read()
746 branch = self.opener("undo.branch").read()
742 self.dirstate.setbranch(branch)
747 self.dirstate.setbranch(branch)
743 except IOError:
748 except IOError:
744 self.ui.warn(_("Named branch could not be reset, "
749 self.ui.warn(_("Named branch could not be reset, "
745 "current branch still is: %s\n")
750 "current branch still is: %s\n")
746 % self.dirstate.branch())
751 % self.dirstate.branch())
747 self.invalidate()
752 self.invalidate()
748 self.dirstate.invalidate()
753 self.dirstate.invalidate()
749 self.destroyed()
754 self.destroyed()
750 parents = tuple([p.rev() for p in self.parents()])
755 parents = tuple([p.rev() for p in self.parents()])
751 if len(parents) > 1:
756 if len(parents) > 1:
752 self.ui.status(_("working directory now based on "
757 self.ui.status(_("working directory now based on "
753 "revisions %d and %d\n") % parents)
758 "revisions %d and %d\n") % parents)
754 else:
759 else:
755 self.ui.status(_("working directory now based on "
760 self.ui.status(_("working directory now based on "
756 "revision %d\n") % parents)
761 "revision %d\n") % parents)
757 else:
762 else:
758 self.ui.warn(_("no rollback information available\n"))
763 self.ui.warn(_("no rollback information available\n"))
759 return 1
764 return 1
760 finally:
765 finally:
761 release(lock, wlock)
766 release(lock, wlock)
762
767
763 def invalidatecaches(self):
768 def invalidatecaches(self):
764 self._tags = None
769 self._tags = None
765 self._tagtypes = None
770 self._tagtypes = None
766 self.nodetagscache = None
771 self.nodetagscache = None
767 self._branchcache = None # in UTF-8
772 self._branchcache = None # in UTF-8
768 self._branchcachetip = None
773 self._branchcachetip = None
769
774
770 def invalidate(self):
775 def invalidate(self):
771 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
776 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
772 if a in self.__dict__:
777 if a in self.__dict__:
773 delattr(self, a)
778 delattr(self, a)
774 self.invalidatecaches()
779 self.invalidatecaches()
775
780
776 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
781 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
777 try:
782 try:
778 l = lock.lock(lockname, 0, releasefn, desc=desc)
783 l = lock.lock(lockname, 0, releasefn, desc=desc)
779 except error.LockHeld, inst:
784 except error.LockHeld, inst:
780 if not wait:
785 if not wait:
781 raise
786 raise
782 self.ui.warn(_("waiting for lock on %s held by %r\n") %
787 self.ui.warn(_("waiting for lock on %s held by %r\n") %
783 (desc, inst.locker))
788 (desc, inst.locker))
784 # default to 600 seconds timeout
789 # default to 600 seconds timeout
785 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
790 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
786 releasefn, desc=desc)
791 releasefn, desc=desc)
787 if acquirefn:
792 if acquirefn:
788 acquirefn()
793 acquirefn()
789 return l
794 return l
790
795
791 def lock(self, wait=True):
796 def lock(self, wait=True):
792 '''Lock the repository store (.hg/store) and return a weak reference
797 '''Lock the repository store (.hg/store) and return a weak reference
793 to the lock. Use this before modifying the store (e.g. committing or
798 to the lock. Use this before modifying the store (e.g. committing or
794 stripping). If you are opening a transaction, get a lock as well.)'''
799 stripping). If you are opening a transaction, get a lock as well.)'''
795 l = self._lockref and self._lockref()
800 l = self._lockref and self._lockref()
796 if l is not None and l.held:
801 if l is not None and l.held:
797 l.lock()
802 l.lock()
798 return l
803 return l
799
804
800 l = self._lock(self.sjoin("lock"), wait, self.store.write,
805 l = self._lock(self.sjoin("lock"), wait, self.store.write,
801 self.invalidate, _('repository %s') % self.origroot)
806 self.invalidate, _('repository %s') % self.origroot)
802 self._lockref = weakref.ref(l)
807 self._lockref = weakref.ref(l)
803 return l
808 return l
804
809
805 def wlock(self, wait=True):
810 def wlock(self, wait=True):
806 '''Lock the non-store parts of the repository (everything under
811 '''Lock the non-store parts of the repository (everything under
807 .hg except .hg/store) and return a weak reference to the lock.
812 .hg except .hg/store) and return a weak reference to the lock.
808 Use this before modifying files in .hg.'''
813 Use this before modifying files in .hg.'''
809 l = self._wlockref and self._wlockref()
814 l = self._wlockref and self._wlockref()
810 if l is not None and l.held:
815 if l is not None and l.held:
811 l.lock()
816 l.lock()
812 return l
817 return l
813
818
814 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
819 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
815 self.dirstate.invalidate, _('working directory of %s') %
820 self.dirstate.invalidate, _('working directory of %s') %
816 self.origroot)
821 self.origroot)
817 self._wlockref = weakref.ref(l)
822 self._wlockref = weakref.ref(l)
818 return l
823 return l
819
824
820 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
825 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
821 """
826 """
822 commit an individual file as part of a larger transaction
827 commit an individual file as part of a larger transaction
823 """
828 """
824
829
825 fname = fctx.path()
830 fname = fctx.path()
826 text = fctx.data()
831 text = fctx.data()
827 flog = self.file(fname)
832 flog = self.file(fname)
828 fparent1 = manifest1.get(fname, nullid)
833 fparent1 = manifest1.get(fname, nullid)
829 fparent2 = fparent2o = manifest2.get(fname, nullid)
834 fparent2 = fparent2o = manifest2.get(fname, nullid)
830
835
831 meta = {}
836 meta = {}
832 copy = fctx.renamed()
837 copy = fctx.renamed()
833 if copy and copy[0] != fname:
838 if copy and copy[0] != fname:
834 # Mark the new revision of this file as a copy of another
839 # Mark the new revision of this file as a copy of another
835 # file. This copy data will effectively act as a parent
840 # file. This copy data will effectively act as a parent
836 # of this new revision. If this is a merge, the first
841 # of this new revision. If this is a merge, the first
837 # parent will be the nullid (meaning "look up the copy data")
842 # parent will be the nullid (meaning "look up the copy data")
838 # and the second one will be the other parent. For example:
843 # and the second one will be the other parent. For example:
839 #
844 #
840 # 0 --- 1 --- 3 rev1 changes file foo
845 # 0 --- 1 --- 3 rev1 changes file foo
841 # \ / rev2 renames foo to bar and changes it
846 # \ / rev2 renames foo to bar and changes it
842 # \- 2 -/ rev3 should have bar with all changes and
847 # \- 2 -/ rev3 should have bar with all changes and
843 # should record that bar descends from
848 # should record that bar descends from
844 # bar in rev2 and foo in rev1
849 # bar in rev2 and foo in rev1
845 #
850 #
846 # this allows this merge to succeed:
851 # this allows this merge to succeed:
847 #
852 #
848 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
853 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
849 # \ / merging rev3 and rev4 should use bar@rev2
854 # \ / merging rev3 and rev4 should use bar@rev2
850 # \- 2 --- 4 as the merge base
855 # \- 2 --- 4 as the merge base
851 #
856 #
852
857
853 cfname = copy[0]
858 cfname = copy[0]
854 crev = manifest1.get(cfname)
859 crev = manifest1.get(cfname)
855 newfparent = fparent2
860 newfparent = fparent2
856
861
857 if manifest2: # branch merge
862 if manifest2: # branch merge
858 if fparent2 == nullid or crev is None: # copied on remote side
863 if fparent2 == nullid or crev is None: # copied on remote side
859 if cfname in manifest2:
864 if cfname in manifest2:
860 crev = manifest2[cfname]
865 crev = manifest2[cfname]
861 newfparent = fparent1
866 newfparent = fparent1
862
867
863 # find source in nearest ancestor if we've lost track
868 # find source in nearest ancestor if we've lost track
864 if not crev:
869 if not crev:
865 self.ui.debug(" %s: searching for copy revision for %s\n" %
870 self.ui.debug(" %s: searching for copy revision for %s\n" %
866 (fname, cfname))
871 (fname, cfname))
867 for ancestor in self[None].ancestors():
872 for ancestor in self[None].ancestors():
868 if cfname in ancestor:
873 if cfname in ancestor:
869 crev = ancestor[cfname].filenode()
874 crev = ancestor[cfname].filenode()
870 break
875 break
871
876
872 if crev:
877 if crev:
873 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
878 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
874 meta["copy"] = cfname
879 meta["copy"] = cfname
875 meta["copyrev"] = hex(crev)
880 meta["copyrev"] = hex(crev)
876 fparent1, fparent2 = nullid, newfparent
881 fparent1, fparent2 = nullid, newfparent
877 else:
882 else:
878 self.ui.warn(_("warning: can't find ancestor for '%s' "
883 self.ui.warn(_("warning: can't find ancestor for '%s' "
879 "copied from '%s'!\n") % (fname, cfname))
884 "copied from '%s'!\n") % (fname, cfname))
880
885
881 elif fparent2 != nullid:
886 elif fparent2 != nullid:
882 # is one parent an ancestor of the other?
887 # is one parent an ancestor of the other?
883 fparentancestor = flog.ancestor(fparent1, fparent2)
888 fparentancestor = flog.ancestor(fparent1, fparent2)
884 if fparentancestor == fparent1:
889 if fparentancestor == fparent1:
885 fparent1, fparent2 = fparent2, nullid
890 fparent1, fparent2 = fparent2, nullid
886 elif fparentancestor == fparent2:
891 elif fparentancestor == fparent2:
887 fparent2 = nullid
892 fparent2 = nullid
888
893
889 # is the file changed?
894 # is the file changed?
890 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
895 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
891 changelist.append(fname)
896 changelist.append(fname)
892 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
897 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
893
898
894 # are just the flags changed during merge?
899 # are just the flags changed during merge?
895 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
900 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
896 changelist.append(fname)
901 changelist.append(fname)
897
902
898 return fparent1
903 return fparent1
899
904
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0])

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

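    # Illustrative sketch (hypothetical caller, not original source): a
    # minimal programmatic commit through the API above might look like:
    #
    #   node = repo.commit(text='fix parser', user='alice <a@example.org>')
    #   if node is None:
    #       ui.status('nothing changed\n')  # 'repo'/'ui' are placeholders
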
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

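    # Sketch of the lock/transaction idiom used by commitctx() above (an
    # illustration mirroring the code, not a quotation from it):
    #
    #   lock = repo.lock()
    #   try:
    #       tr = repo.transaction("example")
    #       try:
    #           ...                 # write revlogs via weakref.proxy(tr)
    #           tr.close()
    #       finally:
    #           tr.release()        # rolls back unless close() succeeded
    #   finally:
    #       lock.release()
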
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

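    # Illustrative sketch (hypothetical caller):
    #
    #   m = matchmod.always(repo.root, repo.getcwd())
    #   for f in repo.walk(m):           # working directory files
    #       ui.write('%s\n' % f)
    #   for f in repo.walk(m, 'tip'):    # files in the tip changeset
    #       ui.write('%s\n' % f)
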
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

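    # Illustrative sketch (hypothetical caller): the 7-tuple returned by
    # status() always unpacks in this order:
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(ignored=True, clean=True)
    #   for f in modified:
    #       ui.write('M %s\n' % f)
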
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

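    # Illustrative sketch (hypothetical caller):
    #
    #   for h in repo.branchheads('default'):   # newest head first
    #       ui.write('%s\n' % short(h))
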
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

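    # Note on between() (added commentary, not original text): because f
    # doubles every time it matches i, each returned list samples the path
    # from top toward bottom at distances 1, 2, 4, 8, ... -- the
    # exponentially spaced probes used by the old discovery protocol.
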
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            usecommon = remote.capable('getbundle')
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force, commononly=usecommon)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if usecommon:
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result

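    # Illustrative sketch (hypothetical caller; 'hg.repository' is assumed
    # to be imported elsewhere, as this module does not import 'hg'):
    #
    #   other = hg.repository(ui, 'http://example.com/repo')
    #   result = repo.pull(other)        # 0 -> nothing fetched
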
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

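    # Illustrative sketch (hypothetical caller) of interpreting push()'s
    # return value per the docstring above:
    #
    #   ret = repo.push(other)
    #   if ret == 0:
    #       ui.warn('push failed or nothing to push\n')
    #   elif ret > 1:
    #       ui.status('remote gained %d heads\n' % (ret - 1))
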
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        return self._changegroupsubset(common, missing, heads, source)

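    # In set terms (a restatement of the docstring above, not original
    # text): the returned bundle covers ancestors(heads) - ancestors(common).
    # A hypothetical call, with placeholder nodes:
    #
    #   cg = repo.getbundle('pull', heads=[somehead], common=[somenode])
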
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            for n in missing:
                if revlog.linkrev(revlog.rev(n)) not in commonrevs:
                    yield n

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})
                first = True

                for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
                                              bundler):
                    if first:
                        if chunk == bundler.close():
                            break
                        count[0] += 1
                        yield bundler.fileheader(fname)
                        first = False
                    yield chunk
            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

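    # Note (added commentary, not original text): bundle10 calls
    # lookup(revlog, node) for every node it serializes; the value returned
    # is the changelog node that revision must be linked to on the receiving
    # side, which is why the three branches above answer differently for the
    # changelog, the manifest and the file revlogs.
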
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                first = True
                for chunk in filerevlog.group(gennodelst(filerevlog), bundler):
                    if first:
                        if chunk == bundler.close():
                            break
                        count[0] += 1
                        yield bundler.fileheader(fname)
                        first = False
                    yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

1658 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1663 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1659 """Add the changegroup returned by source.read() to this repo.
1664 """Add the changegroup returned by source.read() to this repo.
1660 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1665 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1661 the URL of the repo where this changegroup is coming from.
1666 the URL of the repo where this changegroup is coming from.
1662 If lock is not None, the function takes ownership of the lock
1667 If lock is not None, the function takes ownership of the lock
1663 and releases it after the changegroup is added.
1668 and releases it after the changegroup is added.
1664
1669
1665 Return an integer summarizing the change to this repo:
1670 Return an integer summarizing the change to this repo:
1666 - nothing changed or no source: 0
1671 - nothing changed or no source: 0
1667 - more heads than before: 1+added heads (2..n)
1672 - more heads than before: 1+added heads (2..n)
1668 - fewer heads than before: -1-removed heads (-2..-n)
1673 - fewer heads than before: -1-removed heads (-2..-n)
1669 - number of heads stays the same: 1
1674 - number of heads stays the same: 1
1670 """
1675 """
1671 def csmap(x):
1676 def csmap(x):
1672 self.ui.debug("add changeset %s\n" % short(x))
1677 self.ui.debug("add changeset %s\n" % short(x))
1673 return len(cl)
1678 return len(cl)
1674
1679
1675 def revmap(x):
1680 def revmap(x):
1676 return cl.rev(x)
1681 return cl.rev(x)
1677
1682
1678 if not source:
1683 if not source:
1679 return 0
1684 return 0
1680
1685
1681 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1686 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1682
1687
1683 changesets = files = revisions = 0
1688 changesets = files = revisions = 0
1684 efiles = set()
1689 efiles = set()
1685
1690
1686 # write changelog data to temp files so concurrent readers will not see
1691 # write changelog data to temp files so concurrent readers will not see
1687 # inconsistent view
1692 # inconsistent view
1688 cl = self.changelog
1693 cl = self.changelog
1689 cl.delayupdate()
1694 cl.delayupdate()
1690 oldheads = len(cl.heads())
1695 oldheads = len(cl.heads())
1691
1696
1692 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1697 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1693 try:
1698 try:
1694 trp = weakref.proxy(tr)
1699 trp = weakref.proxy(tr)
1695 # pull off the changeset group
1700 # pull off the changeset group
1696 self.ui.status(_("adding changesets\n"))
1701 self.ui.status(_("adding changesets\n"))
1697 clstart = len(cl)
1702 clstart = len(cl)
1698 class prog(object):
1703 class prog(object):
1699 step = _('changesets')
1704 step = _('changesets')
1700 count = 1
1705 count = 1
1701 ui = self.ui
1706 ui = self.ui
1702 total = None
1707 total = None
1703 def __call__(self):
1708 def __call__(self):
1704 self.ui.progress(self.step, self.count, unit=_('chunks'),
1709 self.ui.progress(self.step, self.count, unit=_('chunks'),
1705 total=self.total)
1710 total=self.total)
1706 self.count += 1
1711 self.count += 1
1707 pr = prog()
1712 pr = prog()
1708 source.callback = pr
1713 source.callback = pr
1709
1714
1710 if (cl.addgroup(source, csmap, trp) is None
1715 if (cl.addgroup(source, csmap, trp) is None
1711 and not emptyok):
1716 and not emptyok):
1712 raise util.Abort(_("received changelog group is empty"))
1717 raise util.Abort(_("received changelog group is empty"))
1713 clend = len(cl)
1718 clend = len(cl)
1714 changesets = clend - clstart
1719 changesets = clend - clstart
1715 for c in xrange(clstart, clend):
1720 for c in xrange(clstart, clend):
1716 efiles.update(self[c].files())
1721 efiles.update(self[c].files())
1717 efiles = len(efiles)
1722 efiles = len(efiles)
1718 self.ui.progress(_('changesets'), None)
1723 self.ui.progress(_('changesets'), None)
1719
1724
1720 # pull off the manifest group
1725 # pull off the manifest group
1721 self.ui.status(_("adding manifests\n"))
1726 self.ui.status(_("adding manifests\n"))
1722 pr.step = _('manifests')
1727 pr.step = _('manifests')
1723 pr.count = 1
1728 pr.count = 1
1724 pr.total = changesets # manifests <= changesets
1729 pr.total = changesets # manifests <= changesets
1725 # no need to check for empty manifest group here:
1730 # no need to check for empty manifest group here:
1726 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1731 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1727 # no new manifest will be created and the manifest group will
1732 # no new manifest will be created and the manifest group will
1728 # be empty during the pull
1733 # be empty during the pull
1729 self.manifest.addgroup(source, revmap, trp)
1734 self.manifest.addgroup(source, revmap, trp)
1730 self.ui.progress(_('manifests'), None)
1735 self.ui.progress(_('manifests'), None)
1731
1736
1732 needfiles = {}
1737 needfiles = {}
1733 if self.ui.configbool('server', 'validate', default=False):
1738 if self.ui.configbool('server', 'validate', default=False):
1734 # validate incoming csets have their manifests
1739 # validate incoming csets have their manifests
1735 for cset in xrange(clstart, clend):
1740 for cset in xrange(clstart, clend):
1736 mfest = self.changelog.read(self.changelog.node(cset))[0]
1741 mfest = self.changelog.read(self.changelog.node(cset))[0]
1737 mfest = self.manifest.readdelta(mfest)
1742 mfest = self.manifest.readdelta(mfest)
1738 # store file nodes we must see
1743 # store file nodes we must see
1739 for f, n in mfest.iteritems():
1744 for f, n in mfest.iteritems():
1740 needfiles.setdefault(f, set()).add(n)
1745 needfiles.setdefault(f, set()).add(n)
1741
1746
1742 # process the files
1747 # process the files
1743 self.ui.status(_("adding file changes\n"))
1748 self.ui.status(_("adding file changes\n"))
1744 pr.step = 'files'
1749 pr.step = 'files'
1745 pr.count = 1
1750 pr.count = 1
1746 pr.total = efiles
1751 pr.total = efiles
1747 source.callback = None
1752 source.callback = None
1748
1753
1749 while 1:
1754 while 1:
1750 f = source.chunk()
1755 f = source.chunk()
1751 if not f:
1756 if not f:
1752 break
1757 break
1753 self.ui.debug("adding %s revisions\n" % f)
1758 self.ui.debug("adding %s revisions\n" % f)
1754 pr()
1759 pr()
1755 fl = self.file(f)
1760 fl = self.file(f)
1756 o = len(fl)
1761 o = len(fl)
1757 if fl.addgroup(source, revmap, trp) is None:
1762 if fl.addgroup(source, revmap, trp) is None:
1758 raise util.Abort(_("received file revlog group is empty"))
1763 raise util.Abort(_("received file revlog group is empty"))
1759 revisions += len(fl) - o
1764 revisions += len(fl) - o
1760 files += 1
1765 files += 1
1761 if f in needfiles:
1766 if f in needfiles:
1762 needs = needfiles[f]
1767 needs = needfiles[f]
1763 for new in xrange(o, len(fl)):
1768 for new in xrange(o, len(fl)):
1764 n = fl.node(new)
1769 n = fl.node(new)
1765 if n in needs:
1770 if n in needs:
1766 needs.remove(n)
1771 needs.remove(n)
1767 if not needs:
1772 if not needs:
1768 del needfiles[f]
1773 del needfiles[f]
1769 self.ui.progress(_('files'), None)
1774 self.ui.progress(_('files'), None)
1770
1775
1771 for f, needs in needfiles.iteritems():
1776 for f, needs in needfiles.iteritems():
1772 fl = self.file(f)
1777 fl = self.file(f)
1773 for n in needs:
1778 for n in needs:
1774 try:
1779 try:
1775 fl.rev(n)
1780 fl.rev(n)
1776 except error.LookupError:
1781 except error.LookupError:
1777 raise util.Abort(
1782 raise util.Abort(
1778 _('missing file data for %s:%s - run hg verify') %
1783 _('missing file data for %s:%s - run hg verify') %
1779 (f, hex(n)))
1784 (f, hex(n)))
1780
1785
1781 newheads = len(cl.heads())
1786 newheads = len(cl.heads())
1782 heads = ""
1787 heads = ""
1783 if oldheads and newheads != oldheads:
1788 if oldheads and newheads != oldheads:
1784 heads = _(" (%+d heads)") % (newheads - oldheads)
1789 heads = _(" (%+d heads)") % (newheads - oldheads)
1785
1790
1786 self.ui.status(_("added %d changesets"
1791 self.ui.status(_("added %d changesets"
1787 " with %d changes to %d files%s\n")
1792 " with %d changes to %d files%s\n")
1788 % (changesets, revisions, files, heads))
1793 % (changesets, revisions, files, heads))
1789
1794
1790 if changesets > 0:
1795 if changesets > 0:
1791 p = lambda: cl.writepending() and self.root or ""
1796 p = lambda: cl.writepending() and self.root or ""
1792 self.hook('pretxnchangegroup', throw=True,
1797 self.hook('pretxnchangegroup', throw=True,
1793 node=hex(cl.node(clstart)), source=srctype,
1798 node=hex(cl.node(clstart)), source=srctype,
1794 url=url, pending=p)
1799 url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
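        # The return convention, worked through: growing from 1 head to 3
        # returns 3 - 1 + 1 == 3 (new heads appeared); an unchanged head
        # count returns 1 (changesets added, topology unchanged); dropping
        # from 3 heads to 1 returns 1 - 3 - 1 == -3.  Zero is reserved for
        # "no changes", which is why it is never returned here.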

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new
            # format-related requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
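    # The stream_out wire format consumed above, summarized from the reads:
    #   <status>\n                       0 = ok, 1 = forbidden, 2 = lock failed
    #   <total files> <total bytes>\n
    # then, for each file:
    #   <store path>\0<size>\n           followed by exactly <size> raw bytes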

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
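    # A minimal usage sketch (the paths and the hg.repository helper are
    # illustrative):
    #   remote = hg.repository(ui, 'http://example.com/repo')
    #   dest = localrepository(ui, '/tmp/clone', create=1)
    #   dest.clone(remote, stream=True)   # falls back to pull() as needed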

    def pushkey(self, namespace, key, old, new):
        return pushkey.push(self, namespace, key, old, new)

    def listkeys(self, namespace):
        return pushkey.list(self, namespace)

    def debugwireargs(self, one, two, three=None, four=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s" % (one, two, three, four)
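    # 'bookmarks' is one of the pushkey namespaces registered by default;
    # illustrative values:
    #   repo.listkeys('bookmarks')           -> {'mybook': '<40-hex node>'}
    #   repo.pushkey('bookmarks', 'mybook', oldhex, newhex)  -> True/False
    # See mercurial.pushkey for the namespace dispatch table.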

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
1928 def instance(ui, path, create):
1933 def instance(ui, path, create):
1929 return localrepository(ui, urlmod.localpath(path), create)
1934 return localrepository(ui, urlmod.localpath(path), create)
1930
1935
1931 def islocal(path):
1936 def islocal(path):
1932 return True
1937 return True